aws-sdk-s3-1.143.0/0000755000004100000410000000000014563445240013613 5ustar www-datawww-dataaws-sdk-s3-1.143.0/aws-sdk-s3.gemspec0000644000004100000410000001643414563445240017064 0ustar www-datawww-data######################################################### # This file has been automatically generated by gem2tgz # ######################################################### # -*- encoding: utf-8 -*- # stub: aws-sdk-s3 1.143.0 ruby lib Gem::Specification.new do |s| s.name = "aws-sdk-s3".freeze s.version = "1.143.0" s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= s.metadata = { "changelog_uri" => "https://github.com/aws/aws-sdk-ruby/tree/version-3/gems/aws-sdk-s3/CHANGELOG.md", "source_code_uri" => "https://github.com/aws/aws-sdk-ruby/tree/version-3/gems/aws-sdk-s3" } if s.respond_to? :metadata= s.require_paths = ["lib".freeze] s.authors = ["Amazon Web Services".freeze] s.date = "2024-01-26" s.description = "Official AWS Ruby gem for Amazon Simple Storage Service (Amazon S3). This gem is part of the AWS SDK for Ruby.".freeze s.email = ["aws-dr-rubygems@amazon.com".freeze] s.files = ["CHANGELOG.md".freeze, "LICENSE.txt".freeze, "VERSION".freeze, "lib/aws-sdk-s3.rb".freeze, "lib/aws-sdk-s3/bucket.rb".freeze, "lib/aws-sdk-s3/bucket_acl.rb".freeze, "lib/aws-sdk-s3/bucket_cors.rb".freeze, "lib/aws-sdk-s3/bucket_lifecycle.rb".freeze, "lib/aws-sdk-s3/bucket_lifecycle_configuration.rb".freeze, "lib/aws-sdk-s3/bucket_logging.rb".freeze, "lib/aws-sdk-s3/bucket_notification.rb".freeze, "lib/aws-sdk-s3/bucket_policy.rb".freeze, "lib/aws-sdk-s3/bucket_region_cache.rb".freeze, "lib/aws-sdk-s3/bucket_request_payment.rb".freeze, "lib/aws-sdk-s3/bucket_tagging.rb".freeze, "lib/aws-sdk-s3/bucket_versioning.rb".freeze, "lib/aws-sdk-s3/bucket_website.rb".freeze, "lib/aws-sdk-s3/client.rb".freeze, "lib/aws-sdk-s3/client_api.rb".freeze, "lib/aws-sdk-s3/customizations.rb".freeze, "lib/aws-sdk-s3/customizations/bucket.rb".freeze, "lib/aws-sdk-s3/customizations/errors.rb".freeze, "lib/aws-sdk-s3/customizations/multipart_upload.rb".freeze, "lib/aws-sdk-s3/customizations/object.rb".freeze, "lib/aws-sdk-s3/customizations/object_summary.rb".freeze, "lib/aws-sdk-s3/customizations/types/list_object_versions_output.rb".freeze, "lib/aws-sdk-s3/customizations/types/permanent_redirect.rb".freeze, "lib/aws-sdk-s3/encryption.rb".freeze, "lib/aws-sdk-s3/encryption/client.rb".freeze, "lib/aws-sdk-s3/encryption/decrypt_handler.rb".freeze, "lib/aws-sdk-s3/encryption/default_cipher_provider.rb".freeze, "lib/aws-sdk-s3/encryption/default_key_provider.rb".freeze, "lib/aws-sdk-s3/encryption/encrypt_handler.rb".freeze, "lib/aws-sdk-s3/encryption/errors.rb".freeze, "lib/aws-sdk-s3/encryption/io_auth_decrypter.rb".freeze, "lib/aws-sdk-s3/encryption/io_decrypter.rb".freeze, "lib/aws-sdk-s3/encryption/io_encrypter.rb".freeze, "lib/aws-sdk-s3/encryption/key_provider.rb".freeze, "lib/aws-sdk-s3/encryption/kms_cipher_provider.rb".freeze, "lib/aws-sdk-s3/encryption/materials.rb".freeze, "lib/aws-sdk-s3/encryption/utils.rb".freeze, "lib/aws-sdk-s3/encryptionV2/client.rb".freeze, "lib/aws-sdk-s3/encryptionV2/decrypt_handler.rb".freeze, "lib/aws-sdk-s3/encryptionV2/default_cipher_provider.rb".freeze, "lib/aws-sdk-s3/encryptionV2/default_key_provider.rb".freeze, "lib/aws-sdk-s3/encryptionV2/encrypt_handler.rb".freeze, "lib/aws-sdk-s3/encryptionV2/errors.rb".freeze, "lib/aws-sdk-s3/encryptionV2/io_auth_decrypter.rb".freeze, "lib/aws-sdk-s3/encryptionV2/io_decrypter.rb".freeze, 
"lib/aws-sdk-s3/encryptionV2/io_encrypter.rb".freeze, "lib/aws-sdk-s3/encryptionV2/key_provider.rb".freeze, "lib/aws-sdk-s3/encryptionV2/kms_cipher_provider.rb".freeze, "lib/aws-sdk-s3/encryptionV2/materials.rb".freeze, "lib/aws-sdk-s3/encryptionV2/utils.rb".freeze, "lib/aws-sdk-s3/encryption_v2.rb".freeze, "lib/aws-sdk-s3/endpoint_parameters.rb".freeze, "lib/aws-sdk-s3/endpoint_provider.rb".freeze, "lib/aws-sdk-s3/endpoints.rb".freeze, "lib/aws-sdk-s3/errors.rb".freeze, "lib/aws-sdk-s3/event_streams.rb".freeze, "lib/aws-sdk-s3/express_credentials.rb".freeze, "lib/aws-sdk-s3/express_credentials_cache.rb".freeze, "lib/aws-sdk-s3/express_credentials_provider.rb".freeze, "lib/aws-sdk-s3/file_downloader.rb".freeze, "lib/aws-sdk-s3/file_part.rb".freeze, "lib/aws-sdk-s3/file_uploader.rb".freeze, "lib/aws-sdk-s3/legacy_signer.rb".freeze, "lib/aws-sdk-s3/multipart_file_uploader.rb".freeze, "lib/aws-sdk-s3/multipart_stream_uploader.rb".freeze, "lib/aws-sdk-s3/multipart_upload.rb".freeze, "lib/aws-sdk-s3/multipart_upload_error.rb".freeze, "lib/aws-sdk-s3/multipart_upload_part.rb".freeze, "lib/aws-sdk-s3/object.rb".freeze, "lib/aws-sdk-s3/object_acl.rb".freeze, "lib/aws-sdk-s3/object_copier.rb".freeze, "lib/aws-sdk-s3/object_multipart_copier.rb".freeze, "lib/aws-sdk-s3/object_summary.rb".freeze, "lib/aws-sdk-s3/object_version.rb".freeze, "lib/aws-sdk-s3/plugins/accelerate.rb".freeze, "lib/aws-sdk-s3/plugins/arn.rb".freeze, "lib/aws-sdk-s3/plugins/bucket_dns.rb".freeze, "lib/aws-sdk-s3/plugins/bucket_name_restrictions.rb".freeze, "lib/aws-sdk-s3/plugins/dualstack.rb".freeze, "lib/aws-sdk-s3/plugins/endpoints.rb".freeze, "lib/aws-sdk-s3/plugins/expect_100_continue.rb".freeze, "lib/aws-sdk-s3/plugins/express_session_auth.rb".freeze, "lib/aws-sdk-s3/plugins/get_bucket_location_fix.rb".freeze, "lib/aws-sdk-s3/plugins/http_200_errors.rb".freeze, "lib/aws-sdk-s3/plugins/iad_regional_endpoint.rb".freeze, "lib/aws-sdk-s3/plugins/location_constraint.rb".freeze, "lib/aws-sdk-s3/plugins/md5s.rb".freeze, "lib/aws-sdk-s3/plugins/redirects.rb".freeze, "lib/aws-sdk-s3/plugins/s3_host_id.rb".freeze, "lib/aws-sdk-s3/plugins/s3_signer.rb".freeze, "lib/aws-sdk-s3/plugins/skip_whole_multipart_get_checksums.rb".freeze, "lib/aws-sdk-s3/plugins/sse_cpk.rb".freeze, "lib/aws-sdk-s3/plugins/streaming_retry.rb".freeze, "lib/aws-sdk-s3/plugins/url_encoded_keys.rb".freeze, "lib/aws-sdk-s3/presigned_post.rb".freeze, "lib/aws-sdk-s3/presigner.rb".freeze, "lib/aws-sdk-s3/resource.rb".freeze, "lib/aws-sdk-s3/types.rb".freeze, "lib/aws-sdk-s3/waiters.rb".freeze, "sig/bucket.rbs".freeze, "sig/bucket_acl.rbs".freeze, "sig/bucket_cors.rbs".freeze, "sig/bucket_lifecycle.rbs".freeze, "sig/bucket_lifecycle_configuration.rbs".freeze, "sig/bucket_logging.rbs".freeze, "sig/bucket_notification.rbs".freeze, "sig/bucket_policy.rbs".freeze, "sig/bucket_request_payment.rbs".freeze, "sig/bucket_tagging.rbs".freeze, "sig/bucket_versioning.rbs".freeze, "sig/bucket_website.rbs".freeze, "sig/client.rbs".freeze, "sig/errors.rbs".freeze, "sig/multipart_upload.rbs".freeze, "sig/multipart_upload_part.rbs".freeze, "sig/object.rbs".freeze, "sig/object_acl.rbs".freeze, "sig/object_summary.rbs".freeze, "sig/object_version.rbs".freeze, "sig/resource.rbs".freeze, "sig/types.rbs".freeze, "sig/waiters.rbs".freeze] s.homepage = "https://github.com/aws/aws-sdk-ruby".freeze s.licenses = ["Apache-2.0".freeze] s.required_ruby_version = Gem::Requirement.new(">= 2.5".freeze) s.rubygems_version = "3.3.15".freeze s.summary = "AWS SDK for Ruby - Amazon S3".freeze if 
s.respond_to? :specification_version then
    s.specification_version = 4
  end

  if s.respond_to? :add_runtime_dependency then
    s.add_runtime_dependency(%q<aws-sdk-core>.freeze, ["~> 3", ">= 3.191.0"])
    s.add_runtime_dependency(%q<aws-sdk-kms>.freeze, ["~> 1"])
    s.add_runtime_dependency(%q<aws-sigv4>.freeze, ["~> 1.8"])
  else
    s.add_dependency(%q<aws-sdk-core>.freeze, ["~> 3", ">= 3.191.0"])
    s.add_dependency(%q<aws-sdk-kms>.freeze, ["~> 1"])
    s.add_dependency(%q<aws-sigv4>.freeze, ["~> 1.8"])
  end
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3.rb =====
# frozen_string_literal: true

# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE

require 'aws-sdk-kms'
require 'aws-sigv4'
require 'aws-sdk-core'

require_relative 'aws-sdk-s3/types'
require_relative 'aws-sdk-s3/client_api'
require_relative 'aws-sdk-s3/plugins/endpoints.rb'
require_relative 'aws-sdk-s3/client'
require_relative 'aws-sdk-s3/errors'
require_relative 'aws-sdk-s3/waiters'
require_relative 'aws-sdk-s3/resource'
require_relative 'aws-sdk-s3/endpoint_parameters'
require_relative 'aws-sdk-s3/endpoint_provider'
require_relative 'aws-sdk-s3/endpoints'
require_relative 'aws-sdk-s3/bucket'
require_relative 'aws-sdk-s3/bucket_acl'
require_relative 'aws-sdk-s3/bucket_cors'
require_relative 'aws-sdk-s3/bucket_lifecycle'
require_relative 'aws-sdk-s3/bucket_lifecycle_configuration'
require_relative 'aws-sdk-s3/bucket_logging'
require_relative 'aws-sdk-s3/bucket_notification'
require_relative 'aws-sdk-s3/bucket_policy'
require_relative 'aws-sdk-s3/bucket_request_payment'
require_relative 'aws-sdk-s3/bucket_tagging'
require_relative 'aws-sdk-s3/bucket_versioning'
require_relative 'aws-sdk-s3/bucket_website'
require_relative 'aws-sdk-s3/multipart_upload'
require_relative 'aws-sdk-s3/multipart_upload_part'
require_relative 'aws-sdk-s3/object'
require_relative 'aws-sdk-s3/object_acl'
require_relative 'aws-sdk-s3/object_summary'
require_relative 'aws-sdk-s3/object_version'
require_relative 'aws-sdk-s3/customizations'
require_relative 'aws-sdk-s3/event_streams'

# This module provides support for Amazon Simple Storage Service. This module is available in the
# `aws-sdk-s3` gem.
#
# # Client
#
# The {Client} class provides one method for each API operation. Operation
# methods each accept a hash of request parameters and return a response
# structure.
#
#     s3 = Aws::S3::Client.new
#     resp = s3.abort_multipart_upload(params)
#
# See {Client} for more information.
#
# # Errors
#
# Errors returned from Amazon Simple Storage Service are defined in the
# {Errors} module and all extend {Errors::ServiceError}.
#
#     begin
#       # do stuff
#     rescue Aws::S3::Errors::ServiceError
#       # rescues all Amazon Simple Storage Service API errors
#     end
#
# See {Errors} for more information.
#
# @!group service
module Aws::S3
  GEM_VERSION = '1.143.0'
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/waiters.rb =====
# frozen_string_literal: true

# WARNING ABOUT GENERATED CODE
#
# This file is generated.
# See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE

require 'aws-sdk-core/waiters'

module Aws::S3
  # Waiters are utility methods that poll for a particular state to occur
  # on a client. Waiters can fail after a number of attempts at a polling
  # interval defined for the service client.
  #
  # For a list of operations that can be waited for and the
  # client methods called for each operation, see the table below or the
  # {Client#wait_until} field documentation for the {Client}.
  #
  # # Invoking a Waiter
  # To invoke a waiter, call #wait_until on a {Client}. The first parameter
  # is the waiter name, which is specific to the service client and indicates
  # which operation is being waited for. The second parameter is a hash of
  # parameters that are passed to the client method called by the waiter,
  # which varies according to the waiter name.
  #
  # # Wait Failures
  # To catch errors in a waiter, rescue `Aws::Waiters::Errors::WaiterFailed`,
  # as shown in the following example.
  #
  #     begin
  #       client.wait_until(...)
  #     rescue Aws::Waiters::Errors::WaiterFailed => error
  #       puts "failed waiting for instance running: #{error.message}"
  #     end
  #
  # # Configuring a Waiter
  # Each waiter has a default polling interval and a maximum number of
  # attempts it will make before returning control to your program.
  # To set these values, use the `max_attempts` and `delay` parameters
  # in your `#wait_until` call.
  # The following example waits for up to 25 seconds, polling every five seconds.
  #
  #     client.wait_until(...) do |w|
  #       w.max_attempts = 5
  #       w.delay = 5
  #     end
  #
  # To disable wait failures, set the value of either of these parameters
  # to `nil`.
  #
  # # Extending a Waiter
  # To modify the behavior of waiters, you can register callbacks that are
  # triggered before each polling attempt and before waiting.
  #
  # The following example implements an exponential backoff in a waiter
  # by doubling the amount of time to wait on every attempt.
  #
  #     client.wait_until(...) do |w|
  #       w.interval = 0 # disable normal sleep
  #       w.before_wait do |n, resp|
  #         sleep(n ** 2)
  #       end
  #     end
  #
  # # Available Waiters
  #
  # The following table lists the valid waiter names, the operations they call,
  # and the default `:delay` and `:max_attempts` values.
# # | waiter_name | params | :delay | :max_attempts | # | ----------------- | -------------------- | -------- | ------------- | # | bucket_exists | {Client#head_bucket} | 5 | 20 | # | bucket_not_exists | {Client#head_bucket} | 5 | 20 | # | object_exists | {Client#head_object} | 5 | 20 | # | object_not_exists | {Client#head_object} | 5 | 20 | # module Waiters class BucketExists # @param [Hash] options # @option options [required, Client] :client # @option options [Integer] :max_attempts (20) # @option options [Integer] :delay (5) # @option options [Proc] :before_attempt # @option options [Proc] :before_wait def initialize(options) @client = options.fetch(:client) @waiter = Aws::Waiters::Waiter.new({ max_attempts: 20, delay: 5, poller: Aws::Waiters::Poller.new( operation_name: :head_bucket, acceptors: [ { "expected" => 200, "matcher" => "status", "state" => "success" }, { "expected" => 301, "matcher" => "status", "state" => "success" }, { "expected" => 403, "matcher" => "status", "state" => "success" }, { "expected" => 404, "matcher" => "status", "state" => "retry" } ] ) }.merge(options)) end # @option (see Client#head_bucket) # @return (see Client#head_bucket) def wait(params = {}) @waiter.wait(client: @client, params: params) end # @api private attr_reader :waiter end class BucketNotExists # @param [Hash] options # @option options [required, Client] :client # @option options [Integer] :max_attempts (20) # @option options [Integer] :delay (5) # @option options [Proc] :before_attempt # @option options [Proc] :before_wait def initialize(options) @client = options.fetch(:client) @waiter = Aws::Waiters::Waiter.new({ max_attempts: 20, delay: 5, poller: Aws::Waiters::Poller.new( operation_name: :head_bucket, acceptors: [{ "expected" => 404, "matcher" => "status", "state" => "success" }] ) }.merge(options)) end # @option (see Client#head_bucket) # @return (see Client#head_bucket) def wait(params = {}) @waiter.wait(client: @client, params: params) end # @api private attr_reader :waiter end class ObjectExists # @param [Hash] options # @option options [required, Client] :client # @option options [Integer] :max_attempts (20) # @option options [Integer] :delay (5) # @option options [Proc] :before_attempt # @option options [Proc] :before_wait def initialize(options) @client = options.fetch(:client) @waiter = Aws::Waiters::Waiter.new({ max_attempts: 20, delay: 5, poller: Aws::Waiters::Poller.new( operation_name: :head_object, acceptors: [ { "expected" => 200, "matcher" => "status", "state" => "success" }, { "expected" => 404, "matcher" => "status", "state" => "retry" } ] ) }.merge(options)) end # @option (see Client#head_object) # @return (see Client#head_object) def wait(params = {}) @waiter.wait(client: @client, params: params) end # @api private attr_reader :waiter end class ObjectNotExists # @param [Hash] options # @option options [required, Client] :client # @option options [Integer] :max_attempts (20) # @option options [Integer] :delay (5) # @option options [Proc] :before_attempt # @option options [Proc] :before_wait def initialize(options) @client = options.fetch(:client) @waiter = Aws::Waiters::Waiter.new({ max_attempts: 20, delay: 5, poller: Aws::Waiters::Poller.new( operation_name: :head_object, acceptors: [{ "expected" => 404, "matcher" => "status", "state" => "success" }] ) }.merge(options)) end # @option (see Client#head_object) # @return (see Client#head_object) def wait(params = {}) @waiter.wait(client: @client, params: params) end # @api private attr_reader :waiter end end end 
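# A minimal usage sketch for the waiters above, combining the invocation and
# configuration patterns from the docs (the bucket and key names are
# illustrative assumptions, not part of the gem):
#
#     s3 = Aws::S3::Client.new
#     begin
#       s3.wait_until(:object_exists, bucket: 'my-bucket', key: 'my-key') do |w|
#         w.max_attempts = 10 # up to ~50 seconds total
#         w.delay = 5
#       end
#     rescue Aws::Waiters::Errors::WaiterFailed => e
#       puts "object never appeared: #{e.message}"
#     end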
# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryption/io_decrypter.rb =====
# frozen_string_literal: true

module Aws
  module S3
    module Encryption
      # @api private
      class IODecrypter

        # @param [OpenSSL::Cipher] cipher
        # @param [IO#write] io An IO-like object that responds to `#write`.
        def initialize(cipher, io)
          @cipher = cipher
          # Ensure that IO is reset between retries
          @io = io.tap { |io| io.truncate(0) if io.respond_to?(:truncate) }
          @cipher_buffer = String.new
        end

        # @return [#write]
        attr_reader :io

        def write(chunk)
          # decrypt and write
          if @cipher.method(:update).arity == 1
            @io.write(@cipher.update(chunk))
          else
            @io.write(@cipher.update(chunk, @cipher_buffer))
          end
        end

        def finalize
          @io.write(@cipher.final)
        end

      end
    end
  end
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryption/io_encrypter.rb =====
# frozen_string_literal: true

require 'stringio'
require 'tempfile'

module Aws
  module S3
    module Encryption

      # Provides an IO wrapper encrypting a stream of data.
      # It is possible to use this same object for decrypting. You must
      # initialize it with a decryption cipher in that case and the
      # IO object must contain cipher text instead of plain text.
      # @api private
      class IOEncrypter

        # @api private
        ONE_MEGABYTE = 1024 * 1024

        def initialize(cipher, io)
          @encrypted = io.size <= ONE_MEGABYTE ?
            encrypt_to_stringio(cipher, io.read) :
            encrypt_to_tempfile(cipher, io)
          @size = @encrypted.size
        end

        # @return [Integer]
        attr_reader :size

        def read(bytes = nil, output_buffer = nil)
          if Tempfile === @encrypted && @encrypted.closed?
            @encrypted.open
            @encrypted.binmode
          end
          @encrypted.read(bytes, output_buffer)
        end

        def rewind
          @encrypted.rewind
        end

        # @api private
        def close
          @encrypted.close if Tempfile === @encrypted
        end

        private

        def encrypt_to_stringio(cipher, plain_text)
          if plain_text.empty?
            StringIO.new(cipher.final)
          else
            StringIO.new(cipher.update(plain_text) + cipher.final)
          end
        end

        def encrypt_to_tempfile(cipher, io)
          encrypted = Tempfile.new(self.object_id.to_s)
          encrypted.binmode
          while chunk = io.read(ONE_MEGABYTE)
            encrypted.write(cipher.update(chunk))
          end
          encrypted.write(cipher.final)
          encrypted.rewind
          encrypted
        end

      end
    end
  end
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryption/encrypt_handler.rb =====
# frozen_string_literal: true

require 'base64'

module Aws
  module S3
    module Encryption
      # @api private
      class EncryptHandler < Seahorse::Client::Handler

        def call(context)
          envelope, cipher = context[:encryption][:cipher_provider].encryption_cipher
          apply_encryption_envelope(context, envelope, cipher)
          apply_encryption_cipher(context, cipher)
          apply_cse_user_agent(context)
          @handler.call(context)
        end

        private

        def apply_encryption_envelope(context, envelope, cipher)
          context[:encryption][:cipher] = cipher
          if context[:encryption][:envelope_location] == :metadata
            context.params[:metadata] ||= {}
            context.params[:metadata].update(envelope)
          else # :instruction_file
            suffix = context[:encryption][:instruction_file_suffix]
            context.client.put_object(
              bucket: context.params[:bucket],
              key: context.params[:key] + suffix,
              body: Json.dump(envelope)
            )
          end
        end

        def apply_encryption_cipher(context, cipher)
          io = context.params[:body] || ''
          io = StringIO.new(io) if String === io
          context.params[:body] = IOEncrypter.new(cipher, io)
          context.params[:metadata] ||= {}
          context.params[:metadata]['x-amz-unencrypted-content-length'] = io.size
          if context.params.delete(:content_md5)
            warn('Setting content_md5 on client side encrypted objects is deprecated')
          end
          context.http_response.on_headers do
            context.params[:body].close
          end
        end

        def apply_cse_user_agent(context)
          if context.config.user_agent_suffix.nil?
            context.config.user_agent_suffix = EC_USER_AGENT
          elsif !context.config.user_agent_suffix.include? EC_USER_AGENT
            context.config.user_agent_suffix += " #{EC_USER_AGENT}"
          end
        end

      end
    end
  end
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryption/materials.rb =====
# frozen_string_literal: true

require 'base64'

module Aws
  module S3
    module Encryption
      class Materials

        # @option options [required, OpenSSL::PKey::RSA, String] :key
        #   The master key to use for encrypting/decrypting all objects.
        #
        # @option options [String] :description ('{}')
        #   The encryption materials description. This must be
        #   a JSON document string.
        #
        def initialize(options = {})
          @key = validate_key(options[:key])
          @description = validate_desc(options[:description])
        end

        # @return [OpenSSL::PKey::RSA, String]
        attr_reader :key

        # @return [String]
        attr_reader :description

        private

        def validate_key(key)
          case key
          when OpenSSL::PKey::RSA then key
          when String
            if [32, 24, 16].include?(key.bytesize)
              key
            else
              msg = 'invalid key, symmetric key required to be 16, 24, or '\
                    '32 bytes in length, saw length ' + key.bytesize.to_s
              raise ArgumentError, msg
            end
          else
            msg = 'invalid encryption key, expected an OpenSSL::PKey::RSA key '\
                  '(for asymmetric encryption) or a String (for symmetric '\
                  'encryption).'
            raise ArgumentError, msg
          end
        end

        def validate_desc(description)
          Json.load(description)
          description
        rescue Json::ParseError, EncodingError
          msg = 'expected description to be a valid JSON document string'
          raise ArgumentError, msg
        end

      end
    end
  end
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryption/default_key_provider.rb =====
# frozen_string_literal: true

module Aws
  module S3
    module Encryption

      # The default key provider is constructed with a single key
      # that is used for both encryption and decryption, ignoring
      # the possible per-object envelope encryption materials description.
      # @api private
      class DefaultKeyProvider

        include KeyProvider

        # @option options [required, OpenSSL::PKey::RSA, String] :encryption_key
        #   The master key to use for encrypting objects.
        # @option options [String] :materials_description ('{}')
        #   A description of the encryption key.
        def initialize(options = {})
          @encryption_materials = Materials.new(
            key: options[:encryption_key],
            description: options[:materials_description] || '{}'
          )
        end

        # @return [Materials]
        def encryption_materials
          @encryption_materials
        end

        # @param [String] materials_description
        # @return Returns the key given in the constructor.
        def key_for(materials_description)
          @encryption_materials.key
        end

      end
    end
  end
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryption/decrypt_handler.rb =====
# frozen_string_literal: true

require 'base64'

module Aws
  module S3
    module Encryption
      # @api private
      class DecryptHandler < Seahorse::Client::Handler
        @@warned_response_target_proc = false

        V1_ENVELOPE_KEYS = %w(
          x-amz-key
          x-amz-iv
          x-amz-matdesc
        )

        V2_ENVELOPE_KEYS = %w(
          x-amz-key-v2
          x-amz-iv
          x-amz-cek-alg
          x-amz-wrap-alg
          x-amz-matdesc
        )

        V2_OPTIONAL_KEYS = %w(x-amz-tag-len)

        POSSIBLE_ENVELOPE_KEYS = (V1_ENVELOPE_KEYS +
          V2_ENVELOPE_KEYS + V2_OPTIONAL_KEYS).uniq

        POSSIBLE_WRAPPING_FORMATS = %w(
          AES/GCM
          kms
          kms+context
          RSA-OAEP-SHA1
        )

        POSSIBLE_ENCRYPTION_FORMATS = %w(
          AES/GCM/NoPadding
          AES/CBC/PKCS5Padding
          AES/CBC/PKCS7Padding
        )

        AUTH_REQUIRED_CEK_ALGS = %w(AES/GCM/NoPadding)

        def call(context)
          attach_http_event_listeners(context)
          apply_cse_user_agent(context)

          if context[:response_target].is_a?(Proc) && !@@warned_response_target_proc
            @@warned_response_target_proc = true
            warn(':response_target is a Proc, or a block was provided. ' \
                 'Read the entire object to the ' \
                 'end before you start using the decrypted data. This is to ' \
                 'verify that the object has not been modified since it ' \
                 'was encrypted.')
          end

          @handler.call(context)
        end

        private

        def attach_http_event_listeners(context)
          context.http_response.on_headers(200) do
            cipher, envelope = decryption_cipher(context)
            decrypter = body_contains_auth_tag?(envelope) ?
              authenticated_decrypter(context, cipher, envelope) :
              IODecrypter.new(cipher, context.http_response.body)
            context.http_response.body = decrypter
          end
          context.http_response.on_success(200) do
            decrypter = context.http_response.body
            decrypter.finalize
            decrypter.io.rewind if decrypter.io.respond_to?(:rewind)
            context.http_response.body = decrypter.io
          end
          context.http_response.on_error do
            if context.http_response.body.respond_to?(:io)
              context.http_response.body = context.http_response.body.io
            end
          end
        end

        def decryption_cipher(context)
          if (envelope = get_encryption_envelope(context))
            cipher = context[:encryption][:cipher_provider]
                     .decryption_cipher(
                       envelope,
                       context[:encryption]
                     )
            [cipher, envelope]
          else
            raise Errors::DecryptionError, "unable to locate encryption envelope"
          end
        end

        def get_encryption_envelope(context)
          if context[:encryption][:envelope_location] == :metadata
            envelope_from_metadata(context) || envelope_from_instr_file(context)
          else
            envelope_from_instr_file(context) || envelope_from_metadata(context)
          end
        end

        def envelope_from_metadata(context)
          possible_envelope = {}
          POSSIBLE_ENVELOPE_KEYS.each do |suffix|
            if value = context.http_response.headers["x-amz-meta-#{suffix}"]
              possible_envelope[suffix] = value
            end
          end
          extract_envelope(possible_envelope)
        end

        def envelope_from_instr_file(context)
          suffix = context[:encryption][:instruction_file_suffix]
          possible_envelope = Json.load(context.client.get_object(
            bucket: context.params[:bucket],
            key: context.params[:key] + suffix
          ).body.read)
          extract_envelope(possible_envelope)
        rescue S3::Errors::ServiceError, Json::ParseError
          nil
        end

        def extract_envelope(hash)
          return nil unless hash
          return v1_envelope(hash) if hash.key?('x-amz-key')
          return v2_envelope(hash) if hash.key?('x-amz-key-v2')
          if hash.keys.any? { |key| key.match(/^x-amz-key-(.+)$/) }
            msg = "unsupported envelope encryption version #{$1}"
            raise Errors::DecryptionError, msg
          end
        end

        def v1_envelope(envelope)
          envelope
        end

        def v2_envelope(envelope)
          unless POSSIBLE_ENCRYPTION_FORMATS.include? envelope['x-amz-cek-alg']
            alg = envelope['x-amz-cek-alg'].inspect
            msg = "unsupported content encrypting key (cek) format: #{alg}"
            raise Errors::DecryptionError, msg
          end
          unless POSSIBLE_WRAPPING_FORMATS.include? envelope['x-amz-wrap-alg']
            alg = envelope['x-amz-wrap-alg'].inspect
            msg = "unsupported key wrapping algorithm: #{alg}"
            raise Errors::DecryptionError, msg
          end
          unless (missing_keys = V2_ENVELOPE_KEYS - envelope.keys).empty?
            msg = "incomplete v2 encryption envelope:\n"
            msg += "  missing: #{missing_keys.join(',')}\n"
            raise Errors::DecryptionError, msg
          end
          envelope
        end

        # This method fetches the tag from the end of the object by
        # making a GET Object w/range request. This auth tag is used
        # to initialize the cipher, and the decrypter truncates the
        # auth tag from the body when writing the final bytes.
        def authenticated_decrypter(context, cipher, envelope)
          http_resp = context.http_response
          content_length = http_resp.headers['content-length'].to_i
          auth_tag_length = auth_tag_length(envelope)

          auth_tag = context.client.get_object(
            bucket: context.params[:bucket],
            key: context.params[:key],
            range: "bytes=-#{auth_tag_length}"
          ).body.read

          cipher.auth_tag = auth_tag
          cipher.auth_data = ''

          # The encrypted object contains both the cipher text
          # plus a trailing auth tag.
          IOAuthDecrypter.new(
            io: http_resp.body,
            encrypted_content_length: content_length - auth_tag_length,
            cipher: cipher)
        end

        def body_contains_auth_tag?(envelope)
          AUTH_REQUIRED_CEK_ALGS.include?(envelope['x-amz-cek-alg'])
        end

        # Determine the auth tag length from the algorithm
        # Validate it against the value provided in the x-amz-tag-len
        # Return the tag length in bytes
        def auth_tag_length(envelope)
          tag_length =
            case envelope['x-amz-cek-alg']
            when 'AES/GCM/NoPadding' then AES_GCM_TAG_LEN_BYTES
            else
              raise ArgumentError, 'Unsupported cek-alg: ' \
                                   "#{envelope['x-amz-cek-alg']}"
            end
          if (tag_length * 8) != envelope['x-amz-tag-len'].to_i
            raise Errors::DecryptionError, 'x-amz-tag-len does not match expected'
          end
          tag_length
        end

        def apply_cse_user_agent(context)
          if context.config.user_agent_suffix.nil?
            context.config.user_agent_suffix = EC_USER_AGENT
          elsif !context.config.user_agent_suffix.include? EC_USER_AGENT
            context.config.user_agent_suffix += " #{EC_USER_AGENT}"
          end
        end

      end
    end
  end
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryption/utils.rb =====
# frozen_string_literal: true

require 'openssl'

module Aws
  module S3
    module Encryption
      # @api private
      module Utils

        UNSAFE_MSG = "unsafe encryption, data is longer than key length"

        class << self

          def encrypt(key, data)
            case key
            when OpenSSL::PKey::RSA # asymmetric encryption
              warn(UNSAFE_MSG) if key.public_key.n.num_bits < cipher_size(data)
              key.public_encrypt(data)
            when String # symmetric encryption
              warn(UNSAFE_MSG) if cipher_size(key) < cipher_size(data)
              cipher = aes_encryption_cipher(:ECB, key)
              cipher.update(data) + cipher.final
            end
          end

          def decrypt(key, data)
            begin
              case key
              when OpenSSL::PKey::RSA # asymmetric decryption
                key.private_decrypt(data)
              when String # symmetric Decryption
                cipher = aes_cipher(:decrypt, :ECB, key, nil)
                cipher.update(data) + cipher.final
              end
            rescue OpenSSL::Cipher::CipherError
              msg = 'decryption failed, possible incorrect key'
              raise Errors::DecryptionError, msg
            end
          end

          def decrypt_aes_gcm(key, data, auth_data)
            # data is iv (12B) + key + tag (16B)
            buf = data.unpack('C*')
            iv = buf[0,12].pack('C*') # iv will always be 12 bytes
            tag = buf[-16, 16].pack('C*') # tag is 16 bytes
            enc_key = buf[12, buf.size - (12+16)].pack('C*')
            cipher = aes_cipher(:decrypt, :GCM, key, iv)
            cipher.auth_tag = tag
            cipher.auth_data = auth_data
            cipher.update(enc_key) + cipher.final
          end

          # returns the decrypted data + auth_data
          def decrypt_rsa(key, enc_data)
            # Plaintext must be KeyLengthInBytes (1 Byte) + DataKey + AuthData
            buf = key.private_decrypt(enc_data, OpenSSL::PKey::RSA::PKCS1_OAEP_PADDING).unpack('C*')
            key_length = buf[0]
            data = buf[1, key_length].pack('C*')
            auth_data = buf[key_length+1, buf.length - key_length].pack('C*')
            [data, auth_data]
          end

          # @param [String] block_mode "CBC" or "ECB"
          # @param [OpenSSL::PKey::RSA, String, nil] key
          # @param [String, nil] iv The initialization vector
          def aes_encryption_cipher(block_mode, key = nil, iv = nil)
            aes_cipher(:encrypt, block_mode, key, iv)
          end

          # @param [String] block_mode "CBC" or "ECB"
          # @param [OpenSSL::PKey::RSA, String, nil] key
          # @param [String, nil] iv The initialization vector
          def aes_decryption_cipher(block_mode, key = nil, iv = nil)
            aes_cipher(:decrypt, block_mode, key, iv)
          end

          # @param [String] mode "encrypt" or "decrypt"
          # @param [String] block_mode "CBC" or "ECB"
          # @param [OpenSSL::PKey::RSA, String, nil] key
          # @param [String, nil] iv The initialization vector
          def aes_cipher(mode, block_mode, key, iv)
            cipher = key ?
              OpenSSL::Cipher.new("aes-#{cipher_size(key)}-#{block_mode.downcase}") :
              OpenSSL::Cipher.new("aes-256-#{block_mode.downcase}")
            cipher.send(mode) # encrypt or decrypt
            cipher.key = key if key
            cipher.iv = iv if iv
            cipher
          end

          # @param [String] key
          # @return [Integer]
          # @raise ArgumentError
          def cipher_size(key)
            key.bytesize * 8
          end

        end
      end
    end
  end
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryption/io_auth_decrypter.rb =====
# frozen_string_literal: true

module Aws
  module S3
    module Encryption
      # @api private
      class IOAuthDecrypter

        # @option options [required, IO#write] :io
        #   An IO-like object that responds to {#write}.
        # @option options [required, Integer] :encrypted_content_length
        #   The number of bytes to decrypt from the `:io` object.
        #   This should be the total size of `:io` minus the length of
        #   the cipher auth tag.
        # @option options [required, OpenSSL::Cipher] :cipher An initialized
        #   cipher that can be used to decrypt the bytes as they are
        #   written to the `:io` object. The cipher should already have
        #   its `#auth_tag` set.
        def initialize(options = {})
          @decrypter = IODecrypter.new(options[:cipher], options[:io])
          @max_bytes = options[:encrypted_content_length]
          @bytes_written = 0
        end

        def write(chunk)
          chunk = truncate_chunk(chunk)
          if chunk.bytesize > 0
            @bytes_written += chunk.bytesize
            @decrypter.write(chunk)
          end
        end

        def finalize
          @decrypter.finalize
        end

        def io
          @decrypter.io
        end

        private

        def truncate_chunk(chunk)
          if chunk.bytesize + @bytes_written <= @max_bytes
            chunk
          elsif @bytes_written < @max_bytes
            chunk[0..(@max_bytes - @bytes_written - 1)]
          else
            # If the tag was sent over after the full body has been read,
            # we don't want to accidentally append it.
            ""
          end
        end

      end
    end
  end
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryption/default_cipher_provider.rb =====
# frozen_string_literal: true

require 'base64'

module Aws
  module S3
    module Encryption
      # @api private
      class DefaultCipherProvider

        def initialize(options = {})
          @key_provider = options[:key_provider]
        end

        # @return [Array<Hash,Cipher>] Creates and returns a new encryption
        #   envelope and encryption cipher.
        def encryption_cipher
          cipher = Utils.aes_encryption_cipher(:CBC)
          envelope = {
            'x-amz-key' => encode64(encrypt(envelope_key(cipher))),
            'x-amz-iv' => encode64(envelope_iv(cipher)),
            'x-amz-matdesc' => materials_description,
          }
          [envelope, cipher]
        end

        # @return [Cipher] Given an encryption envelope, returns a
        #   decryption cipher.
        def decryption_cipher(envelope, options = {})
          master_key = @key_provider.key_for(envelope['x-amz-matdesc'])
          if envelope.key? 'x-amz-key'
            # Support for decryption of legacy objects
            key = Utils.decrypt(master_key, decode64(envelope['x-amz-key']))
            iv = decode64(envelope['x-amz-iv'])
            Utils.aes_decryption_cipher(:CBC, key, iv)
          else
            if envelope['x-amz-cek-alg'] != 'AES/GCM/NoPadding'
              raise ArgumentError, 'Unsupported cek-alg: ' \
                                   "#{envelope['x-amz-cek-alg']}"
            end
            key =
              case envelope['x-amz-wrap-alg']
              when 'AES/GCM'
                if master_key.is_a? OpenSSL::PKey::RSA
                  raise ArgumentError, 'Key mismatch - Client is configured' \
                                       ' with an RSA key and the x-amz-wrap-alg is AES/GCM.'
                end
                Utils.decrypt_aes_gcm(master_key,
                                      decode64(envelope['x-amz-key-v2']),
                                      envelope['x-amz-cek-alg'])
              when 'RSA-OAEP-SHA1'
                unless master_key.is_a? OpenSSL::PKey::RSA
                  raise ArgumentError, 'Key mismatch - Client is configured' \
                                       ' with an AES key and the x-amz-wrap-alg is RSA-OAEP-SHA1.'
                end
                key, cek_alg = Utils.decrypt_rsa(master_key, decode64(envelope['x-amz-key-v2']))
                raise Errors::DecryptionError unless cek_alg == envelope['x-amz-cek-alg']
                key
              when 'kms+context'
                raise ArgumentError, 'Key mismatch - Client is configured' \
                                     ' with a user provided key and the x-amz-wrap-alg is' \
                                     ' kms+context. Please configure the client with the' \
                                     ' required kms_key_id'
              else
                raise ArgumentError, 'Unsupported wrap-alg: ' \
                                     "#{envelope['x-amz-wrap-alg']}"
              end
            iv = decode64(envelope['x-amz-iv'])
            Utils.aes_decryption_cipher(:GCM, key, iv)
          end
        end

        private

        def envelope_key(cipher)
          cipher.key = cipher.random_key
        end

        def envelope_iv(cipher)
          cipher.iv = cipher.random_iv
        end

        def encrypt(data)
          Utils.encrypt(@key_provider.encryption_materials.key, data)
        end

        def materials_description
          @key_provider.encryption_materials.description
        end

        def encode64(str)
          Base64.encode64(str).split("\n") * ""
        end

        def decode64(str)
          Base64.decode64(str)
        end

      end
    end
  end
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryption/kms_cipher_provider.rb =====
# frozen_string_literal: true

require 'base64'

module Aws
  module S3
    module Encryption
      # @api private
      class KmsCipherProvider

        def initialize(options = {})
          @kms_key_id = options[:kms_key_id]
          @kms_client = options[:kms_client]
        end

        # @return [Array<Hash,Cipher>] Creates and returns a new encryption
        #   envelope and encryption cipher.
        def encryption_cipher
          encryption_context = { "kms_cmk_id" => @kms_key_id }
          key_data = Aws::Plugins::UserAgent.feature('S3CryptoV1n') do
            @kms_client.generate_data_key(
              key_id: @kms_key_id,
              encryption_context: encryption_context,
              key_spec: 'AES_256'
            )
          end
          cipher = Utils.aes_encryption_cipher(:CBC)
          cipher.key = key_data.plaintext
          envelope = {
            'x-amz-key-v2' => encode64(key_data.ciphertext_blob),
            'x-amz-iv' => encode64(cipher.iv = cipher.random_iv),
            'x-amz-cek-alg' => 'AES/CBC/PKCS5Padding',
            'x-amz-wrap-alg' => 'kms',
            'x-amz-matdesc' => Json.dump(encryption_context)
          }
          [envelope, cipher]
        end

        # @return [Cipher] Given an encryption envelope, returns a
        #   decryption cipher.
        def decryption_cipher(envelope, options = {})
          encryption_context = Json.load(envelope['x-amz-matdesc'])
          cek_alg = envelope['x-amz-cek-alg']

          case envelope['x-amz-wrap-alg']
          when 'kms'; # NO OP
          when 'kms+context'
            if cek_alg != encryption_context['aws:x-amz-cek-alg']
              raise Errors::DecryptionError, 'Value of cek-alg from envelope'\
                ' does not match the value in the encryption context'
            end
          when 'AES/GCM'
            raise ArgumentError, 'Key mismatch - Client is configured' \
                                 ' with a KMS key and the x-amz-wrap-alg is AES/GCM.'
          when 'RSA-OAEP-SHA1'
            raise ArgumentError, 'Key mismatch - Client is configured' \
                                 ' with a KMS key and the x-amz-wrap-alg is RSA-OAEP-SHA1.'
          else
            raise ArgumentError, 'Unsupported wrap-alg: ' \
                                 "#{envelope['x-amz-wrap-alg']}"
          end

          key = Aws::Plugins::UserAgent.feature('S3CryptoV1n') do
            @kms_client.decrypt(
              ciphertext_blob: decode64(envelope['x-amz-key-v2']),
              encryption_context: encryption_context
            ).plaintext
          end

          iv = decode64(envelope['x-amz-iv'])
          block_mode =
            case cek_alg
            when 'AES/CBC/PKCS5Padding'
              :CBC
            when 'AES/CBC/PKCS7Padding'
              :CBC
            when 'AES/GCM/NoPadding'
              :GCM
            else
              type = envelope['x-amz-cek-alg'].inspect
              msg = "unsupported content encrypting key (cek) format: #{type}"
              raise Errors::DecryptionError, msg
            end
          Utils.aes_decryption_cipher(block_mode, key, iv)
        end

        private

        def build_encryption_context(cek_alg, options = {})
          kms_context = (options[:kms_encryption_context] || {})
                        .each_with_object({}) { |(k, v), h| h[k.to_s] = v }
          {
            'aws:x-amz-cek-alg' => cek_alg
          }.merge(kms_context)
        end

        def encode64(str)
          Base64.encode64(str).split("\n") * ""
        end

        def decode64(str)
          Base64.decode64(str)
        end

      end
    end
  end
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryption/client.rb =====
# frozen_string_literal: true

require 'forwardable'

module Aws
  module S3

    # [MAINTENANCE MODE] There is a new version of the Encryption Client.
    # AWS strongly recommends upgrading to the {Aws::S3::EncryptionV2::Client},
    # which provides updated data security best practices.
    # See documentation for {Aws::S3::EncryptionV2::Client}.
    #
    # Provides an encryption client that encrypts and decrypts data client-side,
    # storing the encrypted data in Amazon S3.
    #
    # This client uses a process called "envelope encryption". Your private
    # encryption keys and your data's plain-text are **never** sent to
    # Amazon S3. **If you lose your encryption keys, you will not be able to
    # decrypt your data.**
    #
    # ## Envelope Encryption Overview
    #
    # The goal of envelope encryption is to combine the performance of
    # fast symmetric encryption while maintaining the secure key management
    # that asymmetric keys provide.
    #
    # A one-time-use symmetric key (envelope key) is generated client-side.
    # This is used to encrypt the data client-side. This key is then
    # encrypted by your master key and stored alongside your data in Amazon
    # S3.
    #
    # When accessing your encrypted data with the encryption client,
    # the encrypted envelope key is retrieved and decrypted client-side
    # with your master key. The envelope key is then used to decrypt the
    # data client-side.
    #
    # One of the benefits of envelope encryption is that if your master key
    # is compromised, you have the option of just re-encrypting the stored
    # envelope symmetric keys, instead of re-encrypting all of the
    # data in your account.
    #
    # ## Basic Usage
    #
    # The encryption client requires an {Aws::S3::Client}. If you do not
    # provide a `:client`, then a client will be constructed for you.
    #
    #     require 'openssl'
    #     key = OpenSSL::PKey::RSA.new(1024)
    #
    #     # encryption client
    #     s3 = Aws::S3::Encryption::Client.new(encryption_key: key)
    #
    #     # round-trip an object, encrypted/decrypted locally
    #     s3.put_object(bucket:'aws-sdk', key:'secret', body:'handshake')
    #     s3.get_object(bucket:'aws-sdk', key:'secret').body.read
    #     #=> 'handshake'
    #
    #     # reading encrypted object without the encryption client
    #     # results in getting the cipher text
    #     Aws::S3::Client.new.get_object(bucket:'aws-sdk', key:'secret').body.read
    #     #=> "... cipher text ..."
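    #
    # Decryption failures (for example, an envelope that was encrypted under a
    # different master key, or one that cannot be located at all) are raised as
    # {Errors::DecryptionError}. A minimal handling sketch, reusing the client
    # above (the bucket and key names are illustrative):
    #
    #     begin
    #       s3.get_object(bucket:'aws-sdk', key:'secret').body.read
    #     rescue Aws::S3::Encryption::Errors::DecryptionError => e
    #       # wrong or missing key material, or no usable envelope
    #       warn("unable to decrypt: #{e.message}")
    #     end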
    #
    # ## Keys
    #
    # For client-side encryption to work, you must provide one of the following:
    #
    # * An encryption key
    # * A {KeyProvider}
    # * A KMS encryption key id
    #
    # ### An Encryption Key
    #
    # You can pass a single encryption key. This is used as a master key
    # encrypting and decrypting all object keys.
    #
    #     key = OpenSSL::Cipher.new("AES-256-ECB").random_key # symmetric key
    #     key = OpenSSL::PKey::RSA.new(1024) # asymmetric key pair
    #
    #     s3 = Aws::S3::Encryption::Client.new(encryption_key: key)
    #
    # ### Key Provider
    #
    # Alternatively, you can use a {KeyProvider}. A key provider makes
    # it easy to work with multiple keys and simplifies key rotation.
    #
    # ### KMS Encryption Key Id
    #
    # If you pass the id of an AWS Key Management Service (KMS) key,
    # then KMS will be used to generate, encrypt and decrypt object keys.
    #
    #     # keep track of the kms key id
    #     kms = Aws::KMS::Client.new
    #     key_id = kms.create_key.key_metadata.key_id
    #
    #     Aws::S3::Encryption::Client.new(
    #       kms_key_id: key_id,
    #       kms_client: kms,
    #     )
    #
    # ## Custom Key Providers
    #
    # A {KeyProvider} is any object that responds to:
    #
    # * `#encryption_materials`
    # * `#key_for(materials_description)`
    #
    # Here is a trivial implementation of an in-memory key provider.
    # This is provided as a demonstration of the key provider interface,
    # and should not be used in production:
    #
    #     class KeyProvider
    #
    #       def initialize(default_key_name, keys)
    #         @keys = keys
    #         @encryption_materials = Aws::S3::Encryption::Materials.new(
    #           key: @keys[default_key_name],
    #           description: JSON.dump(key: default_key_name),
    #         )
    #       end
    #
    #       attr_reader :encryption_materials
    #
    #       def key_for(matdesc)
    #         key_name = JSON.parse(matdesc)['key']
    #         if key = @keys[key_name]
    #           key
    #         else
    #           raise "encryption key not found for: #{matdesc.inspect}"
    #         end
    #       end
    #     end
    #
    # Given the above key provider, you can create an encryption client that
    # chooses the key to use based on the materials description stored with
    # the encrypted object. This makes it possible to use multiple keys
    # and simplifies key rotation.
    #
    #     # uses "new-key" for encrypting objects, uses either for decrypting
    #     keys = KeyProvider.new('new-key', {
    #       "old-key" => Base64.decode64("kM5UVbhE/4rtMZJfsadYEdm2vaKFsmV2f5+URSeUCV4="),
    #       "new-key" => Base64.decode64("w1WLio3agRWRTSJK/Ouh8NHoqRQ6fn5WbSXDTHjXMSo="),
    #     })
    #
    #     # chooses the key based on the materials description stored
    #     # with the encrypted object
    #     s3 = Aws::S3::Encryption::Client.new(key_provider: keys)
    #
    # ## Materials Description
    #
    # A materials description is a JSON document string that is stored
    # in the metadata (or instruction file) of an encrypted object.
    # The {DefaultKeyProvider} uses the empty JSON document `"{}"`.
    #
    # When building a key provider, you are free to store whatever
    # information you need to identify the master key that was used
    # to encrypt the object.
    #
    # ## Envelope Location
    #
    # By default, the encryption client stores the encryption envelope
    # with the object, as metadata. You can choose to have the envelope
    # stored in a separate "instruction file". An instruction file
    # is an object, with the key of the encrypted object, suffixed with
    # `".instruction"`.
    #
    # Specify the `:envelope_location` option as `:instruction_file` to
    # use an instruction file for storing the envelope.
    #
    #     # default behavior
    #     s3 = Aws::S3::Encryption::Client.new(
    #       key_provider: ...,
    #       envelope_location: :metadata,
    #     )
    #
    #     # store envelope in a separate object
    #     s3 = Aws::S3::Encryption::Client.new(
    #       key_provider: ...,
    #       envelope_location: :instruction_file,
    #       instruction_file_suffix: '.instruction' # default
    #     )
    #
    # When using an instruction file, multiple requests are made when
    # putting and getting the object. **This may cause issues if you are
    # issuing concurrent PUT and GET requests to an encrypted object.**
    #
    module Encryption
      class Client

        extend Deprecations
        extend Forwardable
        def_delegators :@client, :config, :delete_object, :head_object, :build_request

        # Creates a new encryption client. You must provide one of the following
        # options:
        #
        # * `:encryption_key`
        # * `:kms_key_id`
        # * `:key_provider`
        #
        # You may also pass any other options accepted by `Client#initialize`.
        #
        # @option options [S3::Client] :client A basic S3 client that is used
        #   to make api calls. If a `:client` is not provided, a new {S3::Client}
        #   will be constructed.
        #
        # @option options [OpenSSL::PKey::RSA, String] :encryption_key The master
        #   key to use for encrypting/decrypting all objects.
        #
        # @option options [String] :kms_key_id When you provide a `:kms_key_id`,
        #   then AWS Key Management Service (KMS) will be used to manage the
        #   object encryption keys. By default a {KMS::Client} will be
        #   constructed for KMS API calls. Alternatively, you can provide
        #   your own via `:kms_client`.
        #
        # @option options [#key_for] :key_provider Any object that responds
        #   to `#key_for`. This method should accept a materials description
        #   JSON document string and return an encryption key.
        #
        # @option options [Symbol] :envelope_location (:metadata) Where to
        #   store the envelope encryption keys. By default, the envelope is
        #   stored with the encrypted object. If you pass `:instruction_file`,
        #   then the envelope is stored in a separate object in Amazon S3.
        #
        # @option options [String] :instruction_file_suffix ('.instruction')
        #   When `:envelope_location` is `:instruction_file` then the
        #   instruction file uses the object key with this suffix appended.
        #
        # @option options [KMS::Client] :kms_client A default {KMS::Client}
        #   is constructed when using KMS to manage encryption keys.
        #
        def initialize(options = {})
          @client = extract_client(options)
          @cipher_provider = cipher_provider(options)
          @envelope_location = extract_location(options)
          @instruction_file_suffix = extract_suffix(options)
        end
        deprecated :initialize,
                   message:
                     '[MAINTENANCE MODE] This version of the S3 Encryption client is currently in maintenance mode. ' \
                     'AWS strongly recommends upgrading to the Aws::S3::EncryptionV2::Client, ' \
                     'which provides updated data security best practices. ' \
                     'See documentation for Aws::S3::EncryptionV2::Client.'

        # @return [S3::Client]
        attr_reader :client

        # @return [KeyProvider, nil] Returns `nil` if you are using
        #   AWS Key Management Service (KMS).
        attr_reader :key_provider

        # @return [Symbol<:metadata, :instruction_file>]
        attr_reader :envelope_location

        # @return [String] When {#envelope_location} is `:instruction_file`,
        #   the envelope is stored in the object with the object key suffixed
        #   by this string.
        attr_reader :instruction_file_suffix

        # Uploads an object to Amazon S3, encrypting data client-side.
        # See {S3::Client#put_object} for documentation on accepted
        # request parameters.
        # @option (see S3::Client#put_object)
        # @return (see S3::Client#put_object)
        # @see S3::Client#put_object
        def put_object(params = {})
          req = @client.build_request(:put_object, params)
          req.handlers.add(EncryptHandler, priority: 95)
          req.context[:encryption] = {
            cipher_provider: @cipher_provider,
            envelope_location: @envelope_location,
            instruction_file_suffix: @instruction_file_suffix,
          }
          Aws::Plugins::UserAgent.feature('S3CryptoV1n') do
            req.send_request
          end
        end

        # Gets an object from Amazon S3, decrypting data locally.
        # See {S3::Client#get_object} for documentation on accepted
        # request parameters.
        # @option params [String] :instruction_file_suffix The suffix
        #   used to find the instruction file containing the encryption
        #   envelope. You should not set this option when the envelope
        #   is stored in the object metadata. Defaults to
        #   {#instruction_file_suffix}.
        # @option (see S3::Client#get_object)
        # @return (see S3::Client#get_object)
        # @see S3::Client#get_object
        # @note The `:range` request parameter is not yet supported.
        def get_object(params = {}, &block)
          if params[:range]
            raise NotImplementedError, '#get_object with :range not supported yet'
          end
          envelope_location, instruction_file_suffix = envelope_options(params)
          req = @client.build_request(:get_object, params)
          req.handlers.add(DecryptHandler)
          req.context[:encryption] = {
            cipher_provider: @cipher_provider,
            envelope_location: envelope_location,
            instruction_file_suffix: instruction_file_suffix,
          }
          Aws::Plugins::UserAgent.feature('S3CryptoV1n') do
            req.send_request(target: block)
          end
        end

        private

        def extract_client(options)
          options[:client] || begin
            options = options.dup
            options.delete(:kms_key_id)
            options.delete(:kms_client)
            options.delete(:key_provider)
            options.delete(:encryption_key)
            options.delete(:envelope_location)
            options.delete(:instruction_file_suffix)
            S3::Client.new(options)
          end
        end

        def kms_client(options)
          options[:kms_client] || begin
            KMS::Client.new(
              region: @client.config.region,
              credentials: @client.config.credentials,
            )
          end
        end

        def cipher_provider(options)
          if options[:kms_key_id]
            KmsCipherProvider.new(
              kms_key_id: options[:kms_key_id],
              kms_client: kms_client(options),
            )
          else
            # kept here for backwards compatibility, {#key_provider} is deprecated
            @key_provider = extract_key_provider(options)
            DefaultCipherProvider.new(key_provider: @key_provider)
          end
        end

        def extract_key_provider(options)
          if options[:key_provider]
            options[:key_provider]
          elsif options[:encryption_key]
            DefaultKeyProvider.new(options)
          else
            msg = 'you must pass a :kms_key_id, :key_provider, or :encryption_key'
            raise ArgumentError, msg
          end
        end

        def envelope_options(params)
          location = params.delete(:envelope_location) || @envelope_location
          suffix = params.delete(:instruction_file_suffix)
          if suffix
            [:instruction_file, suffix]
          else
            [location, @instruction_file_suffix]
          end
        end

        def extract_location(options)
          location = options[:envelope_location] || :metadata
          if [:metadata, :instruction_file].include?(location)
            location
          else
            msg = ':envelope_location must be :metadata or :instruction_file '\
                  "got #{location.inspect}"
            raise ArgumentError, msg
          end
        end

        def extract_suffix(options)
          suffix = options[:instruction_file_suffix] || '.instruction'
          if String === suffix
            suffix
          else
            msg = ':instruction_file_suffix must be a String'
            raise ArgumentError, msg
          end
        end

      end
    end
  end
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryption/errors.rb =====
# frozen_string_literal: true

module Aws
  module S3
    module Encryption
      module Errors

        class DecryptionError < RuntimeError; end

        class EncryptionError < RuntimeError; end

      end
    end
  end
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryption/key_provider.rb =====
# frozen_string_literal: true

module Aws
  module S3
    module Encryption

      # This module defines the interface required for a {Client#key_provider}.
      # A key provider is any object that:
      #
      # * Responds to {#encryption_materials} with a {Materials} object.
      #
      # * Responds to {#key_for}, receiving a JSON document String,
      #   returning an encryption key. The returned encryption key
      #   must be one of:
      #
      #   * `OpenSSL::PKey::RSA` - for asymmetric encryption
      #   * `String` - 32, 24, or 16 bytes long, for symmetric encryption
      #
      module KeyProvider

        # @return [Materials]
        def encryption_materials; end

        # @param [String] materials_description
        # @return [OpenSSL::PKey::RSA, String] encryption_key
        def key_for(materials_description); end

      end
    end
  end
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryption.rb =====
# frozen_string_literal: true

require 'aws-sdk-s3/encryption/client'
require 'aws-sdk-s3/encryption/decrypt_handler'
require 'aws-sdk-s3/encryption/default_cipher_provider'
require 'aws-sdk-s3/encryption/encrypt_handler'
require 'aws-sdk-s3/encryption/errors'
require 'aws-sdk-s3/encryption/io_encrypter'
require 'aws-sdk-s3/encryption/io_decrypter'
require 'aws-sdk-s3/encryption/io_auth_decrypter'
require 'aws-sdk-s3/encryption/key_provider'
require 'aws-sdk-s3/encryption/kms_cipher_provider'
require 'aws-sdk-s3/encryption/materials'
require 'aws-sdk-s3/encryption/utils'
require 'aws-sdk-s3/encryption/default_key_provider'

module Aws
  module S3
    module Encryption; end

    AES_GCM_TAG_LEN_BYTES = 16
    EC_USER_AGENT = 'S3CryptoV1n'
  end
end

# ===== File: aws-sdk-s3-1.143.0/lib/aws-sdk-s3/bucket_policy.rb =====
# frozen_string_literal: true

# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE

module Aws::S3

  class BucketPolicy

    extend Aws::Deprecations

    # @overload def initialize(bucket_name, options = {})
    #   @param [String] bucket_name
    #   @option options [Client] :client
    # @overload def initialize(options = {})
    #   @option options [required, String] :bucket_name
    #   @option options [Client] :client
    def initialize(*args)
      options = Hash === args.last ? args.pop.dup : {}
      @bucket_name = extract_bucket_name(args, options)
      @data = options.delete(:data)
      @client = options.delete(:client) || Client.new(options)
      @waiter_block_warned = false
    end

    # @!group Read-Only Attributes

    # @return [String]
    def bucket_name
      @bucket_name
    end

    # The bucket policy as a JSON document.
    # @return [IO]
    def policy
      data[:policy]
    end

    # @!endgroup

    # @return [Client]
    def client
      @client
    end

    # Loads, or reloads {#data} for the current {BucketPolicy}.
    # Returns `self` making it possible to chain methods.
    #
    #     bucket_policy.reload.data
    #
    # @return [self]
    def load
      resp = Aws::Plugins::UserAgent.feature('resource') do
        @client.get_bucket_policy(bucket: @bucket_name)
      end
      @data = resp.data
      self
    end
    alias :reload :load

    # @return [Types::GetBucketPolicyOutput]
    #   Returns the data for this {BucketPolicy}. Calls
    #   {Client#get_bucket_policy} if {#data_loaded?} is `false`.
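    # @example Reading the policy document through this resource class (the
    #   bucket name is an illustrative assumption)
    #   policy = Aws::S3::BucketPolicy.new('my-bucket')
    #   policy.data.policy.read #=> the bucket policy as a JSON string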
    def data
      load unless @data
      @data
    end

    # @return [Boolean]
    #   Returns `true` if this resource is loaded. Accessing attributes or
    #   {#data} on an unloaded resource will trigger a call to {#load}.
    def data_loaded?
      !!@data
    end

    # @deprecated Use [Aws::S3::Client] #wait_until instead
    #
    # A waiter polls an API operation until a resource enters a desired
    # state.
    #
    # @note The waiting operation is performed on a copy. The original resource
    #   remains unchanged.
    #
    # ## Basic Usage
    #
    # A waiter polls until it is successful, until it fails by
    # entering a terminal state, or until a maximum number of attempts
    # are made.
    #
    #     # polls in a loop until condition is true
    #     resource.wait_until(options) {|resource| condition}
    #
    # ## Example
    #
    #     instance.wait_until(max_attempts:10, delay:5) do |instance|
    #       instance.state.name == 'running'
    #     end
    #
    # ## Configuration
    #
    # You can configure the maximum number of polling attempts, and the
    # delay (in seconds) between each polling attempt. The waiting condition is
    # set by passing a block to {#wait_until}:
    #
    #     # poll for ~25 seconds
    #     resource.wait_until(max_attempts:5,delay:5) {|resource|...}
    #
    # ## Callbacks
    #
    # You can be notified before each polling attempt and before each
    # delay. If you throw `:success` or `:failure` from these callbacks,
    # it will terminate the waiter.
    #
    #     started_at = Time.now
    #     # poll for 1 hour, instead of a number of attempts
    #     proc = Proc.new do |attempts, response|
    #       throw :failure if Time.now - started_at > 3600
    #     end
    #
    #     # disable max attempts
    #     instance.wait_until(before_wait:proc, max_attempts:nil) {...}
    #
    # ## Handling Errors
    #
    # When a waiter is successful, it returns the Resource. When a waiter
    # fails, it raises an error.
    #
    #     begin
    #       resource.wait_until(...)
    #     rescue Aws::Waiters::Errors::WaiterFailed
    #       # resource did not enter the desired state in time
    #     end
    #
    # @yieldparam [Resource] resource to be used in the waiting condition.
    #
    # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter
    #   terminates because the waiter has entered a state that it will not
    #   transition out of, preventing success.
    #
    # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the
    #   maximum number of attempts have been made and the waiter is not
    #   yet successful.
    #
    # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is
    #   encountered while polling for a resource that is not expected.
    #
    # @raise [NotImplementedError] Raised when the resource does not
    #   support #wait_until.
    #
    # @option options [Integer] :max_attempts (10) Maximum number of
    #   attempts
    # @option options [Integer] :delay (10) Delay between each
    #   attempt in seconds
    # @option options [Proc] :before_attempt (nil) Callback
    #   invoked before each attempt
    # @option options [Proc] :before_wait (nil) Callback
    #   invoked before each wait
    # @return [Resource] if the waiter was successful
    def wait_until(options = {}, &block)
      self_copy = self.dup
      attempts = 0
      options[:max_attempts] = 10 unless options.key?(:max_attempts)
      options[:delay] ||= 10
      options[:poller] = Proc.new do
        attempts += 1
        if block.call(self_copy)
          [:success, self_copy]
        else
          self_copy.reload unless attempts == options[:max_attempts]
          :retry
        end
      end
      Aws::Plugins::UserAgent.feature('resource') do
        Aws::Waiters::Waiter.new(options).wait({})
      end
    end

    # @!group Actions

    # @example Request syntax with placeholder values
    #
    #   bucket_policy.delete({
    #     expected_bucket_owner: "AccountId",
    #   })
    # @param [Hash] options ({})
    # @option options [String] :expected_bucket_owner
    #   The account ID of the expected bucket owner.
If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # For directory buckets, this header is not supported in this API # operation. If you specify this header, the request fails with the HTTP # status code `501 Not Implemented`. # # # @return [EmptyStructure] def delete(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.delete_bucket_policy(options) end resp.data end # @example Request syntax with placeholder values # # bucket_policy.put({ # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # confirm_remove_self_bucket_access: false, # policy: "Policy", # required # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :content_md5 # The MD5 hash of the request body. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # This functionality is not supported for directory buckets. # # # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum-algorithm ` or # `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request # with the HTTP status code `400 Bad Request`. # # For the `x-amz-checksum-algorithm ` header, replace ` algorithm ` with # the supported algorithm from the following list: # # * CRC32 # # * CRC32C # # * SHA1 # # * SHA256 # # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # If the individual checksum value you provide through # `x-amz-checksum-algorithm ` doesn't match the checksum algorithm you # set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter and uses the checksum algorithm # that matches the provided value in `x-amz-checksum-algorithm `. # # For directory buckets, when you use Amazon Web Services SDKs, `CRC32` # is the default checksum algorithm that's used for performance. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [Boolean] :confirm_remove_self_bucket_access # Set this parameter to true to confirm that you want to remove your # permissions to change this bucket policy in the future. # # This functionality is not supported for directory buckets. # # # @option options [required, String] :policy # The bucket policy as a JSON document. # # For directory buckets, the only IAM action supported in the bucket # policy is `s3express:CreateSession`. # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # For directory buckets, this header is not supported in this API # operation. If you specify this header, the request fails with the HTTP # status code `501 Not Implemented`. 
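# Editor's note: the example below is an illustrative sketch added for this
# edition, not generated SDK documentation; the bucket name is a placeholder
# and `require 'json'` is assumed.
# @example Denying non-TLS requests with a minimal policy
#
#   statement = {
#     'Sid' => 'AllowSSLRequestsOnly',
#     'Effect' => 'Deny',
#     'Principal' => '*',
#     'Action' => 's3:*',
#     'Resource' => [
#       'arn:aws:s3:::amzn-s3-demo-bucket',
#       'arn:aws:s3:::amzn-s3-demo-bucket/*'
#     ],
#     'Condition' => { 'Bool' => { 'aws:SecureTransport' => 'false' } }
#   }
#   bucket_policy.put(
#     policy: { 'Version' => '2012-10-17', 'Statement' => [statement] }.to_json
#   )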
# # # @return [EmptyStructure] def put(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_bucket_policy(options) end resp.data end # @!group Associations # @return [Bucket] def bucket Bucket.new( name: @bucket_name, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end class Collection < Aws::Resources::Collection; end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/customizations.rb0000644000004100000410000000261414563445240021700 0ustar www-datawww-data# frozen_string_literal: true # utility classes require 'aws-sdk-s3/bucket_region_cache' require 'aws-sdk-s3/encryption' require 'aws-sdk-s3/encryption_v2' require 'aws-sdk-s3/file_part' require 'aws-sdk-s3/file_uploader' require 'aws-sdk-s3/file_downloader' require 'aws-sdk-s3/legacy_signer' require 'aws-sdk-s3/multipart_file_uploader' require 'aws-sdk-s3/multipart_stream_uploader' require 'aws-sdk-s3/multipart_upload_error' require 'aws-sdk-s3/object_copier' require 'aws-sdk-s3/object_multipart_copier' require 'aws-sdk-s3/presigned_post' require 'aws-sdk-s3/presigner' # s3 express session auth require 'aws-sdk-s3/express_credentials' require 'aws-sdk-s3/express_credentials_cache' require 'aws-sdk-s3/express_credentials_provider' # customizations to generated classes require 'aws-sdk-s3/customizations/bucket' require 'aws-sdk-s3/customizations/errors' require 'aws-sdk-s3/customizations/object' require 'aws-sdk-s3/customizations/object_summary' require 'aws-sdk-s3/customizations/multipart_upload' require 'aws-sdk-s3/customizations/types/list_object_versions_output' require 'aws-sdk-s3/customizations/types/permanent_redirect' [ Aws::S3::Object::Collection, Aws::S3::ObjectSummary::Collection, Aws::S3::ObjectVersion::Collection, ].each do |klass| klass.send(:alias_method, :delete, :batch_delete!) klass.extend Aws::Deprecations klass.send(:deprecated, :delete, use: :batch_delete!) end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/object_version.rb0000644000004100000410000010121114563445240021611 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 class ObjectVersion extend Aws::Deprecations # @overload def initialize(bucket_name, object_key, id, options = {}) # @param [String] bucket_name # @param [String] object_key # @param [String] id # @option options [Client] :client # @overload def initialize(options = {}) # @option options [required, String] :bucket_name # @option options [required, String] :object_key # @option options [required, String] :id # @option options [Client] :client def initialize(*args) options = Hash === args.last ? 
args.pop.dup : {} @bucket_name = extract_bucket_name(args, options) @object_key = extract_object_key(args, options) @id = extract_id(args, options) @data = options.delete(:data) @client = options.delete(:client) || Client.new(options) @waiter_block_warned = false end # @!group Read-Only Attributes # @return [String] def bucket_name @bucket_name end # @return [String] def object_key @object_key end # @return [String] def id @id end # The entity tag is an MD5 hash of that version of the object. # @return [String] def etag data[:etag] end # The algorithm that was used to create a checksum of the object. # @return [Array<String>] def checksum_algorithm data[:checksum_algorithm] end # Size in bytes of the object. # @return [Integer] def size data[:size] end # The class of storage used to store the object. # @return [String] def storage_class data[:storage_class] end # The object key. # @return [String] def key data[:key] end # Version ID of an object. # @return [String] def version_id data[:version_id] end # Specifies whether the object is (true) or is not (false) the latest # version of an object. # @return [Boolean] def is_latest data[:is_latest] end # Date and time when the object was last modified. # @return [Time] def last_modified data[:last_modified] end # Specifies the owner of the object. # @return [Types::Owner] def owner data[:owner] end # Specifies the restoration status of an object. Objects in certain # storage classes must be restored before they can be retrieved. For # more information about these storage classes and how to work with # archived objects, see [ Working with archived objects][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html # @return [Types::RestoreStatus] def restore_status data[:restore_status] end # @!endgroup # @return [Client] def client @client end # @raise [NotImplementedError] # @api private def load msg = "#load is not implemented, data only available via enumeration" raise NotImplementedError, msg end alias :reload :load # @raise [NotImplementedError] Raised when {#data_loaded?} is `false`. # @return [Types::ObjectVersion] # Returns the data for this {ObjectVersion}. def data load unless @data @data end # @return [Boolean] # Returns `true` if this resource is loaded. Accessing attributes or # {#data} on an unloaded resource will trigger a call to {#load}. def data_loaded? !!@data end # @deprecated Use [Aws::S3::Client] #wait_until instead # # Waiter polls an API operation until a resource enters a desired # state. # # @note The waiting operation is performed on a copy. The original resource # remains unchanged. # # ## Basic Usage # # The waiter polls until it is successful, until it fails by # entering a terminal state, or until a maximum number of attempts # are made. # # # polls in a loop until condition is true # resource.wait_until(options) {|resource| condition} # # ## Example # # instance.wait_until(max_attempts:10, delay:5) do |instance| # instance.state.name == 'running' # end # # ## Configuration # # You can configure the maximum number of polling attempts, and the # delay (in seconds) between each polling attempt. The waiting condition is # set by passing a block to {#wait_until}: # # # poll for ~25 seconds # resource.wait_until(max_attempts:5,delay:5) {|resource|...} # # ## Callbacks # # You can be notified before each polling attempt and before each # delay. If you throw `:success` or `:failure` from these callbacks, # it will terminate the waiter.
# # started_at = Time.now # # poll for 1 hour, instead of a number of attempts # proc = Proc.new do |attempts, response| # throw :failure if Time.now - started_at > 3600 # end # # # disable max attempts # instance.wait_until(before_wait:proc, max_attempts:nil) {...} # # ## Handling Errors # # When a waiter is successful, it returns the Resource. When a waiter # fails, it raises an error. # # begin # resource.wait_until(...) # rescue Aws::Waiters::Errors::WaiterFailed # # resource did not enter the desired state in time # end # # @yieldparam [Resource] resource to be used in the waiting condition. # # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter # terminates because the waiter has entered a state that it will not # transition out of, preventing success. # # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the # configured maximum number of attempts have been made and the waiter # is not yet successful. # # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is # encountered while polling for a resource that is not expected. # # @raise [NotImplementedError] Raised when the resource does not # support `#reload`, which polling requires. # # @option options [Integer] :max_attempts (10) Maximum number of # attempts # @option options [Integer] :delay (10) Delay between each # attempt in seconds # @option options [Proc] :before_attempt (nil) Callback # invoked before each attempt # @option options [Proc] :before_wait (nil) Callback # invoked before each wait # @return [Resource] if the waiter was successful def wait_until(options = {}, &block) self_copy = self.dup attempts = 0 options[:max_attempts] = 10 unless options.key?(:max_attempts) options[:delay] ||= 10 options[:poller] = Proc.new do attempts += 1 if block.call(self_copy) [:success, self_copy] else self_copy.reload unless attempts == options[:max_attempts] :retry end end Aws::Plugins::UserAgent.feature('resource') do Aws::Waiters::Waiter.new(options).wait({}) end end # @!group Actions # @example Request syntax with placeholder values # # object_version.delete({ # mfa: "MFA", # request_payer: "requester", # accepts requester # bypass_governance_retention: false, # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :mfa # The concatenation of the authentication device's serial number, a # space, and the value that is displayed on your authentication device. # Required to permanently delete a versioned object if versioning is # configured with MFA delete enabled. # # This functionality is not supported for directory buckets. # # # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [Boolean] :bypass_governance_retention # Indicates whether S3 Object Lock should bypass Governance-mode # restrictions to process this operation. To use this header, you must # have the `s3:BypassGovernanceRetention` permission. # # This functionality is not supported for directory buckets. # # # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner.
If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [Types::DeleteObjectOutput] def delete(options = {}) options = options.merge( bucket: @bucket_name, key: @object_key, version_id: @id ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.delete_object(options) end resp.data end # @example Request syntax with placeholder values # # object_version.get({ # if_match: "IfMatch", # if_modified_since: Time.now, # if_none_match: "IfNoneMatch", # if_unmodified_since: Time.now, # range: "Range", # response_cache_control: "ResponseCacheControl", # response_content_disposition: "ResponseContentDisposition", # response_content_encoding: "ResponseContentEncoding", # response_content_language: "ResponseContentLanguage", # response_content_type: "ResponseContentType", # response_expires: Time.now, # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # request_payer: "requester", # accepts requester # part_number: 1, # expected_bucket_owner: "AccountId", # checksum_mode: "ENABLED", # accepts ENABLED # }) # @param [Hash] options ({}) # @option options [String] :if_match # Return the object only if its entity tag (ETag) is the same as the one # specified in this header; otherwise, return a `412 Precondition # Failed` error. # # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: `If-Match` condition evaluates to # `true`, and; `If-Unmodified-Since` condition evaluates to `false`; # then, S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [Time,DateTime,Date,Integer,String] :if_modified_since # Return the object only if it has been modified since the specified # time; otherwise, return a `304 Not Modified` error. # # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows:` If-None-Match` condition evaluates # to `false`, and; `If-Modified-Since` condition evaluates to `true`; # then, S3 returns `304 Not Modified` status code. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [String] :if_none_match # Return the object only if its entity tag (ETag) is different from the # one specified in this header; otherwise, return a `304 Not Modified` # error. # # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows:` If-None-Match` condition evaluates # to `false`, and; `If-Modified-Since` condition evaluates to `true`; # then, S3 returns `304 Not Modified` HTTP status code. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [Time,DateTime,Date,Integer,String] :if_unmodified_since # Return the object only if it has not been modified since the specified # time; otherwise, return a `412 Precondition Failed` error. # # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: `If-Match` condition evaluates to # `true`, and; `If-Unmodified-Since` condition evaluates to `false`; # then, S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. 
# # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [String] :range # Downloads the specified byte range of an object. For more information # about the HTTP Range header, see # [https://www.rfc-editor.org/rfc/rfc9110.html#name-range][1]. # # Amazon S3 doesn't support retrieving multiple ranges of data per # `GET` request. # # # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-range # @option options [String] :response_cache_control # Sets the `Cache-Control` header of the response. # @option options [String] :response_content_disposition # Sets the `Content-Disposition` header of the response. # @option options [String] :response_content_encoding # Sets the `Content-Encoding` header of the response. # @option options [String] :response_content_language # Sets the `Content-Language` header of the response. # @option options [String] :response_content_type # Sets the `Content-Type` header of the response. # @option options [Time,DateTime,Date,Integer,String] :response_expires # Sets the `Expires` header of the response. # @option options [String] :sse_customer_algorithm # Specifies the algorithm to use when decrypting the object (for # example, `AES256`). # # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object in # Amazon S3, then when you GET the object, you must use the following # headers: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @option options [String] :sse_customer_key # Specifies the customer-provided encryption key that you originally # provided for Amazon S3 to encrypt the data before storing it. This # value is used to decrypt the object when recovering it and must match # the one used when storing the data. The key must be appropriate for # use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object in # Amazon S3, then when you GET the object, you must use the following # headers: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @option options [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the customer-provided encryption # key according to RFC 1321. Amazon S3 uses this header for a message # integrity check to ensure that the encryption key was transmitted # without error. 
# # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object in # Amazon S3, then when you GET the object, you must use the following # headers: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [Integer] :part_number # Part number of the object being read. This is a positive integer # between 1 and 10,000. Effectively performs a 'ranged' GET request # for the part specified. Useful for downloading just a part of an # object. # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @option options [String] :checksum_mode # To retrieve the checksum, this mode must be enabled. # @return [Types::GetObjectOutput] def get(options = {}, &block) options = options.merge( bucket: @bucket_name, key: @object_key, version_id: @id ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.get_object(options, &block) end resp.data end # @example Request syntax with placeholder values # # object_version.head({ # if_match: "IfMatch", # if_modified_since: Time.now, # if_none_match: "IfNoneMatch", # if_unmodified_since: Time.now, # range: "Range", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # request_payer: "requester", # accepts requester # part_number: 1, # expected_bucket_owner: "AccountId", # checksum_mode: "ENABLED", # accepts ENABLED # }) # @param [Hash] options ({}) # @option options [String] :if_match # Return the object only if its entity tag (ETag) is the same as the one # specified; otherwise, return a 412 (precondition failed) error. # # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: # # * `If-Match` condition evaluates to `true`, and; # # * `If-Unmodified-Since` condition evaluates to `false`; # # Then Amazon S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [Time,DateTime,Date,Integer,String] :if_modified_since # Return the object only if it has been modified since the specified # time; otherwise, return a 304 (not modified) error. 
# # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows: # # * `If-None-Match` condition evaluates to `false`, and; # # * `If-Modified-Since` condition evaluates to `true`; # # Then Amazon S3 returns the `304 Not Modified` response code. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [String] :if_none_match # Return the object only if its entity tag (ETag) is different from the # one specified; otherwise, return a 304 (not modified) error. # # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows: # # * `If-None-Match` condition evaluates to `false`, and; # # * `If-Modified-Since` condition evaluates to `true`; # # Then Amazon S3 returns the `304 Not Modified` response code. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [Time,DateTime,Date,Integer,String] :if_unmodified_since # Return the object only if it has not been modified since the specified # time; otherwise, return a 412 (precondition failed) error. # # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: # # * `If-Match` condition evaluates to `true`, and; # # * `If-Unmodified-Since` condition evaluates to `false`; # # Then Amazon S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [String] :range # HeadObject returns only the metadata for an object. If the Range is # satisfiable, only the `ContentLength` is affected in the response. If # the Range is not satisfiable, S3 returns a `416 - Requested Range Not # Satisfiable` error. # @option options [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, AES256). # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded; Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported for directory buckets. # # # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. 
# # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [Integer] :part_number # Part number of the object being read. This is a positive integer # between 1 and 10,000. Effectively performs a 'ranged' HEAD request # for the part specified. Useful querying about the size of the part and # the number of parts in this object. # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @option options [String] :checksum_mode # To retrieve the checksum, this parameter must be enabled. # # In addition, if you enable `ChecksumMode` and the object is encrypted # with Amazon Web Services Key Management Service (Amazon Web Services # KMS), you must have permission to use the `kms:Decrypt` action for the # request to succeed. # @return [Types::HeadObjectOutput] def head(options = {}) options = options.merge( bucket: @bucket_name, key: @object_key, version_id: @id ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.head_object(options) end resp.data end # @!group Associations # @return [Object] def object Object.new( bucket_name: @bucket_name, key: @object_key, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name, object_key: @object_key, id: @id } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end def extract_object_key(args, options) value = args[1] || options.delete(:object_key) case value when String then value when nil then raise ArgumentError, "missing required option :object_key" else msg = "expected :object_key to be a String, got #{value.class}" raise ArgumentError, msg end end def extract_id(args, options) value = args[2] || options.delete(:id) case value when String then value when nil then raise ArgumentError, "missing required option :id" else msg = "expected :id to be a String, got #{value.class}" raise ArgumentError, msg end end class Collection < Aws::Resources::Collection # @!group Batch Actions # @example Request syntax with placeholder values # # object_version.batch_delete!({ # mfa: "MFA", # request_payer: "requester", # accepts requester # bypass_governance_retention: false, # expected_bucket_owner: "AccountId", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # }) # @param options ({}) # @option options [String] :mfa # The concatenation of the authentication device's serial number, a # space, and the value that is displayed on your authentication device. # Required to permanently delete a versioned object if versioning is # configured with MFA delete enabled. # # When performing the `DeleteObjects` operation on an MFA delete enabled # bucket, which attempts to delete the specified versioned objects, you # must include an MFA token. If you don't provide an MFA token, the # entire request will fail, even if there are non-versioned objects that # you are trying to delete. If you provide an invalid token, whether # there are versioned object keys in the request or not, the entire # Multi-Object Delete request will fail. 
For information about MFA # Delete, see [ MFA Delete][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [Boolean] :bypass_governance_retention # Specifies whether you want to delete this object even if it has a # Governance-type Object Lock in place. To use this header, you must # have the `s3:BypassGovernanceRetention` permission. # # This functionality is not supported for directory buckets. # # # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum-algorithm ` or # `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request # with the HTTP status code `400 Bad Request`. # # For the `x-amz-checksum-algorithm ` header, replace ` algorithm ` with # the supported algorithm from the following list: # # * CRC32 # # * CRC32C # # * SHA1 # # * SHA256 # # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # If the individual checksum value you provide through # `x-amz-checksum-algorithm ` doesn't match the checksum algorithm you # set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter and uses the checksum algorithm # that matches the provided value in `x-amz-checksum-algorithm `. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [void] def batch_delete!(options = {}) batch_enum.each do |batch| params = Aws::Util.copy_hash(options) params[:bucket] = batch[0].bucket_name params[:delete] ||= {} params[:delete][:objects] ||= [] batch.each do |item| params[:delete][:objects] << { key: item.object_key, version_id: item.id } end Aws::Plugins::UserAgent.feature('resource') do batch[0].client.delete_objects(params) end end nil end # @!endgroup end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/resource.rb0000644000004100000410000002000714563445240020430 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. 
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 # This class provides a resource oriented interface for S3. # To create a resource object: # # resource = Aws::S3::Resource.new(region: 'us-west-2') # # You can supply a client object with custom configuration that will be used for all resource operations. # If you do not pass `:client`, a default client will be constructed. # # client = Aws::S3::Client.new(region: 'us-west-2') # resource = Aws::S3::Resource.new(client: client) # class Resource # @param options ({}) # @option options [Client] :client def initialize(options = {}) @client = options[:client] || Client.new(options) end # @return [Client] def client @client end # @!group Actions # @example Request syntax with placeholder values # # bucket = s3.create_bucket({ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read # bucket: "BucketName", # required # create_bucket_configuration: { # location_constraint: "af-south-1", # accepts af-south-1, ap-east-1, ap-northeast-1, ap-northeast-2, ap-northeast-3, ap-south-1, ap-south-2, ap-southeast-1, ap-southeast-2, ap-southeast-3, ca-central-1, cn-north-1, cn-northwest-1, EU, eu-central-1, eu-north-1, eu-south-1, eu-south-2, eu-west-1, eu-west-2, eu-west-3, me-south-1, sa-east-1, us-east-2, us-gov-east-1, us-gov-west-1, us-west-1, us-west-2 # location: { # type: "AvailabilityZone", # accepts AvailabilityZone # name: "LocationNameAsString", # }, # bucket: { # data_redundancy: "SingleAvailabilityZone", # accepts SingleAvailabilityZone # type: "Directory", # accepts Directory # }, # }, # grant_full_control: "GrantFullControl", # grant_read: "GrantRead", # grant_read_acp: "GrantReadACP", # grant_write: "GrantWrite", # grant_write_acp: "GrantWriteACP", # object_lock_enabled_for_bucket: false, # object_ownership: "BucketOwnerPreferred", # accepts BucketOwnerPreferred, ObjectWriter, BucketOwnerEnforced # }) # @param [Hash] options ({}) # @option options [String] :acl # The canned ACL to apply to the bucket. # # This functionality is not supported for directory buckets. # # # @option options [required, String] :bucket # The name of the bucket to create. # # **General purpose buckets** - For information about bucket naming # restrictions, see [Bucket naming rules][1] in the *Amazon S3 User # Guide*. # # Directory buckets - When you use this operation with a # directory bucket, you must use path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. Directory bucket # names must be unique in the chosen Availability Zone. Bucket names # must also follow the format ` bucket_base_name--az_id--x-s3` (for # example, ` DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about # bucket naming restrictions, see [Directory bucket naming rules][2] in # the *Amazon S3 User Guide* # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # @option options [Types::CreateBucketConfiguration] :create_bucket_configuration # The configuration information for the bucket. # @option options [String] :grant_full_control # Allows grantee the read, write, read ACP, and write ACP permissions on # the bucket. # # This functionality is not supported for directory buckets. 
# # # @option options [String] :grant_read # Allows grantee to list the objects in the bucket. # # This functionality is not supported for directory buckets. # # # @option options [String] :grant_read_acp # Allows grantee to read the bucket ACL. # # This functionality is not supported for directory buckets. # # # @option options [String] :grant_write # Allows grantee to create new objects in the bucket. # # For the bucket and object owners of existing objects, also allows # deletions and overwrites of those objects. # # This functionality is not supported for directory buckets. # # # @option options [String] :grant_write_acp # Allows grantee to write the ACL for the applicable bucket. # # This functionality is not supported for directory buckets. # # # @option options [Boolean] :object_lock_enabled_for_bucket # Specifies whether you want S3 Object Lock to be enabled for the new # bucket. # # This functionality is not supported for directory buckets. # # # @option options [String] :object_ownership # The container element for object ownership for a bucket's ownership # controls. # # `BucketOwnerPreferred` - Objects uploaded to the bucket change # ownership to the bucket owner if the objects are uploaded with the # `bucket-owner-full-control` canned ACL. # # `ObjectWriter` - The uploading account will own the object if the # object is uploaded with the `bucket-owner-full-control` canned ACL. # # `BucketOwnerEnforced` - Access control lists (ACLs) are disabled and # no longer affect permissions. The bucket owner automatically owns and # has full control over every object in the bucket. The bucket only # accepts PUT requests that don't specify an ACL or specify bucket # owner full control ACLs (such as the predefined # `bucket-owner-full-control` canned ACL or a custom ACL in XML format # that grants the same permissions). # # By default, `ObjectOwnership` is set to `BucketOwnerEnforced` and ACLs # are disabled. We recommend keeping ACLs disabled, except in uncommon # use cases where you must control access for each object individually. # For more information about S3 Object Ownership, see [Controlling # ownership of objects and disabling ACLs for your bucket][1] in the # *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. Directory # buckets use the bucket owner enforced setting for S3 Object Ownership. 
# # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # @return [Bucket] def create_bucket(options = {}) Aws::Plugins::UserAgent.feature('resource') do @client.create_bucket(options) end Bucket.new( name: options[:bucket], client: @client ) end # @!group Associations # @param [String] name # @return [Bucket] def bucket(name) Bucket.new( name: name, client: @client ) end # @example Request syntax with placeholder values # # s3.buckets() # @param [Hash] options ({}) # @return [Bucket::Collection] def buckets(options = {}) batches = Enumerator.new do |y| batch = [] resp = Aws::Plugins::UserAgent.feature('resource') do @client.list_buckets(options) end resp.data.buckets.each do |b| batch << Bucket.new( name: b.name, data: b, client: @client ) end y.yield(batch) end Bucket::Collection.new(batches) end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/express_credentials.rb0000644000004100000410000000232014563445240022645 0ustar www-datawww-data# frozen_string_literal: true require 'set' module Aws module S3 # @api private class ExpressCredentials include CredentialProvider include RefreshingCredentials SYNC_EXPIRATION_LENGTH = 60 # 1 minute ASYNC_EXPIRATION_LENGTH = 120 # 2 minutes def initialize(options = {}) @client = options[:client] @create_session_params = {} options.each_pair do |key, value| if self.class.create_session_options.include?(key) @create_session_params[key] = value end end @async_refresh = true super end # @return [S3::Client] attr_reader :client private def refresh c = @client.create_session(@create_session_params).credentials @credentials = Credentials.new( c.access_key_id, c.secret_access_key, c.session_token ) @expiration = c.expiration end class << self # @api private def create_session_options @cso ||= begin input = S3::Client.api.operation(:create_session).input Set.new(input.shape.member_names) end end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/bucket_lifecycle_configuration.rb0000644000004100000410000002515614563445240025036 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 class BucketLifecycleConfiguration extend Aws::Deprecations # @overload def initialize(bucket_name, options = {}) # @param [String] bucket_name # @option options [Client] :client # @overload def initialize(options = {}) # @option options [required, String] :bucket_name # @option options [Client] :client def initialize(*args) options = Hash === args.last ? args.pop.dup : {} @bucket_name = extract_bucket_name(args, options) @data = options.delete(:data) @client = options.delete(:client) || Client.new(options) @waiter_block_warned = false end # @!group Read-Only Attributes # @return [String] def bucket_name @bucket_name end # Container for a lifecycle rule. # @return [Array] def rules data[:rules] end # @!endgroup # @return [Client] def client @client end # Loads, or reloads {#data} for the current {BucketLifecycleConfiguration}. # Returns `self` making it possible to chain methods. 
# # bucket_lifecycle_configuration.reload.data # # @return [self] def load resp = Aws::Plugins::UserAgent.feature('resource') do @client.get_bucket_lifecycle_configuration(bucket: @bucket_name) end @data = resp.data self end alias :reload :load # @return [Types::GetBucketLifecycleConfigurationOutput] # Returns the data for this {BucketLifecycleConfiguration}. Calls # {Client#get_bucket_lifecycle_configuration} if {#data_loaded?} is `false`. def data load unless @data @data end # @return [Boolean] # Returns `true` if this resource is loaded. Accessing attributes or # {#data} on an unloaded resource will trigger a call to {#load}. def data_loaded? !!@data end # @deprecated Use [Aws::S3::Client] #wait_until instead # # Waiter polls an API operation until a resource enters a desired # state. # # @note The waiting operation is performed on a copy. The original resource # remains unchanged. # # ## Basic Usage # # The waiter polls until it is successful, until it fails by # entering a terminal state, or until a maximum number of attempts # are made. # # # polls in a loop until condition is true # resource.wait_until(options) {|resource| condition} # # ## Example # # instance.wait_until(max_attempts:10, delay:5) do |instance| # instance.state.name == 'running' # end # # ## Configuration # # You can configure the maximum number of polling attempts, and the # delay (in seconds) between each polling attempt. The waiting condition is # set by passing a block to {#wait_until}: # # # poll for ~25 seconds # resource.wait_until(max_attempts:5,delay:5) {|resource|...} # # ## Callbacks # # You can be notified before each polling attempt and before each # delay. If you throw `:success` or `:failure` from these callbacks, # it will terminate the waiter. # # started_at = Time.now # # poll for 1 hour, instead of a number of attempts # proc = Proc.new do |attempts, response| # throw :failure if Time.now - started_at > 3600 # end # # # disable max attempts # instance.wait_until(before_wait:proc, max_attempts:nil) {...} # # ## Handling Errors # # When a waiter is successful, it returns the Resource. When a waiter # fails, it raises an error. # # begin # resource.wait_until(...) # rescue Aws::Waiters::Errors::WaiterFailed # # resource did not enter the desired state in time # end # # @yieldparam [Resource] resource to be used in the waiting condition. # # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter # terminates because the waiter has entered a state that it will not # transition out of, preventing success. # # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the # configured maximum number of attempts have been made and the waiter # is not yet successful. # # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is # encountered while polling for a resource that is not expected.
# # @raise [NotImplementedError] Raised when the resource does not # support `#reload`, which polling requires. # # @option options [Integer] :max_attempts (10) Maximum number of # attempts # @option options [Integer] :delay (10) Delay between each # attempt in seconds # @option options [Proc] :before_attempt (nil) Callback # invoked before each attempt # @option options [Proc] :before_wait (nil) Callback # invoked before each wait # @return [Resource] if the waiter was successful def wait_until(options = {}, &block) self_copy = self.dup attempts = 0 options[:max_attempts] = 10 unless options.key?(:max_attempts) options[:delay] ||= 10 options[:poller] = Proc.new do attempts += 1 if block.call(self_copy) [:success, self_copy] else self_copy.reload unless attempts == options[:max_attempts] :retry end end Aws::Plugins::UserAgent.feature('resource') do Aws::Waiters::Waiter.new(options).wait({}) end end # @!group Actions # @example Request syntax with placeholder values # # bucket_lifecycle_configuration.delete({ # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [EmptyStructure] def delete(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.delete_bucket_lifecycle(options) end resp.data end # @example Request syntax with placeholder values # # bucket_lifecycle_configuration.put({ # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # lifecycle_configuration: { # rules: [ # required # { # expiration: { # date: Time.now, # days: 1, # expired_object_delete_marker: false, # }, # id: "ID", # prefix: "Prefix", # filter: { # prefix: "Prefix", # tag: { # key: "ObjectKey", # required # value: "Value", # required # }, # object_size_greater_than: 1, # object_size_less_than: 1, # and: { # prefix: "Prefix", # tags: [ # { # key: "ObjectKey", # required # value: "Value", # required # }, # ], # object_size_greater_than: 1, # object_size_less_than: 1, # }, # }, # status: "Enabled", # required, accepts Enabled, Disabled # transitions: [ # { # date: Time.now, # days: 1, # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR # }, # ], # noncurrent_version_transitions: [ # { # noncurrent_days: 1, # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR # newer_noncurrent_versions: 1, # }, # ], # noncurrent_version_expiration: { # noncurrent_days: 1, # newer_noncurrent_versions: 1, # }, # abort_incomplete_multipart_upload: { # days_after_initiation: 1, # }, # }, # ], # }, # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter.
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [Types::BucketLifecycleConfiguration] :lifecycle_configuration # Container for lifecycle rules. You can add as many as 1,000 rules. # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [EmptyStructure] def put(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_bucket_lifecycle_configuration(options) end resp.data end # @!group Associations # @return [Bucket] def bucket Bucket.new( name: @bucket_name, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end class Collection < Aws::Resources::Collection; end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/legacy_signer.rb0000644000004100000410000001252314563445240021420 0ustar www-datawww-data# frozen_string_literal: true require 'set' require 'time' require 'openssl' require 'cgi' require 'aws-sdk-core/query' module Aws module S3 # @api private class LegacySigner SIGNED_QUERYSTRING_PARAMS = Set.new(%w( acl delete cors lifecycle location logging notification partNumber policy requestPayment restore tagging torrent uploadId uploads versionId versioning versions website replication requestPayment accelerate response-content-type response-content-language response-expires response-cache-control response-content-disposition response-content-encoding )) def self.sign(context) new( context.config.credentials, context.params, context.config.force_path_style ).sign(context.http_request) end # @param [CredentialProvider] credentials def initialize(credentials, params, force_path_style) @credentials = credentials.credentials @params = Query::ParamList.new params.each_pair do |param_name, param_value| @params.set(param_name, param_value) end @force_path_style = force_path_style end attr_reader :credentials, :params def sign(request) if token = credentials.session_token request.headers["X-Amz-Security-Token"] = token end request.headers['Authorization'] = authorization(request) end def authorization(request) "AWS #{credentials.access_key_id}:#{signature(request)}" end def signature(request) string_to_sign = string_to_sign(request) signature = digest(credentials.secret_access_key, string_to_sign) uri_escape(signature) end def digest(secret, string_to_sign) Base64.encode64(hmac(secret, string_to_sign)).strip end def hmac(key, value) OpenSSL::HMAC.digest(OpenSSL::Digest.new('sha1'), key, value) end # From the S3 developer guide: # # StringToSign = # HTTP-Verb + "\n" + # content-md5 + "\n" + # content-type + "\n" + # date + "\n" + # CanonicalizedAmzHeaders + CanonicalizedResource; # def string_to_sign(request) [ request.http_method, request.headers.values_at('Content-Md5', 'Content-Type').join("\n"), signing_string_date(request), canonicalized_headers(request), canonicalized_resource(request.endpoint), ].flatten.compact.join("\n") end def signing_string_date(request) # if a date is provided via
x-amz-date then we should omit the # Date header from the signing string (should appear as a blank line) if request.headers.detect{|k,v| k.to_s =~ /^x-amz-date$/i } '' else request.headers['Date'] = Time.now.httpdate end end # CanonicalizedAmzHeaders # # See the developer guide for more information on how this element # is generated. # def canonicalized_headers(request) x_amz = request.headers.select{|k, v| k =~ /^x-amz-/i } x_amz = x_amz.collect{|k, v| [k.downcase, v] } x_amz = x_amz.sort_by{|k, v| k } x_amz = x_amz.collect{|k, v| "#{k}:#{v.to_s.strip}" }.join("\n") x_amz == '' ? nil : x_amz end # From the S3 developer guide # # CanonicalizedResource = # [ "/" + Bucket ] + # <HTTP-Request-URI, from the protocol name up to the query string> + # [ sub-resource, if present. e.g. "?acl", "?location", # "?logging", or "?torrent"]; # # @api private def canonicalized_resource(endpoint) parts = [] # virtual hosted-style requests require the hostname to appear # in the canonicalized resource prefixed by a forward slash. if bucket = params[:bucket] bucket = bucket.value ssl = endpoint.scheme == 'https' if Plugins::BucketDns.dns_compatible?(bucket, ssl) && !@force_path_style parts << "/#{bucket}" end end # append the path name (no querystring) parts << endpoint.path # lastly any sub-resource querystring params need to be appended # in lexicographic order, joined by '&' and prefixed by '?' params = signed_querystring_params(endpoint) unless params.empty? parts << '?' parts << params.sort.collect{|p| p.to_s }.join('&') end parts.join end def signed_querystring_params(endpoint) endpoint.query.to_s.split('&').select do |p| SIGNED_QUERYSTRING_PARAMS.include?(p.split('=')[0]) end.map { |p| CGI.unescape(p) } end def uri_escape(s) #URI.escape(s) # (0..255).each {|c| # s = [c].pack("C") # e = [ # CGI.escape(s), # ERB::Util.url_encode(s), # URI.encode_www_form_component(s), # WEBrick::HTTPUtils.escape_form(s), # WEBrick::HTTPUtils.escape(s), # URI.escape(s), # URI::DEFAULT_PARSER.escape(s) # ] # next if e.uniq.length == 1 # puts("%5s %5s %5s %5s %5s %5s %5s %5s" % ([s.inspect] + e)) # } URI::DEFAULT_PARSER.escape(s) end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/file_downloader.rb0000644000004100000410000002020714563445240021740 0ustar www-datawww-data# frozen_string_literal: true require 'pathname' require 'thread' require 'set' require 'tmpdir' module Aws module S3 # @api private class FileDownloader MIN_CHUNK_SIZE = 5 * 1024 * 1024 MAX_PARTS = 10_000 THREAD_COUNT = 10 def initialize(options = {}) @client = options[:client] || Client.new end # @return [Client] attr_reader :client def download(destination, options = {}) @path = destination @mode = options[:mode] || 'auto' @thread_count = options[:thread_count] || THREAD_COUNT @chunk_size = options[:chunk_size] @params = { bucket: options[:bucket], key: options[:key], } @params[:version_id] = options[:version_id] if options[:version_id] # checksum_mode only supports the value "ENABLED" # falsey values (false/nil) or "DISABLED" should be considered # disabled and the api parameter should be unset. if (checksum_mode = options.fetch(:checksum_mode, 'ENABLED')) @params[:checksum_mode] = checksum_mode unless checksum_mode.upcase == 'DISABLED' end @on_checksum_validated = options[:on_checksum_validated] @progress_callback = options[:progress_callback] validate!
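# Editor's note (added commentary, not part of the generated source):
# the dispatch below picks a download strategy. 'auto' lets
# #multipart_download probe the object with HEAD requests, falling back
# to a single GET when the object fits within MIN_CHUNK_SIZE (5 MiB);
# 'single_request' always issues one GET; 'get_range' requires
# :chunk_size and splits the object into ranged GETs fetched by up to
# @thread_count threads. Callers typically reach this class through
# Object#download_file rather than instantiating it directly.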
Aws::Plugins::UserAgent.feature('s3-transfer') do case @mode when 'auto' then multipart_download when 'single_request' then single_request when 'get_range' if @chunk_size resp = @client.head_object(@params) multithreaded_get_by_ranges(resp.content_length) else msg = 'In :get_range mode, :chunk_size must be provided' raise ArgumentError, msg end else msg = "Invalid mode #{@mode} provided, "\ 'mode should be :single_request, :get_range or :auto' raise ArgumentError, msg end end end private def validate! if @on_checksum_validated && @params[:checksum_mode] != 'ENABLED' raise ArgumentError, "You must set checksum_mode: 'ENABLED' " + "when providing a on_checksum_validated callback" end if @on_checksum_validated && !@on_checksum_validated.respond_to?(:call) raise ArgumentError, 'on_checksum_validated must be callable' end end def multipart_download resp = @client.head_object(@params.merge(part_number: 1)) count = resp.parts_count if count.nil? || count <= 1 if resp.content_length <= MIN_CHUNK_SIZE single_request else multithreaded_get_by_ranges(resp.content_length) end else # partNumber is an option resp = @client.head_object(@params) if resp.content_length <= MIN_CHUNK_SIZE single_request else compute_mode(resp.content_length, count) end end end def compute_mode(file_size, count) chunk_size = compute_chunk(file_size) part_size = (file_size.to_f / count.to_f).ceil if chunk_size < part_size multithreaded_get_by_ranges(file_size) else multithreaded_get_by_parts(count, file_size) end end def construct_chunks(file_size) offset = 0 default_chunk_size = compute_chunk(file_size) chunks = [] while offset < file_size progress = offset + default_chunk_size progress = file_size if progress > file_size chunks << "bytes=#{offset}-#{progress - 1}" offset = progress end chunks end def compute_chunk(file_size) if @chunk_size && @chunk_size > file_size raise ArgumentError, ":chunk_size shouldn't exceed total file size." else @chunk_size || [ (file_size.to_f / MAX_PARTS).ceil, MIN_CHUNK_SIZE ].max.to_i end end def batches(chunks, mode) chunks = (1..chunks) if mode.eql? 'part_number' chunks.each_slice(@thread_count).to_a end def multithreaded_get_by_ranges(file_size) offset = 0 default_chunk_size = compute_chunk(file_size) chunks = [] part_number = 1 # parts start at 1 while offset < file_size progress = offset + default_chunk_size progress = file_size if progress > file_size range = "bytes=#{offset}-#{progress - 1}" chunks << Part.new( part_number: part_number, size: (progress-offset), params: @params.merge(range: range) ) part_number += 1 offset = progress end download_in_threads(PartList.new(chunks), file_size) end def multithreaded_get_by_parts(n_parts, total_size) parts = (1..n_parts).map do |part| Part.new(part_number: part, params: @params.merge(part_number: part)) end download_in_threads(PartList.new(parts), total_size) end def download_in_threads(pending, total_size) threads = [] if @progress_callback progress = MultipartProgress.new(pending, total_size, @progress_callback) end @thread_count.times do thread = Thread.new do begin while part = pending.shift if progress part.params[:on_chunk_received] = proc do |_chunk, bytes, total| progress.call(part.part_number, bytes, total) end end resp = @client.get_object(part.params) write(resp) if @on_checksum_validated && resp.checksum_validated @on_checksum_validated.call(resp.checksum_validated, resp) end end nil rescue => error # keep other threads from downloading other parts pending.clear! 
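# Editor's note (added commentary, not part of the generated source):
# PartList#clear! empties the shared queue under its mutex, so
# `pending.shift` returns nil in the sibling threads and their download
# loops exit; the error re-raised below then surfaces to the caller via
# `threads.map(&:value)`.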
raise error end end threads << thread end threads.map(&:value).compact end def write(resp) range, _ = resp.content_range.split(' ').last.split('/') head, _ = range.split('-').map {|s| s.to_i} File.write(@path, resp.body.read, head) end def single_request params = @params.merge(response_target: @path) params[:on_chunk_received] = single_part_progress if @progress_callback resp = @client.get_object(params) return resp unless @on_checksum_validated if resp.checksum_validated @on_checksum_validated.call(resp.checksum_validated, resp) end resp end def single_part_progress proc do |_chunk, bytes_read, total_size| @progress_callback.call([bytes_read], [total_size], total_size) end end class Part < Struct.new(:part_number, :size, :params) include Aws::Structure end # @api private class PartList include Enumerable def initialize(parts = []) @parts = parts @mutex = Mutex.new end def shift @mutex.synchronize { @parts.shift } end def size @mutex.synchronize { @parts.size } end def clear! @mutex.synchronize { @parts.clear } end def each(&block) @mutex.synchronize { @parts.each(&block) } end end # @api private class MultipartProgress def initialize(parts, total_size, progress_callback) @bytes_received = Array.new(parts.size, 0) @part_sizes = parts.map(&:size) @total_size = total_size @progress_callback = progress_callback end def call(part_number, bytes_received, total) # part numbers start at 1 @bytes_received[part_number - 1] = bytes_received # part size may not be known until we get the first response @part_sizes[part_number - 1] ||= total @progress_callback.call(@bytes_received, @part_sizes, @total_size) end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/multipart_upload_error.rb0000644000004100000410000000055314563445240023403 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 class MultipartUploadError < StandardError def initialize(message, errors) @errors = errors super(message) end # @return [Array] The list of errors encountered # when uploading or aborting the upload. attr_reader :errors end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/bucket_tagging.rb0000644000004100000410000002225514563445240021565 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 class BucketTagging extend Aws::Deprecations # @overload def initialize(bucket_name, options = {}) # @param [String] bucket_name # @option options [Client] :client # @overload def initialize(options = {}) # @option options [required, String] :bucket_name # @option options [Client] :client def initialize(*args) options = Hash === args.last ? args.pop.dup : {} @bucket_name = extract_bucket_name(args, options) @data = options.delete(:data) @client = options.delete(:client) || Client.new(options) @waiter_block_warned = false end # @!group Read-Only Attributes # @return [String] def bucket_name @bucket_name end # Contains the tag set. # @return [Array] def tag_set data[:tag_set] end # @!endgroup # @return [Client] def client @client end # Loads, or reloads {#data} for the current {BucketTagging}. # Returns `self` making it possible to chain methods. 
# # bucket_tagging.reload.data # # @return [self] def load resp = Aws::Plugins::UserAgent.feature('resource') do @client.get_bucket_tagging(bucket: @bucket_name) end @data = resp.data self end alias :reload :load # @return [Types::GetBucketTaggingOutput] # Returns the data for this {BucketTagging}. Calls # {Client#get_bucket_tagging} if {#data_loaded?} is `false`. def data load unless @data @data end # @return [Boolean] # Returns `true` if this resource is loaded. Accessing attributes or # {#data} on an unloaded resource will trigger a call to {#load}. def data_loaded? !!@data end # @deprecated Use [Aws::S3::Client] #wait_until instead # # Waiter polls an API operation until a resource enters a desired # state. # # @note The waiting operation is performed on a copy. The original resource # remains unchanged. # # ## Basic Usage # # The waiter polls until it is successful, until it fails by # entering a terminal state, or until a maximum number of attempts # are made. # # # polls in a loop until condition is true # resource.wait_until(options) {|resource| condition} # # ## Example # # instance.wait_until(max_attempts:10, delay:5) do |instance| # instance.state.name == 'running' # end # # ## Configuration # # You can configure the maximum number of polling attempts, and the # delay (in seconds) between each polling attempt. The waiting condition is # set by passing a block to {#wait_until}: # # # poll for ~25 seconds # resource.wait_until(max_attempts:5,delay:5) {|resource|...} # # ## Callbacks # # You can be notified before each polling attempt and before each # delay. If you throw `:success` or `:failure` from these callbacks, # it will terminate the waiter. # # started_at = Time.now # # poll for 1 hour, instead of a number of attempts # proc = Proc.new do |attempts, response| # throw :failure if Time.now - started_at > 3600 # end # # # disable max attempts # instance.wait_until(before_wait:proc, max_attempts:nil) {...} # # ## Handling Errors # # When a waiter is successful, it returns the Resource. When a waiter # fails, it raises an error. # # begin # resource.wait_until(...) # rescue Aws::Waiters::Errors::WaiterFailed # # resource did not enter the desired state in time # end # # @yieldparam [Resource] resource to be used in the waiting condition. # # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter # terminates because the waiter has entered a state that it will not # transition out of, preventing success. # # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the # configured maximum number of attempts have been made and the waiter # is not yet successful. # # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is # encountered while polling for a resource that is not expected.
# # @raise [NotImplementedError] Raised when the resource does not # support `#reload`, which polling requires. # # @option options [Integer] :max_attempts (10) Maximum number of # attempts # @option options [Integer] :delay (10) Delay between each # attempt in seconds # @option options [Proc] :before_attempt (nil) Callback # invoked before each attempt # @option options [Proc] :before_wait (nil) Callback # invoked before each wait # @return [Resource] if the waiter was successful def wait_until(options = {}, &block) self_copy = self.dup attempts = 0 options[:max_attempts] = 10 unless options.key?(:max_attempts) options[:delay] ||= 10 options[:poller] = Proc.new do attempts += 1 if block.call(self_copy) [:success, self_copy] else self_copy.reload unless attempts == options[:max_attempts] :retry end end Aws::Plugins::UserAgent.feature('resource') do Aws::Waiters::Waiter.new(options).wait({}) end end # @!group Actions # @example Request syntax with placeholder values # # bucket_tagging.delete({ # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [EmptyStructure] def delete(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.delete_bucket_tagging(options) end resp.data end # @example Request syntax with placeholder values # # bucket_tagging.put({ # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # tagging: { # required # tag_set: [ # required # { # key: "ObjectKey", # required # value: "Value", # required # }, # ], # }, # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the data. You must use this # header as a message integrity check to verify that the request body # was not corrupted in transit. For more information, see [RFC 1864][1]. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [required, Types::Tagging] :tagging # Container for the `TagSet` and `Tag` elements. # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied).
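# @example A hedged usage sketch (added for illustration; the bucket # name is a placeholder): # # bucket_tagging = Aws::S3::BucketTagging.new('my-bucket') # bucket_tagging.put( # tagging: { tag_set: [{ key: 'env', value: 'dev' }] } # )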
# @return [EmptyStructure] def put(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_bucket_tagging(options) end resp.data end # @!group Associations # @return [Bucket] def bucket Bucket.new( name: @bucket_name, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end class Collection < Aws::Resources::Collection; end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/multipart_upload_part.rb0000644000004100000410000007135514563445240023230 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 class MultipartUploadPart extend Aws::Deprecations # @overload def initialize(bucket_name, object_key, multipart_upload_id, part_number, options = {}) # @param [String] bucket_name # @param [String] object_key # @param [String] multipart_upload_id # @param [Integer] part_number # @option options [Client] :client # @overload def initialize(options = {}) # @option options [required, String] :bucket_name # @option options [required, String] :object_key # @option options [required, String] :multipart_upload_id # @option options [required, Integer] :part_number # @option options [Client] :client def initialize(*args) options = Hash === args.last ? args.pop.dup : {} @bucket_name = extract_bucket_name(args, options) @object_key = extract_object_key(args, options) @multipart_upload_id = extract_multipart_upload_id(args, options) @part_number = extract_part_number(args, options) @data = options.delete(:data) @client = options.delete(:client) || Client.new(options) @waiter_block_warned = false end # @!group Read-Only Attributes # @return [String] def bucket_name @bucket_name end # @return [String] def object_key @object_key end # @return [String] def multipart_upload_id @multipart_upload_id end # @return [Integer] def part_number @part_number end # Date and time at which the part was uploaded. # @return [Time] def last_modified data[:last_modified] end # Entity tag returned when the part was uploaded. # @return [String] def etag data[:etag] end # Size in bytes of the uploaded part data. # @return [Integer] def size data[:size] end # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32 checksum of the object. For # more information, see [Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] def checksum_crc32 data[:checksum_crc32] end # The base64-encoded, 32-bit CRC32C checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart uploads, # this value may not be a direct checksum value of the full object. # Instead, it's a calculation based on the checksum values of each # individual part. 
For more information about how checksums are # calculated with multipart uploads, see [ Checking object integrity][1] # in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] def checksum_crc32c data[:checksum_crc32c] end # The base64-encoded, 160-bit SHA-1 digest of the object. This will only # be present if it was uploaded with the object. When you use the API # operation on an object that was uploaded using multipart uploads, this # value may not be a direct checksum value of the full object. Instead, # it's a calculation based on the checksum values of each individual # part. For more information about how checksums are calculated with # multipart uploads, see [ Checking object integrity][1] in the *Amazon # S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] def checksum_sha1 data[:checksum_sha1] end # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 256-bit SHA-256 digest of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] def checksum_sha256 data[:checksum_sha256] end # @!endgroup # @return [Client] def client @client end # @raise [NotImplementedError] # @api private def load msg = "#load is not implemented, data only available via enumeration" raise NotImplementedError, msg end alias :reload :load # @raise [NotImplementedError] Raises when {#data_loaded?} is `false`. # @return [Types::Part] # Returns the data for this {MultipartUploadPart}. def data load unless @data @data end # @return [Boolean] # Returns `true` if this resource is loaded. Accessing attributes or # {#data} on an unloaded resource will trigger a call to {#load}. def data_loaded? !!@data end # @deprecated Use [Aws::S3::Client] #wait_until instead # # Waiter polls an API operation until a resource enters a desired # state. # # @note The waiting operation is performed on a copy. The original resource # remains unchanged. # # ## Basic Usage # # The waiter polls until it is successful, until it fails by # entering a terminal state, or until a maximum number of attempts # are made. # # # polls in a loop until condition is true # resource.wait_until(options) {|resource| condition} # # ## Example # # instance.wait_until(max_attempts:10, delay:5) do |instance| # instance.state.name == 'running' # end # # ## Configuration # # You can configure the maximum number of polling attempts, and the # delay (in seconds) between each polling attempt. The waiting condition is # set by passing a block to {#wait_until}: # # # poll for ~25 seconds # resource.wait_until(max_attempts:5,delay:5) {|resource|...} # # ## Callbacks # # You can be notified before each polling attempt and before each # delay. If you throw `:success` or `:failure` from these callbacks, # it will terminate the waiter. # # started_at = Time.now # # poll for 1 hour, instead of a number of attempts # proc = Proc.new do |attempts, response| # throw :failure if Time.now - started_at > 3600 # end # # # disable max attempts # instance.wait_until(before_wait:proc, max_attempts:nil) {...} # # ## Handling Errors # # When a waiter is successful, it returns the Resource.
When a waiter # fails, it raises an error. # # begin # resource.wait_until(...) # rescue Aws::Waiters::Errors::WaiterFailed # # resource did not enter the desired state in time # end # # @yieldparam [Resource] resource to be used in the waiting condition. # # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter # terminates because the waiter has entered a state that it will not # transition out of, preventing success. # # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the # configured maximum number of attempts have been made and the waiter # is not yet successful. # # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is # encountered while polling for a resource that is not expected. # # @raise [NotImplementedError] Raised when the resource does not # support `#reload`, which polling requires. # # @option options [Integer] :max_attempts (10) Maximum number of # attempts # @option options [Integer] :delay (10) Delay between each # attempt in seconds # @option options [Proc] :before_attempt (nil) Callback # invoked before each attempt # @option options [Proc] :before_wait (nil) Callback # invoked before each wait # @return [Resource] if the waiter was successful def wait_until(options = {}, &block) self_copy = self.dup attempts = 0 options[:max_attempts] = 10 unless options.key?(:max_attempts) options[:delay] ||= 10 options[:poller] = Proc.new do attempts += 1 if block.call(self_copy) [:success, self_copy] else self_copy.reload unless attempts == options[:max_attempts] :retry end end Aws::Plugins::UserAgent.feature('resource') do Aws::Waiters::Waiter.new(options).wait({}) end end # @!group Actions # @example Request syntax with placeholder values # # multipart_upload_part.copy_from({ # copy_source: "CopySource", # required # copy_source_if_match: "CopySourceIfMatch", # copy_source_if_modified_since: Time.now, # copy_source_if_none_match: "CopySourceIfNoneMatch", # copy_source_if_unmodified_since: Time.now, # copy_source_range: "CopySourceRange", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # copy_source_sse_customer_algorithm: "CopySourceSSECustomerAlgorithm", # copy_source_sse_customer_key: "CopySourceSSECustomerKey", # copy_source_sse_customer_key_md5: "CopySourceSSECustomerKeyMD5", # request_payer: "requester", # accepts requester # expected_bucket_owner: "AccountId", # expected_source_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [required, String] :copy_source # Specifies the source object for the copy operation. You specify the # value in one of two formats, depending on whether you want to access # the source object through an [access point][1]: # # * For objects not accessed through an access point, specify the name # of the source bucket and key of the source object, separated by a # slash (/). For example, to copy the object `reports/january.pdf` # from the bucket `awsexamplebucket`, use # `awsexamplebucket/reports/january.pdf`. The value must be # URL-encoded. # # * For objects accessed through access points, specify the Amazon # Resource Name (ARN) of the object as accessed through the access # point, in the format # `arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>`. # For example, to copy the object `reports/january.pdf` through access # point `my-access-point` owned by account `123456789012` in Region # `us-west-2`, use the URL encoding of # `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`. # The value must be URL encoded.
# # * Amazon S3 supports copy operations using Access points only when # the source and destination buckets are in the same Amazon Web # Services Region. # # * Access points are not supported by directory buckets. # # # # Alternatively, for objects accessed through Amazon S3 on Outposts, # specify the ARN of the object as accessed in the format # `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>`. # For example, to copy the object `reports/january.pdf` through # outpost `my-outpost` owned by account `123456789012` in Region # `us-west-2`, use the URL encoding of # `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`. # The value must be URL-encoded. # # If your bucket has versioning enabled, you could have multiple # versions of the same object. By default, `x-amz-copy-source` # identifies the current version of the source object to copy. To copy a # specific version of the source object, append # `?versionId=<version-id>` to the `x-amz-copy-source` request header # (for example, `x-amz-copy-source: # /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`). # # If the current version is a delete marker and you don't specify a # versionId in the `x-amz-copy-source` request header, Amazon S3 returns # a `404 Not Found` error, because the object does not exist. If you # specify versionId in the `x-amz-copy-source` and the versionId is a # delete marker, Amazon S3 returns an HTTP `400 Bad Request` error, # because you are not allowed to specify a delete marker as a version # for the `x-amz-copy-source`. # # **Directory buckets** - S3 Versioning isn't enabled and supported for # directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html # @option options [String] :copy_source_if_match # Copies the object if its entity tag (ETag) matches the specified tag. # # If both of the `x-amz-copy-source-if-match` and # `x-amz-copy-source-if-unmodified-since` headers are present in the # request as follows: # # `x-amz-copy-source-if-match` condition evaluates to `true`, and; # # `x-amz-copy-source-if-unmodified-since` condition evaluates to # `false`; # # Amazon S3 returns `200 OK` and copies the data. # @option options [Time,DateTime,Date,Integer,String] :copy_source_if_modified_since # Copies the object if it has been modified since the specified time. # # If both of the `x-amz-copy-source-if-none-match` and # `x-amz-copy-source-if-modified-since` headers are present in the # request as follows: # # `x-amz-copy-source-if-none-match` condition evaluates to `false`, and; # # `x-amz-copy-source-if-modified-since` condition evaluates to `true`; # # Amazon S3 returns `412 Precondition Failed` response code. # @option options [String] :copy_source_if_none_match # Copies the object if its entity tag (ETag) is different than the # specified ETag. # # If both of the `x-amz-copy-source-if-none-match` and # `x-amz-copy-source-if-modified-since` headers are present in the # request as follows: # # `x-amz-copy-source-if-none-match` condition evaluates to `false`, and; # # `x-amz-copy-source-if-modified-since` condition evaluates to `true`; # # Amazon S3 returns `412 Precondition Failed` response code. # @option options [Time,DateTime,Date,Integer,String] :copy_source_if_unmodified_since # Copies the object if it hasn't been modified since the specified # time.
# # If both of the `x-amz-copy-source-if-match` and # `x-amz-copy-source-if-unmodified-since` headers are present in the # request as follows: # # `x-amz-copy-source-if-match` condition evaluates to `true`, and; # # `x-amz-copy-source-if-unmodified-since` condition evaluates to # `false`; # # Amazon S3 returns `200 OK` and copies the data. # @option options [String] :copy_source_range # The range of bytes to copy from the source object. The range value # must use the form bytes=first-last, where the first and last are the # zero-based byte offsets to copy. For example, bytes=0-9 indicates that # you want to copy the first 10 bytes of the source. You can copy a # range only if the source object is greater than 5 MB. # @option options [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, AES256). # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @option options [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded; Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. This must be # the same encryption key specified in the initiate multipart upload # request. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @option options [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @option options [String] :copy_source_sse_customer_algorithm # Specifies the algorithm to use when decrypting the source object (for # example, `AES256`). # # This functionality is not supported when the source object is in a # directory bucket. # # # @option options [String] :copy_source_sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use to # decrypt the source object. The encryption key provided in this header # must be one that was used when the source object was created. # # This functionality is not supported when the source object is in a # directory bucket. # # # @option options [String] :copy_source_sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported when the source object is in a # directory bucket. # # # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. 
# # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [String] :expected_bucket_owner # The account ID of the expected destination bucket owner. If the # account ID that you provide does not match the actual owner of the # destination bucket, the request fails with the HTTP status code `403 # Forbidden` (access denied). # @option options [String] :expected_source_bucket_owner # The account ID of the expected source bucket owner. If the account ID # that you provide does not match the actual owner of the source bucket, # the request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [Types::UploadPartCopyOutput] def copy_from(options = {}) options = options.merge( bucket: @bucket_name, key: @object_key, upload_id: @multipart_upload_id, part_number: @part_number ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.upload_part_copy(options) end resp.data end # @example Request syntax with placeholder values # # multipart_upload_part.upload({ # body: source_file, # content_length: 1, # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # checksum_crc32: "ChecksumCRC32", # checksum_crc32c: "ChecksumCRC32C", # checksum_sha1: "ChecksumSHA1", # checksum_sha256: "ChecksumSHA256", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # request_payer: "requester", # accepts requester # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String, StringIO, File] :body # Object data. # @option options [Integer] :content_length # Size of the body in bytes. This parameter is useful when the size of # the body cannot be determined automatically. # @option options [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the part data. This parameter # is auto-populated when using the command from the CLI. This parameter # is required if object lock parameters are specified. # # This functionality is not supported for directory buckets. # # # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # This checksum algorithm must be the same for all parts and it must match # the checksum value supplied in the `CreateMultipartUpload` request. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_crc32 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32 checksum of the object. For # more information, see [Checking object integrity][1] in the *Amazon S3 # User Guide*.
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_crc32c # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32C checksum of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_sha1 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 160-bit SHA-1 digest of the object. For # more information, see [Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_sha256 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 256-bit SHA-256 digest of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, AES256). # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded; Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm header`. This must be # the same encryption key specified in the initiate multipart upload # request. # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported for directory buckets. # # # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). 
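# @example A hedged usage sketch (added for illustration; bucket, key, # and upload id are placeholders): # # part = Aws::S3::MultipartUploadPart.new( # 'my-bucket', 'my-key', 'my-upload-id', 1 # ) # part.upload(body: StringIO.new('part data'))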
# @return [Types::UploadPartOutput] def upload(options = {}) options = options.merge( bucket: @bucket_name, key: @object_key, upload_id: @multipart_upload_id, part_number: @part_number ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.upload_part(options) end resp.data end # @!group Associations # @return [MultipartUpload] def multipart_upload MultipartUpload.new( bucket_name: @bucket_name, object_key: @object_key, id: @multipart_upload_id, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name, object_key: @object_key, multipart_upload_id: @multipart_upload_id, part_number: @part_number } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end def extract_object_key(args, options) value = args[1] || options.delete(:object_key) case value when String then value when nil then raise ArgumentError, "missing required option :object_key" else msg = "expected :object_key to be a String, got #{value.class}" raise ArgumentError, msg end end def extract_multipart_upload_id(args, options) value = args[2] || options.delete(:multipart_upload_id) case value when String then value when nil then raise ArgumentError, "missing required option :multipart_upload_id" else msg = "expected :multipart_upload_id to be a String, got #{value.class}" raise ArgumentError, msg end end def extract_part_number(args, options) value = args[3] || options.delete(:part_number) case value when Integer then value when nil then raise ArgumentError, "missing required option :part_number" else msg = "expected :part_number to be an Integer, got #{value.class}" raise ArgumentError, msg end end class Collection < Aws::Resources::Collection; end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/bucket_region_cache.rb0000644000004100000410000000445614563445240022556 0ustar www-datawww-data# frozen_string_literal: true require 'thread' module Aws module S3 class BucketRegionCache def initialize @regions = {} @listeners = [] @mutex = Mutex.new end # Registers a block as a callback. This listener is called when a # new bucket/region pair is added to the cache. # # S3::BUCKET_REGIONS.bucket_added do |bucket_name, region_name| # # ... # end # # This happens when a request is made against the classic endpoint, # "s3.amazonaws.com" and an error is returned requiring the request # to be resent with Signature Version 4. At this point, multiple # requests are made to discover the bucket region so that a v4 # signature can be generated. # # An application can register listeners here to avoid these extra # requests in the future. By constructing an {S3::Client} with # the proper region, a proper signature can be generated and redirects # avoided. # @return [void] def bucket_added(&block) if block @mutex.synchronize { @listeners << block } else raise ArgumentError, 'missing required block' end end # @param [String] bucket_name # @return [String,nil] Returns the cached region for the named bucket. # Returns `nil` if the bucket is not in the cache. # @api private def [](bucket_name) @mutex.synchronize { @regions[bucket_name] } end # Caches a bucket's region. Calling this method will trigger each # of the {#bucket_added} listener callbacks.
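# @example A hedged sketch of pre-seeding the cache (added for # illustration; the bucket name and region are placeholders): # # Aws::S3::BUCKET_REGIONS['my-bucket'] = 'eu-west-1' # Aws::S3::BUCKET_REGIONS['my-bucket'] #=> 'eu-west-1'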
# @param [String] bucket_name # @param [String] region_name # @return [void] # @api private def []=(bucket_name, region_name) @mutex.synchronize do @regions[bucket_name] = region_name @listeners.each { |block| block.call(bucket_name, region_name) } end end # @api private def clear @mutex.synchronize { @regions = {} } end # @return [Hash] Returns a hash of cached bucket names and region names. def to_hash @mutex.synchronize do @regions.dup end end alias to_h to_hash end # @api private BUCKET_REGIONS = BucketRegionCache.new end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/presigned_post.rb0000644000004100000410000006277414563445240021647 0ustar www-datawww-data# frozen_string_literal: true require 'openssl' require 'base64' module Aws module S3 # @note Normally you do not need to construct a {PresignedPost} yourself. # See {Bucket#presigned_post} and {Object#presigned_post}. # # ## Basic Usage # # To generate a presigned post, you need AWS credentials, the region # your bucket is in, and the name of your bucket. You can apply constraints # to the post object as options to {#initialize} or by calling # methods such as {#key} and {#content_length_range}. # # The following two examples are equivalent. # # ```ruby # post = Aws::S3::PresignedPost.new(creds, region, bucket, { # key: '/uploaded/object/key', # content_length_range: 0..1024, # acl: 'public-read', # metadata: { # 'original-filename' => '${filename}' # } # }) # post.fields # #=> { ... } # # post = Aws::S3::PresignedPost.new(creds, region, bucket). # key('/uploaded/object/key'). # content_length_range(0..1024). # acl('public-read'). # metadata('original-filename' => '${filename}'). # fields # #=> { ... } # ``` # # ## HTML Forms # # You can use a {PresignedPost} object to build an HTML form. It is # recommended to use some helper to build the form tag and input # tags that properly escapes values. # # ### Form Tag # # To upload a file to Amazon S3 using a browser, you need to create # a post form. The {#url} method returns the value you should use # as the form action. # # ```erb #
<form action="<%= @post.url %>" method="post" enctype="multipart/form-data"> # ... # </form>
# ``` # # The following attributes must be set on the form: # # * `action` - This must be the {#url}. # * `method` - This must be `post`. # * `enctype` - This must be `multipart/form-data`. # # ### Form Fields # # The {#fields} method returns a hash of form fields to render inside # the form. Typically these are rendered as hidden input fields. # # ```erb # <% @post.fields.each do |name, value| %> # <input type="hidden" name="<%= name %>" value="<%= value %>"/> # <% end %> # ``` # # Lastly, the form must have a file field with the name `file`. # # ```erb # <input type="file" name="file"/> # ``` # # ## Post Policy # # When you construct a {PresignedPost}, you must specify every form # field name that will be posted by the browser. If you omit a form # field sent by the browser, Amazon S3 will reject the request. # You can specify accepted form field values three ways: # # * Specify exactly what the value must be. # * Specify what value the field starts with. # * Specify the field may have any value. # # ### Field Equals # # You can specify that a form field must be a certain value. # Simply pass an option like `:content_type` to the constructor, # or call the associated method. # # ```ruby # post = Aws::S3::PresignedPost.new(creds, region, bucket) # post.content_type('text/plain') # ``` # # If any of the given values are changed by the user in the form, then # Amazon S3 will reject the POST request. # # ### Field Starts With # # You can specify prefix values for many of the POST form fields. # To specify a required prefix, use the `:<field_name>_starts_with` # option or call the associated `#<field_name>_starts_with` method. # # ```ruby # post = Aws::S3::PresignedPost.new(creds, region, bucket, { # key_starts_with: '/images/', # content_type_starts_with: 'image/', # # ... # }) # ``` # # When using starts with, the form must contain a field where the # user can specify the value. The {PresignedPost} will not add # a value for these fields. # # ### Any Field Value # # To white-list a form field to send any value, you can name that # field with `:allow_any` or {#allow_any}. # # ```ruby # post = Aws::S3::PresignedPost.new(creds, region, bucket, { # key: 'object-key', # allow_any: ['Filename'], # # ... # }) # ``` # # ### Metadata # # You can add rules for metadata fields using `:metadata`, {#metadata}, # `:metadata_starts_with` and {#metadata_starts_with}. Unlike other # form fields, you pass a hash value to these options/methods: # # ```ruby # post = Aws::S3::PresignedPost.new(creds, region, bucket). # key('/fixed/key'). # metadata(foo: 'bar') # # post.fields['x-amz-meta-foo'] # #=> 'bar' # ``` # # ### The `${filename}` Variable # # The string `${filename}` is automatically replaced with the name of the # file provided by the user and is recognized by all form fields. It is # not supported with `starts_with` conditions. # # If the browser or client provides a full or partial path to the file, # only the text following the last slash (/) or backslash (\) will be used # (e.g., "C:\Program Files\directory1\file.txt" will be interpreted # as "file.txt"). If no file or file name is provided, the variable is # replaced with an empty string. # # In the following example, we use `${filename}` to store the original # filename in the `x-amz-meta-` hash with the uploaded object. # # ```ruby # post = Aws::S3::PresignedPost.new(creds, region, bucket, { # key: '/fixed/key', # metadata: { # 'original-filename': '${filename}' # } # }) # ``` # class PresignedPost @@allowed_fields = [] # @param [Credentials] credentials Security credentials for signing # the post policy. # @param [String] bucket_region Region of the target bucket.
# @param [String] bucket_name Name of the target bucket. # @option options [Boolean] :use_accelerate_endpoint (false) When `true`, # PresignedPost will attempt to use the accelerated endpoint. # @option options [String] :url See {PresignedPost#url}. # @option options [String, Array<String>] :allow_any # See {PresignedPost#allow_any}. # @option options [Time] :signature_expiration Specify when the signature on # the post will expire. Defaults to one hour from creation of the # presigned post. May not exceed one week from creation time. # @option options [String] :key See {PresignedPost#key}. # @option options [String] :key_starts_with # See {PresignedPost#key_starts_with}. # @option options [String] :acl See {PresignedPost#acl}. # @option options [String] :acl_starts_with # See {PresignedPost#acl_starts_with}. # @option options [String] :cache_control # See {PresignedPost#cache_control}. # @option options [String] :cache_control_starts_with # See {PresignedPost#cache_control_starts_with}. # @option options [String] :content_type See {PresignedPost#content_type}. # @option options [String] :content_type_starts_with # See {PresignedPost#content_type_starts_with}. # @option options [String] :content_disposition # See {PresignedPost#content_disposition}. # @option options [String] :content_disposition_starts_with # See {PresignedPost#content_disposition_starts_with}. # @option options [String] :content_encoding # See {PresignedPost#content_encoding}. # @option options [String] :content_encoding_starts_with # See {PresignedPost#content_encoding_starts_with}. # @option options [Time] :expires See {PresignedPost#expires}. # @option options [String] :expires_starts_with # See {PresignedPost#expires_starts_with}. # @option options [Range] :content_length_range # See {PresignedPost#content_length_range}. # @option options [String] :success_action_redirect # See {PresignedPost#success_action_redirect}. # @option options [String] :success_action_redirect_starts_with # See {PresignedPost#success_action_redirect_starts_with}. # @option options [String] :success_action_status # See {PresignedPost#success_action_status}. # @option options [String] :storage_class # See {PresignedPost#storage_class}. # @option options [String] :website_redirect_location # See {PresignedPost#website_redirect_location}. # @option options [Hash] :metadata # See {PresignedPost#metadata}. # @option options [Hash] :metadata_starts_with # See {PresignedPost#metadata_starts_with}. # @option options [String] :server_side_encryption # See {PresignedPost#server_side_encryption}. # @option options [String] :server_side_encryption_aws_kms_key_id # See {PresignedPost#server_side_encryption_aws_kms_key_id}. # @option options [String] :server_side_encryption_customer_algorithm # See {PresignedPost#server_side_encryption_customer_algorithm}. # @option options [String] :server_side_encryption_customer_key # See {PresignedPost#server_side_encryption_customer_key}. # @option options [String] :server_side_encryption_customer_key_starts_with # See {PresignedPost#server_side_encryption_customer_key_starts_with}.
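# @example A hedged construction sketch (added for illustration; # credentials, region, and bucket name are placeholders): # # post = Aws::S3::PresignedPost.new( # Aws::Credentials.new('akid', 'secret'), # 'us-east-1', 'my-bucket', # key: 'uploads/${filename}', # content_length_range: 0..10_485_760 # ) # post.url #=> the form action URL # post.fields #=> hidden fields, including 'policy' and 'x-amz-signature'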
def initialize(credentials, bucket_region, bucket_name, options = {}) @credentials = credentials.credentials @bucket_region = bucket_region @bucket_name = bucket_name @accelerate = !!options.delete(:use_accelerate_endpoint) options.delete(:url) if @accelerate # resource methods pass url @url = options.delete(:url) || bucket_url @fields = {} @key_set = false @signature_expiration = Time.now + 3600 @conditions = [{ 'bucket' => @bucket_name }] options.each do |option_name, option_value| case option_name when :allow_any then allow_any(option_value) when :signature_expiration then @signature_expiration = option_value else if @@allowed_fields.include?(option_name) send("#{option_name}", option_value) else raise ArgumentError, "Unsupported option: #{option_name}" end end end end # @return [String] The URL to post a file upload to. This should be # the form action. attr_reader :url # @return [Hash] A hash of fields to render in an HTML form # as hidden input fields. def fields check_required_values! datetime = Time.now.utc.strftime('%Y%m%dT%H%M%SZ') fields = @fields.dup fields.update('policy' => policy(datetime)) fields.update(signature_fields(datetime)) fields.update('x-amz-signature' => signature(datetime, fields['policy'])) end # A list of form fields to white-list with any value. # @param [String, Array<String>] field_names # @return [self] def allow_any(*field_names) field_names.flatten.each do |field_name| @key_set = true if field_name.to_s == 'key' starts_with(field_name, '') end self end # @api private def self.define_field(field, *args, &block) @@allowed_fields << field options = args.last.is_a?(Hash) ? args.pop : {} field_name = args.last || field.to_s if block_given? define_method("#{field}", block) else define_method("#{field}") do |value| with(field_name, value) end if options[:starts_with] @@allowed_fields << "#{field}_starts_with".to_sym define_method("#{field}_starts_with") do |value| starts_with(field_name, value) end end end end # @!group Fields # @!method key(key) # The key to use for the uploaded object. You can use `${filename}` # as a variable in the key. This will be replaced with the name # of the file as provided by the user. # # For example, if the key is given as `/user/betty/${filename}` and # the file uploaded is named `lolcatz.jpg`, the resultant key will # be `/user/betty/lolcatz.jpg`. # # @param [String] key # @see http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html # @return [self] define_field(:key) do |key| @key_set = true with('key', key) end # @!method key_starts_with(prefix) # Specify a required prefix for the uploaded key. # @param [String] prefix # @see #key # @return [self] define_field(:key_starts_with) do |prefix| @key_set = true starts_with('key', prefix) end # @!method acl(canned_acl) # Specify the canned ACL (access control list) for the object. # May be one of the following values: # # * `private` # * `public-read` # * `public-read-write` # * `authenticated-read` # * `bucket-owner-read` # * `bucket-owner-full-control` # # @param [String] canned_acl # @see http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # @return [self] # # @!method acl_starts_with(prefix) # @param [String] prefix # @see #acl # @return [self] define_field(:acl, starts_with: true) # @!method cache_control(value) # Specify caching behavior along the request/reply chain. # @param [String] value # @see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.
# @return [self] # # @!method cache_control_starts_with(prefix) # @param [String] prefix # @see #cache_control # @return [self] define_field(:cache_control, 'Cache-Control', starts_with: true) # @return [String] # @!method content_type(value) # A standard MIME type describing the format of the contents. # @param [String] value # @see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 # @return [self] # # @!method content_type_starts_with(prefix) # @param [String] prefix # @see #content_type # @return [self] define_field(:content_type, 'Content-Type', starts_with: true) # @!method content_disposition(value) # Specifies presentational information for the object. # @param [String] value # @see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 # @return [self] # # @!method content_disposition_starts_with(prefix) # @param [String] prefix # @see #content_disposition # @return [self] define_field(:content_disposition, 'Content-Disposition', starts_with: true) # @!method content_encoding(value) # Specifies what content encodings have been applied to the object # and thus what decoding mechanisms must be applied to obtain the # media-type referenced by the Content-Type header field. # @param [String] value # @see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 # @return [self] # # @!method content_encoding_starts_with(prefix) # @param [String] prefix # @see #content_encoding # @return [self] define_field(:content_encoding, 'Content-Encoding', starts_with: true) # @!method expires(time) # The date and time at which the object is no longer cacheable. # @note This does not affect the expiration of the presigned post # signature. # @param [Time] time # @see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 # @return [self] define_field(:expires) do |time| with('Expires', time.httpdate) end # @!method expires_starts_with(prefix) # @param [String] prefix # @see #expires # @return [self] define_field(:expires_starts_with) do |prefix| starts_with('Expires', prefix) end # @!method content_length_range(byte_range) # The minimum and maximum allowable size for the uploaded content. # @param [Range] byte_range # @return [self] define_field(:content_length_range) do |byte_range| min = byte_range.begin max = byte_range.end max -= 1 if byte_range.exclude_end? @conditions << ['content-length-range', min, max] self end # @!method success_action_redirect(value) # The URL to which the client is redirected # upon successful upload. If {#success_action_redirect} is not # specified, Amazon S3 returns the empty document type specified # by {#success_action_status}. # # If Amazon S3 cannot interpret the URL, it acts as if the field # is not present. If the upload fails, Amazon S3 displays an error # and does not redirect the user to a URL. # # @param [String] value # @return [self] # # @!method success_action_redirect_starts_with(prefix) # @param [String] prefix # @see #success_action_redirect # @return [self] define_field(:success_action_redirect, starts_with: true) # @!method success_action_status(value) # The status code returned to the client upon # successful upload if {#success_action_redirect} is not # specified. # # Accepts the values `200`, `201`, or `204` (default). # # If the value is set to 200 or 204, Amazon S3 returns an empty # document with a 200 or 204 status code. If the value is set to 201, # Amazon S3 returns an XML document with a 201 status code. 
# # If the value is not set or if it is set to an invalid value, Amazon # S3 returns an empty document with a 204 status code. # # @param [String] value The status code returned to the client upon # successful upload. # @return [self] define_field(:success_action_status) # @!method storage_class(value) # Storage class to use for storing the object. Defaults to # `STANDARD`. Must be one of: # # * `STANDARD` # * `REDUCED_REDUNDANCY` # # You cannot specify `GLACIER` as the storage class. To transition # objects to the GLACIER storage class you can use lifecycle # configuration. # @param [String] value Storage class to use for storing the object. # @return [self] define_field(:storage_class, 'x-amz-storage-class') # @!method website_redirect_location(value) # If the bucket is configured as a website, # redirects requests for this object to another object in the # same bucket or to an external URL. Amazon S3 stores this value # in the object metadata. # # The value must be prefixed by "/", "http://" or "https://". # The length of the value is limited to 2K. # # @param [String] value # @see http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html # @see http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html # @see http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html # @return [self] define_field(:website_redirect_location, 'x-amz-website-redirect-location') # @!method metadata(hash) # Metadata hash to store with the uploaded object. Hash keys will be # prefixed with "x-amz-meta-". # @param [Hash] hash # @return [self] define_field(:metadata) do |hash| hash.each do |key, value| with("x-amz-meta-#{key}", value) end self end # @!method metadata_starts_with(hash) # Specify allowable prefix for each key in the metadata hash. # @param [Hash] hash # @see #metadata # @return [self] define_field(:metadata_starts_with) do |hash| hash.each do |key, value| starts_with("x-amz-meta-#{key}", value) end self end # @!endgroup # @!group Server-Side Encryption Fields # @!method server_side_encryption(value) # Specifies a server-side encryption algorithm to use when Amazon # S3 creates an object. Valid values include: # # * `aws:kms` # * `AES256` # # @param [String] value # @return [self] define_field(:server_side_encryption, 'x-amz-server-side-encryption') # @!method server_side_encryption_aws_kms_key_id(value) # If {#server_side_encryption} is called with the value of `aws:kms`, # this method specifies the ID of the AWS Key Management Service # (KMS) master encryption key to use for the object. # @param [String] value # @return [self] define_field( :server_side_encryption_aws_kms_key_id, 'x-amz-server-side-encryption-aws-kms-key-id' ) # @!endgroup # @!group Server-Side Encryption with Customer-Provided Key Fields # @!method server_side_encryption_customer_algorithm(value) # Specifies the algorithm to use when encrypting the object. # Must be set to `AES256` when using customer-provided encryption # keys. Must also call {#server_side_encryption_customer_key}. # @param [String] value # @see #server_side_encryption_customer_key # @return [self] define_field( :server_side_encryption_customer_algorithm, 'x-amz-server-side-encryption-customer-algorithm' ) # @!method server_side_encryption_customer_key(value) # Specifies the customer-provided encryption key for Amazon S3 to use # in encrypting data. This value is used to store the object and then # it is discarded; Amazon does not store the encryption key. # # You must also call {#server_side_encryption_customer_algorithm}.
# # @param [String] value # @see #server_side_encryption_customer_algorithm # @return [self] define_field(:server_side_encryption_customer_key) do |value| field_name = 'x-amz-server-side-encryption-customer-key' with(field_name, base64(value)) with(field_name + '-MD5', base64(OpenSSL::Digest::MD5.digest(value))) end # @!method server_side_encryption_customer_key_starts_with(prefix) # @param [String] prefix # @see #server_side_encryption_customer_key # @return [self] define_field(:server_side_encryption_customer_key_starts_with) do |prefix| field_name = 'x-amz-server-side-encryption-customer-key' starts_with(field_name, prefix) end # @!endgroup private def with(field_name, value) fvar = '${filename}' if index = value.rindex(fvar) if index + fvar.size == value.size @fields[field_name] = value starts_with(field_name, value[0,index]) else msg = "${filename} only supported at the end of #{field_name}" raise ArgumentError, msg end else @fields[field_name] = value.to_s @conditions << { field_name => value.to_s } end self end def starts_with(field_name, value, &block) @conditions << ['starts-with', "$#{field_name}", value.to_s] self end def check_required_values! unless @key_set msg = 'key required; you must provide a key via :key, '\ ":key_starts_with, or :allow_any => ['key']" raise msg end end def bucket_url # Taken from Aws::S3::Endpoints module params = Aws::S3::EndpointParameters.new( bucket: @bucket_name, region: @bucket_region, accelerate: @accelerate, use_global_endpoint: true ) endpoint = Aws::S3::EndpointProvider.new.resolve_endpoint(params) endpoint.url end # @return [Hash] def policy(datetime) check_required_values! policy = {} policy['expiration'] = @signature_expiration.utc.iso8601 policy['conditions'] = @conditions.dup signature_fields(datetime).each do |name, value| policy['conditions'] << { name => value } end base64(Json.dump(policy)) end def signature_fields(datetime) fields = {} fields['x-amz-credential'] = credential_scope(datetime) fields['x-amz-algorithm'] = 'AWS4-HMAC-SHA256' fields['x-amz-date'] = datetime if session_token = @credentials.session_token fields['x-amz-security-token'] = session_token end fields end def signature(datetime, string_to_sign) k_secret = @credentials.secret_access_key k_date = hmac('AWS4' + k_secret, datetime[0,8]) k_region = hmac(k_date, @bucket_region) k_service = hmac(k_region, 's3') k_credentials = hmac(k_service, 'aws4_request') hexhmac(k_credentials, string_to_sign) end def hmac(key, value) OpenSSL::HMAC.digest(OpenSSL::Digest.new('sha256'), key, value) end def hexhmac(key, value) OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha256'), key, value) end def credential_scope(datetime) parts = [] parts << @credentials.access_key_id parts << datetime[0,8] parts << @bucket_region parts << 's3' parts << 'aws4_request' parts.join('/') end def base64(str) Base64.strict_encode64(str) end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/object.rb0000644000004100000410000040616714563445240020066 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. 
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 class Object extend Aws::Deprecations # @overload def initialize(bucket_name, key, options = {}) # @param [String] bucket_name # @param [String] key # @option options [Client] :client # @overload def initialize(options = {}) # @option options [required, String] :bucket_name # @option options [required, String] :key # @option options [Client] :client def initialize(*args) options = Hash === args.last ? args.pop.dup : {} @bucket_name = extract_bucket_name(args, options) @key = extract_key(args, options) @data = options.delete(:data) @client = options.delete(:client) || Client.new(options) @waiter_block_warned = false end # @!group Read-Only Attributes # @return [String] def bucket_name @bucket_name end # @return [String] def key @key end # Specifies whether the object retrieved was (true) or was not (false) a # Delete Marker. If false, this response header does not appear in the # response. # # This functionality is not supported for directory buckets. # # # @return [Boolean] def delete_marker data[:delete_marker] end # Indicates that a range of bytes was specified. # @return [String] def accept_ranges data[:accept_ranges] end # If the object expiration is configured (see [ # `PutBucketLifecycleConfiguration` ][1]), the response includes this # header. It includes the `expiry-date` and `rule-id` key-value pairs # providing object expiration information. The value of the `rule-id` is # URL-encoded. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html # @return [String] def expiration data[:expiration] end # If the object is an archived object (an object whose storage class is # GLACIER), the response includes this header if either the archive # restoration is in progress (see [RestoreObject][1] or an archive copy # is already restored. # # If an archive copy is already restored, the header value indicates # when Amazon S3 is scheduled to delete the object copy. For example: # # `x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 # 00:00:00 GMT"` # # If the object restoration is in progress, the header returns the value # `ongoing-request="true"`. # # For more information about archiving objects, see [Transitioning # Objects: General Considerations][2]. # # This functionality is not supported for directory buckets. Only the S3 # Express One Zone storage class is supported by directory buckets to # store objects. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations # @return [String] def restore data[:restore] end # The archive state of the head object. # # This functionality is not supported for directory buckets. # # # @return [String] def archive_status data[:archive_status] end # Date and time when the object was last modified. # @return [Time] def last_modified data[:last_modified] end # Size of the body in bytes. # @return [Integer] def content_length data[:content_length] end # The base64-encoded, 32-bit CRC32 checksum of the object. This will # only be present if it was uploaded with the object. 
When you use an # API operation on an object that was uploaded using multipart uploads, # this value may not be a direct checksum value of the full object. # Instead, it's a calculation based on the checksum values of each # individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object integrity][1] # in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] def checksum_crc32 data[:checksum_crc32] end # The base64-encoded, 32-bit CRC32C checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart uploads, # this value may not be a direct checksum value of the full object. # Instead, it's a calculation based on the checksum values of each # individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object integrity][1] # in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] def checksum_crc32c data[:checksum_crc32c] end # The base64-encoded, 160-bit SHA-1 digest of the object. This will only # be present if it was uploaded with the object. When you use the API # operation on an object that was uploaded using multipart uploads, this # value may not be a direct checksum value of the full object. Instead, # it's a calculation based on the checksum values of each individual # part. For more information about how checksums are calculated with # multipart uploads, see [ Checking object integrity][1] in the *Amazon # S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] def checksum_sha1 data[:checksum_sha1] end # The base64-encoded, 256-bit SHA-256 digest of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart uploads, # this value may not be a direct checksum value of the full object. # Instead, it's a calculation based on the checksum values of each # individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object integrity][1] # in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] def checksum_sha256 data[:checksum_sha256] end # An entity tag (ETag) is an opaque identifier assigned by a web server # to a specific version of a resource found at a URL. # @return [String] def etag data[:etag] end # This is set to the number of metadata entries not returned in # `x-amz-meta` headers. This can happen if you create metadata using an # API like SOAP that supports more flexible metadata than the REST API. # For example, using SOAP, you can create metadata whose values are not # legal HTTP headers. # # This functionality is not supported for directory buckets. # # # @return [Integer] def missing_meta data[:missing_meta] end # Version ID of the object. # # This functionality is not supported for directory buckets. # # # @return [String] def version_id data[:version_id] end # Specifies caching behavior along the request/reply chain. 
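    # A usage sketch for the checksum attributes above (hand-written, not
    # generated): assumes a configured `client`; the bucket and key names
    # are placeholders, and the object must have been uploaded with a
    # checksum algorithm.
    #
    #     resp = client.head_object(
    #       bucket: 'my-bucket',
    #       key: 'my-key',
    #       checksum_mode: 'ENABLED' # checksums are omitted unless enabled
    #     )
    #     resp.checksum_crc32 # => base64-encoded CRC32, or nil
    #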
# @return [String] def cache_control data[:cache_control] end # Specifies presentational information for the object. # @return [String] def content_disposition data[:content_disposition] end # Indicates what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the media-type # referenced by the Content-Type header field. # @return [String] def content_encoding data[:content_encoding] end # The language the content is in. # @return [String] def content_language data[:content_language] end # A standard MIME type describing the format of the object data. # @return [String] def content_type data[:content_type] end # The date and time at which the object is no longer cacheable. # @return [Time] def expires data[:expires] end # @return [String] def expires_string data[:expires_string] end # If the bucket is configured as a website, redirects requests for this # object to another object in the same bucket or to an external URL. # Amazon S3 stores the value of this header in the object metadata. # # This functionality is not supported for directory buckets. # # # @return [String] def website_redirect_location data[:website_redirect_location] end # The server-side encryption algorithm used when you store this object # in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # @return [String] def server_side_encryption data[:server_side_encryption] end # A map of metadata to store with the object in S3. # @return [Hash] def metadata data[:metadata] end # If server-side encryption with a customer-provided encryption key was # requested, the response will include this header to confirm the # encryption algorithm that's used. # # This functionality is not supported for directory buckets. # # # @return [String] def sse_customer_algorithm data[:sse_customer_algorithm] end # If server-side encryption with a customer-provided encryption key was # requested, the response will include this header to provide the # round-trip message integrity verification of the customer-provided # encryption key. # # This functionality is not supported for directory buckets. # # # @return [String] def sse_customer_key_md5 data[:sse_customer_key_md5] end # If present, indicates the ID of the Key Management Service (KMS) # symmetric encryption customer managed key that was used for the # object. # # This functionality is not supported for directory buckets. # # # @return [String] def ssekms_key_id data[:ssekms_key_id] end # Indicates whether the object uses an S3 Bucket Key for server-side # encryption with Key Management Service (KMS) keys (SSE-KMS). # # This functionality is not supported for directory buckets. # # # @return [Boolean] def bucket_key_enabled data[:bucket_key_enabled] end # Provides storage class information of the object. Amazon S3 returns # this header for all objects except for S3 Standard storage class # objects. # # For more information, see [Storage Classes][1]. # # Directory buckets - Only the S3 Express One Zone storage class # is supported by directory buckets to store objects. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # @return [String] def storage_class data[:storage_class] end # If present, indicates that the requester was successfully charged for # the request. # # This functionality is not supported for directory buckets. 
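    # An illustrative sketch of inspecting the encryption-related
    # attributes documented above (an editorial example, not generated):
    # accessing any attribute lazily loads {#data} via `head_object`.
    #
    #     obj = Aws::S3::Object.new('my-bucket', 'my-key', client: client)
    #     obj.server_side_encryption # => "AES256", "aws:kms", ...
    #     obj.ssekms_key_id          # => KMS key ARN when SSE-KMS was used
    #     obj.storage_class          # => nil for S3 Standard objects
    #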
# # # @return [String] def request_charged data[:request_charged] end # Amazon S3 can return this header if your request involves a bucket # that is either a source or a destination in a replication rule. # # In replication, you have a source bucket on which you configure # replication and destination bucket or buckets where Amazon S3 stores # object replicas. When you request an object (`GetObject`) or object # metadata (`HeadObject`) from these buckets, Amazon S3 will return the # `x-amz-replication-status` header in the response as follows: # # * **If requesting an object from the source bucket**, Amazon S3 will # return the `x-amz-replication-status` header if the object in your # request is eligible for replication. # # For example, suppose that in your replication configuration, you # specify object prefix `TaxDocs` requesting Amazon S3 to replicate # objects with key prefix `TaxDocs`. Any objects you upload with this # key name prefix, for example `TaxDocs/document1.pdf`, are eligible # for replication. For any object request with this key name prefix, # Amazon S3 will return the `x-amz-replication-status` header with # value PENDING, COMPLETED or FAILED indicating object replication # status. # # * **If requesting an object from a destination bucket**, Amazon S3 # will return the `x-amz-replication-status` header with value REPLICA # if the object in your request is a replica that Amazon S3 created # and there is no replica modification replication in progress. # # * **When replicating objects to multiple destination buckets**, the # `x-amz-replication-status` header acts differently. The header of # the source object will only return a value of COMPLETED when # replication is successful to all destinations. The header will # remain at value PENDING until replication has completed for all # destinations. If one or more destinations fails replication the # header will return FAILED. # # For more information, see [Replication][1]. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html # @return [String] def replication_status data[:replication_status] end # The count of parts this object has. This value is only returned if you # specify `partNumber` in your request and the object was uploaded as a # multipart upload. # @return [Integer] def parts_count data[:parts_count] end # The Object Lock mode, if any, that's in effect for this object. This # header is only returned if the requester has the # `s3:GetObjectRetention` permission. For more information about S3 # Object Lock, see [Object Lock][1]. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html # @return [String] def object_lock_mode data[:object_lock_mode] end # The date and time when the Object Lock retention period expires. This # header is only returned if the requester has the # `s3:GetObjectRetention` permission. # # This functionality is not supported for directory buckets. # # # @return [Time] def object_lock_retain_until_date data[:object_lock_retain_until_date] end # Specifies whether a legal hold is in effect for this object. This # header is only returned if the requester has the # `s3:GetObjectLegalHold` permission. This header is not returned if the # specified version of this object has never had a legal hold applied. # For more information about S3 Object Lock, see [Object Lock][1]. 
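    # A sketch of checking the Object Lock state documented above
    # (hand-written, not generated): assumes the caller holds the
    # `s3:GetObjectRetention` and `s3:GetObjectLegalHold` permissions.
    #
    #     obj.object_lock_mode              # => "GOVERNANCE" or "COMPLIANCE"
    #     obj.object_lock_retain_until_date # => Time
    #     obj.object_lock_legal_hold_status # => "ON" or "OFF"
    #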
    #
    # This functionality is not supported for directory buckets.
    #
    # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
    # @return [String]
    def object_lock_legal_hold_status
      data[:object_lock_legal_hold_status]
    end

    # @!endgroup

    # @return [Client]
    def client
      @client
    end

    # Loads, or reloads {#data} for the current {Object}.
    # Returns `self` making it possible to chain methods.
    #
    #     object.reload.data
    #
    # @return [self]
    def load
      resp = Aws::Plugins::UserAgent.feature('resource') do
        @client.head_object(
          bucket: @bucket_name,
          key: @key
        )
      end
      @data = resp.data
      self
    end
    alias :reload :load

    # @return [Types::HeadObjectOutput]
    #   Returns the data for this {Object}. Calls
    #   {Client#head_object} if {#data_loaded?} is `false`.
    def data
      load unless @data
      @data
    end

    # @return [Boolean]
    #   Returns `true` if this resource is loaded. Accessing attributes or
    #   {#data} on an unloaded resource will trigger a call to {#load}.
    def data_loaded?
      !!@data
    end

    # @param [Hash] options ({})
    # @return [Boolean]
    #   Returns `true` if the Object exists.
    def exists?(options = {})
      begin
        wait_until_exists(options.merge(max_attempts: 1))
        true
      rescue Aws::Waiters::Errors::UnexpectedError => e
        raise e.error
      rescue Aws::Waiters::Errors::WaiterFailed
        false
      end
    end

    # @param [Hash] options ({})
    # @option options [Integer] :max_attempts (20)
    # @option options [Float] :delay (5)
    # @option options [Proc] :before_attempt
    # @option options [Proc] :before_wait
    # @return [Object]
    def wait_until_exists(options = {}, &block)
      options, params = separate_params_and_options(options)
      waiter = Waiters::ObjectExists.new(options)
      yield_waiter_and_warn(waiter, &block) if block_given?
      Aws::Plugins::UserAgent.feature('resource') do
        waiter.wait(params.merge(bucket: @bucket_name, key: @key))
      end
      Object.new({
        bucket_name: @bucket_name,
        key: @key,
        client: @client
      })
    end

    # @param [Hash] options ({})
    # @option options [Integer] :max_attempts (20)
    # @option options [Float] :delay (5)
    # @option options [Proc] :before_attempt
    # @option options [Proc] :before_wait
    # @return [Object]
    def wait_until_not_exists(options = {}, &block)
      options, params = separate_params_and_options(options)
      waiter = Waiters::ObjectNotExists.new(options)
      yield_waiter_and_warn(waiter, &block) if block_given?
      Aws::Plugins::UserAgent.feature('resource') do
        waiter.wait(params.merge(bucket: @bucket_name, key: @key))
      end
      Object.new({
        bucket_name: @bucket_name,
        key: @key,
        client: @client
      })
    end

    # @deprecated Use [Aws::S3::Client] #wait_until instead
    #
    # Waiter polls an API operation until a resource enters a desired
    # state.
    #
    # @note The waiting operation is performed on a copy. The original resource
    #   remains unchanged.
    #
    # ## Basic Usage
    #
    # The waiter polls until it is successful, until it fails by
    # entering a terminal state, or until a maximum number of attempts
    # have been made.
    #
    #     # polls in a loop until condition is true
    #     resource.wait_until(options) {|resource| condition}
    #
    # ## Example
    #
    #     instance.wait_until(max_attempts:10, delay:5) do |instance|
    #       instance.state.name == 'running'
    #     end
    #
    # ## Configuration
    #
    # You can configure the maximum number of polling attempts, and the
    # delay (in seconds) between each polling attempt. The waiting condition is
    # set by passing a block to {#wait_until}:
    #
    #     # poll for ~25 seconds
    #     resource.wait_until(max_attempts:5,delay:5) {|resource|...}
    #
    # ## Callbacks
    #
    # You can be notified before each polling attempt and before each
    # delay. If you throw `:success` or `:failure` from these callbacks,
    # it will terminate the waiter.
    #
    #     started_at = Time.now
    #     # poll for 1 hour, instead of a number of attempts
    #     proc = Proc.new do |attempts, response|
    #       throw :failure if Time.now - started_at > 3600
    #     end
    #
    #     # disable max attempts
    #     instance.wait_until(before_wait:proc, max_attempts:nil) {...}
    #
    # ## Handling Errors
    #
    # When a waiter is successful, it returns the Resource. When a waiter
    # fails, it raises an error.
    #
    #     begin
    #       resource.wait_until(...)
    #     rescue Aws::Waiters::Errors::WaiterFailed
    #       # resource did not enter the desired state in time
    #     end
    #
    # @yieldparam [Resource] resource to be used in the waiting condition.
    #
    # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter
    #   terminates because the waiter has entered a state that it will not
    #   transition out of, preventing success.
    #
    # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the
    #   configured maximum number of attempts have been made, and the waiter
    #   is not yet successful.
    #
    # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is
    #   encountered while polling for a resource that is not expected.
    #
    # @raise [NotImplementedError] Raised when the resource does not
    #   support #wait_until.
    #
    # @option options [Integer] :max_attempts (10) Maximum number of
    #   attempts
    # @option options [Integer] :delay (10) Delay between each
    #   attempt in seconds
    # @option options [Proc] :before_attempt (nil) Callback
    #   invoked before each attempt
    # @option options [Proc] :before_wait (nil) Callback
    #   invoked before each wait
    # @return [Resource] if the waiter was successful
    def wait_until(options = {}, &block)
      self_copy = self.dup
      attempts = 0
      options[:max_attempts] = 10 unless options.key?(:max_attempts)
      options[:delay] ||= 10
      options[:poller] = Proc.new do
        attempts += 1
        if block.call(self_copy)
          [:success, self_copy]
        else
          self_copy.reload unless attempts == options[:max_attempts]
          :retry
        end
      end
      Aws::Plugins::UserAgent.feature('resource') do
        Aws::Waiters::Waiter.new(options).wait({})
      end
    end

    # @!group Actions

    # @example Request syntax with placeholder values
    #
    #   object.copy_from({
    #     acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control
    #     cache_control: "CacheControl",
    #     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
    #     content_disposition: "ContentDisposition",
    #     content_encoding: "ContentEncoding",
    #     content_language: "ContentLanguage",
    #     content_type: "ContentType",
    #     copy_source: "CopySource", # required
    #     copy_source_if_match: "CopySourceIfMatch",
    #     copy_source_if_modified_since: Time.now,
    #     copy_source_if_none_match: "CopySourceIfNoneMatch",
    #     copy_source_if_unmodified_since: Time.now,
    #     expires: Time.now,
    #     grant_full_control: "GrantFullControl",
    #     grant_read: "GrantRead",
    #     grant_read_acp: "GrantReadACP",
    #     grant_write_acp: "GrantWriteACP",
    #     metadata: {
    #       "MetadataKey" => "MetadataValue",
    #     },
    #     metadata_directive: "COPY", # accepts COPY, REPLACE
    #     tagging_directive: "COPY", # accepts COPY, REPLACE
    #     server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse
    #     storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE
    #     website_redirect_location: "WebsiteRedirectLocation",
    #     sse_customer_algorithm: "SSECustomerAlgorithm",
    #     sse_customer_key: "SSECustomerKey",
    #     sse_customer_key_md5: "SSECustomerKeyMD5",
    #     ssekms_key_id: "SSEKMSKeyId",
    #     ssekms_encryption_context: "SSEKMSEncryptionContext",
    #     bucket_key_enabled: false,
    #     copy_source_sse_customer_algorithm: "CopySourceSSECustomerAlgorithm",
    #     copy_source_sse_customer_key:
"CopySourceSSECustomerKey", # copy_source_sse_customer_key_md5: "CopySourceSSECustomerKeyMD5", # request_payer: "requester", # accepts requester # tagging: "TaggingHeader", # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE # object_lock_retain_until_date: Time.now, # object_lock_legal_hold_status: "ON", # accepts ON, OFF # expected_bucket_owner: "AccountId", # expected_source_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :acl # The canned access control list (ACL) to apply to the object. # # When you copy an object, the ACL metadata is not preserved and is set # to `private` by default. Only the owner has full access control. To # override the default ACL setting, specify a new ACL when you generate # a copy request. For more information, see [Using ACLs][1]. # # If the destination bucket that you're copying objects to uses the # bucket owner enforced setting for S3 Object Ownership, ACLs are # disabled and no longer affect permissions. Buckets that use this # setting only accept `PUT` requests that don't specify an ACL or `PUT` # requests that specify bucket owner full control ACLs, such as the # `bucket-owner-full-control` canned ACL or an equivalent form of this # ACL expressed in the XML format. For more information, see # [Controlling ownership of objects and disabling ACLs][2] in the # *Amazon S3 User Guide*. # # * If your destination bucket uses the bucket owner enforced setting # for Object Ownership, all objects written to the bucket by any # account will be owned by the bucket owner. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # @option options [String] :cache_control # Specifies the caching behavior along the request/reply chain. # @option options [String] :checksum_algorithm # Indicates the algorithm that you want Amazon S3 to use to create the # checksum for the object. For more information, see [Checking object # integrity][1] in the *Amazon S3 User Guide*. # # When you copy an object, if the source object has a checksum, that # checksum value will be copied to the new object by default. If the # `CopyObject` request does not include this `x-amz-checksum-algorithm` # header, the checksum algorithm will be copied from the source object # to the destination object (if it's present on the source object). You # can optionally specify a different checksum algorithm to use with the # `x-amz-checksum-algorithm` header. Unrecognized or unsupported values # will respond with the HTTP status code `400 Bad Request`. # # For directory buckets, when you use Amazon Web Services SDKs, `CRC32` # is the default checksum algorithm that's used for performance. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :content_disposition # Specifies presentational information for the object. Indicates whether # an object should be displayed in a web browser or downloaded as a # file. It allows specifying the desired filename for the downloaded # file. # @option options [String] :content_encoding # Specifies what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the media-type # referenced by the Content-Type header field. 
# # For directory buckets, only the `aws-chunked` value is supported in # this header field. # # # @option options [String] :content_language # The language the content is in. # @option options [String] :content_type # A standard MIME type that describes the format of the object data. # @option options [required, String] :copy_source # Specifies the source object for the copy operation. The source object # can be up to 5 GB. If the source object is an object that was uploaded # by using a multipart upload, the object copy will be a single part # object after the source object is copied to the destination bucket. # # You specify the value of the copy source in one of two formats, # depending on whether you want to access the source object through an # [access point][1]: # # * For objects not accessed through an access point, specify the name # of the source bucket and the key of the source object, separated by # a slash (/). For example, to copy the object `reports/january.pdf` # from the general purpose bucket `awsexamplebucket`, use # `awsexamplebucket/reports/january.pdf`. The value must be # URL-encoded. To copy the object `reports/january.pdf` from the # directory bucket `awsexamplebucket--use1-az5--x-s3`, use # `awsexamplebucket--use1-az5--x-s3/reports/january.pdf`. The value # must be URL-encoded. # # * For objects accessed through access points, specify the Amazon # Resource Name (ARN) of the object as accessed through the access # point, in the format # `arn:aws:s3:::accesspoint//object/`. # For example, to copy the object `reports/january.pdf` through access # point `my-access-point` owned by account `123456789012` in Region # `us-west-2`, use the URL encoding of # `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`. # The value must be URL encoded. # # * Amazon S3 supports copy operations using Access points only when # the source and destination buckets are in the same Amazon Web # Services Region. # # * Access points are not supported by directory buckets. # # # # Alternatively, for objects accessed through Amazon S3 on Outposts, # specify the ARN of the object as accessed in the format # `arn:aws:s3-outposts:::outpost//object/`. # For example, to copy the object `reports/january.pdf` through # outpost `my-outpost` owned by account `123456789012` in Region # `us-west-2`, use the URL encoding of # `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`. # The value must be URL-encoded. # # If your source bucket versioning is enabled, the `x-amz-copy-source` # header by default identifies the current version of an object to copy. # If the current version is a delete marker, Amazon S3 behaves as if the # object was deleted. To copy a different version, use the `versionId` # query parameter. Specifically, append `?versionId=` to the # value (for example, # `awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`). # If you don't specify a version ID, Amazon S3 copies the latest # version of the source object. # # If you enable versioning on the destination bucket, Amazon S3 # generates a unique version ID for the copied object. This version ID # is different from the version ID of the source object. Amazon S3 # returns the version ID of the copied object in the `x-amz-version-id` # response header in the response. 
# # If you do not enable versioning or suspend it on the destination # bucket, the version ID that Amazon S3 generates in the # `x-amz-version-id` response header is always null. # # **Directory buckets** - S3 Versioning isn't enabled and supported for # directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html # @option options [String] :copy_source_if_match # Copies the object if its entity tag (ETag) matches the specified tag. # # If both the `x-amz-copy-source-if-match` and # `x-amz-copy-source-if-unmodified-since` headers are present in the # request and evaluate as follows, Amazon S3 returns `200 OK` and copies # the data: # # * `x-amz-copy-source-if-match` condition evaluates to true # # * `x-amz-copy-source-if-unmodified-since` condition evaluates to false # @option options [Time,DateTime,Date,Integer,String] :copy_source_if_modified_since # Copies the object if it has been modified since the specified time. # # If both the `x-amz-copy-source-if-none-match` and # `x-amz-copy-source-if-modified-since` headers are present in the # request and evaluate as follows, Amazon S3 returns the `412 # Precondition Failed` response code: # # * `x-amz-copy-source-if-none-match` condition evaluates to false # # * `x-amz-copy-source-if-modified-since` condition evaluates to true # @option options [String] :copy_source_if_none_match # Copies the object if its entity tag (ETag) is different than the # specified ETag. # # If both the `x-amz-copy-source-if-none-match` and # `x-amz-copy-source-if-modified-since` headers are present in the # request and evaluate as follows, Amazon S3 returns the `412 # Precondition Failed` response code: # # * `x-amz-copy-source-if-none-match` condition evaluates to false # # * `x-amz-copy-source-if-modified-since` condition evaluates to true # @option options [Time,DateTime,Date,Integer,String] :copy_source_if_unmodified_since # Copies the object if it hasn't been modified since the specified # time. # # If both the `x-amz-copy-source-if-match` and # `x-amz-copy-source-if-unmodified-since` headers are present in the # request and evaluate as follows, Amazon S3 returns `200 OK` and copies # the data: # # * `x-amz-copy-source-if-match` condition evaluates to true # # * `x-amz-copy-source-if-unmodified-since` condition evaluates to false # @option options [Time,DateTime,Date,Integer,String] :expires # The date and time at which the object is no longer cacheable. # @option options [String] :grant_full_control # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the # object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [String] :grant_read # Allows grantee to read the object data and its metadata. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [String] :grant_read_acp # Allows grantee to read the object ACL. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [String] :grant_write_acp # Allows grantee to write the ACL for the applicable object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [Hash] :metadata # A map of metadata to store with the object in S3. 
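    # A sketch of replacing metadata during a copy, using the
    # `:metadata_directive` option documented below (an editorial example,
    # not generated; names are placeholders):
    #
    #     obj.copy_from(
    #       copy_source: 'source-bucket/source-key',
    #       metadata: { 'reviewed' => 'true' },
    #       metadata_directive: 'REPLACE'
    #     )
    #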
# @option options [String] :metadata_directive # Specifies whether the metadata is copied from the source object or # replaced with metadata that's provided in the request. When copying # an object, you can preserve all metadata (the default) or specify new # metadata. If this header isn’t specified, `COPY` is the default # behavior. # # **General purpose bucket** - For general purpose buckets, when you # grant permissions, you can use the `s3:x-amz-metadata-directive` # condition key to enforce certain metadata behavior when objects are # uploaded. For more information, see [Amazon S3 condition key # examples][1] in the *Amazon S3 User Guide*. # # `x-amz-website-redirect-location` is unique to each object and is not # copied when using the `x-amz-metadata-directive` header. To copy the # value, you must specify `x-amz-website-redirect-location` in the # request header. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html # @option options [String] :tagging_directive # Specifies whether the object tag-set is copied from the source object # or replaced with the tag-set that's provided in the request. # # The default value is `COPY`. # # **Directory buckets** - For directory buckets in a `CopyObject` # operation, only the empty tag-set is supported. Any requests that # attempt to write non-empty tags into directory buckets will receive a # `501 Not Implemented` status code. When the destination bucket is a # directory bucket, you will receive a `501 Not Implemented` response in # any of the following situations: # # * When you attempt to `COPY` the tag-set from an S3 source object that # has non-empty tags. # # * When you attempt to `REPLACE` the tag-set of a source object and set # a non-empty value to `x-amz-tagging`. # # * When you don't set the `x-amz-tagging-directive` header and the # source object has non-empty tags. This is because the default value # of `x-amz-tagging-directive` is `COPY`. # # Because only the empty tag-set is supported for directory buckets in a # `CopyObject` operation, the following situations are allowed: # # * When you attempt to `COPY` the tag-set from a directory bucket # source object that has no tags to a general purpose bucket. It # copies an empty tag-set to the destination object. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and set the `x-amz-tagging` value of the directory # bucket destination object to empty. # # * When you attempt to `REPLACE` the tag-set of a general purpose # bucket source object that has non-empty tags and set the # `x-amz-tagging` value of the directory bucket destination object to # empty. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and don't set the `x-amz-tagging` value of the # directory bucket destination object. This is because the default # value of `x-amz-tagging` is the empty value. # # # @option options [String] :server_side_encryption # The server-side encryption algorithm used when storing this object in # Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). # Unrecognized or unsupported values won’t write a destination object # and will receive a `400 Bad Request` response. # # Amazon S3 automatically encrypts all new objects that are copied to an # S3 bucket. When copying an object, if you don't specify encryption # information in your copy request, the encryption setting of the target # object is set to the default encryption configuration of the # destination bucket. 
By default, all buckets have a base level of # encryption configuration that uses server-side encryption with Amazon # S3 managed keys (SSE-S3). If the destination bucket has a default # encryption configuration that uses server-side encryption with Key # Management Service (KMS) keys (SSE-KMS), dual-layer server-side # encryption with Amazon Web Services KMS keys (DSSE-KMS), or # server-side encryption with customer-provided encryption keys (SSE-C), # Amazon S3 uses the corresponding KMS key, or a customer-provided key # to encrypt the target object copy. # # When you perform a `CopyObject` operation, if you want to use a # different type of encryption setting for the target object, you can # specify appropriate encryption-related headers to encrypt the target # object with an Amazon S3 managed key, a KMS key, or a # customer-provided key. If the encryption setting in your request is # different from the default encryption configuration of the destination # bucket, the encryption setting in your request takes precedence. # # With server-side encryption, Amazon S3 encrypts your data as it writes # your data to disks in its data centers and decrypts the data when you # access it. For more information about server-side encryption, see # [Using Server-Side Encryption][1] in the *Amazon S3 User Guide*. # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html # @option options [String] :storage_class # If the `x-amz-storage-class` header is not used, the copied object # will be stored in the `STANDARD` Storage Class by default. The # `STANDARD` storage class provides high durability and high # availability. Depending on performance needs, you can specify a # different Storage Class. # # * Directory buckets - For directory buckets, only the S3 # Express One Zone storage class is supported to store newly created # objects. Unsupported storage class values won't write a destination # object and will respond with the HTTP status code `400 Bad Request`. # # * Amazon S3 on Outposts - S3 on Outposts only uses the # `OUTPOSTS` Storage Class. # # # # You can use the `CopyObject` action to change the storage class of an # object that is already stored in Amazon S3 by using the # `x-amz-storage-class` header. For more information, see [Storage # Classes][1] in the *Amazon S3 User Guide*. # # Before using an object as a source object for the copy operation, you # must restore a copy of it if it meets any of the following conditions: # # * The storage class of the source object is `GLACIER` or # `DEEP_ARCHIVE`. # # * The storage class of the source object is `INTELLIGENT_TIERING` and # it's [S3 Intelligent-Tiering access tier][2] is `Archive Access` or # `Deep Archive Access`. # # For more information, see [RestoreObject][3] and [Copying Objects][4] # in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html # @option options [String] :website_redirect_location # If the destination bucket is configured as a website, redirects # requests for this object copy to another object in the same bucket or # to an external URL. 
Amazon S3 stores the value of this header in the # object metadata. This value is unique to each object and is not copied # when using the `x-amz-metadata-directive` header. Instead, you may opt # to provide this header in combination with the # `x-amz-metadata-directive` header. # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, `AES256`). # # When you perform a `CopyObject` operation, if you want to use a # different type of encryption setting for the target object, you can # specify appropriate encryption-related headers to encrypt the target # object with an Amazon S3 managed key, a KMS key, or a # customer-provided key. If the encryption setting in your request is # different from the default encryption configuration of the destination # bucket, the encryption setting in your request takes precedence. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @option options [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded. Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @option options [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @option options [String] :ssekms_key_id # Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object # encryption. All GET and PUT requests for an object protected by KMS # will fail if they're not made via SSL or using SigV4. For information # about configuring any of the officially supported Amazon Web Services # SDKs and Amazon Web Services CLI, see [Specifying the Signature # Version in Request Authentication][1] in the *Amazon S3 User Guide*. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version # @option options [String] :ssekms_encryption_context # Specifies the Amazon Web Services KMS Encryption Context to use for # object encryption. The value of this header is a base64-encoded UTF-8 # string holding JSON with the encryption context key-value pairs. This # value must be explicitly added to specify encryption context for # `CopyObject` requests. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @option options [Boolean] :bucket_key_enabled # Specifies whether Amazon S3 should use an S3 Bucket Key for object # encryption with server-side encryption using Key Management Service # (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can enable # an S3 Bucket Key for the object. # # Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key # for object encryption with SSE-KMS. Specifying this header with a COPY # action doesn’t affect bucket-level settings for S3 Bucket Key. 
# # For more information, see [Amazon S3 Bucket Keys][1] in the *Amazon S3 # User Guide*. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html # @option options [String] :copy_source_sse_customer_algorithm # Specifies the algorithm to use when decrypting the source object (for # example, `AES256`). # # If the source object for the copy is stored in Amazon S3 using SSE-C, # you must provide the necessary encryption information in your request # so that Amazon S3 can decrypt the object for copying. # # This functionality is not supported when the source object is in a # directory bucket. # # # @option options [String] :copy_source_sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use to # decrypt the source object. The encryption key provided in this header # must be the same one that was used when the source object was created. # # If the source object for the copy is stored in Amazon S3 using SSE-C, # you must provide the necessary encryption information in your request # so that Amazon S3 can decrypt the object for copying. # # This functionality is not supported when the source object is in a # directory bucket. # # # @option options [String] :copy_source_sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # If the source object for the copy is stored in Amazon S3 using SSE-C, # you must provide the necessary encryption information in your request # so that Amazon S3 can decrypt the object for copying. # # This functionality is not supported when the source object is in a # directory bucket. # # # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [String] :tagging # The tag-set for the object copy in the destination bucket. This value # must be used in conjunction with the `x-amz-tagging-directive` if you # choose `REPLACE` for the `x-amz-tagging-directive`. If you choose # `COPY` for the `x-amz-tagging-directive`, you don't need to set the # `x-amz-tagging` header, because the tag-set will be copied from the # source object directly. The tag-set must be encoded as URL Query # parameters. # # The default value is the empty value. # # **Directory buckets** - For directory buckets in a `CopyObject` # operation, only the empty tag-set is supported. Any requests that # attempt to write non-empty tags into directory buckets will receive a # `501 Not Implemented` status code. When the destination bucket is a # directory bucket, you will receive a `501 Not Implemented` response in # any of the following situations: # # * When you attempt to `COPY` the tag-set from an S3 source object that # has non-empty tags. 
# # * When you attempt to `REPLACE` the tag-set of a source object and set # a non-empty value to `x-amz-tagging`. # # * When you don't set the `x-amz-tagging-directive` header and the # source object has non-empty tags. This is because the default value # of `x-amz-tagging-directive` is `COPY`. # # Because only the empty tag-set is supported for directory buckets in a # `CopyObject` operation, the following situations are allowed: # # * When you attempt to `COPY` the tag-set from a directory bucket # source object that has no tags to a general purpose bucket. It # copies an empty tag-set to the destination object. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and set the `x-amz-tagging` value of the directory # bucket destination object to empty. # # * When you attempt to `REPLACE` the tag-set of a general purpose # bucket source object that has non-empty tags and set the # `x-amz-tagging` value of the directory bucket destination object to # empty. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and don't set the `x-amz-tagging` value of the # directory bucket destination object. This is because the default # value of `x-amz-tagging` is the empty value. # # # @option options [String] :object_lock_mode # The Object Lock mode that you want to apply to the object copy. # # This functionality is not supported for directory buckets. # # # @option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date # The date and time when you want the Object Lock of the object copy to # expire. # # This functionality is not supported for directory buckets. # # # @option options [String] :object_lock_legal_hold_status # Specifies whether you want to apply a legal hold to the object copy. # # This functionality is not supported for directory buckets. # # # @option options [String] :expected_bucket_owner # The account ID of the expected destination bucket owner. If the # account ID that you provide does not match the actual owner of the # destination bucket, the request fails with the HTTP status code `403 # Forbidden` (access denied). # @option options [String] :expected_source_bucket_owner # The account ID of the expected source bucket owner. If the account ID # that you provide does not match the actual owner of the source bucket, # the request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [Types::CopyObjectOutput] def copy_from(options = {}) options = options.merge( bucket: @bucket_name, key: @key ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.copy_object(options) end resp.data end # @example Request syntax with placeholder values # # object.delete({ # mfa: "MFA", # version_id: "ObjectVersionId", # request_payer: "requester", # accepts requester # bypass_governance_retention: false, # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :mfa # The concatenation of the authentication device's serial number, a # space, and the value that is displayed on your authentication device. # Required to permanently delete a versioned object if versioning is # configured with MFA delete enabled. # # This functionality is not supported for directory buckets. # # # @option options [String] :version_id # Version ID used to reference a specific version of the object. # # For directory buckets in this API operation, only the `null` value of # the version ID is supported. 
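    # An illustrative sketch of permanently deleting one version of a
    # versioned object (hand-written, not generated; the version ID is a
    # placeholder):
    #
    #     obj.delete(version_id: 'exampleVersionId')
    #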
# # # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [Boolean] :bypass_governance_retention # Indicates whether S3 Object Lock should bypass Governance-mode # restrictions to process this operation. To use this header, you must # have the `s3:BypassGovernanceRetention` permission. # # This functionality is not supported for directory buckets. # # # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [Types::DeleteObjectOutput] def delete(options = {}) options = options.merge( bucket: @bucket_name, key: @key ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.delete_object(options) end resp.data end # @example Request syntax with placeholder values # # object.get({ # if_match: "IfMatch", # if_modified_since: Time.now, # if_none_match: "IfNoneMatch", # if_unmodified_since: Time.now, # range: "Range", # response_cache_control: "ResponseCacheControl", # response_content_disposition: "ResponseContentDisposition", # response_content_encoding: "ResponseContentEncoding", # response_content_language: "ResponseContentLanguage", # response_content_type: "ResponseContentType", # response_expires: Time.now, # version_id: "ObjectVersionId", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # request_payer: "requester", # accepts requester # part_number: 1, # expected_bucket_owner: "AccountId", # checksum_mode: "ENABLED", # accepts ENABLED # }) # @param [Hash] options ({}) # @option options [String] :if_match # Return the object only if its entity tag (ETag) is the same as the one # specified in this header; otherwise, return a `412 Precondition # Failed` error. # # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: `If-Match` condition evaluates to # `true`, and; `If-Unmodified-Since` condition evaluates to `false`; # then, S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [Time,DateTime,Date,Integer,String] :if_modified_since # Return the object only if it has been modified since the specified # time; otherwise, return a `304 Not Modified` error. # # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows:` If-None-Match` condition evaluates # to `false`, and; `If-Modified-Since` condition evaluates to `true`; # then, S3 returns `304 Not Modified` status code. # # For more information about conditional requests, see [RFC 7232][1]. 
# # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [String] :if_none_match # Return the object only if its entity tag (ETag) is different from the # one specified in this header; otherwise, return a `304 Not Modified` # error. # # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows:` If-None-Match` condition evaluates # to `false`, and; `If-Modified-Since` condition evaluates to `true`; # then, S3 returns `304 Not Modified` HTTP status code. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [Time,DateTime,Date,Integer,String] :if_unmodified_since # Return the object only if it has not been modified since the specified # time; otherwise, return a `412 Precondition Failed` error. # # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: `If-Match` condition evaluates to # `true`, and; `If-Unmodified-Since` condition evaluates to `false`; # then, S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [String] :range # Downloads the specified byte range of an object. For more information # about the HTTP Range header, see # [https://www.rfc-editor.org/rfc/rfc9110.html#name-range][1]. # # Amazon S3 doesn't support retrieving multiple ranges of data per # `GET` request. # # # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-range # @option options [String] :response_cache_control # Sets the `Cache-Control` header of the response. # @option options [String] :response_content_disposition # Sets the `Content-Disposition` header of the response. # @option options [String] :response_content_encoding # Sets the `Content-Encoding` header of the response. # @option options [String] :response_content_language # Sets the `Content-Language` header of the response. # @option options [String] :response_content_type # Sets the `Content-Type` header of the response. # @option options [Time,DateTime,Date,Integer,String] :response_expires # Sets the `Expires` header of the response. # @option options [String] :version_id # Version ID used to reference a specific version of the object. # # By default, the `GetObject` operation returns the current version of # an object. To return a different version, use the `versionId` # subresource. # # * If you include a `versionId` in your request header, you must have # the `s3:GetObjectVersion` permission to access a specific version of # an object. The `s3:GetObject` permission is not required in this # scenario. # # * If you request the current version of an object without a specific # `versionId` in the request header, only the `s3:GetObject` # permission is required. The `s3:GetObjectVersion` permission is not # required in this scenario. # # * **Directory buckets** - S3 Versioning isn't enabled and supported # for directory buckets. For this API operation, only the `null` value # of the version ID is supported by directory buckets. You can only # specify `null` to the `versionId` query parameter in the request. # # # # For more information about versioning, see [PutBucketVersioning][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html # @option options [String] :sse_customer_algorithm # Specifies the algorithm to use when decrypting the object (for # example, `AES256`). 
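    # A usage sketch for an SSE-C `GET` (hand-written, not generated):
    # assumes `key` holds the exact raw key used at upload; the bundled
    # `sse_cpk` plugin base64-encodes the key and adds the MD5 header when
    # one is not supplied.
    #
    #     resp = obj.get(
    #       sse_customer_algorithm: 'AES256',
    #       sse_customer_key: key
    #     )
    #     resp.body.read
    #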
# # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object in # Amazon S3, then when you GET the object, you must use the following # headers: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @option options [String] :sse_customer_key # Specifies the customer-provided encryption key that you originally # provided for Amazon S3 to encrypt the data before storing it. This # value is used to decrypt the object when recovering it and must match # the one used when storing the data. The key must be appropriate for # use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object in # Amazon S3, then when you GET the object, you must use the following # headers: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @option options [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the customer-provided encryption # key according to RFC 1321. Amazon S3 uses this header for a message # integrity check to ensure that the encryption key was transmitted # without error. # # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object in # Amazon S3, then when you GET the object, you must use the following # headers: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [Integer] :part_number # Part number of the object being read. 
This is a positive integer # between 1 and 10,000. Effectively performs a 'ranged' GET request # for the part specified. Useful for downloading just a part of an # object. # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @option options [String] :checksum_mode # To retrieve the checksum, this mode must be enabled. # @return [Types::GetObjectOutput] def get(options = {}, &block) options = options.merge( bucket: @bucket_name, key: @key ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.get_object(options, &block) end resp.data end # @example Request syntax with placeholder values # # multipartupload = object.initiate_multipart_upload({ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control # cache_control: "CacheControl", # content_disposition: "ContentDisposition", # content_encoding: "ContentEncoding", # content_language: "ContentLanguage", # content_type: "ContentType", # expires: Time.now, # grant_full_control: "GrantFullControl", # grant_read: "GrantRead", # grant_read_acp: "GrantReadACP", # grant_write_acp: "GrantWriteACP", # metadata: { # "MetadataKey" => "MetadataValue", # }, # server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE # website_redirect_location: "WebsiteRedirectLocation", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # ssekms_key_id: "SSEKMSKeyId", # ssekms_encryption_context: "SSEKMSEncryptionContext", # bucket_key_enabled: false, # request_payer: "requester", # accepts requester # tagging: "TaggingHeader", # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE # object_lock_retain_until_date: Time.now, # object_lock_legal_hold_status: "ON", # accepts ON, OFF # expected_bucket_owner: "AccountId", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # }) # @param [Hash] options ({}) # @option options [String] :acl # The canned ACL to apply to the object. Amazon S3 supports a set of # predefined ACLs, known as *canned ACLs*. Each canned ACL has a # predefined set of grantees and permissions. For more information, see # [Canned ACL][1] in the *Amazon S3 User Guide*. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can grant access permissions to # individual Amazon Web Services accounts or to predefined groups # defined by Amazon S3. These permissions are then added to the access # control list (ACL) on the new object. For more information, see [Using # ACLs][2]. One way to grant the permissions using the request headers # is to specify a canned ACL with the `x-amz-acl` request header. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html # @option options [String] :cache_control # Specifies caching behavior along the request/reply chain. 
# @option options [String] :content_disposition # Specifies presentational information for the object. # @option options [String] :content_encoding # Specifies what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the media-type # referenced by the Content-Type header field. # # For directory buckets, only the `aws-chunked` value is supported in # this header field. # # # @option options [String] :content_language # The language that the content is in. # @option options [String] :content_type # A standard MIME type describing the format of the object data. # @option options [Time,DateTime,Date,Integer,String] :expires # The date and time at which the object is no longer cacheable. # @option options [String] :grant_full_control # Specify access permissions explicitly to give the grantee READ, # READ\_ACP, and WRITE\_ACP permissions on the object. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*. # # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an Amazon # Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in the # following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, see # [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the Amazon # Web Services accounts identified by account IDs permissions to read # object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666" ` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # @option options [String] :grant_read # Specify access permissions explicitly to allow grantee to read the # object data and its metadata. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*. 
# # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an Amazon # Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in the # following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, see # [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the Amazon # Web Services accounts identified by account IDs permissions to read # object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666"` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # @option options [String] :grant_read_acp # Specify access permissions explicitly to allow the grantee to read the # object ACL. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*. # # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an Amazon # Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in the # following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, see # [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the Amazon # Web Services accounts identified by account IDs permissions to read # object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666"` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # @option options [String] :grant_write_acp # Specify access permissions explicitly to allow the grantee to write # the ACL for the applicable object. # # By default, all objects are private. Only the owner has full access # control.
When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*. # # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an Amazon # Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in the # following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, see # [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the Amazon # Web Services accounts identified by account IDs permissions to read # object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666" ` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # @option options [Hash] :metadata # A map of metadata to store with the object in S3. # @option options [String] :server_side_encryption # The server-side encryption algorithm used when you store this object # in Amazon S3 (for example, `AES256`, `aws:kms`). # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # @option options [String] :storage_class # By default, Amazon S3 uses the STANDARD Storage Class to store newly # created objects. The STANDARD storage class provides high durability # and high availability. Depending on performance needs, you can specify # a different Storage Class. For more information, see [Storage # Classes][1] in the *Amazon S3 User Guide*. # # * For directory buckets, only the S3 Express One Zone storage class is # supported to store newly created objects. # # * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # @option options [String] :website_redirect_location # If the bucket is configured as a website, redirects requests for this # object to another object in the same bucket or to an external URL. # Amazon S3 stores the value of this header in the object metadata. # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, AES256). # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded; Amazon S3 does not store the encryption key. 
The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the customer-provided encryption # key according to RFC 1321. Amazon S3 uses this header for a message # integrity check to ensure that the encryption key was transmitted # without error. # # This functionality is not supported for directory buckets. # # # @option options [String] :ssekms_key_id # Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric # encryption customer managed key to use for object encryption. # # This functionality is not supported for directory buckets. # # # @option options [String] :ssekms_encryption_context # Specifies the Amazon Web Services KMS Encryption Context to use for # object encryption. The value of this header is a base64-encoded UTF-8 # string holding JSON with the encryption context key-value pairs. # # This functionality is not supported for directory buckets. # # # @option options [Boolean] :bucket_key_enabled # Specifies whether Amazon S3 should use an S3 Bucket Key for object # encryption with server-side encryption using Key Management Service # (KMS) keys (SSE-KMS). Setting this header to `true` causes Amazon S3 # to use an S3 Bucket Key for object encryption with SSE-KMS. # # Specifying this header with an object action doesn’t affect # bucket-level settings for S3 Bucket Key. # # This functionality is not supported for directory buckets. # # # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [String] :tagging # The tag-set for the object. The tag-set must be encoded as URL Query # parameters. # # This functionality is not supported for directory buckets. # # # @option options [String] :object_lock_mode # Specifies the Object Lock mode that you want to apply to the uploaded # object. # # This functionality is not supported for directory buckets. # # # @option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date # Specifies the date and time when you want the Object Lock to expire. # # This functionality is not supported for directory buckets. # # # @option options [String] :object_lock_legal_hold_status # Specifies whether you want to apply a legal hold to the uploaded # object. # # This functionality is not supported for directory buckets. # # # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @option options [String] :checksum_algorithm # Indicates the algorithm that you want Amazon S3 to use to create the # checksum for the object. 
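#
#   A minimal usage sketch -- request SHA-256 checksums for the parts of
#   this multipart upload:
#
#       upload = object.initiate_multipart_upload(checksum_algorithm: 'SHA256')
#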
For more information, see [Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [MultipartUpload] def initiate_multipart_upload(options = {}) options = options.merge( bucket: @bucket_name, key: @key ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.create_multipart_upload(options) end MultipartUpload.new( bucket_name: @bucket_name, object_key: @key, id: resp.data.upload_id, client: @client ) end # @example Request syntax with placeholder values # # object.put({ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control # body: source_file, # cache_control: "CacheControl", # content_disposition: "ContentDisposition", # content_encoding: "ContentEncoding", # content_language: "ContentLanguage", # content_length: 1, # content_md5: "ContentMD5", # content_type: "ContentType", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # checksum_crc32: "ChecksumCRC32", # checksum_crc32c: "ChecksumCRC32C", # checksum_sha1: "ChecksumSHA1", # checksum_sha256: "ChecksumSHA256", # expires: Time.now, # grant_full_control: "GrantFullControl", # grant_read: "GrantRead", # grant_read_acp: "GrantReadACP", # grant_write_acp: "GrantWriteACP", # metadata: { # "MetadataKey" => "MetadataValue", # }, # server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE # website_redirect_location: "WebsiteRedirectLocation", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # ssekms_key_id: "SSEKMSKeyId", # ssekms_encryption_context: "SSEKMSEncryptionContext", # bucket_key_enabled: false, # request_payer: "requester", # accepts requester # tagging: "TaggingHeader", # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE # object_lock_retain_until_date: Time.now, # object_lock_legal_hold_status: "ON", # accepts ON, OFF # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :acl # The canned ACL to apply to the object. For more information, see # [Canned ACL][1] in the *Amazon S3 User Guide*. # # When adding a new object, you can use headers to grant ACL-based # permissions to individual Amazon Web Services accounts or to # predefined groups defined by Amazon S3. These permissions are then # added to the ACL on the object. By default, all objects are private. # Only the owner has full access control. For more information, see # [Access Control List (ACL) Overview][2] and [Managing ACLs Using the # REST API][3] in the *Amazon S3 User Guide*. # # If the bucket that you're uploading objects to uses the bucket owner # enforced setting for S3 Object Ownership, ACLs are disabled and no # longer affect permissions. Buckets that use this setting only accept # PUT requests that don't specify an ACL or PUT requests that specify # bucket owner full control ACLs, such as the # `bucket-owner-full-control` canned ACL or an equivalent form of this # ACL expressed in the XML format. PUT requests that contain other ACLs # (for example, custom grants to certain Amazon Web Services accounts) # fail and return a `400` error with the error code # `AccessControlListNotSupported`. 
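#
#   A minimal usage sketch (assumes a bucket with the bucket owner
#   enforced Object Ownership setting, where only this canned ACL is
#   accepted):
#
#       object.put(body: 'hello world', acl: 'bucket-owner-full-control')
#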
For more information, see [ # Controlling ownership of objects and disabling ACLs][4] in the *Amazon # S3 User Guide*. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # @option options [String, StringIO, File] :body # Object data. # @option options [String] :cache_control # Can be used to specify caching behavior along the request/reply chain. # For more information, see # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9][1]. # # # # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 # @option options [String] :content_disposition # Specifies presentational information for the object. For more # information, see # [https://www.rfc-editor.org/rfc/rfc6266#section-4][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc6266#section-4 # @option options [String] :content_encoding # Specifies what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the media-type # referenced by the Content-Type header field. For more information, see # [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding # @option options [String] :content_language # The language the content is in. # @option options [Integer] :content_length # Size of the body in bytes. This parameter is useful when the size of # the body cannot be determined automatically. For more information, see # [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length # @option options [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the message (without the # headers) according to RFC 1864. This header can be used as a message # integrity check to verify that the data is the same data that was # originally sent. Although it is optional, we recommend using the # Content-MD5 mechanism as an end-to-end integrity check. For more # information about REST request authentication, see [REST # Authentication][1]. # # The `Content-MD5` header is required for any request to upload an # object with a retention period configured using Amazon S3 Object Lock. # For more information about Amazon S3 Object Lock, see [Amazon S3 # Object Lock Overview][2] in the *Amazon S3 User Guide*. # # # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html # @option options [String] :content_type # A standard MIME type describing the format of the contents. For more # information, see # [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. 
When you send this header, # there must be a corresponding `x-amz-checksum-algorithm ` or # `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request # with the HTTP status code `400 Bad Request`. # # For the `x-amz-checksum-algorithm ` header, replace ` algorithm ` with # the supported algorithm from the following list: # # * CRC32 # # * CRC32C # # * SHA1 # # * SHA256 # # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # If the individual checksum value you provide through # `x-amz-checksum-algorithm ` doesn't match the checksum algorithm you # set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter and uses the checksum algorithm # that matches the provided value in `x-amz-checksum-algorithm `. # # For directory buckets, when you use Amazon Web Services SDKs, `CRC32` # is the default checksum algorithm that's used for performance. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_crc32 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32 checksum of the object. For # more information, see [Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_crc32c # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32C checksum of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_sha1 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 160-bit SHA-1 digest of the object. For # more information, see [Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_sha256 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 256-bit SHA-256 digest of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [Time,DateTime,Date,Integer,String] :expires # The date and time at which the object is no longer cacheable. For more # information, see # [https://www.rfc-editor.org/rfc/rfc7234#section-5.3][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3 # @option options [String] :grant_full_control # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the # object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [String] :grant_read # Allows grantee to read the object data and its metadata. # # * This functionality is not supported for directory buckets. 
# # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [String] :grant_read_acp # Allows grantee to read the object ACL. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [String] :grant_write_acp # Allows grantee to write the ACL for the applicable object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [Hash] :metadata # A map of metadata to store with the object in S3. # @option options [String] :server_side_encryption # The server-side encryption algorithm that was used when you store this # object in Amazon S3 (for example, `AES256`, `aws:kms`, # `aws:kms:dsse`). # # General purpose buckets - You have four mutually exclusive # options to protect data using server-side encryption in Amazon S3, # depending on how you choose to manage the encryption keys. # Specifically, the encryption key options are Amazon S3 managed keys # (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and # customer-provided keys (SSE-C). Amazon S3 encrypts data with # server-side encryption by using Amazon S3 managed keys (SSE-S3) by # default. You can optionally tell Amazon S3 to encrypt data at rest by # using server-side encryption with other key options. For more # information, see [Using Server-Side Encryption][1] in the *Amazon S3 # User Guide*. # # Directory buckets - For directory buckets, only the # server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) # value is supported. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html # @option options [String] :storage_class # By default, Amazon S3 uses the STANDARD Storage Class to store newly # created objects. The STANDARD storage class provides high durability # and high availability. Depending on performance needs, you can specify # a different Storage Class. For more information, see [Storage # Classes][1] in the *Amazon S3 User Guide*. # # * For directory buckets, only the S3 Express One Zone storage class is # supported to store newly created objects. # # * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # @option options [String] :website_redirect_location # If the bucket is configured as a website, redirects requests for this # object to another object in the same bucket or to an external URL. # Amazon S3 stores the value of this header in the object metadata. For # information about object metadata, see [Object Key and Metadata][1] in # the *Amazon S3 User Guide*. # # In the following example, the request header sets the redirect to an # object (anotherPage.html) in the same bucket: # # `x-amz-website-redirect-location: /anotherPage.html` # # In the following example, the request header sets the object redirect # to another website: # # `x-amz-website-redirect-location: http://www.example.com/` # # For more information about website hosting in Amazon S3, see [Hosting # Websites on Amazon S3][2] and [How to Configure Website Page # Redirects][3] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. 
# # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html # @option options [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, `AES256`). # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded; Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported for directory buckets. # # # @option options [String] :ssekms_key_id # If `x-amz-server-side-encryption` has a valid value of `aws:kms` or # `aws:kms:dsse`, this header specifies the ID (Key ID, Key ARN, or Key # Alias) of the Key Management Service (KMS) symmetric encryption # customer managed key that was used for the object. If you specify # `x-amz-server-side-encryption:aws:kms` or # `x-amz-server-side-encryption:aws:kms:dsse`, but do not provide` # x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the # Amazon Web Services managed key (`aws/s3`) to protect the data. If the # KMS key does not exist in the same account that's issuing the # command, you must use the full ARN and not just the ID. # # This functionality is not supported for directory buckets. # # # @option options [String] :ssekms_encryption_context # Specifies the Amazon Web Services KMS Encryption Context to use for # object encryption. The value of this header is a base64-encoded UTF-8 # string holding JSON with the encryption context key-value pairs. This # value is stored as object metadata and automatically gets passed on to # Amazon Web Services KMS for future `GetObject` or `CopyObject` # operations on this object. This value must be explicitly added during # `CopyObject` operations. # # This functionality is not supported for directory buckets. # # # @option options [Boolean] :bucket_key_enabled # Specifies whether Amazon S3 should use an S3 Bucket Key for object # encryption with server-side encryption using Key Management Service # (KMS) keys (SSE-KMS). Setting this header to `true` causes Amazon S3 # to use an S3 Bucket Key for object encryption with SSE-KMS. # # Specifying this header with a PUT action doesn’t affect bucket-level # settings for S3 Bucket Key. # # This functionality is not supported for directory buckets. # # # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. 
# # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [String] :tagging # The tag-set for the object. The tag-set must be encoded as URL Query # parameters. (For example, "Key1=Value1") # # This functionality is not supported for directory buckets. # # # @option options [String] :object_lock_mode # The Object Lock mode that you want to apply to this object. # # This functionality is not supported for directory buckets. # # # @option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date # The date and time when you want this object's Object Lock to expire. # Must be formatted as a timestamp parameter. # # This functionality is not supported for directory buckets. # # # @option options [String] :object_lock_legal_hold_status # Specifies whether a legal hold will be applied to this object. For # more information about S3 Object Lock, see [Object Lock][1] in the # *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [Types::PutObjectOutput] def put(options = {}) options = options.merge( bucket: @bucket_name, key: @key ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_object(options) end resp.data end # @example Request syntax with placeholder values # # object.restore_object({ # version_id: "ObjectVersionId", # restore_request: { # days: 1, # glacier_job_parameters: { # tier: "Standard", # required, accepts Standard, Bulk, Expedited # }, # type: "SELECT", # accepts SELECT # tier: "Standard", # accepts Standard, Bulk, Expedited # description: "Description", # select_parameters: { # input_serialization: { # required # csv: { # file_header_info: "USE", # accepts USE, IGNORE, NONE # comments: "Comments", # quote_escape_character: "QuoteEscapeCharacter", # record_delimiter: "RecordDelimiter", # field_delimiter: "FieldDelimiter", # quote_character: "QuoteCharacter", # allow_quoted_record_delimiter: false, # }, # compression_type: "NONE", # accepts NONE, GZIP, BZIP2 # json: { # type: "DOCUMENT", # accepts DOCUMENT, LINES # }, # parquet: { # }, # }, # expression_type: "SQL", # required, accepts SQL # expression: "Expression", # required # output_serialization: { # required # csv: { # quote_fields: "ALWAYS", # accepts ALWAYS, ASNEEDED # quote_escape_character: "QuoteEscapeCharacter", # record_delimiter: "RecordDelimiter", # field_delimiter: "FieldDelimiter", # quote_character: "QuoteCharacter", # }, # json: { # record_delimiter: "RecordDelimiter", # }, # }, # }, # output_location: { # s3: { # bucket_name: "BucketName", # required # prefix: "LocationPrefix", # required # encryption: { # encryption_type: "AES256", # required, accepts AES256, aws:kms, aws:kms:dsse # kms_key_id: "SSEKMSKeyId", # kms_context: "KMSContext", # }, # canned_acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control # access_control_list: [ # { # grantee: { # display_name: "DisplayName", # email_address: "EmailAddress", # id: "ID", # type: "CanonicalUser", # required, accepts CanonicalUser, 
AmazonCustomerByEmail, Group # uri: "URI", # }, # permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP # }, # ], # tagging: { # tag_set: [ # required # { # key: "ObjectKey", # required # value: "Value", # required # }, # ], # }, # user_metadata: [ # { # name: "MetadataKey", # value: "MetadataValue", # }, # ], # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE # }, # }, # }, # request_payer: "requester", # accepts requester # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :version_id # VersionId used to reference a specific version of the object. # @option options [Types::RestoreRequest] :restore_request # Container for restore job parameters. # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [Types::RestoreObjectOutput] def restore_object(options = {}) options = options.merge( bucket: @bucket_name, key: @key ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.restore_object(options) end resp.data end # @example Request syntax with placeholder values # # object.head({ # if_match: "IfMatch", # if_modified_since: Time.now, # if_none_match: "IfNoneMatch", # if_unmodified_since: Time.now, # range: "Range", # version_id: "ObjectVersionId", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # request_payer: "requester", # accepts requester # part_number: 1, # expected_bucket_owner: "AccountId", # checksum_mode: "ENABLED", # accepts ENABLED # }) # @param [Hash] options ({}) # @option options [String] :if_match # Return the object only if its entity tag (ETag) is the same as the one # specified; otherwise, return a 412 (precondition failed) error. 
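#
#   A minimal usage sketch (ETag value hypothetical) -- fetch the
#   metadata only if the object still matches a previously cached ETag:
#
#       object.head(if_match: '"686897696a7c876b7e"')
#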
# # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: # # * `If-Match` condition evaluates to `true`, and; # # * `If-Unmodified-Since` condition evaluates to `false`; # # Then Amazon S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [Time,DateTime,Date,Integer,String] :if_modified_since # Return the object only if it has been modified since the specified # time; otherwise, return a 304 (not modified) error. # # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows: # # * `If-None-Match` condition evaluates to `false`, and; # # * `If-Modified-Since` condition evaluates to `true`; # # Then Amazon S3 returns the `304 Not Modified` response code. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [String] :if_none_match # Return the object only if its entity tag (ETag) is different from the # one specified; otherwise, return a 304 (not modified) error. # # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows: # # * `If-None-Match` condition evaluates to `false`, and; # # * `If-Modified-Since` condition evaluates to `true`; # # Then Amazon S3 returns the `304 Not Modified` response code. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [Time,DateTime,Date,Integer,String] :if_unmodified_since # Return the object only if it has not been modified since the specified # time; otherwise, return a 412 (precondition failed) error. # # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: # # * `If-Match` condition evaluates to `true`, and; # # * `If-Unmodified-Since` condition evaluates to `false`; # # Then Amazon S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [String] :range # HeadObject returns only the metadata for an object. If the Range is # satisfiable, only the `ContentLength` is affected in the response. If # the Range is not satisfiable, S3 returns a `416 - Requested Range Not # Satisfiable` error. # @option options [String] :version_id # Version ID used to reference a specific version of the object. # # For directory buckets in this API operation, only the `null` value of # the version ID is supported. # # # @option options [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, AES256). # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded; Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. 
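#
#   A sketch of deriving this value in Ruby (assumes `key` holds the raw
#   key bytes):
#
#       require 'digest'
#       md5 = Digest::MD5.base64digest(key)
#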
Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported for directory buckets. # # # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [Integer] :part_number # Part number of the object being read. This is a positive integer # between 1 and 10,000. Effectively performs a 'ranged' HEAD request # for the part specified. Useful for querying the size of the part and # the number of parts in this object. # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @option options [String] :checksum_mode # To retrieve the checksum, this parameter must be enabled. # # In addition, if you enable `ChecksumMode` and the object is encrypted # with Amazon Web Services Key Management Service (Amazon Web Services # KMS), you must have permission to use the `kms:Decrypt` action for the # request to succeed. # @return [Types::HeadObjectOutput] def head(options = {}) options = options.merge( bucket: @bucket_name, key: @key ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.head_object(options) end resp.data end # @!group Associations # @return [ObjectAcl] def acl ObjectAcl.new( bucket_name: @bucket_name, object_key: @key, client: @client ) end # @return [Bucket] def bucket Bucket.new( name: @bucket_name, client: @client ) end # @param [String] id # @return [MultipartUpload] def multipart_upload(id) MultipartUpload.new( bucket_name: @bucket_name, object_key: @key, id: id, client: @client ) end # @param [String] id # @return [ObjectVersion] def version(id) ObjectVersion.new( bucket_name: @bucket_name, object_key: @key, id: id, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name, key: @key } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end def extract_key(args, options) value = args[1] || options.delete(:key) case value when String then value when nil then raise ArgumentError, "missing required option :key" else msg = "expected :key to be a String, got #{value.class}" raise ArgumentError, msg end end def yield_waiter_and_warn(waiter, &block) if !@waiter_block_warned msg = "pass options to configure the waiter; "\ "yielding the waiter is deprecated" warn(msg) @waiter_block_warned = true end yield(waiter.waiter) end def separate_params_and_options(options) opts = Set.new( [:client, :max_attempts, :delay, :before_attempt, :before_wait] ) waiter_opts = {}
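# Keys in `opts` configure the waiter itself; all other keys are
# forwarded as request parameters to the underlying client call.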
waiter_params = {} options.each_pair do |key, value| if opts.include?(key) waiter_opts[key] = value else waiter_params[key] = value end end waiter_opts[:client] ||= @client [waiter_opts, waiter_params] end class Collection < Aws::Resources::Collection # @!group Batch Actions # @example Request syntax with placeholder values # # object.batch_delete!({ # mfa: "MFA", # request_payer: "requester", # accepts requester # bypass_governance_retention: false, # expected_bucket_owner: "AccountId", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # }) # @param options ({}) # @option options [String] :mfa # The concatenation of the authentication device's serial number, a # space, and the value that is displayed on your authentication device. # Required to permanently delete a versioned object if versioning is # configured with MFA delete enabled. # # When performing the `DeleteObjects` operation on an MFA delete enabled # bucket, which attempts to delete the specified versioned objects, you # must include an MFA token. If you don't provide an MFA token, the # entire request will fail, even if there are non-versioned objects that # you are trying to delete. If you provide an invalid token, whether # there are versioned object keys in the request or not, the entire # Multi-Object Delete request will fail. For information about MFA # Delete, see [ MFA Delete][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [Boolean] :bypass_governance_retention # Specifies whether you want to delete this object even if it has a # Governance-type Object Lock in place. To use this header, you must # have the `s3:BypassGovernanceRetention` permission. # # This functionality is not supported for directory buckets. # # # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum-algorithm ` or # `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request # with the HTTP status code `400 Bad Request`. # # For the `x-amz-checksum-algorithm ` header, replace ` algorithm ` with # the supported algorithm from the following list: # # * CRC32 # # * CRC32C # # * SHA1 # # * SHA256 # # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. 
# # If the individual checksum value you provide through # `x-amz-checksum-algorithm ` doesn't match the checksum algorithm you # set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter and uses the checksum algorithm # that matches the provided value in `x-amz-checksum-algorithm `. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [void] def batch_delete!(options = {}) batch_enum.each do |batch| params = Aws::Util.copy_hash(options) params[:bucket] = batch[0].bucket_name params[:delete] ||= {} params[:delete][:objects] ||= [] batch.each do |item| params[:delete][:objects] << { key: item.key } end Aws::Plugins::UserAgent.feature('resource') do batch[0].client.delete_objects(params) end end nil end # @!endgroup end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/0000755000004100000410000000000014563445240017736 5ustar www-datawww-dataaws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/express_session_auth.rb0000644000004100000410000000645714563445240024554 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module Plugins # @api private class ExpressSessionAuth < Seahorse::Client::Plugin # This should be s3_disable_express_auth instead # But this is not a built in. We're overwriting the generated value option(:disable_s3_express_session_auth, default: false, doc_type: 'Boolean', docstring: <<-DOCS) do |cfg| When `true`, S3 Express session authentication is disabled. DOCS resolve_disable_s3_express_session_auth(cfg) end option(:express_credentials_provider, doc_type: 'Aws::S3::ExpressCredentialsProvider', rbs_type: 'untyped', docstring: <<-DOCS) do |_cfg| Credential Provider for S3 Express endpoints. Manages credentials for different buckets. DOCS Aws::S3::ExpressCredentialsProvider.new end # @api private class Handler < Seahorse::Client::Handler def call(context) if (props = context[:endpoint_properties]) # S3 Express endpoint - turn off md5 and enable crc32 default if (backend = props['backend']) && backend == 'S3Express' if context.operation_name == :put_object || checksum_required?(context) context[:default_request_checksum_algorithm] = 'CRC32' end context[:s3_express_endpoint] = true end # if s3 express auth, use new credentials and sign additional header if context[:auth_scheme]['name'] == 'sigv4-s3express' && !context.config.disable_s3_express_session_auth bucket = context.params[:bucket] credentials_provider = context.config.express_credentials_provider credentials = credentials_provider.express_credentials_for(bucket) context[:sigv4_credentials] = credentials # Sign will use this end end @handler.call(context) end private def checksum_required?(context) context.operation.http_checksum_required || (context.operation.http_checksum && context.operation.http_checksum['requestChecksumRequired']) end end handler(Handler) # Optimization - sets this client as the client to create sessions. def after_initialize(client) provider = client.config.express_credentials_provider provider.client = client unless provider.client end class << self private def resolve_disable_s3_express_session_auth(cfg) value = ENV['AWS_S3_DISABLE_EXPRESS_SESSION_AUTH'] || Aws.shared_config.s3_disable_express_session_auth(profile: cfg.profile) || 'false' value = Aws::Util.str_2_bool(value) # Raise if provided value is not true or false if value.nil? 
raise ArgumentError, 'Must provide either `true` or `false` for the '\ '`s3_disable_express_session_auth` profile option or for '\ "ENV['AWS_S3_DISABLE_EXPRESS_SESSION_AUTH']." end value end end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/redirects.rb0000644000004100000410000000222614563445240022251 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module Plugins class Redirects < Seahorse::Client::Plugin option(:follow_redirects, default: true, doc_type: 'Boolean', docstring: <<-DOCS) When `true`, this client will follow 307 redirects returned by Amazon S3. DOCS # @api private class Handler < Seahorse::Client::Handler def call(context) response = @handler.call(context) if context.http_response.status_code == 307 endpoint = context.http_response.headers['location'] unless context.http_request.endpoint.host.include?('fips') context.http_request.endpoint = endpoint end context.http_response.body.truncate(0) @handler.call(context) else response end end end def add_handlers(handlers, config) if config.follow_redirects # we want to re-trigger request signing handlers.add(Handler, step: :sign, priority: 90) end end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/s3_host_id.rb0000644000004100000410000000127314563445240022324 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module Plugins # Support S3 host id, more information, see: # http://docs.aws.amazon.com/AmazonS3/latest/dev/troubleshooting.html#sdk-request-ids # # This plugin adds :host_id for s3 responses when available # @api private class S3HostId < Seahorse::Client::Plugin class Handler < Seahorse::Client::Handler def call(context) response = @handler.call(context) h = context.http_response.headers context[:s3_host_id] = h['x-amz-id-2'] response end end handler(Handler, step: :sign) end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/iad_regional_endpoint.rb0000644000004100000410000000240614563445240024602 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module Plugins class IADRegionalEndpoint < Seahorse::Client::Plugin option(:s3_us_east_1_regional_endpoint, default: 'legacy', doc_type: String, docstring: <<-DOCS) do |cfg| Pass in `regional` to enable the `us-east-1` regional endpoint. Defaults to `legacy` mode which uses the global endpoint. DOCS resolve_iad_regional_endpoint(cfg) end private def self.resolve_iad_regional_endpoint(cfg) default_mode_value = if cfg.respond_to?(:defaults_mode_config_resolver) cfg.defaults_mode_config_resolver.resolve(:s3_us_east_1_regional_endpoint) end mode = ENV['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] || Aws.shared_config.s3_us_east_1_regional_endpoint(profile: cfg.profile) || default_mode_value || 'legacy' mode = mode.downcase unless %w(legacy regional).include?(mode) raise ArgumentError, "expected :s3_us_east_1_regional_endpoint or"\ " ENV['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] to be `legacy` or"\ " `regional`." end mode end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/arn.rb0000644000004100000410000000462614563445240021053 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module Plugins # When an accesspoint ARN is provided for :bucket in S3 operations, this # plugin resolves the request endpoint from the ARN when possible. 
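#
# A hypothetical example (ARN and account ID made up) -- with
# `:s3_use_arn_region` enabled, the request is routed to the access
# point's region rather than the client's:
#
#   s3 = Aws::S3::Client.new(region: 'us-west-2')
#   s3.get_object(
#     bucket: 'arn:aws:s3:us-east-1:111122223333:accesspoint/my-ap',
#     key: 'my-key'
#   )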
      # @api private
      class ARN < Seahorse::Client::Plugin
        option(
          :s3_use_arn_region,
          default: true,
          doc_type: 'Boolean',
          docstring: <<-DOCS) do |cfg|
For S3 ARNs passed into the `:bucket` parameter, this option will
use the region in the ARN, allowing for cross-region requests to
be made. Set to `false` to use the client's region instead.
          DOCS
          resolve_s3_use_arn_region(cfg)
        end

        option(
          :s3_disable_multiregion_access_points,
          default: false,
          doc_type: 'Boolean',
          docstring: <<-DOCS) do |cfg|
When set to `true`, this option will raise errors when multi-region
access point ARNs are used. Multi-region access points can potentially
result in cross region requests.
          DOCS
          resolve_s3_disable_multiregion_access_points(cfg)
        end

        class << self
          private

          def resolve_s3_use_arn_region(cfg)
            value = ENV['AWS_S3_USE_ARN_REGION'] ||
                    Aws.shared_config.s3_use_arn_region(profile: cfg.profile) ||
                    'true'
            value = Aws::Util.str_2_bool(value)
            # Raise if provided value is not true or false
            if value.nil?
              raise ArgumentError,
                    'Must provide either `true` or `false` for the '\
                    '`s3_use_arn_region` profile option or for '\
                    "ENV['AWS_S3_USE_ARN_REGION']."
            end
            value
          end

          def resolve_s3_disable_multiregion_access_points(cfg)
            value = ENV['AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS'] ||
                    Aws.shared_config.s3_disable_multiregion_access_points(profile: cfg.profile) ||
                    'false'
            value = Aws::Util.str_2_bool(value)
            # Raise if provided value is not true or false
            if value.nil?
              raise ArgumentError,
                    'Must provide either `true` or `false` for the '\
                    '`s3_disable_multiregion_access_points` profile option or for '\
                    "ENV['AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS']."
            end
            value
          end
        end
      end
    end
  end
end
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/s3_signer.rb0000644000004100000410000001422214563445240022160 0ustar www-datawww-data# frozen_string_literal: true

require 'aws-sigv4'

module Aws
  module S3
    module Plugins
      # This plugin used to have a V4 signer but it was removed in favor of
      # the generic Sign plugin that uses the endpoint auth scheme.
      #
      # @api private
      class S3Signer < Seahorse::Client::Plugin
        option(:signature_version, 'v4')

        def add_handlers(handlers, cfg)
          case cfg.signature_version
          when 'v4' then add_v4_handlers(handlers)
          when 's3' then add_legacy_handler(handlers)
          else
            msg = "unsupported signature version `#{cfg.signature_version}'"
            raise ArgumentError, msg
          end
        end

        def add_v4_handlers(handlers)
          handlers.add(CachedBucketRegionHandler, step: :sign, priority: 60)
          handlers.add(BucketRegionErrorHandler, step: :sign, priority: 40)
        end

        def add_legacy_handler(handlers)
          # the generic Sign plugin will be skipped if it sees sigv2
          handlers.add(LegacyHandler, step: :sign)
        end

        class LegacyHandler < Seahorse::Client::Handler
          def call(context)
            LegacySigner.sign(context)
            @handler.call(context)
          end
        end

        # This handler will update the http endpoint when the bucket region
        # is known/cached.
        class CachedBucketRegionHandler < Seahorse::Client::Handler
          def call(context)
            bucket = context.params[:bucket]
            check_for_cached_region(context, bucket) if bucket
            @handler.call(context)
          end

          private

          def check_for_cached_region(context, bucket)
            cached_region = S3::BUCKET_REGIONS[bucket]
            if cached_region &&
               cached_region != context.config.region &&
               !S3Signer.custom_endpoint?(context)
              context.http_request.endpoint.host = S3Signer.new_hostname(
                context, cached_region
              )
              context[:sigv4_region] = cached_region # Sign plugin will use this
            end
          end
        end

        # This handler detects when a request fails because of a mismatched bucket
        # region. It follows up by making a request to determine the correct
        # region, then finally making a version 4 signed request against the
        # correct regional endpoint. This is intended for S3's global endpoint,
        # which will return a 400 if the bucket is not in the region.
        class BucketRegionErrorHandler < Seahorse::Client::Handler
          def call(context)
            response = @handler.call(context)
            handle_region_errors(response)
          end

          private

          def handle_region_errors(response)
            if wrong_sigv4_region?(response) &&
               !fips_region?(response) &&
               !S3Signer.custom_endpoint?(response.context) &&
               !expired_credentials?(response)
              get_region_and_retry(response.context)
            else
              response
            end
          end

          def get_region_and_retry(context)
            actual_region = context.http_response.headers['x-amz-bucket-region']
            actual_region ||= region_from_body(context.http_response.body_contents)
            update_bucket_cache(context, actual_region)
            log_warning(context, actual_region)
            resign_with_new_region(context, actual_region)
            @handler.call(context)
          end

          def update_bucket_cache(context, actual_region)
            S3::BUCKET_REGIONS[context.params[:bucket]] = actual_region
          end

          def fips_region?(resp)
            resp.context.http_request.endpoint.host.include?('s3-fips.')
          end

          def expired_credentials?(resp)
            resp.context.http_response.body_contents.match(/<Code>ExpiredToken<\/Code>/)
          end

          def wrong_sigv4_region?(resp)
            resp.context.http_response.status_code == 400 &&
              (resp.context.http_response.headers['x-amz-bucket-region'] ||
               resp.context.http_response.body_contents.match(/<Region>.+?<\/Region>/))
          end

          def resign_with_new_region(context, actual_region)
            context.http_response.body.truncate(0)
            context.http_request.endpoint.host = S3Signer.new_hostname(
              context, actual_region
            )
            context.metadata[:redirect_region] = actual_region
            signer = Aws::Plugins::Sign.signer_for(
              context[:auth_scheme],
              context.config,
              actual_region
            )
            signer.sign(context)
          end

          def region_from_body(body)
            region = body.match(/<Region>(.+?)<\/Region>/)[1]
            if region.nil? || region == ''
              raise "couldn't get region from body: #{body}"
            else
              region
            end
          end

          def log_warning(context, actual_region)
            msg = "S3 client configured for #{context.config.region.inspect} " \
                  "but the bucket #{context.params[:bucket].inspect} is in " \
                  "#{actual_region.inspect}; Please configure the proper region " \
                  "to avoid multiple unnecessary redirects and signing attempts\n"
            if (logger = context.config.logger)
              logger.warn(msg)
            else
              warn(msg)
            end
          end
        end

        class << self
          def new_hostname(context, region)
            endpoint_params = context[:endpoint_params].dup
            endpoint_params.region = region
            endpoint_params.endpoint = nil
            endpoint = context.config.endpoint_provider.resolve_endpoint(endpoint_params)
            URI(endpoint.url).host
          end

          def custom_endpoint?(context)
            region = context.config.region
            partition = Aws::Endpoints::Matchers.aws_partition(region)
            endpoint = context.http_request.endpoint
            !endpoint.hostname.include?(partition['dnsSuffix']) &&
              !endpoint.hostname.include?(partition['dualStackDnsSuffix'])
          end
        end
      end
    end
  end
end
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/url_encoded_keys.rb0000644000004100000410000000576114563445240023602 0ustar www-datawww-data# frozen_string_literal: true

require 'uri'
require 'cgi'

module Aws
  module S3
    module Plugins
      # This plugin auto-populates the `:encoding_type` request parameter
      # to all calls made to Amazon S3 that accept it.
      #
      # This enables Amazon S3 to return object keys that might contain
      # invalid XML characters as URL encoded strings. This plugin also
      # automatically decodes these keys so that the key management is
      # transparent to the user.
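      #
      # @example What this plugin automates (a sketch; `s3` is assumed to be
      #   an `Aws::S3::Client`): without the plugin you would have to request
      #   the encoding and decode every key yourself:
      #
      #     resp = s3.list_objects(bucket: 'my-bucket', encoding_type: 'url')
      #     keys = resp.contents.map { |o| CGI.unescape(o.key) }
      #
      #   With the plugin active (and no explicit `:encoding_type`), the keys
      #   on the response structs arrive already decoded.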
      #
      # If you specify the `:encoding_type` parameter, then this plugin
      # will be disabled, and you will need to decode the keys yourself.
      #
      # The following operations are managed:
      #
      # * {S3::Client#list_objects}
      # * {S3::Client#list_multipart_uploads}
      # * {S3::Client#list_object_versions}
      #
      class UrlEncodedKeys < Seahorse::Client::Plugin
        class Handler < Seahorse::Client::Handler
          def call(context)
            if context.params.key?(:encoding_type)
              @handler.call(context) # user managed
            else
              manage_keys(context)
            end
          end

          private

          def manage_keys(context)
            context.params[:encoding_type] = 'url'
            @handler.call(context).on_success do |resp|
              send("decode_#{resp.context.operation_name}_keys", resp.data)
            end
          end

          def decode_list_objects_keys(data)
            decode(:marker, data)
            decode(:next_marker, data)
            decode(:prefix, data)
            decode(:delimiter, data)
            data.contents.each { |o| decode(:key, o) } if data.contents
            data.common_prefixes.each { |o| decode(:prefix, o) } if data.common_prefixes
          end

          def decode_list_object_versions_keys(data)
            decode(:key_marker, data)
            decode(:next_key_marker, data)
            decode(:prefix, data)
            decode(:delimiter, data)
            data.versions.each { |o| decode(:key, o) } if data.versions
            data.delete_markers.each { |o| decode(:key, o) } if data.delete_markers
            data.common_prefixes.each { |o| decode(:prefix, o) } if data.common_prefixes
          end

          def decode_list_multipart_uploads_keys(data)
            decode(:key_marker, data)
            decode(:next_key_marker, data)
            decode(:prefix, data)
            decode(:delimiter, data)
            data.uploads.each { |o| decode(:key, o) } if data.uploads
            data.common_prefixes.each { |o| decode(:prefix, o) } if data.common_prefixes
          end

          def decode(member, struct)
            if struct[member]
              struct[member] = CGI.unescape(struct[member])
            end
          end
        end

        handler(Handler,
          step: :validate,
          priority: 0,
          operations: [
            :list_objects,
            :list_object_versions,
            :list_multipart_uploads,
          ]
        )
      end
    end
  end
end
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/get_bucket_location_fix.rb0000644000004100000410000000130414563445240025133 0ustar www-datawww-data# frozen_string_literal: true

module Aws
  module S3
    module Plugins
      class GetBucketLocationFix < Seahorse::Client::Plugin
        class Handler < Seahorse::Client::Handler
          def call(context)
            @handler.call(context).on(200) do |response|
              response.data = S3::Types::GetBucketLocationOutput.new
              xml = context.http_response.body_contents
              matches = xml.match(/<LocationConstraint.*?>(.+?)<\/LocationConstraint>/)
              response.data[:location_constraint] = matches ? matches[1] : ''
            end
          end
        end
        handler(Handler, priority: 60, operations: [:get_bucket_location])
      end
    end
  end
end
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/sse_cpk.rb0000644000004100000410000000374314563445240021711 0ustar www-datawww-data# frozen_string_literal: true

require 'uri'
require 'openssl'

module Aws
  module S3
    module Plugins
      class SseCpk < Seahorse::Client::Plugin
        option(:require_https_for_sse_cpk,
          default: true,
          doc_type: 'Boolean',
          docstring: <<-DOCS)
When `true`, the endpoint **must** be HTTPS for all operations where
server-side-encryption is used with customer-provided keys. This should only
be disabled for local testing.
DOCS class Handler < Seahorse::Client::Handler def call(context) compute_key_md5(context) if context.params.is_a?(Hash) @handler.call(context) end private def compute_key_md5(context) params = context.params if key = params[:sse_customer_key] require_https(context) params[:sse_customer_key] = base64(key) params[:sse_customer_key_md5] = base64(md5(key)) end if key = params[:copy_source_sse_customer_key] require_https(context) params[:copy_source_sse_customer_key] = base64(key) params[:copy_source_sse_customer_key_md5] = base64(md5(key)) end end def require_https(context) unless URI::HTTPS === context.config.endpoint msg = <<-MSG.strip.gsub("\n", ' ') Attempting to send customer-provided-keys for S3 server-side-encryption over HTTP; Please configure a HTTPS endpoint. If you are attempting to use a test endpoint, you can disable this check via `:require_https_for_sse_cpk` MSG raise ArgumentError, msg end end def md5(str) OpenSSL::Digest::MD5.digest(str) end def base64(str) Base64.encode64(str).strip end end handler(Handler, step: :initialize) end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/expect_100_continue.rb0000644000004100000410000000135114563445240024037 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module Plugins class Expect100Continue < Seahorse::Client::Plugin def add_handlers(handlers, config) if config.http_continue_timeout && config.http_continue_timeout > 0 handlers.add(Handler) end end # @api private class Handler < Seahorse::Client::Handler def call(context) body = context.http_request.body if body.respond_to?(:size) && body.size > 0 && !context[:use_accelerate_endpoint] context.http_request.headers['expect'] = '100-continue' end @handler.call(context) end end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/bucket_name_restrictions.rb0000644000004100000410000000216214563445240025351 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module Plugins # @api private class BucketNameRestrictions < Seahorse::Client::Plugin class Handler < Seahorse::Client::Handler # Useful because Aws::S3::Errors::SignatureDoesNotMatch is thrown # when passed a bucket with a forward slash. Instead provide a more # helpful error. Ideally should not be a plugin? def call(context) bucket_member = _bucket_member(context.operation.input.shape) if bucket_member && (bucket = context.params[bucket_member]) if !Aws::ARNParser.arn?(bucket) && bucket.include?('/') raise ArgumentError, 'bucket name must not contain a forward-slash (/)' end end @handler.call(context) end private def _bucket_member(input) input.members.each do |member, ref| return member if ref.shape.name == 'BucketName' end nil end end handler(Handler) end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/accelerate.rb0000644000004100000410000000336514563445240022362 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module Plugins # Provides support for using `Aws::S3::Client` with Amazon S3 Transfer # Acceleration. # # Go here for more information about transfer acceleration: # [http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html](http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) class Accelerate < Seahorse::Client::Plugin option( :use_accelerate_endpoint, default: false, doc_type: 'Boolean', docstring: <<-DOCS) When set to `true`, accelerated bucket endpoints will be used for all object operations. You must first enable accelerate for each bucket. 
[Go here for more information](http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). DOCS def add_handlers(handlers, config) operations = config.api.operation_names - [ :create_bucket, :list_buckets, :delete_bucket ] handlers.add( OptionHandler, step: :initialize, operations: operations ) end # @api private class OptionHandler < Seahorse::Client::Handler def call(context) # Support client configuration and per-operation configuration # TODO: move this to an options hash and warn here. if context.params.is_a?(Hash) accelerate = context.params.delete(:use_accelerate_endpoint) end if accelerate.nil? accelerate = context.config.use_accelerate_endpoint end context[:use_accelerate_endpoint] = accelerate @handler.call(context) end end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/bucket_dns.rb0000644000004100000410000000341714563445240022411 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module Plugins # Amazon S3 requires DNS style addressing for buckets outside of # the classic region when possible. class BucketDns < Seahorse::Client::Plugin # When set to `false` DNS compatible bucket names are moved from # the request URI path to the host as a subdomain, unless the request # is using SSL and the bucket name contains a dot. # # When set to `true`, the bucket name is always forced to be part # of the request URI path. This will not work with buckets outside # the classic region. option(:force_path_style, default: false, doc_type: 'Boolean', docstring: <<-DOCS) When set to `true`, the bucket name is always left in the request URI and never moved to the host as a sub-domain. DOCS # These class methods were originally used in a handler in this plugin. # SigV2 legacy signer needs this logic so we keep it here as utility. # New endpoint resolution will check this as a matcher. class << self # @param [String] bucket_name # @param [Boolean] ssl # @return [Boolean] def dns_compatible?(bucket_name, ssl) if valid_subdomain?(bucket_name) bucket_name.match(/\./) && ssl ? false : true else false end end # @param [String] bucket_name # @return [Boolean] def valid_subdomain?(bucket_name) bucket_name.size < 64 && bucket_name =~ /^[a-z0-9][a-z0-9.-]+[a-z0-9]$/ && bucket_name !~ /(\d+\.){3}\d+/ && bucket_name !~ /[.-]{2}/ end end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/md5s.rb0000644000004100000410000000526614563445240021144 0ustar www-datawww-data# frozen_string_literal: true require 'openssl' module Aws module S3 module Plugins # @api private # This plugin is effectively deprecated in favor of modeled # httpChecksumRequired traits. class Md5s < Seahorse::Client::Plugin # These operations allow Content MD5 but are not required by # httpChecksumRequired. This list should not grow. OPTIONAL_OPERATIONS = [ :put_object, :upload_part ] # @api private class Handler < Seahorse::Client::Handler CHUNK_SIZE = 1 * 1024 * 1024 # one MB def call(context) if !context[:checksum_algorithms] && # skip in favor of flexible checksum !context[:s3_express_endpoint] # s3 express endpoints do not support md5 body = context.http_request.body if body.respond_to?(:size) && body.size > 0 context.http_request.headers['Content-Md5'] ||= md5(body) end end @handler.call(context) end private # @param [File, Tempfile, IO#read, String] value # @return [String] def md5(value) if (File === value || Tempfile === value) && !value.path.nil? 
&& File.exist?(value.path) OpenSSL::Digest::MD5.file(value).base64digest elsif value.respond_to?(:read) md5 = OpenSSL::Digest::MD5.new update_in_chunks(md5, value) md5.base64digest else OpenSSL::Digest::MD5.digest(value).base64digest end end def update_in_chunks(digest, io) loop do chunk = io.read(CHUNK_SIZE) break unless chunk digest.update(chunk) end io.rewind end end option(:compute_checksums, default: true, doc_type: 'Boolean', docstring: <<-DOCS) When `true` a MD5 checksum will be computed and sent in the Content Md5 header for :put_object and :upload_part. When `false`, MD5 checksums will not be computed for these operations. Checksums are still computed for operations requiring them. Checksum errors returned by Amazon S3 are automatically retried up to `:retry_limit` times. DOCS def add_handlers(handlers, config) if config.compute_checksums # priority set low to ensure md5 is computed AFTER the request is # built but before it is signed handlers.add( Handler, priority: 10, step: :build, operations: OPTIONAL_OPERATIONS ) end end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/streaming_retry.rb0000644000004100000410000001072514563445240023506 0ustar www-datawww-data# frozen_string_literal: true require 'forwardable' module Aws module S3 module Plugins # A wrapper around BlockIO that adds no-ops for truncate and rewind # @api private class RetryableBlockIO extend Forwardable def_delegators :@block_io, :write, :read, :size def initialize(block_io) @block_io = block_io end def truncate(_integer); end def rewind; end end # A wrapper around ManagedFile that adds no-ops for truncate and rewind # @api private class RetryableManagedFile extend Forwardable def_delegators :@file, :write, :read, :size, :open?, :close def initialize(managed_file) @file = managed_file end def truncate(_integer); end def rewind; end end class NonRetryableStreamingError < StandardError def initialize(error) super('Unable to retry request - retry could result in processing duplicated chunks.') set_backtrace(error.backtrace) @original_error = error end attr_reader :original_error end # This handler works with the ResponseTarget plugin to provide smart # retries of S3 streaming operations that support the range parameter # (currently only: get_object). When a 200 OK with a TruncatedBodyError # is received this handler will add a range header that excludes the # data that has already been processed (written to file or sent to # the target Proc). # It is important to not write data to the custom target in the case of # a non-success response. We do not want to write an XML error # message to someone's file or pass it to a user's Proc. 
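      #
      # @example The failure mode this plugin addresses (a sketch; bucket,
      #   key, and target path are illustrative, and `s3` is assumed to be
      #   an `Aws::S3::Client`):
      #
      #     s3.get_object(
      #       bucket: 'my-bucket',
      #       key: 'very-large-object',
      #       response_target: '/tmp/very-large-object'
      #     )
      #     # If the connection drops after N bytes of a 200 response, the
      #     # request is retried with "Range: bytes=N-" so bytes already
      #     # written to the target are not fetched or processed twice.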
# @api private class StreamingRetry < Seahorse::Client::Plugin class Handler < Seahorse::Client::Handler def call(context) target = context.params[:response_target] || context[:response_target] # retry is only supported when range is NOT set on the initial request if supported_target?(target) && !context.params[:range] add_event_listeners(context, target) end @handler.call(context) end private def add_event_listeners(context, target) context.http_response.on_headers(200..299) do case context.http_response.body when Seahorse::Client::BlockIO then context.http_response.body = RetryableBlockIO.new(context.http_response.body) when Seahorse::Client::ManagedFile then context.http_response.body = RetryableManagedFile.new(context.http_response.body) end end context.http_response.on_headers(400..599) do context.http_response.body = StringIO.new # something to write the error to end context.http_response.on_success(200..299) do body = context.http_response.body if body.is_a?(RetryableManagedFile) && body.open? body.close end end context.http_response.on_error do |error| if retryable_body?(context) if truncated_body?(error) context.http_request.headers[:range] = "bytes=#{context.http_response.body.size}-" else case context.http_response.body when RetryableManagedFile # call rewind on the underlying file context.http_response.body.instance_variable_get(:@file).rewind else raise NonRetryableStreamingError, error end end end end end def truncated_body?(error) error.is_a?(Seahorse::Client::NetworkingError) && error.original_error.is_a?( Seahorse::Client::NetHttp::Handler::TruncatedBodyError ) end def retryable_body?(context) context.http_response.body.is_a?(RetryableBlockIO) || context.http_response.body.is_a?(RetryableManagedFile) end def supported_target?(target) case target when Proc, String, Pathname then true else false end end end handler(Handler, step: :sign, operations: [:get_object], priority: 10) end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/location_constraint.rb0000644000004100000410000000205414563445240024340 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module Plugins # When making calls to {S3::Client#create_bucket} outside the # "classic" region, the bucket location constraint must be specified. # This plugin auto populates the constraint to the configured region. class LocationConstraint < Seahorse::Client::Plugin class Handler < Seahorse::Client::Handler def call(context) unless context.config.region == 'us-east-1' populate_location_constraint(context.params, context.config.region) end @handler.call(context) end private def populate_location_constraint(params, region) params[:create_bucket_configuration] ||= {} unless params[:create_bucket_configuration][:location] params[:create_bucket_configuration][:location_constraint] ||= region end end end handler(Handler, step: :initialize, operations: [:create_bucket]) end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/skip_whole_multipart_get_checksums.rb0000644000004100000410000000130214563445240027430 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module Plugins # S3 GetObject results for whole Multipart Objects contain a checksum # that cannot be validated. These should be skipped by the # ChecksumAlgorithm plugin. 
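      # A whole-object GET of a multipart upload returns a composite
      # checksum (a checksum of the per-part checksums, carrying a
      # "-<part count>" suffix) that cannot be recomputed from the streamed
      # bytes alone, hence the skip flag set below.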
class SkipWholeMultipartGetChecksums < Seahorse::Client::Plugin class Handler < Seahorse::Client::Handler def call(context) context[:http_checksum] ||= {} context[:http_checksum][:skip_on_suffix] = true @handler.call(context) end end handler( Handler, step: :initialize, operations: [:get_object] ) end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/endpoints.rb0000644000004100000410000003134114563445240022270 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 module Plugins class Endpoints < Seahorse::Client::Plugin option( :endpoint_provider, doc_type: 'Aws::S3::EndpointProvider', rbs_type: 'untyped', docstring: 'The endpoint provider used to resolve endpoints. Any '\ 'object that responds to `#resolve_endpoint(parameters)` '\ 'where `parameters` is a Struct similar to '\ '`Aws::S3::EndpointParameters`' ) do |cfg| Aws::S3::EndpointProvider.new end option( :disable_s3_express_session_auth, doc_type: 'Boolean', default: nil, docstring: "Parameter to indicate whether S3Express session auth should be disabled") # @api private class Handler < Seahorse::Client::Handler def call(context) unless context[:discovered_endpoint] params = parameters_for_operation(context) endpoint = context.config.endpoint_provider.resolve_endpoint(params) context.http_request.endpoint = endpoint.url apply_endpoint_headers(context, endpoint.headers) context[:endpoint_params] = params context[:endpoint_properties] = endpoint.properties end context[:auth_scheme] = Aws::Endpoints.resolve_auth_scheme(context, endpoint) @handler.call(context) end private def apply_endpoint_headers(context, headers) headers.each do |key, values| value = values .compact .map { |s| Seahorse::Util.escape_header_list_string(s.to_s) } .join(',') context.http_request.headers[key] = value end end def parameters_for_operation(context) case context.operation_name when :abort_multipart_upload Aws::S3::Endpoints::AbortMultipartUpload.build(context) when :complete_multipart_upload Aws::S3::Endpoints::CompleteMultipartUpload.build(context) when :copy_object Aws::S3::Endpoints::CopyObject.build(context) when :create_bucket Aws::S3::Endpoints::CreateBucket.build(context) when :create_multipart_upload Aws::S3::Endpoints::CreateMultipartUpload.build(context) when :create_session Aws::S3::Endpoints::CreateSession.build(context) when :delete_bucket Aws::S3::Endpoints::DeleteBucket.build(context) when :delete_bucket_analytics_configuration Aws::S3::Endpoints::DeleteBucketAnalyticsConfiguration.build(context) when :delete_bucket_cors Aws::S3::Endpoints::DeleteBucketCors.build(context) when :delete_bucket_encryption Aws::S3::Endpoints::DeleteBucketEncryption.build(context) when :delete_bucket_intelligent_tiering_configuration Aws::S3::Endpoints::DeleteBucketIntelligentTieringConfiguration.build(context) when :delete_bucket_inventory_configuration Aws::S3::Endpoints::DeleteBucketInventoryConfiguration.build(context) when :delete_bucket_lifecycle Aws::S3::Endpoints::DeleteBucketLifecycle.build(context) when :delete_bucket_metrics_configuration Aws::S3::Endpoints::DeleteBucketMetricsConfiguration.build(context) when :delete_bucket_ownership_controls Aws::S3::Endpoints::DeleteBucketOwnershipControls.build(context) when :delete_bucket_policy Aws::S3::Endpoints::DeleteBucketPolicy.build(context) when :delete_bucket_replication 
Aws::S3::Endpoints::DeleteBucketReplication.build(context) when :delete_bucket_tagging Aws::S3::Endpoints::DeleteBucketTagging.build(context) when :delete_bucket_website Aws::S3::Endpoints::DeleteBucketWebsite.build(context) when :delete_object Aws::S3::Endpoints::DeleteObject.build(context) when :delete_object_tagging Aws::S3::Endpoints::DeleteObjectTagging.build(context) when :delete_objects Aws::S3::Endpoints::DeleteObjects.build(context) when :delete_public_access_block Aws::S3::Endpoints::DeletePublicAccessBlock.build(context) when :get_bucket_accelerate_configuration Aws::S3::Endpoints::GetBucketAccelerateConfiguration.build(context) when :get_bucket_acl Aws::S3::Endpoints::GetBucketAcl.build(context) when :get_bucket_analytics_configuration Aws::S3::Endpoints::GetBucketAnalyticsConfiguration.build(context) when :get_bucket_cors Aws::S3::Endpoints::GetBucketCors.build(context) when :get_bucket_encryption Aws::S3::Endpoints::GetBucketEncryption.build(context) when :get_bucket_intelligent_tiering_configuration Aws::S3::Endpoints::GetBucketIntelligentTieringConfiguration.build(context) when :get_bucket_inventory_configuration Aws::S3::Endpoints::GetBucketInventoryConfiguration.build(context) when :get_bucket_lifecycle Aws::S3::Endpoints::GetBucketLifecycle.build(context) when :get_bucket_lifecycle_configuration Aws::S3::Endpoints::GetBucketLifecycleConfiguration.build(context) when :get_bucket_location Aws::S3::Endpoints::GetBucketLocation.build(context) when :get_bucket_logging Aws::S3::Endpoints::GetBucketLogging.build(context) when :get_bucket_metrics_configuration Aws::S3::Endpoints::GetBucketMetricsConfiguration.build(context) when :get_bucket_notification Aws::S3::Endpoints::GetBucketNotification.build(context) when :get_bucket_notification_configuration Aws::S3::Endpoints::GetBucketNotificationConfiguration.build(context) when :get_bucket_ownership_controls Aws::S3::Endpoints::GetBucketOwnershipControls.build(context) when :get_bucket_policy Aws::S3::Endpoints::GetBucketPolicy.build(context) when :get_bucket_policy_status Aws::S3::Endpoints::GetBucketPolicyStatus.build(context) when :get_bucket_replication Aws::S3::Endpoints::GetBucketReplication.build(context) when :get_bucket_request_payment Aws::S3::Endpoints::GetBucketRequestPayment.build(context) when :get_bucket_tagging Aws::S3::Endpoints::GetBucketTagging.build(context) when :get_bucket_versioning Aws::S3::Endpoints::GetBucketVersioning.build(context) when :get_bucket_website Aws::S3::Endpoints::GetBucketWebsite.build(context) when :get_object Aws::S3::Endpoints::GetObject.build(context) when :get_object_acl Aws::S3::Endpoints::GetObjectAcl.build(context) when :get_object_attributes Aws::S3::Endpoints::GetObjectAttributes.build(context) when :get_object_legal_hold Aws::S3::Endpoints::GetObjectLegalHold.build(context) when :get_object_lock_configuration Aws::S3::Endpoints::GetObjectLockConfiguration.build(context) when :get_object_retention Aws::S3::Endpoints::GetObjectRetention.build(context) when :get_object_tagging Aws::S3::Endpoints::GetObjectTagging.build(context) when :get_object_torrent Aws::S3::Endpoints::GetObjectTorrent.build(context) when :get_public_access_block Aws::S3::Endpoints::GetPublicAccessBlock.build(context) when :head_bucket Aws::S3::Endpoints::HeadBucket.build(context) when :head_object Aws::S3::Endpoints::HeadObject.build(context) when :list_bucket_analytics_configurations Aws::S3::Endpoints::ListBucketAnalyticsConfigurations.build(context) when :list_bucket_intelligent_tiering_configurations 
Aws::S3::Endpoints::ListBucketIntelligentTieringConfigurations.build(context) when :list_bucket_inventory_configurations Aws::S3::Endpoints::ListBucketInventoryConfigurations.build(context) when :list_bucket_metrics_configurations Aws::S3::Endpoints::ListBucketMetricsConfigurations.build(context) when :list_buckets Aws::S3::Endpoints::ListBuckets.build(context) when :list_directory_buckets Aws::S3::Endpoints::ListDirectoryBuckets.build(context) when :list_multipart_uploads Aws::S3::Endpoints::ListMultipartUploads.build(context) when :list_object_versions Aws::S3::Endpoints::ListObjectVersions.build(context) when :list_objects Aws::S3::Endpoints::ListObjects.build(context) when :list_objects_v2 Aws::S3::Endpoints::ListObjectsV2.build(context) when :list_parts Aws::S3::Endpoints::ListParts.build(context) when :put_bucket_accelerate_configuration Aws::S3::Endpoints::PutBucketAccelerateConfiguration.build(context) when :put_bucket_acl Aws::S3::Endpoints::PutBucketAcl.build(context) when :put_bucket_analytics_configuration Aws::S3::Endpoints::PutBucketAnalyticsConfiguration.build(context) when :put_bucket_cors Aws::S3::Endpoints::PutBucketCors.build(context) when :put_bucket_encryption Aws::S3::Endpoints::PutBucketEncryption.build(context) when :put_bucket_intelligent_tiering_configuration Aws::S3::Endpoints::PutBucketIntelligentTieringConfiguration.build(context) when :put_bucket_inventory_configuration Aws::S3::Endpoints::PutBucketInventoryConfiguration.build(context) when :put_bucket_lifecycle Aws::S3::Endpoints::PutBucketLifecycle.build(context) when :put_bucket_lifecycle_configuration Aws::S3::Endpoints::PutBucketLifecycleConfiguration.build(context) when :put_bucket_logging Aws::S3::Endpoints::PutBucketLogging.build(context) when :put_bucket_metrics_configuration Aws::S3::Endpoints::PutBucketMetricsConfiguration.build(context) when :put_bucket_notification Aws::S3::Endpoints::PutBucketNotification.build(context) when :put_bucket_notification_configuration Aws::S3::Endpoints::PutBucketNotificationConfiguration.build(context) when :put_bucket_ownership_controls Aws::S3::Endpoints::PutBucketOwnershipControls.build(context) when :put_bucket_policy Aws::S3::Endpoints::PutBucketPolicy.build(context) when :put_bucket_replication Aws::S3::Endpoints::PutBucketReplication.build(context) when :put_bucket_request_payment Aws::S3::Endpoints::PutBucketRequestPayment.build(context) when :put_bucket_tagging Aws::S3::Endpoints::PutBucketTagging.build(context) when :put_bucket_versioning Aws::S3::Endpoints::PutBucketVersioning.build(context) when :put_bucket_website Aws::S3::Endpoints::PutBucketWebsite.build(context) when :put_object Aws::S3::Endpoints::PutObject.build(context) when :put_object_acl Aws::S3::Endpoints::PutObjectAcl.build(context) when :put_object_legal_hold Aws::S3::Endpoints::PutObjectLegalHold.build(context) when :put_object_lock_configuration Aws::S3::Endpoints::PutObjectLockConfiguration.build(context) when :put_object_retention Aws::S3::Endpoints::PutObjectRetention.build(context) when :put_object_tagging Aws::S3::Endpoints::PutObjectTagging.build(context) when :put_public_access_block Aws::S3::Endpoints::PutPublicAccessBlock.build(context) when :restore_object Aws::S3::Endpoints::RestoreObject.build(context) when :select_object_content Aws::S3::Endpoints::SelectObjectContent.build(context) when :upload_part Aws::S3::Endpoints::UploadPart.build(context) when :upload_part_copy Aws::S3::Endpoints::UploadPartCopy.build(context) when :write_get_object_response 
            Aws::S3::Endpoints::WriteGetObjectResponse.build(context)
          end
        end
      end

      def add_handlers(handlers, _config)
        handlers.add(Handler, step: :build, priority: 75)
      end
    end
  end
end
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/http_200_errors.rb0000644000004100000410000000333214563445240023220 0ustar www-datawww-data# frozen_string_literal: true

module Aws
  module S3
    module Plugins
      # A handful of Amazon S3 operations will respond with a 200 status
      # code but will send an error in the response body. This plugin
      # injects a handler that will parse 200 response bodies for potential
      # errors, allowing them to be retried.
      # @api private
      class Http200Errors < Seahorse::Client::Plugin
        class Handler < Seahorse::Client::Handler
          def call(context)
            @handler.call(context).on(200) do |response|
              if error = check_for_error(context)
                context.http_response.status_code = 500
                response.data = nil
                response.error = error
              end
            end
          end

          def check_for_error(context)
            xml = context.http_response.body_contents
            if xml.match(/<Error>/)
              error_code = xml.match(/<Code>(.+?)<\/Code>/)[1]
              error_message = xml.match(/<Message>(.+?)<\/Message>/)[1]
              S3::Errors.error_class(error_code).new(context, error_message)
            elsif !xml.match(/<\w/) # Must have the start of an XML Tag
              # Other incomplete xml bodies will result in XML ParsingError
              Seahorse::Client::NetworkingError.new(
                S3::Errors
                  .error_class('InternalError')
                  .new(context, 'Empty or incomplete response body')
              )
            end
          end
        end

        handler(
          Handler,
          step: :sign,
          operations: [
            :complete_multipart_upload,
            :copy_object,
            :upload_part_copy,
          ]
        )
      end
    end
  end
end
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/plugins/dualstack.rb0000644000004100000410000000143414563445240022240 0ustar www-datawww-data# frozen_string_literal: true

module Aws
  module S3
    module Plugins
      # @api private
      class Dualstack < Seahorse::Client::Plugin
        def add_handlers(handlers, _config)
          handlers.add(OptionHandler, step: :initialize)
        end

        # @api private
        class OptionHandler < Seahorse::Client::Handler
          def call(context)
            # Support client configuration and per-operation configuration
            if context.params.is_a?(Hash)
              dualstack = context.params.delete(:use_dualstack_endpoint)
            end
            dualstack = context.config.use_dualstack_endpoint if dualstack.nil?
            context[:use_dualstack_endpoint] = dualstack
            @handler.call(context)
          end
        end
      end
    end
  end
end
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/object_acl.rb0000644000004100000410000003131214563445240020667 0ustar www-datawww-data# frozen_string_literal: true

# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE

module Aws::S3
  class ObjectAcl
    extend Aws::Deprecations

    # @overload def initialize(bucket_name, object_key, options = {})
    #   @param [String] bucket_name
    #   @param [String] object_key
    #   @option options [Client] :client
    # @overload def initialize(options = {})
    #   @option options [required, String] :bucket_name
    #   @option options [required, String] :object_key
    #   @option options [Client] :client
    def initialize(*args)
      options = Hash === args.last ? args.pop.dup : {}
      @bucket_name = extract_bucket_name(args, options)
      @object_key = extract_object_key(args, options)
      @data = options.delete(:data)
      @client = options.delete(:client) || Client.new(options)
      @waiter_block_warned = false
    end

    # @!group Read-Only Attributes

    # @return [String]
    def bucket_name
      @bucket_name
    end

    # @return [String]
    def object_key
      @object_key
    end

    # Container for the bucket owner's display name and ID.
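    #
    # @example Reading the ACL through this resource (a sketch; bucket and
    #   key are illustrative):
    #
    #     acl = Aws::S3::ObjectAcl.new('my-bucket', 'my-key')
    #     acl.owner.display_name
    #     acl.grants.each { |g| puts "#{g.permission} => #{g.grantee.type}" }
    #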
    # @return [Types::Owner]
    def owner
      data[:owner]
    end

    # A list of grants.
    # @return [Array<Types::Grant>]
    def grants
      data[:grants]
    end

    # If present, indicates that the requester was successfully charged for
    # the request.
    #
    # This functionality is not supported for directory buckets.
    #
    # @return [String]
    def request_charged
      data[:request_charged]
    end

    # @!endgroup

    # @return [Client]
    def client
      @client
    end

    # Loads, or reloads {#data} for the current {ObjectAcl}.
    # Returns `self` making it possible to chain methods.
    #
    #     object_acl.reload.data
    #
    # @return [self]
    def load
      resp = Aws::Plugins::UserAgent.feature('resource') do
        @client.get_object_acl(
          bucket: @bucket_name,
          key: @object_key
        )
      end
      @data = resp.data
      self
    end
    alias :reload :load

    # @return [Types::GetObjectAclOutput]
    #   Returns the data for this {ObjectAcl}. Calls
    #   {Client#get_object_acl} if {#data_loaded?} is `false`.
    def data
      load unless @data
      @data
    end

    # @return [Boolean]
    #   Returns `true` if this resource is loaded. Accessing attributes or
    #   {#data} on an unloaded resource will trigger a call to {#load}.
    def data_loaded?
      !!@data
    end

    # @deprecated Use [Aws::S3::Client] #wait_until instead
    #
    # The waiter polls an API operation until a resource enters a desired
    # state.
    #
    # @note The waiting operation is performed on a copy. The original resource
    #   remains unchanged.
    #
    # ## Basic Usage
    #
    # The waiter polls until it succeeds, fails by entering a terminal state,
    # or reaches the maximum number of attempts.
    #
    #     # polls in a loop until condition is true
    #     resource.wait_until(options) {|resource| condition}
    #
    # ## Example
    #
    #     instance.wait_until(max_attempts:10, delay:5) do |instance|
    #       instance.state.name == 'running'
    #     end
    #
    # ## Configuration
    #
    # You can configure the maximum number of polling attempts, and the
    # delay (in seconds) between each polling attempt. The waiting condition is
    # set by passing a block to {#wait_until}:
    #
    #     # poll for ~25 seconds
    #     resource.wait_until(max_attempts:5,delay:5) {|resource|...}
    #
    # ## Callbacks
    #
    # You can be notified before each polling attempt and before each
    # delay. If you throw `:success` or `:failure` from these callbacks,
    # it will terminate the waiter.
    #
    #     started_at = Time.now
    #     # poll for 1 hour, instead of a number of attempts
    #     proc = Proc.new do |attempts, response|
    #       throw :failure if Time.now - started_at > 3600
    #     end
    #
    #     # disable max attempts
    #     instance.wait_until(before_wait:proc, max_attempts:nil) {...}
    #
    # ## Handling Errors
    #
    # When a waiter is successful, it returns the Resource. When a waiter
    # fails, it raises an error.
    #
    #     begin
    #       resource.wait_until(...)
    #     rescue Aws::Waiters::Errors::WaiterFailed
    #       # resource did not enter the desired state in time
    #     end
    #
    # @yieldparam [Resource] resource to be used in the waiting condition.
    #
    # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter
    #   terminates because the waiter has entered a state that it will not
    #   transition out of, preventing success.
    #
    # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the
    #   configured maximum number of attempts have been made and the waiter
    #   is not yet successful.
    #
    # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is
    #   encountered while polling for a resource that is not expected.
# # @raise [NotImplementedError] Raised when the resource does not # # @option options [Integer] :max_attempts (10) Maximum number of # attempts # @option options [Integer] :delay (10) Delay between each # attempt in seconds # @option options [Proc] :before_attempt (nil) Callback # invoked before each attempt # @option options [Proc] :before_wait (nil) Callback # invoked before each wait # @return [Resource] if the waiter was successful def wait_until(options = {}, &block) self_copy = self.dup attempts = 0 options[:max_attempts] = 10 unless options.key?(:max_attempts) options[:delay] ||= 10 options[:poller] = Proc.new do attempts += 1 if block.call(self_copy) [:success, self_copy] else self_copy.reload unless attempts == options[:max_attempts] :retry end end Aws::Plugins::UserAgent.feature('resource') do Aws::Waiters::Waiter.new(options).wait({}) end end # @!group Actions # @example Request syntax with placeholder values # # object_acl.put({ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control # access_control_policy: { # grants: [ # { # grantee: { # display_name: "DisplayName", # email_address: "EmailAddress", # id: "ID", # type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group # uri: "URI", # }, # permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP # }, # ], # owner: { # display_name: "DisplayName", # id: "ID", # }, # }, # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # grant_full_control: "GrantFullControl", # grant_read: "GrantRead", # grant_read_acp: "GrantReadACP", # grant_write: "GrantWrite", # grant_write_acp: "GrantWriteACP", # request_payer: "requester", # accepts requester # version_id: "ObjectVersionId", # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :acl # The canned ACL to apply to the object. For more information, see # [Canned ACL][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL # @option options [Types::AccessControlPolicy] :access_control_policy # Contains the elements that set the ACL permissions for an object per # grantee. # @option options [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the data. This header must be # used as a message integrity check to verify that the request body was # not corrupted in transit. For more information, go to [RFC # 1864.>][1] # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :grant_full_control # Allows grantee the read, write, read ACP, and write ACP permissions on # the bucket. # # This functionality is not supported for Amazon S3 on Outposts. # @option options [String] :grant_read # Allows grantee to list the objects in the bucket. # # This functionality is not supported for Amazon S3 on Outposts. # @option options [String] :grant_read_acp # Allows grantee to read the bucket ACL. # # This functionality is not supported for Amazon S3 on Outposts. # @option options [String] :grant_write # Allows grantee to create new objects in the bucket. # # For the bucket and object owners of existing objects, also allows # deletions and overwrites of those objects. # @option options [String] :grant_write_acp # Allows grantee to write the ACL for the applicable bucket. # # This functionality is not supported for Amazon S3 on Outposts. # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [String] :version_id # Version ID used to reference a specific version of the object. # # This functionality is not supported for directory buckets. # # # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). 
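    # @example Applying a canned ACL (a sketch; whether this succeeds also
    #   depends on the bucket's ownership controls and public access
    #   settings):
    #
    #     object_acl.put(acl: 'public-read')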
# @return [Types::PutObjectAclOutput] def put(options = {}) options = options.merge( bucket: @bucket_name, key: @object_key ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_object_acl(options) end resp.data end # @!group Associations # @return [Object] def object Object.new( bucket_name: @bucket_name, key: @object_key, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name, object_key: @object_key } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end def extract_object_key(args, options) value = args[1] || options.delete(:object_key) case value when String then value when nil then raise ArgumentError, "missing required option :object_key" else msg = "expected :object_key to be a String, got #{value.class}" raise ArgumentError, msg end end class Collection < Aws::Resources::Collection; end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryption_v2.rb0000644000004100000410000000137114563445240021405 0ustar www-datawww-datarequire 'aws-sdk-s3/encryptionV2/client' require 'aws-sdk-s3/encryptionV2/decrypt_handler' require 'aws-sdk-s3/encryptionV2/default_cipher_provider' require 'aws-sdk-s3/encryptionV2/encrypt_handler' require 'aws-sdk-s3/encryptionV2/errors' require 'aws-sdk-s3/encryptionV2/io_encrypter' require 'aws-sdk-s3/encryptionV2/io_decrypter' require 'aws-sdk-s3/encryptionV2/io_auth_decrypter' require 'aws-sdk-s3/encryptionV2/key_provider' require 'aws-sdk-s3/encryptionV2/kms_cipher_provider' require 'aws-sdk-s3/encryptionV2/materials' require 'aws-sdk-s3/encryptionV2/utils' require 'aws-sdk-s3/encryptionV2/default_key_provider' module Aws module S3 module EncryptionV2 AES_GCM_TAG_LEN_BYTES = 16 EC_USER_AGENT = 'S3CryptoV2' end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/bucket_request_payment.rb0000644000004100000410000002077314563445240023375 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 class BucketRequestPayment extend Aws::Deprecations # @overload def initialize(bucket_name, options = {}) # @param [String] bucket_name # @option options [Client] :client # @overload def initialize(options = {}) # @option options [required, String] :bucket_name # @option options [Client] :client def initialize(*args) options = Hash === args.last ? args.pop.dup : {} @bucket_name = extract_bucket_name(args, options) @data = options.delete(:data) @client = options.delete(:client) || Client.new(options) @waiter_block_warned = false end # @!group Read-Only Attributes # @return [String] def bucket_name @bucket_name end # Specifies who pays for the download and request fees. # @return [String] def payer data[:payer] end # @!endgroup # @return [Client] def client @client end # Loads, or reloads {#data} for the current {BucketRequestPayment}. # Returns `self` making it possible to chain methods. 
    #
    #     bucket_request_payment.reload.data
    #
    # @return [self]
    def load
      resp = Aws::Plugins::UserAgent.feature('resource') do
        @client.get_bucket_request_payment(bucket: @bucket_name)
      end
      @data = resp.data
      self
    end
    alias :reload :load

    # @return [Types::GetBucketRequestPaymentOutput]
    #   Returns the data for this {BucketRequestPayment}. Calls
    #   {Client#get_bucket_request_payment} if {#data_loaded?} is `false`.
    def data
      load unless @data
      @data
    end

    # @return [Boolean]
    #   Returns `true` if this resource is loaded. Accessing attributes or
    #   {#data} on an unloaded resource will trigger a call to {#load}.
    def data_loaded?
      !!@data
    end

    # @deprecated Use [Aws::S3::Client] #wait_until instead
    #
    # The waiter polls an API operation until a resource enters a desired
    # state.
    #
    # @note The waiting operation is performed on a copy. The original resource
    #   remains unchanged.
    #
    # ## Basic Usage
    #
    # The waiter polls until it succeeds, fails by entering a terminal state,
    # or reaches the maximum number of attempts.
    #
    #     # polls in a loop until condition is true
    #     resource.wait_until(options) {|resource| condition}
    #
    # ## Example
    #
    #     instance.wait_until(max_attempts:10, delay:5) do |instance|
    #       instance.state.name == 'running'
    #     end
    #
    # ## Configuration
    #
    # You can configure the maximum number of polling attempts, and the
    # delay (in seconds) between each polling attempt. The waiting condition is
    # set by passing a block to {#wait_until}:
    #
    #     # poll for ~25 seconds
    #     resource.wait_until(max_attempts:5,delay:5) {|resource|...}
    #
    # ## Callbacks
    #
    # You can be notified before each polling attempt and before each
    # delay. If you throw `:success` or `:failure` from these callbacks,
    # it will terminate the waiter.
    #
    #     started_at = Time.now
    #     # poll for 1 hour, instead of a number of attempts
    #     proc = Proc.new do |attempts, response|
    #       throw :failure if Time.now - started_at > 3600
    #     end
    #
    #     # disable max attempts
    #     instance.wait_until(before_wait:proc, max_attempts:nil) {...}
    #
    # ## Handling Errors
    #
    # When a waiter is successful, it returns the Resource. When a waiter
    # fails, it raises an error.
    #
    #     begin
    #       resource.wait_until(...)
    #     rescue Aws::Waiters::Errors::WaiterFailed
    #       # resource did not enter the desired state in time
    #     end
    #
    # @yieldparam [Resource] resource to be used in the waiting condition.
    #
    # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter
    #   terminates because the waiter has entered a state that it will not
    #   transition out of, preventing success.
    #
    # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the
    #   configured maximum number of attempts have been made and the waiter
    #   is not yet successful.
    #
    # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is
    #   encountered while polling for a resource that is not expected.
# # @raise [NotImplementedError] Raised when the resource does not # # @option options [Integer] :max_attempts (10) Maximum number of # attempts # @option options [Integer] :delay (10) Delay between each # attempt in seconds # @option options [Proc] :before_attempt (nil) Callback # invoked before each attempt # @option options [Proc] :before_wait (nil) Callback # invoked before each wait # @return [Resource] if the waiter was successful def wait_until(options = {}, &block) self_copy = self.dup attempts = 0 options[:max_attempts] = 10 unless options.key?(:max_attempts) options[:delay] ||= 10 options[:poller] = Proc.new do attempts += 1 if block.call(self_copy) [:success, self_copy] else self_copy.reload unless attempts == options[:max_attempts] :retry end end Aws::Plugins::UserAgent.feature('resource') do Aws::Waiters::Waiter.new(options).wait({}) end end # @!group Actions # @example Request syntax with placeholder values # # bucket_request_payment.put({ # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # request_payment_configuration: { # required # payer: "Requester", # required, accepts Requester, BucketOwner # }, # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the data. You must use this # header as a message integrity check to verify that the request body # was not corrupted in transit. For more information, see [RFC 1864][1]. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [required, Types::RequestPaymentConfiguration] :request_payment_configuration # Container for Payer. # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). 
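    # @example Enabling Requester Pays (mirrors the request syntax above):
    #
    #     bucket_request_payment.put(
    #       request_payment_configuration: { payer: 'Requester' }
    #     )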
# @return [EmptyStructure] def put(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_bucket_request_payment(options) end resp.data end # @!group Associations # @return [Bucket] def bucket Bucket.new( name: @bucket_name, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end class Collection < Aws::Resources::Collection; end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/multipart_file_uploader.rb0000644000004100000410000001571714563445240023530 0ustar www-datawww-data# frozen_string_literal: true require 'pathname' require 'set' module Aws module S3 # @api private class MultipartFileUploader MIN_PART_SIZE = 5 * 1024 * 1024 # 5MB FILE_TOO_SMALL = "unable to multipart upload files smaller than 5MB" MAX_PARTS = 10_000 THREAD_COUNT = 10 # @api private CREATE_OPTIONS = Set.new( Client.api.operation(:create_multipart_upload).input.shape.member_names ) COMPLETE_OPTIONS = Set.new( Client.api.operation(:complete_multipart_upload).input.shape.member_names ) # @api private UPLOAD_PART_OPTIONS = Set.new( Client.api.operation(:upload_part).input.shape.member_names ) # @option options [Client] :client # @option options [Integer] :thread_count (THREAD_COUNT) def initialize(options = {}) @client = options[:client] || Client.new @thread_count = options[:thread_count] || THREAD_COUNT end # @return [Client] attr_reader :client # @param [String, Pathname, File, Tempfile] source The file to upload. # @option options [required, String] :bucket The bucket to upload to. # @option options [required, String] :key The key for the object. # @option options [Proc] :progress_callback # A Proc that will be called when each chunk of the upload is sent. # It will be invoked with [bytes_read], [total_sizes] # @return [Seahorse::Client::Response] - the CompleteMultipartUploadResponse def upload(source, options = {}) if File.size(source) < MIN_PART_SIZE raise ArgumentError, FILE_TOO_SMALL else upload_id = initiate_upload(options) parts = upload_parts(upload_id, source, options) complete_upload(upload_id, parts, options) end end private def initiate_upload(options) @client.create_multipart_upload(create_opts(options)).upload_id end def complete_upload(upload_id, parts, options) @client.complete_multipart_upload( **complete_opts(options).merge( upload_id: upload_id, multipart_upload: { parts: parts } ) ) end def upload_parts(upload_id, source, options) pending = PartList.new(compute_parts(upload_id, source, options)) completed = PartList.new errors = upload_in_threads(pending, completed, options) if errors.empty? 
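          # Worker threads finish out of order; CompleteMultipartUpload
          # requires parts sorted by ascending part number.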
          completed.to_a.sort_by { |part| part[:part_number] }
        else
          abort_upload(upload_id, options, errors)
        end
      end

      def abort_upload(upload_id, options, errors)
        @client.abort_multipart_upload(
          bucket: options[:bucket],
          key: options[:key],
          upload_id: upload_id
        )
        msg = "multipart upload failed: #{errors.map(&:message).join("; ")}"
        raise MultipartUploadError.new(msg, errors)
      rescue MultipartUploadError => error
        raise error
      rescue => error
        msg = "failed to abort multipart upload: #{error.message}"
        raise MultipartUploadError.new(msg, errors + [error])
      end

      def compute_parts(upload_id, source, options)
        size = File.size(source)
        default_part_size = compute_default_part_size(size)
        offset = 0
        part_number = 1
        parts = []
        while offset < size
          parts << upload_part_opts(options).merge(
            upload_id: upload_id,
            part_number: part_number,
            body: FilePart.new(
              source: source,
              offset: offset,
              size: part_size(size, default_part_size, offset)
            )
          )
          part_number += 1
          offset += default_part_size
        end
        parts
      end

      def create_opts(options)
        CREATE_OPTIONS.inject({}) do |hash, key|
          hash[key] = options[key] if options.key?(key)
          hash
        end
      end

      def complete_opts(options)
        COMPLETE_OPTIONS.inject({}) do |hash, key|
          hash[key] = options[key] if options.key?(key)
          hash
        end
      end

      def upload_part_opts(options)
        UPLOAD_PART_OPTIONS.inject({}) do |hash, key|
          hash[key] = options[key] if options.key?(key)
          hash
        end
      end

      def upload_in_threads(pending, completed, options)
        threads = []
        if (callback = options[:progress_callback])
          progress = MultipartProgress.new(pending, callback)
        end
        @thread_count.times do
          thread = Thread.new do
            begin
              while part = pending.shift
                if progress
                  part[:on_chunk_sent] = proc do |_chunk, bytes, _total|
                    progress.call(part[:part_number], bytes)
                  end
                end
                resp = @client.upload_part(part)
                part[:body].close
                completed_part = {etag: resp.etag, part_number: part[:part_number]}
                # get the requested checksum from the response
                if part[:checksum_algorithm]
                  k = "checksum_#{part[:checksum_algorithm].downcase}".to_sym
                  completed_part[k] = resp[k]
                end
                completed.push(completed_part)
              end
              nil
            rescue => error
              # keep other threads from uploading other parts
              pending.clear!
              error
            end
          end
          threads << thread
        end
        threads.map(&:value).compact
      end

      def compute_default_part_size(source_size)
        [(source_size.to_f / MAX_PARTS).ceil, MIN_PART_SIZE].max.to_i
      end

      def part_size(total_size, part_size, offset)
        if offset + part_size > total_size
          total_size - offset
        else
          part_size
        end
      end

      # @api private
      class PartList

        def initialize(parts = [])
          @parts = parts
          @mutex = Mutex.new
        end

        def push(part)
          @mutex.synchronize { @parts.push(part) }
        end

        def shift
          @mutex.synchronize { @parts.shift }
        end

        def clear!
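          # Used when a part upload fails: emptying the shared list makes the
          # other worker threads' `while part = pending.shift` loops exit
          # instead of starting more part uploads.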
          @mutex.synchronize { @parts.clear }
        end

        def size
          @mutex.synchronize { @parts.size }
        end

        def part_sizes
          @mutex.synchronize { @parts.map { |p| p[:body].size } }
        end

        def to_a
          @mutex.synchronize { @parts.dup }
        end

      end

      # @api private
      class MultipartProgress
        def initialize(parts, progress_callback)
          @bytes_sent = Array.new(parts.size, 0)
          @total_sizes = parts.part_sizes
          @progress_callback = progress_callback
        end

        def call(part_number, bytes_read)
          # part numbers start at 1
          @bytes_sent[part_number - 1] = bytes_read
          @progress_callback.call(@bytes_sent, @total_sizes)
        end
      end
    end
  end
end

aws-sdk-s3-1.143.0/lib/aws-sdk-s3/object_multipart_copier.rb

# frozen_string_literal: true

require 'thread'
require 'cgi'

module Aws
  module S3
    # @api private
    class ObjectMultipartCopier

      FIVE_MB = 5 * 1024 * 1024 # 5MB

      FILE_TOO_SMALL = "unable to multipart copy files smaller than 5MB"

      MAX_PARTS = 10_000

      # @option options [Client] :client
      # @option options [Integer] :min_part_size (52428800)
      #   Size of copied parts. Defaults to 50MB.
      # @option options [Integer] :thread_count (10) Number of concurrent
      #   threads to use for copying parts.
      # @option options [Boolean] :use_source_parts (false) Use part sizes
      #   defined on the source object if any exist. If copying or moving an
      #   object that is already multipart, this does not re-part the object,
      #   instead re-using the part definitions on the original. That means
      #   the etag and any checksums will not change. This is especially
      #   useful if the source object has parts with varied sizes.
      def initialize(options = {})
        @use_source_parts = options.delete(:use_source_parts) || false
        @thread_count = options.delete(:thread_count) || 10
        @min_part_size = options.delete(:min_part_size) || (FIVE_MB * 10)
        @client = options[:client] || Client.new
      end

      # @return [Client]
      attr_reader :client

      # @option (see S3::Client#copy_object)
      def copy(options = {})
        metadata = source_metadata(options)
        size = metadata[:content_length]
        options[:upload_id] = initiate_upload(metadata.merge(options))
        begin
          parts = copy_parts(size, default_part_size(size), options)
          complete_upload(parts, options)
        rescue => error
          abort_upload(options)
          raise error
        end
      end

      private

      def initiate_upload(options)
        options = options_for(:create_multipart_upload, options)
        @client.create_multipart_upload(options).upload_id
      end

      def copy_parts(size, default_part_size, options)
        queue = PartQueue.new(compute_parts(size, default_part_size, options))
        threads = []
        @thread_count.times do
          threads << copy_part_thread(queue)
        end
        threads.map(&:value).flatten.sort_by { |part| part[:part_number] }
      end

      def copy_part_thread(queue)
        Thread.new do
          begin
            completed = []
            while part = queue.shift
              completed << copy_part(part)
            end
            completed
          rescue => error
            queue.clear!
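            # Draining the queue stops the sibling copy threads; the error
            # re-raised below surfaces through Thread#value in #copy_parts.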
            raise error
          end
        end
      end

      def copy_part(part)
        @client.upload_part_copy(part).copy_part_result.to_h.merge({
          part_number: part[:part_number]
        }).tap { |result| result.delete(:last_modified) }
      end

      def complete_upload(parts, options)
        options = options_for(:complete_multipart_upload, options)
        options[:multipart_upload] = { parts: parts }
        @client.complete_multipart_upload(options)
      end

      def abort_upload(options)
        @client.abort_multipart_upload({
          bucket: options[:bucket],
          key: options[:key],
          upload_id: options[:upload_id],
        })
      end

      def compute_parts(size, default_part_size, options)
        part_number = 1
        offset = 0
        parts = []
        options = options_for(:upload_part_copy, options)
        while offset < size
          part_size = calculate_part_size(part_number, default_part_size, options)
          parts << options.merge({
            part_number: part_number,
            copy_source_range: byte_range(offset, part_size, size),
          })
          part_number += 1
          offset += part_size
        end
        parts
      end

      def byte_range(offset, part_size, size)
        if offset + part_size < size
          "bytes=#{offset}-#{offset + part_size - 1}"
        else
          "bytes=#{offset}-#{size - 1}"
        end
      end

      def calculate_part_size(part_number, default_part_size, options)
        if @use_source_parts && source_has_parts(options)
          source_metadata(options.merge({ part_number: part_number }))[:content_length]
        else
          default_part_size
        end
      end

      def source_has_parts(options)
        @source_has_parts ||= source_metadata(
          options.merge({ part_number: 1 })
        )[:parts_count]
      end

      def source_metadata(options)
        if options[:content_length]
          return { content_length: options.delete(:content_length) }
        end

        client = options[:copy_source_client] || @client

        if vid_match = options[:copy_source].match(/([^\/]+?)\/(.+)\?versionId=(.+)/)
          bucket, key, version_id = vid_match[1, 3]
        else
          bucket, key = options[:copy_source].match(/([^\/]+?)\/(.+)/)[1, 2]
        end

        key = CGI.unescape(key)
        opts = { bucket: bucket, key: key }
        opts[:version_id] = version_id if version_id
        opts[:part_number] = options[:part_number] if options[:part_number]
        client.head_object(opts).to_h
      end

      def default_part_size(source_size)
        if source_size < FIVE_MB
          raise ArgumentError, FILE_TOO_SMALL
        else
          [(source_size.to_f / MAX_PARTS).ceil, @min_part_size].max.to_i
        end
      end

      def options_for(operation_name, options)
        API_OPTIONS[operation_name].inject({}) do |hash, opt_name|
          hash[opt_name] = options[opt_name] if options.key?(opt_name)
          hash
        end
      end

      # @api private
      def self.options_for(shape_name)
        Client.api.metadata['shapes'][shape_name].member_names
      end

      API_OPTIONS = {
        create_multipart_upload: Types::CreateMultipartUploadRequest.members,
        upload_part_copy: Types::UploadPartCopyRequest.members,
        complete_multipart_upload: Types::CompleteMultipartUploadRequest.members,
      }

      class PartQueue

        def initialize(parts = [])
          @parts = parts
          @mutex = Mutex.new
        end

        def shift
          @mutex.synchronize { @parts.shift }
        end

        def clear!
          @mutex.synchronize { @parts.clear }
        end

      end
    end
  end
end

aws-sdk-s3-1.143.0/lib/aws-sdk-s3/event_streams.rb

# frozen_string_literal: true

# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE

module Aws::S3
  module EventStreams
    class SelectObjectContentEventStream

      def initialize
        @event_emitter = Aws::EventEmitter.new
      end

      def on_records_event(&block)
        @event_emitter.on(:records, block) if block_given?
      end

      def on_stats_event(&block)
        @event_emitter.on(:stats, block) if block_given?
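        # Each on_*_event method registers the given block on the emitter
        # for a single event type. A hedged usage sketch (hypothetical
        # bucket, key, and expression; the handler is passed to
        # Client#select_object_content via the :event_stream_handler option):
        #
        #   handler = Aws::S3::EventStreams::SelectObjectContentEventStream.new
        #   handler.on_records_event { |event| print event.payload.read }
        #   handler.on_stats_event { |event| p event.details.bytes_scanned }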
      end

      def on_progress_event(&block)
        @event_emitter.on(:progress, block) if block_given?
      end

      def on_cont_event(&block)
        @event_emitter.on(:cont, block) if block_given?
      end

      def on_end_event(&block)
        @event_emitter.on(:end, block) if block_given?
      end

      def on_error_event(&block)
        @event_emitter.on(:error, block) if block_given?
      end

      def on_initial_response_event(&block)
        @event_emitter.on(:initial_response, block) if block_given?
      end

      def on_unknown_event(&block)
        @event_emitter.on(:unknown_event, block) if block_given?
      end

      def on_event(&block)
        on_records_event(&block)
        on_stats_event(&block)
        on_progress_event(&block)
        on_cont_event(&block)
        on_end_event(&block)
        on_error_event(&block)
        on_initial_response_event(&block)
        on_unknown_event(&block)
      end

      # @api private
      # @return Aws::EventEmitter
      attr_reader :event_emitter
    end
  end
end

aws-sdk-s3-1.143.0/lib/aws-sdk-s3/object_summary.rb

# frozen_string_literal: true

# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE

module Aws::S3
  class ObjectSummary

    extend Aws::Deprecations

    # @overload def initialize(bucket_name, key, options = {})
    #   @param [String] bucket_name
    #   @param [String] key
    #   @option options [Client] :client
    # @overload def initialize(options = {})
    #   @option options [required, String] :bucket_name
    #   @option options [required, String] :key
    #   @option options [Client] :client
    def initialize(*args)
      options = Hash === args.last ? args.pop.dup : {}
      @bucket_name = extract_bucket_name(args, options)
      @key = extract_key(args, options)
      @data = options.delete(:data)
      @client = options.delete(:client) || Client.new(options)
      @waiter_block_warned = false
    end

    # @!group Read-Only Attributes

    # @return [String]
    def bucket_name
      @bucket_name
    end

    # @return [String]
    def key
      @key
    end

    # Creation date of the object.
    # @return [Time]
    def last_modified
      data[:last_modified]
    end

    # The entity tag is a hash of the object. The ETag reflects changes only
    # to the contents of an object, not its metadata. The ETag may or may
    # not be an MD5 digest of the object data. Whether or not it is depends
    # on how the object was created and how it is encrypted as described
    # below:
    #
    # * Objects created by the PUT Object, POST Object, or Copy operation,
    #   or through the Amazon Web Services Management Console, and are
    #   encrypted by SSE-S3 or plaintext, have ETags that are an MD5 digest
    #   of their object data.
    #
    # * Objects created by the PUT Object, POST Object, or Copy operation,
    #   or through the Amazon Web Services Management Console, and are
    #   encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5 digest
    #   of their object data.
    #
    # * If an object is created by either the Multipart Upload or Part Copy
    #   operation, the ETag is not an MD5 digest, regardless of the method
    #   of encryption. If an object is larger than 16 MB, the Amazon Web
    #   Services Management Console will upload or copy that object as a
    #   Multipart Upload, and therefore the ETag will not be an MD5 digest.
    #
    # **Directory buckets** - MD5 is not supported by directory buckets.
    #
    # @return [String]
    def etag
      data[:etag]
    end

    # The algorithm that was used to create a checksum of the object.
    # @return [Array<String>]
    def checksum_algorithm
      data[:checksum_algorithm]
    end

    # Size in bytes of the object
    # @return [Integer]
    def size
      data[:size]
    end

    # The class of storage used to store the object.
# # **Directory buckets** - Only the S3 Express One Zone storage class is # supported by directory buckets to store objects. # # # @return [String] def storage_class data[:storage_class] end # The owner of the object # # **Directory buckets** - The bucket owner is returned as the object # owner. # # # @return [Types::Owner] def owner data[:owner] end # Specifies the restoration status of an object. Objects in certain # storage classes must be restored before they can be retrieved. For # more information about these storage classes and how to work with # archived objects, see [ Working with archived objects][1] in the # *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. Only the S3 # Express One Zone storage class is supported by directory buckets to # store objects. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html # @return [Types::RestoreStatus] def restore_status data[:restore_status] end # @!endgroup # @return [Client] def client @client end # @raise [NotImplementedError] # @api private def load msg = "#load is not implemented, data only available via enumeration" raise NotImplementedError, msg end alias :reload :load # @raise [NotImplementedError] Raises when {#data_loaded?} is `false`. # @return [Types::Object] # Returns the data for this {ObjectSummary}. def data load unless @data @data end # @return [Boolean] # Returns `true` if this resource is loaded. Accessing attributes or # {#data} on an unloaded resource will trigger a call to {#load}. def data_loaded? !!@data end # @param [Hash] options ({}) # @return [Boolean] # Returns `true` if the ObjectSummary exists. def exists?(options = {}) begin wait_until_exists(options.merge(max_attempts: 1)) true rescue Aws::Waiters::Errors::UnexpectedError => e raise e.error rescue Aws::Waiters::Errors::WaiterFailed false end end # @param [Hash] options ({}) # @option options [Integer] :max_attempts (20) # @option options [Float] :delay (5) # @option options [Proc] :before_attempt # @option options [Proc] :before_wait # @return [ObjectSummary] def wait_until_exists(options = {}, &block) options, params = separate_params_and_options(options) waiter = Waiters::ObjectExists.new(options) yield_waiter_and_warn(waiter, &block) if block_given? Aws::Plugins::UserAgent.feature('resource') do waiter.wait(params.merge(bucket: @bucket_name, key: @key)) end ObjectSummary.new({ bucket_name: @bucket_name, key: @key, client: @client }) end # @param [Hash] options ({}) # @option options [Integer] :max_attempts (20) # @option options [Float] :delay (5) # @option options [Proc] :before_attempt # @option options [Proc] :before_wait # @return [ObjectSummary] def wait_until_not_exists(options = {}, &block) options, params = separate_params_and_options(options) waiter = Waiters::ObjectNotExists.new(options) yield_waiter_and_warn(waiter, &block) if block_given? Aws::Plugins::UserAgent.feature('resource') do waiter.wait(params.merge(bucket: @bucket_name, key: @key)) end ObjectSummary.new({ bucket_name: @bucket_name, key: @key, client: @client }) end # @deprecated Use [Aws::S3::Client] #wait_until instead # # Waiter polls an API operation until a resource enters a desired # state. # # @note The waiting operation is performed on a copy. The original resource # remains unchanged. # # ## Basic Usage # # Waiter will polls until it is successful, it fails by # entering a terminal state, or until a maximum number of attempts # are made. 
# # # polls in a loop until condition is true # resource.wait_until(options) {|resource| condition} # # ## Example # # instance.wait_until(max_attempts:10, delay:5) do |instance| # instance.state.name == 'running' # end # # ## Configuration # # You can configure the maximum number of polling attempts, and the # delay (in seconds) between each polling attempt. The waiting condition is # set by passing a block to {#wait_until}: # # # poll for ~25 seconds # resource.wait_until(max_attempts:5,delay:5) {|resource|...} # # ## Callbacks # # You can be notified before each polling attempt and before each # delay. If you throw `:success` or `:failure` from these callbacks, # it will terminate the waiter. # # started_at = Time.now # # poll for 1 hour, instead of a number of attempts # proc = Proc.new do |attempts, response| # throw :failure if Time.now - started_at > 3600 # end # # # disable max attempts # instance.wait_until(before_wait:proc, max_attempts:nil) {...} # # ## Handling Errors # # When a waiter is successful, it returns the Resource. When a waiter # fails, it raises an error. # # begin # resource.wait_until(...) # rescue Aws::Waiters::Errors::WaiterFailed # # resource did not enter the desired state in time # end # # @yieldparam [Resource] resource to be used in the waiting condition. # # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter # terminates because the waiter has entered a state that it will not # transition out of, preventing success. # # yet successful. # # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is # encountered while polling for a resource that is not expected. # # @raise [NotImplementedError] Raised when the resource does not # # @option options [Integer] :max_attempts (10) Maximum number of # attempts # @option options [Integer] :delay (10) Delay between each # attempt in seconds # @option options [Proc] :before_attempt (nil) Callback # invoked before each attempt # @option options [Proc] :before_wait (nil) Callback # invoked before each wait # @return [Resource] if the waiter was successful def wait_until(options = {}, &block) self_copy = self.dup attempts = 0 options[:max_attempts] = 10 unless options.key?(:max_attempts) options[:delay] ||= 10 options[:poller] = Proc.new do attempts += 1 if block.call(self_copy) [:success, self_copy] else self_copy.reload unless attempts == options[:max_attempts] :retry end end Aws::Plugins::UserAgent.feature('resource') do Aws::Waiters::Waiter.new(options).wait({}) end end # @!group Actions # @example Request syntax with placeholder values # # object_summary.copy_from({ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control # cache_control: "CacheControl", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # content_disposition: "ContentDisposition", # content_encoding: "ContentEncoding", # content_language: "ContentLanguage", # content_type: "ContentType", # copy_source: "CopySource", # required # copy_source_if_match: "CopySourceIfMatch", # copy_source_if_modified_since: Time.now, # copy_source_if_none_match: "CopySourceIfNoneMatch", # copy_source_if_unmodified_since: Time.now, # expires: Time.now, # grant_full_control: "GrantFullControl", # grant_read: "GrantRead", # grant_read_acp: "GrantReadACP", # grant_write_acp: "GrantWriteACP", # metadata: { # "MetadataKey" => "MetadataValue", # }, # metadata_directive: "COPY", # accepts COPY, REPLACE # tagging_directive: "COPY", # 
accepts COPY, REPLACE # server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE # website_redirect_location: "WebsiteRedirectLocation", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # ssekms_key_id: "SSEKMSKeyId", # ssekms_encryption_context: "SSEKMSEncryptionContext", # bucket_key_enabled: false, # copy_source_sse_customer_algorithm: "CopySourceSSECustomerAlgorithm", # copy_source_sse_customer_key: "CopySourceSSECustomerKey", # copy_source_sse_customer_key_md5: "CopySourceSSECustomerKeyMD5", # request_payer: "requester", # accepts requester # tagging: "TaggingHeader", # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE # object_lock_retain_until_date: Time.now, # object_lock_legal_hold_status: "ON", # accepts ON, OFF # expected_bucket_owner: "AccountId", # expected_source_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :acl # The canned access control list (ACL) to apply to the object. # # When you copy an object, the ACL metadata is not preserved and is set # to `private` by default. Only the owner has full access control. To # override the default ACL setting, specify a new ACL when you generate # a copy request. For more information, see [Using ACLs][1]. # # If the destination bucket that you're copying objects to uses the # bucket owner enforced setting for S3 Object Ownership, ACLs are # disabled and no longer affect permissions. Buckets that use this # setting only accept `PUT` requests that don't specify an ACL or `PUT` # requests that specify bucket owner full control ACLs, such as the # `bucket-owner-full-control` canned ACL or an equivalent form of this # ACL expressed in the XML format. For more information, see # [Controlling ownership of objects and disabling ACLs][2] in the # *Amazon S3 User Guide*. # # * If your destination bucket uses the bucket owner enforced setting # for Object Ownership, all objects written to the bucket by any # account will be owned by the bucket owner. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # @option options [String] :cache_control # Specifies the caching behavior along the request/reply chain. # @option options [String] :checksum_algorithm # Indicates the algorithm that you want Amazon S3 to use to create the # checksum for the object. For more information, see [Checking object # integrity][1] in the *Amazon S3 User Guide*. # # When you copy an object, if the source object has a checksum, that # checksum value will be copied to the new object by default. If the # `CopyObject` request does not include this `x-amz-checksum-algorithm` # header, the checksum algorithm will be copied from the source object # to the destination object (if it's present on the source object). You # can optionally specify a different checksum algorithm to use with the # `x-amz-checksum-algorithm` header. Unrecognized or unsupported values # will respond with the HTTP status code `400 Bad Request`. 
# # For directory buckets, when you use Amazon Web Services SDKs, `CRC32` # is the default checksum algorithm that's used for performance. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :content_disposition # Specifies presentational information for the object. Indicates whether # an object should be displayed in a web browser or downloaded as a # file. It allows specifying the desired filename for the downloaded # file. # @option options [String] :content_encoding # Specifies what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the media-type # referenced by the Content-Type header field. # # For directory buckets, only the `aws-chunked` value is supported in # this header field. # # # @option options [String] :content_language # The language the content is in. # @option options [String] :content_type # A standard MIME type that describes the format of the object data. # @option options [required, String] :copy_source # Specifies the source object for the copy operation. The source object # can be up to 5 GB. If the source object is an object that was uploaded # by using a multipart upload, the object copy will be a single part # object after the source object is copied to the destination bucket. # # You specify the value of the copy source in one of two formats, # depending on whether you want to access the source object through an # [access point][1]: # # * For objects not accessed through an access point, specify the name # of the source bucket and the key of the source object, separated by # a slash (/). For example, to copy the object `reports/january.pdf` # from the general purpose bucket `awsexamplebucket`, use # `awsexamplebucket/reports/january.pdf`. The value must be # URL-encoded. To copy the object `reports/january.pdf` from the # directory bucket `awsexamplebucket--use1-az5--x-s3`, use # `awsexamplebucket--use1-az5--x-s3/reports/january.pdf`. The value # must be URL-encoded. # # * For objects accessed through access points, specify the Amazon # Resource Name (ARN) of the object as accessed through the access # point, in the format # `arn:aws:s3:::accesspoint//object/`. # For example, to copy the object `reports/january.pdf` through access # point `my-access-point` owned by account `123456789012` in Region # `us-west-2`, use the URL encoding of # `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`. # The value must be URL encoded. # # * Amazon S3 supports copy operations using Access points only when # the source and destination buckets are in the same Amazon Web # Services Region. # # * Access points are not supported by directory buckets. # # # # Alternatively, for objects accessed through Amazon S3 on Outposts, # specify the ARN of the object as accessed in the format # `arn:aws:s3-outposts:::outpost//object/`. # For example, to copy the object `reports/january.pdf` through # outpost `my-outpost` owned by account `123456789012` in Region # `us-west-2`, use the URL encoding of # `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`. # The value must be URL-encoded. # # If your source bucket versioning is enabled, the `x-amz-copy-source` # header by default identifies the current version of an object to copy. # If the current version is a delete marker, Amazon S3 behaves as if the # object was deleted. 
To copy a different version, use the `versionId` # query parameter. Specifically, append `?versionId=` to the # value (for example, # `awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`). # If you don't specify a version ID, Amazon S3 copies the latest # version of the source object. # # If you enable versioning on the destination bucket, Amazon S3 # generates a unique version ID for the copied object. This version ID # is different from the version ID of the source object. Amazon S3 # returns the version ID of the copied object in the `x-amz-version-id` # response header in the response. # # If you do not enable versioning or suspend it on the destination # bucket, the version ID that Amazon S3 generates in the # `x-amz-version-id` response header is always null. # # **Directory buckets** - S3 Versioning isn't enabled and supported for # directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html # @option options [String] :copy_source_if_match # Copies the object if its entity tag (ETag) matches the specified tag. # # If both the `x-amz-copy-source-if-match` and # `x-amz-copy-source-if-unmodified-since` headers are present in the # request and evaluate as follows, Amazon S3 returns `200 OK` and copies # the data: # # * `x-amz-copy-source-if-match` condition evaluates to true # # * `x-amz-copy-source-if-unmodified-since` condition evaluates to false # @option options [Time,DateTime,Date,Integer,String] :copy_source_if_modified_since # Copies the object if it has been modified since the specified time. # # If both the `x-amz-copy-source-if-none-match` and # `x-amz-copy-source-if-modified-since` headers are present in the # request and evaluate as follows, Amazon S3 returns the `412 # Precondition Failed` response code: # # * `x-amz-copy-source-if-none-match` condition evaluates to false # # * `x-amz-copy-source-if-modified-since` condition evaluates to true # @option options [String] :copy_source_if_none_match # Copies the object if its entity tag (ETag) is different than the # specified ETag. # # If both the `x-amz-copy-source-if-none-match` and # `x-amz-copy-source-if-modified-since` headers are present in the # request and evaluate as follows, Amazon S3 returns the `412 # Precondition Failed` response code: # # * `x-amz-copy-source-if-none-match` condition evaluates to false # # * `x-amz-copy-source-if-modified-since` condition evaluates to true # @option options [Time,DateTime,Date,Integer,String] :copy_source_if_unmodified_since # Copies the object if it hasn't been modified since the specified # time. # # If both the `x-amz-copy-source-if-match` and # `x-amz-copy-source-if-unmodified-since` headers are present in the # request and evaluate as follows, Amazon S3 returns `200 OK` and copies # the data: # # * `x-amz-copy-source-if-match` condition evaluates to true # # * `x-amz-copy-source-if-unmodified-since` condition evaluates to false # @option options [Time,DateTime,Date,Integer,String] :expires # The date and time at which the object is no longer cacheable. # @option options [String] :grant_full_control # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the # object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [String] :grant_read # Allows grantee to read the object data and its metadata. # # * This functionality is not supported for directory buckets. 
# # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [String] :grant_read_acp # Allows grantee to read the object ACL. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [String] :grant_write_acp # Allows grantee to write the ACL for the applicable object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [Hash] :metadata # A map of metadata to store with the object in S3. # @option options [String] :metadata_directive # Specifies whether the metadata is copied from the source object or # replaced with metadata that's provided in the request. When copying # an object, you can preserve all metadata (the default) or specify new # metadata. If this header isn’t specified, `COPY` is the default # behavior. # # **General purpose bucket** - For general purpose buckets, when you # grant permissions, you can use the `s3:x-amz-metadata-directive` # condition key to enforce certain metadata behavior when objects are # uploaded. For more information, see [Amazon S3 condition key # examples][1] in the *Amazon S3 User Guide*. # # `x-amz-website-redirect-location` is unique to each object and is not # copied when using the `x-amz-metadata-directive` header. To copy the # value, you must specify `x-amz-website-redirect-location` in the # request header. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html # @option options [String] :tagging_directive # Specifies whether the object tag-set is copied from the source object # or replaced with the tag-set that's provided in the request. # # The default value is `COPY`. # # **Directory buckets** - For directory buckets in a `CopyObject` # operation, only the empty tag-set is supported. Any requests that # attempt to write non-empty tags into directory buckets will receive a # `501 Not Implemented` status code. When the destination bucket is a # directory bucket, you will receive a `501 Not Implemented` response in # any of the following situations: # # * When you attempt to `COPY` the tag-set from an S3 source object that # has non-empty tags. # # * When you attempt to `REPLACE` the tag-set of a source object and set # a non-empty value to `x-amz-tagging`. # # * When you don't set the `x-amz-tagging-directive` header and the # source object has non-empty tags. This is because the default value # of `x-amz-tagging-directive` is `COPY`. # # Because only the empty tag-set is supported for directory buckets in a # `CopyObject` operation, the following situations are allowed: # # * When you attempt to `COPY` the tag-set from a directory bucket # source object that has no tags to a general purpose bucket. It # copies an empty tag-set to the destination object. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and set the `x-amz-tagging` value of the directory # bucket destination object to empty. # # * When you attempt to `REPLACE` the tag-set of a general purpose # bucket source object that has non-empty tags and set the # `x-amz-tagging` value of the directory bucket destination object to # empty. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and don't set the `x-amz-tagging` value of the # directory bucket destination object. This is because the default # value of `x-amz-tagging` is the empty value. 
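# @example A hedged sketch of replacing tags during a copy (hypothetical
#   bucket and keys; general purpose buckets only, since directory buckets
#   accept only the empty tag-set):
#
#     object_summary.copy_from(
#       copy_source: "amzn-s3-demo-bucket/reports/january.pdf",
#       tagging_directive: "REPLACE",
#       tagging: "project=alpha&team=data" # URL-encoded query parameters
#     )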
# # # @option options [String] :server_side_encryption # The server-side encryption algorithm used when storing this object in # Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). # Unrecognized or unsupported values won’t write a destination object # and will receive a `400 Bad Request` response. # # Amazon S3 automatically encrypts all new objects that are copied to an # S3 bucket. When copying an object, if you don't specify encryption # information in your copy request, the encryption setting of the target # object is set to the default encryption configuration of the # destination bucket. By default, all buckets have a base level of # encryption configuration that uses server-side encryption with Amazon # S3 managed keys (SSE-S3). If the destination bucket has a default # encryption configuration that uses server-side encryption with Key # Management Service (KMS) keys (SSE-KMS), dual-layer server-side # encryption with Amazon Web Services KMS keys (DSSE-KMS), or # server-side encryption with customer-provided encryption keys (SSE-C), # Amazon S3 uses the corresponding KMS key, or a customer-provided key # to encrypt the target object copy. # # When you perform a `CopyObject` operation, if you want to use a # different type of encryption setting for the target object, you can # specify appropriate encryption-related headers to encrypt the target # object with an Amazon S3 managed key, a KMS key, or a # customer-provided key. If the encryption setting in your request is # different from the default encryption configuration of the destination # bucket, the encryption setting in your request takes precedence. # # With server-side encryption, Amazon S3 encrypts your data as it writes # your data to disks in its data centers and decrypts the data when you # access it. For more information about server-side encryption, see # [Using Server-Side Encryption][1] in the *Amazon S3 User Guide*. # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html # @option options [String] :storage_class # If the `x-amz-storage-class` header is not used, the copied object # will be stored in the `STANDARD` Storage Class by default. The # `STANDARD` storage class provides high durability and high # availability. Depending on performance needs, you can specify a # different Storage Class. # # * Directory buckets - For directory buckets, only the S3 # Express One Zone storage class is supported to store newly created # objects. Unsupported storage class values won't write a destination # object and will respond with the HTTP status code `400 Bad Request`. # # * Amazon S3 on Outposts - S3 on Outposts only uses the # `OUTPOSTS` Storage Class. # # # # You can use the `CopyObject` action to change the storage class of an # object that is already stored in Amazon S3 by using the # `x-amz-storage-class` header. For more information, see [Storage # Classes][1] in the *Amazon S3 User Guide*. # # Before using an object as a source object for the copy operation, you # must restore a copy of it if it meets any of the following conditions: # # * The storage class of the source object is `GLACIER` or # `DEEP_ARCHIVE`. # # * The storage class of the source object is `INTELLIGENT_TIERING` and # it's [S3 Intelligent-Tiering access tier][2] is `Archive Access` or # `Deep Archive Access`. 
# # For more information, see [RestoreObject][3] and [Copying Objects][4] # in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html # @option options [String] :website_redirect_location # If the destination bucket is configured as a website, redirects # requests for this object copy to another object in the same bucket or # to an external URL. Amazon S3 stores the value of this header in the # object metadata. This value is unique to each object and is not copied # when using the `x-amz-metadata-directive` header. Instead, you may opt # to provide this header in combination with the # `x-amz-metadata-directive` header. # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, `AES256`). # # When you perform a `CopyObject` operation, if you want to use a # different type of encryption setting for the target object, you can # specify appropriate encryption-related headers to encrypt the target # object with an Amazon S3 managed key, a KMS key, or a # customer-provided key. If the encryption setting in your request is # different from the default encryption configuration of the destination # bucket, the encryption setting in your request takes precedence. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @option options [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded. Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @option options [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @option options [String] :ssekms_key_id # Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object # encryption. All GET and PUT requests for an object protected by KMS # will fail if they're not made via SSL or using SigV4. For information # about configuring any of the officially supported Amazon Web Services # SDKs and Amazon Web Services CLI, see [Specifying the Signature # Version in Request Authentication][1] in the *Amazon S3 User Guide*. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version # @option options [String] :ssekms_encryption_context # Specifies the Amazon Web Services KMS Encryption Context to use for # object encryption. The value of this header is a base64-encoded UTF-8 # string holding JSON with the encryption context key-value pairs. 
This # value must be explicitly added to specify encryption context for # `CopyObject` requests. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @option options [Boolean] :bucket_key_enabled # Specifies whether Amazon S3 should use an S3 Bucket Key for object # encryption with server-side encryption using Key Management Service # (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can enable # an S3 Bucket Key for the object. # # Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key # for object encryption with SSE-KMS. Specifying this header with a COPY # action doesn’t affect bucket-level settings for S3 Bucket Key. # # For more information, see [Amazon S3 Bucket Keys][1] in the *Amazon S3 # User Guide*. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html # @option options [String] :copy_source_sse_customer_algorithm # Specifies the algorithm to use when decrypting the source object (for # example, `AES256`). # # If the source object for the copy is stored in Amazon S3 using SSE-C, # you must provide the necessary encryption information in your request # so that Amazon S3 can decrypt the object for copying. # # This functionality is not supported when the source object is in a # directory bucket. # # # @option options [String] :copy_source_sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use to # decrypt the source object. The encryption key provided in this header # must be the same one that was used when the source object was created. # # If the source object for the copy is stored in Amazon S3 using SSE-C, # you must provide the necessary encryption information in your request # so that Amazon S3 can decrypt the object for copying. # # This functionality is not supported when the source object is in a # directory bucket. # # # @option options [String] :copy_source_sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # If the source object for the copy is stored in Amazon S3 using SSE-C, # you must provide the necessary encryption information in your request # so that Amazon S3 can decrypt the object for copying. # # This functionality is not supported when the source object is in a # directory bucket. # # # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [String] :tagging # The tag-set for the object copy in the destination bucket. This value # must be used in conjunction with the `x-amz-tagging-directive` if you # choose `REPLACE` for the `x-amz-tagging-directive`. 
If you choose # `COPY` for the `x-amz-tagging-directive`, you don't need to set the # `x-amz-tagging` header, because the tag-set will be copied from the # source object directly. The tag-set must be encoded as URL Query # parameters. # # The default value is the empty value. # # **Directory buckets** - For directory buckets in a `CopyObject` # operation, only the empty tag-set is supported. Any requests that # attempt to write non-empty tags into directory buckets will receive a # `501 Not Implemented` status code. When the destination bucket is a # directory bucket, you will receive a `501 Not Implemented` response in # any of the following situations: # # * When you attempt to `COPY` the tag-set from an S3 source object that # has non-empty tags. # # * When you attempt to `REPLACE` the tag-set of a source object and set # a non-empty value to `x-amz-tagging`. # # * When you don't set the `x-amz-tagging-directive` header and the # source object has non-empty tags. This is because the default value # of `x-amz-tagging-directive` is `COPY`. # # Because only the empty tag-set is supported for directory buckets in a # `CopyObject` operation, the following situations are allowed: # # * When you attempt to `COPY` the tag-set from a directory bucket # source object that has no tags to a general purpose bucket. It # copies an empty tag-set to the destination object. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and set the `x-amz-tagging` value of the directory # bucket destination object to empty. # # * When you attempt to `REPLACE` the tag-set of a general purpose # bucket source object that has non-empty tags and set the # `x-amz-tagging` value of the directory bucket destination object to # empty. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and don't set the `x-amz-tagging` value of the # directory bucket destination object. This is because the default # value of `x-amz-tagging` is the empty value. # # # @option options [String] :object_lock_mode # The Object Lock mode that you want to apply to the object copy. # # This functionality is not supported for directory buckets. # # # @option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date # The date and time when you want the Object Lock of the object copy to # expire. # # This functionality is not supported for directory buckets. # # # @option options [String] :object_lock_legal_hold_status # Specifies whether you want to apply a legal hold to the object copy. # # This functionality is not supported for directory buckets. # # # @option options [String] :expected_bucket_owner # The account ID of the expected destination bucket owner. If the # account ID that you provide does not match the actual owner of the # destination bucket, the request fails with the HTTP status code `403 # Forbidden` (access denied). # @option options [String] :expected_source_bucket_owner # The account ID of the expected source bucket owner. If the account ID # that you provide does not match the actual owner of the source bucket, # the request fails with the HTTP status code `403 Forbidden` (access # denied). 
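# @example A minimal copy sketch (hypothetical bucket and keys):
#
#     summary = Aws::S3::ObjectSummary.new("amzn-s3-demo-bucket", "backups/january.pdf")
#     # Copies reports/january.pdf onto this summary's bucket/key.
#     summary.copy_from(copy_source: "amzn-s3-demo-bucket/reports/january.pdf")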
# @return [Types::CopyObjectOutput] def copy_from(options = {}) options = options.merge( bucket: @bucket_name, key: @key ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.copy_object(options) end resp.data end # @example Request syntax with placeholder values # # object_summary.delete({ # mfa: "MFA", # version_id: "ObjectVersionId", # request_payer: "requester", # accepts requester # bypass_governance_retention: false, # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :mfa # The concatenation of the authentication device's serial number, a # space, and the value that is displayed on your authentication device. # Required to permanently delete a versioned object if versioning is # configured with MFA delete enabled. # # This functionality is not supported for directory buckets. # # # @option options [String] :version_id # Version ID used to reference a specific version of the object. # # For directory buckets in this API operation, only the `null` value of # the version ID is supported. # # # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [Boolean] :bypass_governance_retention # Indicates whether S3 Object Lock should bypass Governance-mode # restrictions to process this operation. To use this header, you must # have the `s3:BypassGovernanceRetention` permission. # # This functionality is not supported for directory buckets. # # # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). 
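# @example A hedged sketch of deleting a specific version (hypothetical
#   bucket, key, and version ID; requires a versioned bucket and the
#   `s3:DeleteObjectVersion` permission):
#
#     summary = Aws::S3::ObjectSummary.new("amzn-s3-demo-bucket", "reports/january.pdf")
#     summary.delete(version_id: "example-version-id")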
# @return [Types::DeleteObjectOutput] def delete(options = {}) options = options.merge( bucket: @bucket_name, key: @key ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.delete_object(options) end resp.data end # @example Request syntax with placeholder values # # object_summary.get({ # if_match: "IfMatch", # if_modified_since: Time.now, # if_none_match: "IfNoneMatch", # if_unmodified_since: Time.now, # range: "Range", # response_cache_control: "ResponseCacheControl", # response_content_disposition: "ResponseContentDisposition", # response_content_encoding: "ResponseContentEncoding", # response_content_language: "ResponseContentLanguage", # response_content_type: "ResponseContentType", # response_expires: Time.now, # version_id: "ObjectVersionId", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # request_payer: "requester", # accepts requester # part_number: 1, # expected_bucket_owner: "AccountId", # checksum_mode: "ENABLED", # accepts ENABLED # }) # @param [Hash] options ({}) # @option options [String] :if_match # Return the object only if its entity tag (ETag) is the same as the one # specified in this header; otherwise, return a `412 Precondition # Failed` error. # # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: `If-Match` condition evaluates to # `true`, and; `If-Unmodified-Since` condition evaluates to `false`; # then, S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [Time,DateTime,Date,Integer,String] :if_modified_since # Return the object only if it has been modified since the specified # time; otherwise, return a `304 Not Modified` error. # # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows:` If-None-Match` condition evaluates # to `false`, and; `If-Modified-Since` condition evaluates to `true`; # then, S3 returns `304 Not Modified` status code. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [String] :if_none_match # Return the object only if its entity tag (ETag) is different from the # one specified in this header; otherwise, return a `304 Not Modified` # error. # # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows:` If-None-Match` condition evaluates # to `false`, and; `If-Modified-Since` condition evaluates to `true`; # then, S3 returns `304 Not Modified` HTTP status code. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [Time,DateTime,Date,Integer,String] :if_unmodified_since # Return the object only if it has not been modified since the specified # time; otherwise, return a `412 Precondition Failed` error. # # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: `If-Match` condition evaluates to # `true`, and; `If-Unmodified-Since` condition evaluates to `false`; # then, S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @option options [String] :range # Downloads the specified byte range of an object. 
For more information # about the HTTP Range header, see # [https://www.rfc-editor.org/rfc/rfc9110.html#name-range][1]. # # Amazon S3 doesn't support retrieving multiple ranges of data per # `GET` request. # # # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-range # @option options [String] :response_cache_control # Sets the `Cache-Control` header of the response. # @option options [String] :response_content_disposition # Sets the `Content-Disposition` header of the response. # @option options [String] :response_content_encoding # Sets the `Content-Encoding` header of the response. # @option options [String] :response_content_language # Sets the `Content-Language` header of the response. # @option options [String] :response_content_type # Sets the `Content-Type` header of the response. # @option options [Time,DateTime,Date,Integer,String] :response_expires # Sets the `Expires` header of the response. # @option options [String] :version_id # Version ID used to reference a specific version of the object. # # By default, the `GetObject` operation returns the current version of # an object. To return a different version, use the `versionId` # subresource. # # * If you include a `versionId` in your request header, you must have # the `s3:GetObjectVersion` permission to access a specific version of # an object. The `s3:GetObject` permission is not required in this # scenario. # # * If you request the current version of an object without a specific # `versionId` in the request header, only the `s3:GetObject` # permission is required. The `s3:GetObjectVersion` permission is not # required in this scenario. # # * **Directory buckets** - S3 Versioning isn't enabled and supported # for directory buckets. For this API operation, only the `null` value # of the version ID is supported by directory buckets. You can only # specify `null` to the `versionId` query parameter in the request. # # # # For more information about versioning, see [PutBucketVersioning][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html # @option options [String] :sse_customer_algorithm # Specifies the algorithm to use when decrypting the object (for # example, `AES256`). # # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object in # Amazon S3, then when you GET the object, you must use the following # headers: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @option options [String] :sse_customer_key # Specifies the customer-provided encryption key that you originally # provided for Amazon S3 to encrypt the data before storing it. This # value is used to decrypt the object when recovering it and must match # the one used when storing the data. The key must be appropriate for # use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. 
# # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object in # Amazon S3, then when you GET the object, you must use the following # headers: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @option options [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the customer-provided encryption # key according to RFC 1321. Amazon S3 uses this header for a message # integrity check to ensure that the encryption key was transmitted # without error. # # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object in # Amazon S3, then when you GET the object, you must use the following # headers: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [Integer] :part_number # Part number of the object being read. This is a positive integer # between 1 and 10,000. Effectively performs a 'ranged' GET request # for the part specified. Useful for downloading just a part of an # object. # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @option options [String] :checksum_mode # To retrieve the checksum, this mode must be enabled. 
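# @example A minimal streaming-download sketch (hypothetical bucket, key, and
#   local path; the block form writes the body in chunks instead of buffering
#   the whole object in memory):
#
#     summary = Aws::S3::ObjectSummary.new("amzn-s3-demo-bucket", "logs/app.log")
#     File.open("/tmp/app.log", "wb") do |file|
#       summary.get { |chunk| file.write(chunk) }
#     end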
# @return [Types::GetObjectOutput] def get(options = {}, &block) options = options.merge( bucket: @bucket_name, key: @key ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.get_object(options, &block) end resp.data end # @example Request syntax with placeholder values # # multipartupload = object_summary.initiate_multipart_upload({ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control # cache_control: "CacheControl", # content_disposition: "ContentDisposition", # content_encoding: "ContentEncoding", # content_language: "ContentLanguage", # content_type: "ContentType", # expires: Time.now, # grant_full_control: "GrantFullControl", # grant_read: "GrantRead", # grant_read_acp: "GrantReadACP", # grant_write_acp: "GrantWriteACP", # metadata: { # "MetadataKey" => "MetadataValue", # }, # server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE # website_redirect_location: "WebsiteRedirectLocation", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # ssekms_key_id: "SSEKMSKeyId", # ssekms_encryption_context: "SSEKMSEncryptionContext", # bucket_key_enabled: false, # request_payer: "requester", # accepts requester # tagging: "TaggingHeader", # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE # object_lock_retain_until_date: Time.now, # object_lock_legal_hold_status: "ON", # accepts ON, OFF # expected_bucket_owner: "AccountId", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # }) # @param [Hash] options ({}) # @option options [String] :acl # The canned ACL to apply to the object. Amazon S3 supports a set of # predefined ACLs, known as *canned ACLs*. Each canned ACL has a # predefined set of grantees and permissions. For more information, see # [Canned ACL][1] in the *Amazon S3 User Guide*. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can grant access permissions to # individual Amazon Web Services accounts or to predefined groups # defined by Amazon S3. These permissions are then added to the access # control list (ACL) on the new object. For more information, see [Using # ACLs][2]. One way to grant the permissions using the request headers # is to specify a canned ACL with the `x-amz-acl` request header. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html # @option options [String] :cache_control # Specifies caching behavior along the request/reply chain. # @option options [String] :content_disposition # Specifies presentational information for the object. # @option options [String] :content_encoding # Specifies what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the media-type # referenced by the Content-Type header field. # # For directory buckets, only the `aws-chunked` value is supported in # this header field. # # # @option options [String] :content_language # The language that the content is in. 
# @option options [String] :content_type # A standard MIME type describing the format of the object data. # @option options [Time,DateTime,Date,Integer,String] :expires # The date and time at which the object is no longer cacheable. # @option options [String] :grant_full_control # Specify access permissions explicitly to give the grantee READ, # READ\_ACP, and WRITE\_ACP permissions on the object. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*. # # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an Amazon # Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in the # following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, see # [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the Amazon # Web Services accounts identified by account IDs permissions to read # object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666" ` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # @option options [String] :grant_read # Specify access permissions explicitly to allow grantee to read the # object data and its metadata. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*. # # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an Amazon # Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in the # following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. 
California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, see # [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the Amazon # Web Services accounts identified by account IDs permissions to read # object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666" ` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # @option options [String] :grant_read_acp # Specify access permissions explicitly to allow grantee to read the # object ACL. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*. # # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an Amazon # Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in the # following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, see # [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the Amazon # Web Services accounts identified by account IDs permissions to read # object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666" ` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # @option options [String] :grant_write_acp # Specify access permissions explicitly to allow grantee to write the # ACL for the applicable object. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*.
# # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an Amazon # Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in the # following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, see # [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the Amazon # Web Services accounts identified by account IDs permissions to read # object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666" ` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # @option options [Hash] :metadata # A map of metadata to store with the object in S3. # @option options [String] :server_side_encryption # The server-side encryption algorithm used when you store this object # in Amazon S3 (for example, `AES256`, `aws:kms`). # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # @option options [String] :storage_class # By default, Amazon S3 uses the STANDARD Storage Class to store newly # created objects. The STANDARD storage class provides high durability # and high availability. Depending on performance needs, you can specify # a different Storage Class. For more information, see [Storage # Classes][1] in the *Amazon S3 User Guide*. # # * For directory buckets, only the S3 Express One Zone storage class is # supported to store newly created objects. # # * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # @option options [String] :website_redirect_location # If the bucket is configured as a website, redirects requests for this # object to another object in the same bucket or to an external URL. # Amazon S3 stores the value of this header in the object metadata. # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, AES256). # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded; Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the customer-provided encryption # key according to RFC 1321. 
Amazon S3 uses this header for a message # integrity check to ensure that the encryption key was transmitted # without error. # # This functionality is not supported for directory buckets. # # # @option options [String] :ssekms_key_id # Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric # encryption customer managed key to use for object encryption. # # This functionality is not supported for directory buckets. # # # @option options [String] :ssekms_encryption_context # Specifies the Amazon Web Services KMS Encryption Context to use for # object encryption. The value of this header is a base64-encoded UTF-8 # string holding JSON with the encryption context key-value pairs. # # This functionality is not supported for directory buckets. # # # @option options [Boolean] :bucket_key_enabled # Specifies whether Amazon S3 should use an S3 Bucket Key for object # encryption with server-side encryption using Key Management Service # (KMS) keys (SSE-KMS). Setting this header to `true` causes Amazon S3 # to use an S3 Bucket Key for object encryption with SSE-KMS. # # Specifying this header with an object action doesn’t affect # bucket-level settings for S3 Bucket Key. # # This functionality is not supported for directory buckets. # # # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [String] :tagging # The tag-set for the object. The tag-set must be encoded as URL Query # parameters. # # This functionality is not supported for directory buckets. # # # @option options [String] :object_lock_mode # Specifies the Object Lock mode that you want to apply to the uploaded # object. # # This functionality is not supported for directory buckets. # # # @option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date # Specifies the date and time when you want the Object Lock to expire. # # This functionality is not supported for directory buckets. # # # @option options [String] :object_lock_legal_hold_status # Specifies whether you want to apply a legal hold to the uploaded # object. # # This functionality is not supported for directory buckets. # # # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @option options [String] :checksum_algorithm # Indicates the algorithm that you want Amazon S3 to use to create the # checksum for the object. For more information, see [Checking object # integrity][1] in the *Amazon S3 User Guide*. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [MultipartUpload] def initiate_multipart_upload(options = {}) options = options.merge( bucket: @bucket_name, key: @key ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.create_multipart_upload(options) end MultipartUpload.new( bucket_name: @bucket_name, object_key: @key, id: resp.data.upload_id, client: @client ) end # @example Request syntax with placeholder values # # object_summary.put({ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control # body: source_file, # cache_control: "CacheControl", # content_disposition: "ContentDisposition", # content_encoding: "ContentEncoding", # content_language: "ContentLanguage", # content_length: 1, # content_md5: "ContentMD5", # content_type: "ContentType", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # checksum_crc32: "ChecksumCRC32", # checksum_crc32c: "ChecksumCRC32C", # checksum_sha1: "ChecksumSHA1", # checksum_sha256: "ChecksumSHA256", # expires: Time.now, # grant_full_control: "GrantFullControl", # grant_read: "GrantRead", # grant_read_acp: "GrantReadACP", # grant_write_acp: "GrantWriteACP", # metadata: { # "MetadataKey" => "MetadataValue", # }, # server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE # website_redirect_location: "WebsiteRedirectLocation", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # ssekms_key_id: "SSEKMSKeyId", # ssekms_encryption_context: "SSEKMSEncryptionContext", # bucket_key_enabled: false, # request_payer: "requester", # accepts requester # tagging: "TaggingHeader", # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE # object_lock_retain_until_date: Time.now, # object_lock_legal_hold_status: "ON", # accepts ON, OFF # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :acl # The canned ACL to apply to the object. For more information, see # [Canned ACL][1] in the *Amazon S3 User Guide*. # # When adding a new object, you can use headers to grant ACL-based # permissions to individual Amazon Web Services accounts or to # predefined groups defined by Amazon S3. These permissions are then # added to the ACL on the object. By default, all objects are private. # Only the owner has full access control. For more information, see # [Access Control List (ACL) Overview][2] and [Managing ACLs Using the # REST API][3] in the *Amazon S3 User Guide*. # # If the bucket that you're uploading objects to uses the bucket owner # enforced setting for S3 Object Ownership, ACLs are disabled and no # longer affect permissions. Buckets that use this setting only accept # PUT requests that don't specify an ACL or PUT requests that specify # bucket owner full control ACLs, such as the # `bucket-owner-full-control` canned ACL or an equivalent form of this # ACL expressed in the XML format. PUT requests that contain other ACLs # (for example, custom grants to certain Amazon Web Services accounts) # fail and return a `400` error with the error code # `AccessControlListNotSupported`. 
For more information, see [ # Controlling ownership of objects and disabling ACLs][4] in the *Amazon # S3 User Guide*. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # @option options [String, StringIO, File] :body # Object data. # @option options [String] :cache_control # Can be used to specify caching behavior along the request/reply chain. # For more information, see # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9][1]. # # # # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 # @option options [String] :content_disposition # Specifies presentational information for the object. For more # information, see # [https://www.rfc-editor.org/rfc/rfc6266#section-4][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc6266#section-4 # @option options [String] :content_encoding # Specifies what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the media-type # referenced by the Content-Type header field. For more information, see # [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding # @option options [String] :content_language # The language the content is in. # @option options [Integer] :content_length # Size of the body in bytes. This parameter is useful when the size of # the body cannot be determined automatically. For more information, see # [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length # @option options [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the message (without the # headers) according to RFC 1864. This header can be used as a message # integrity check to verify that the data is the same data that was # originally sent. Although it is optional, we recommend using the # Content-MD5 mechanism as an end-to-end integrity check. For more # information about REST request authentication, see [REST # Authentication][1]. # # The `Content-MD5` header is required for any request to upload an # object with a retention period configured using Amazon S3 Object Lock. # For more information about Amazon S3 Object Lock, see [Amazon S3 # Object Lock Overview][2] in the *Amazon S3 User Guide*. # # # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html # @option options [String] :content_type # A standard MIME type describing the format of the contents. For more # information, see # [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. 
When you send this header, # there must be a corresponding `x-amz-checksum-algorithm` or # `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request # with the HTTP status code `400 Bad Request`. # # For the `x-amz-checksum-algorithm` header, replace `algorithm` with # the supported algorithm from the following list: # # * CRC32 # # * CRC32C # # * SHA1 # # * SHA256 # # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # If the individual checksum value you provide through # `x-amz-checksum-algorithm` doesn't match the checksum algorithm you # set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter and uses the checksum algorithm # that matches the provided value in `x-amz-checksum-algorithm`. # # For directory buckets, when you use Amazon Web Services SDKs, `CRC32` # is the default checksum algorithm that's used for performance. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_crc32 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32 checksum of the object. For # more information, see [Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_crc32c # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32C checksum of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_sha1 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 160-bit SHA-1 digest of the object. For # more information, see [Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_sha256 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 256-bit SHA-256 digest of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [Time,DateTime,Date,Integer,String] :expires # The date and time at which the object is no longer cacheable. For more # information, see # [https://www.rfc-editor.org/rfc/rfc7234#section-5.3][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3 # @option options [String] :grant_full_control # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the # object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [String] :grant_read # Allows grantee to read the object data and its metadata. # # * This functionality is not supported for directory buckets.
# # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [String] :grant_read_acp # Allows grantee to read the object ACL. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [String] :grant_write_acp # Allows grantee to write the ACL for the applicable object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [Hash] :metadata # A map of metadata to store with the object in S3. # @option options [String] :server_side_encryption # The server-side encryption algorithm that was used when you store this # object in Amazon S3 (for example, `AES256`, `aws:kms`, # `aws:kms:dsse`). # # General purpose buckets - You have four mutually exclusive # options to protect data using server-side encryption in Amazon S3, # depending on how you choose to manage the encryption keys. # Specifically, the encryption key options are Amazon S3 managed keys # (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and # customer-provided keys (SSE-C). Amazon S3 encrypts data with # server-side encryption by using Amazon S3 managed keys (SSE-S3) by # default. You can optionally tell Amazon S3 to encrypt data at rest by # using server-side encryption with other key options. For more # information, see [Using Server-Side Encryption][1] in the *Amazon S3 # User Guide*. # # Directory buckets - For directory buckets, only the # server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) # value is supported. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html # @option options [String] :storage_class # By default, Amazon S3 uses the STANDARD Storage Class to store newly # created objects. The STANDARD storage class provides high durability # and high availability. Depending on performance needs, you can specify # a different Storage Class. For more information, see [Storage # Classes][1] in the *Amazon S3 User Guide*. # # * For directory buckets, only the S3 Express One Zone storage class is # supported to store newly created objects. # # * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # @option options [String] :website_redirect_location # If the bucket is configured as a website, redirects requests for this # object to another object in the same bucket or to an external URL. # Amazon S3 stores the value of this header in the object metadata. For # information about object metadata, see [Object Key and Metadata][1] in # the *Amazon S3 User Guide*. # # In the following example, the request header sets the redirect to an # object (anotherPage.html) in the same bucket: # # `x-amz-website-redirect-location: /anotherPage.html` # # In the following example, the request header sets the object redirect # to another website: # # `x-amz-website-redirect-location: http://www.example.com/` # # For more information about website hosting in Amazon S3, see [Hosting # Websites on Amazon S3][2] and [How to Configure Website Page # Redirects][3] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. 
# # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html # @option options [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, `AES256`). # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded; Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported for directory buckets. # # # @option options [String] :ssekms_key_id # If `x-amz-server-side-encryption` has a valid value of `aws:kms` or # `aws:kms:dsse`, this header specifies the ID (Key ID, Key ARN, or Key # Alias) of the Key Management Service (KMS) symmetric encryption # customer managed key that was used for the object. If you specify # `x-amz-server-side-encryption:aws:kms` or # `x-amz-server-side-encryption:aws:kms:dsse`, but do not provide # `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the # Amazon Web Services managed key (`aws/s3`) to protect the data. If the # KMS key does not exist in the same account that's issuing the # command, you must use the full ARN and not just the ID. # # This functionality is not supported for directory buckets. # # # @option options [String] :ssekms_encryption_context # Specifies the Amazon Web Services KMS Encryption Context to use for # object encryption. The value of this header is a base64-encoded UTF-8 # string holding JSON with the encryption context key-value pairs. This # value is stored as object metadata and automatically gets passed on to # Amazon Web Services KMS for future `GetObject` or `CopyObject` # operations on this object. This value must be explicitly added during # `CopyObject` operations. # # This functionality is not supported for directory buckets. # # # @option options [Boolean] :bucket_key_enabled # Specifies whether Amazon S3 should use an S3 Bucket Key for object # encryption with server-side encryption using Key Management Service # (KMS) keys (SSE-KMS). Setting this header to `true` causes Amazon S3 # to use an S3 Bucket Key for object encryption with SSE-KMS. # # Specifying this header with a PUT action doesn’t affect bucket-level # settings for S3 Bucket Key. # # This functionality is not supported for directory buckets. # # # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*.
# # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [String] :tagging # The tag-set for the object. The tag-set must be encoded as URL Query # parameters. (For example, "Key1=Value1") # # This functionality is not supported for directory buckets. # # # @option options [String] :object_lock_mode # The Object Lock mode that you want to apply to this object. # # This functionality is not supported for directory buckets. # # # @option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date # The date and time when you want this object's Object Lock to expire. # Must be formatted as a timestamp parameter. # # This functionality is not supported for directory buckets. # # # @option options [String] :object_lock_legal_hold_status # Specifies whether a legal hold will be applied to this object. For # more information about S3 Object Lock, see [Object Lock][1] in the # *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [Types::PutObjectOutput] def put(options = {}) options = options.merge( bucket: @bucket_name, key: @key ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_object(options) end resp.data end # @example Request syntax with placeholder values # # object_summary.restore_object({ # version_id: "ObjectVersionId", # restore_request: { # days: 1, # glacier_job_parameters: { # tier: "Standard", # required, accepts Standard, Bulk, Expedited # }, # type: "SELECT", # accepts SELECT # tier: "Standard", # accepts Standard, Bulk, Expedited # description: "Description", # select_parameters: { # input_serialization: { # required # csv: { # file_header_info: "USE", # accepts USE, IGNORE, NONE # comments: "Comments", # quote_escape_character: "QuoteEscapeCharacter", # record_delimiter: "RecordDelimiter", # field_delimiter: "FieldDelimiter", # quote_character: "QuoteCharacter", # allow_quoted_record_delimiter: false, # }, # compression_type: "NONE", # accepts NONE, GZIP, BZIP2 # json: { # type: "DOCUMENT", # accepts DOCUMENT, LINES # }, # parquet: { # }, # }, # expression_type: "SQL", # required, accepts SQL # expression: "Expression", # required # output_serialization: { # required # csv: { # quote_fields: "ALWAYS", # accepts ALWAYS, ASNEEDED # quote_escape_character: "QuoteEscapeCharacter", # record_delimiter: "RecordDelimiter", # field_delimiter: "FieldDelimiter", # quote_character: "QuoteCharacter", # }, # json: { # record_delimiter: "RecordDelimiter", # }, # }, # }, # output_location: { # s3: { # bucket_name: "BucketName", # required # prefix: "LocationPrefix", # required # encryption: { # encryption_type: "AES256", # required, accepts AES256, aws:kms, aws:kms:dsse # kms_key_id: "SSEKMSKeyId", # kms_context: "KMSContext", # }, # canned_acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control # access_control_list: [ # { # grantee: { # display_name: "DisplayName", # email_address: "EmailAddress", # id: "ID", # type: "CanonicalUser", # required, accepts CanonicalUser, 
AmazonCustomerByEmail, Group # uri: "URI", # }, # permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP # }, # ], # tagging: { # tag_set: [ # required # { # key: "ObjectKey", # required # value: "Value", # required # }, # ], # }, # user_metadata: [ # { # name: "MetadataKey", # value: "MetadataValue", # }, # ], # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE # }, # }, # }, # request_payer: "requester", # accepts requester # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :version_id # VersionId used to reference a specific version of the object. # @option options [Types::RestoreRequest] :restore_request # Container for restore job parameters. # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). 
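# @example Hypothetical usage sketch: restore an archived object for a few days
#
#   # NOTE: not part of the generated documentation above. The day count
#   # and retrieval tier are placeholder values chosen for illustration;
#   # see the request syntax above for the full set of supported fields.
#   object_summary.restore_object(
#     restore_request: {
#       days: 2,
#       glacier_job_parameters: { tier: 'Standard' }
#     }
#   )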
# @return [Types::RestoreObjectOutput] def restore_object(options = {}) options = options.merge( bucket: @bucket_name, key: @key ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.restore_object(options) end resp.data end # @!group Associations # @return [ObjectAcl] def acl ObjectAcl.new( bucket_name: @bucket_name, object_key: @key, client: @client ) end # @return [Bucket] def bucket Bucket.new( name: @bucket_name, client: @client ) end # @param [String] id # @return [MultipartUpload] def multipart_upload(id) MultipartUpload.new( bucket_name: @bucket_name, object_key: @key, id: id, client: @client ) end # @return [Object] def object Object.new( bucket_name: @bucket_name, key: @key, client: @client ) end # @param [String] id # @return [ObjectVersion] def version(id) ObjectVersion.new( bucket_name: @bucket_name, object_key: @key, id: id, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name, key: @key } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end def extract_key(args, options) value = args[1] || options.delete(:key) case value when String then value when nil then raise ArgumentError, "missing required option :key" else msg = "expected :key to be a String, got #{value.class}" raise ArgumentError, msg end end def yield_waiter_and_warn(waiter, &block) if !@waiter_block_warned msg = "pass options to configure the waiter; "\ "yielding the waiter is deprecated" warn(msg) @waiter_block_warned = true end yield(waiter.waiter) end def separate_params_and_options(options) opts = Set.new( [:client, :max_attempts, :delay, :before_attempt, :before_wait] ) waiter_opts = {} waiter_params = {} options.each_pair do |key, value| if opts.include?(key) waiter_opts[key] = value else waiter_params[key] = value end end waiter_opts[:client] ||= @client [waiter_opts, waiter_params] end class Collection < Aws::Resources::Collection # @!group Batch Actions # @example Request syntax with placeholder values # # object_summary.batch_delete!({ # mfa: "MFA", # request_payer: "requester", # accepts requester # bypass_governance_retention: false, # expected_bucket_owner: "AccountId", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # }) # @param options ({}) # @option options [String] :mfa # The concatenation of the authentication device's serial number, a # space, and the value that is displayed on your authentication device. # Required to permanently delete a versioned object if versioning is # configured with MFA delete enabled. # # When performing the `DeleteObjects` operation on an MFA delete enabled # bucket, which attempts to delete the specified versioned objects, you # must include an MFA token. If you don't provide an MFA token, the # entire request will fail, even if there are non-versioned objects that # you are trying to delete. If you provide an invalid token, whether # there are versioned object keys in the request or not, the entire # Multi-Object Delete request will fail. For information about MFA # Delete, see [ MFA Delete][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. 
# # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [Boolean] :bypass_governance_retention # Specifies whether you want to delete this object even if it has a # Governance-type Object Lock in place. To use this header, you must # have the `s3:BypassGovernanceRetention` permission. # # This functionality is not supported for directory buckets. # # # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum-algorithm` or # `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request # with the HTTP status code `400 Bad Request`. # # For the `x-amz-checksum-algorithm` header, replace `algorithm` with # the supported algorithm from the following list: # # * CRC32 # # * CRC32C # # * SHA1 # # * SHA256 # # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # If the individual checksum value you provide through # `x-amz-checksum-algorithm` doesn't match the checksum algorithm you # set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter and uses the checksum algorithm # that matches the provided value in `x-amz-checksum-algorithm`. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [void] def batch_delete!(options = {}) batch_enum.each do |batch| params = Aws::Util.copy_hash(options) params[:bucket] = batch[0].bucket_name params[:delete] ||= {} params[:delete][:objects] ||= [] batch.each do |item| params[:delete][:objects] << { key: item.key } end Aws::Plugins::UserAgent.feature('resource') do batch[0].client.delete_objects(params) end end nil end # @!endgroup end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/bucket.rb0000644000004100000410000015773614563445240020072 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated.
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 class Bucket extend Aws::Deprecations # @overload def initialize(name, options = {}) # @param [String] name # @option options [Client] :client # @overload def initialize(options = {}) # @option options [required, String] :name # @option options [Client] :client def initialize(*args) options = Hash === args.last ? args.pop.dup : {} @name = extract_name(args, options) @data = options.delete(:data) @client = options.delete(:client) || Client.new(options) @waiter_block_warned = false end # @!group Read-Only Attributes # @return [String] def name @name end # Date the bucket was created. This date can change when making changes # to your bucket, such as editing its bucket policy. # @return [Time] def creation_date data[:creation_date] end # @!endgroup # @return [Client] def client @client end # @raise [NotImplementedError] # @api private def load msg = "#load is not implemented, data only available via enumeration" raise NotImplementedError, msg end alias :reload :load # @raise [NotImplementedError] Raises when {#data_loaded?} is `false`. # @return [Types::Bucket] # Returns the data for this {Bucket}. def data load unless @data @data end # @return [Boolean] # Returns `true` if this resource is loaded. Accessing attributes or # {#data} on an unloaded resource will trigger a call to {#load}. def data_loaded? !!@data end # @param [Hash] options ({}) # @return [Boolean] # Returns `true` if the Bucket exists. def exists?(options = {}) begin wait_until_exists(options.merge(max_attempts: 1)) true rescue Aws::Waiters::Errors::UnexpectedError => e raise e.error rescue Aws::Waiters::Errors::WaiterFailed false end end # @param [Hash] options ({}) # @option options [Integer] :max_attempts (20) # @option options [Float] :delay (5) # @option options [Proc] :before_attempt # @option options [Proc] :before_wait # @return [Bucket] def wait_until_exists(options = {}, &block) options, params = separate_params_and_options(options) waiter = Waiters::BucketExists.new(options) yield_waiter_and_warn(waiter, &block) if block_given? Aws::Plugins::UserAgent.feature('resource') do waiter.wait(params.merge(bucket: @name)) end Bucket.new({ name: @name, client: @client }) end # @param [Hash] options ({}) # @option options [Integer] :max_attempts (20) # @option options [Float] :delay (5) # @option options [Proc] :before_attempt # @option options [Proc] :before_wait # @return [Bucket] def wait_until_not_exists(options = {}, &block) options, params = separate_params_and_options(options) waiter = Waiters::BucketNotExists.new(options) yield_waiter_and_warn(waiter, &block) if block_given? Aws::Plugins::UserAgent.feature('resource') do waiter.wait(params.merge(bucket: @name)) end Bucket.new({ name: @name, client: @client }) end # @deprecated Use [Aws::S3::Client] #wait_until instead # # Waiter polls an API operation until a resource enters a desired # state. # # @note The waiting operation is performed on a copy. The original resource # remains unchanged. # # ## Basic Usage # # The waiter polls until it is successful, until it fails by # entering a terminal state, or until a maximum number of attempts # have been made.
# # # polls in a loop until condition is true # resource.wait_until(options) {|resource| condition} # # ## Example # # instance.wait_until(max_attempts:10, delay:5) do |instance| # instance.state.name == 'running' # end # # ## Configuration # # You can configure the maximum number of polling attempts, and the # delay (in seconds) between each polling attempt. The waiting condition is # set by passing a block to {#wait_until}: # # # poll for ~25 seconds # resource.wait_until(max_attempts:5,delay:5) {|resource|...} # # ## Callbacks # # You can be notified before each polling attempt and before each # delay. If you throw `:success` or `:failure` from these callbacks, # it will terminate the waiter. # # started_at = Time.now # # poll for 1 hour, instead of a number of attempts # proc = Proc.new do |attempts, response| # throw :failure if Time.now - started_at > 3600 # end # # # disable max attempts # instance.wait_until(before_wait:proc, max_attempts:nil) {...} # # ## Handling Errors # # When a waiter is successful, it returns the Resource. When a waiter # fails, it raises an error. # # begin # resource.wait_until(...) # rescue Aws::Waiters::Errors::WaiterFailed # # resource did not enter the desired state in time # end # # @yieldparam [Resource] resource to be used in the waiting condition. # # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter # terminates because the waiter has entered a state that it will not # transition out of, preventing success. # # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is # encountered while polling for a resource that is not expected. # # @raise [NotImplementedError] Raised when the resource does not # support #reload. # # @option options [Integer] :max_attempts (10) Maximum number of # attempts # @option options [Integer] :delay (10) Delay between each # attempt in seconds # @option options [Proc] :before_attempt (nil) Callback # invoked before each attempt # @option options [Proc] :before_wait (nil) Callback # invoked before each wait # @return [Resource] if the waiter was successful def wait_until(options = {}, &block) self_copy = self.dup attempts = 0 options[:max_attempts] = 10 unless options.key?(:max_attempts) options[:delay] ||= 10 options[:poller] = Proc.new do attempts += 1 if block.call(self_copy) [:success, self_copy] else self_copy.reload unless attempts == options[:max_attempts] :retry end end Aws::Plugins::UserAgent.feature('resource') do Aws::Waiters::Waiter.new(options).wait({}) end end # @!group Actions # @example Request syntax with placeholder values # # bucket.create({ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read # create_bucket_configuration: { # location_constraint: "af-south-1", # accepts af-south-1, ap-east-1, ap-northeast-1, ap-northeast-2, ap-northeast-3, ap-south-1, ap-south-2, ap-southeast-1, ap-southeast-2, ap-southeast-3, ca-central-1, cn-north-1, cn-northwest-1, EU, eu-central-1, eu-north-1, eu-south-1, eu-south-2, eu-west-1, eu-west-2, eu-west-3, me-south-1, sa-east-1, us-east-2, us-gov-east-1, us-gov-west-1, us-west-1, us-west-2 # location: { # type: "AvailabilityZone", # accepts AvailabilityZone # name: "LocationNameAsString", # }, # bucket: { # data_redundancy: "SingleAvailabilityZone", # accepts SingleAvailabilityZone # type: "Directory", # accepts Directory # }, # }, # grant_full_control: "GrantFullControl", # grant_read: "GrantRead", # grant_read_acp: "GrantReadACP", # grant_write: "GrantWrite", # grant_write_acp: "GrantWriteACP", #
object_lock_enabled_for_bucket: false, # object_ownership: "BucketOwnerPreferred", # accepts BucketOwnerPreferred, ObjectWriter, BucketOwnerEnforced # }) # @param [Hash] options ({}) # @option options [String] :acl # The canned ACL to apply to the bucket. # # This functionality is not supported for directory buckets. # # # @option options [Types::CreateBucketConfiguration] :create_bucket_configuration # The configuration information for the bucket. # @option options [String] :grant_full_control # Allows grantee the read, write, read ACP, and write ACP permissions on # the bucket. # # This functionality is not supported for directory buckets. # # # @option options [String] :grant_read # Allows grantee to list the objects in the bucket. # # This functionality is not supported for directory buckets. # # # @option options [String] :grant_read_acp # Allows grantee to read the bucket ACL. # # This functionality is not supported for directory buckets. # # # @option options [String] :grant_write # Allows grantee to create new objects in the bucket. # # For the bucket and object owners of existing objects, also allows # deletions and overwrites of those objects. # # This functionality is not supported for directory buckets. # # # @option options [String] :grant_write_acp # Allows grantee to write the ACL for the applicable bucket. # # This functionality is not supported for directory buckets. # # # @option options [Boolean] :object_lock_enabled_for_bucket # Specifies whether you want S3 Object Lock to be enabled for the new # bucket. # # This functionality is not supported for directory buckets. # # # @option options [String] :object_ownership # The container element for object ownership for a bucket's ownership # controls. # # `BucketOwnerPreferred` - Objects uploaded to the bucket change # ownership to the bucket owner if the objects are uploaded with the # `bucket-owner-full-control` canned ACL. # # `ObjectWriter` - The uploading account will own the object if the # object is uploaded with the `bucket-owner-full-control` canned ACL. # # `BucketOwnerEnforced` - Access control lists (ACLs) are disabled and # no longer affect permissions. The bucket owner automatically owns and # has full control over every object in the bucket. The bucket only # accepts PUT requests that don't specify an ACL or specify bucket # owner full control ACLs (such as the predefined # `bucket-owner-full-control` canned ACL or a custom ACL in XML format # that grants the same permissions). # # By default, `ObjectOwnership` is set to `BucketOwnerEnforced` and ACLs # are disabled. We recommend keeping ACLs disabled, except in uncommon # use cases where you must control access for each object individually. # For more information about S3 Object Ownership, see [Controlling # ownership of objects and disabling ACLs for your bucket][1] in the # *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. Directory # buckets use the bucket owner enforced setting for S3 Object Ownership. 
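# @example Hypothetical usage sketch: create a bucket in a specific Region
#
#   # NOTE: not part of the generated documentation above. The bucket name
#   # and Region are placeholders; a location constraint is generally
#   # required whenever the client's Region is not us-east-1.
#   bucket = Aws::S3::Bucket.new(
#     'amzn-s3-demo-bucket',
#     client: Aws::S3::Client.new(region: 'eu-west-1')
#   )
#   bucket.create(
#     create_bucket_configuration: { location_constraint: 'eu-west-1' }
#   )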
# # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # @return [Types::CreateBucketOutput] def create(options = {}) options = options.merge(bucket: @name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.create_bucket(options) end resp.data end # @example Request syntax with placeholder values # # bucket.delete({ # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # For directory buckets, this header is not supported in this API # operation. If you specify this header, the request fails with the HTTP # status code `501 Not Implemented`. # # # @return [EmptyStructure] def delete(options = {}) options = options.merge(bucket: @name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.delete_bucket(options) end resp.data end # @example Request syntax with placeholder values # # bucket.delete_objects({ # delete: { # required # objects: [ # required # { # key: "ObjectKey", # required # version_id: "ObjectVersionId", # }, # ], # quiet: false, # }, # mfa: "MFA", # request_payer: "requester", # accepts requester # bypass_governance_retention: false, # expected_bucket_owner: "AccountId", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # }) # @param [Hash] options ({}) # @option options [required, Types::Delete] :delete # Container for the request. # @option options [String] :mfa # The concatenation of the authentication device's serial number, a # space, and the value that is displayed on your authentication device. # Required to permanently delete a versioned object if versioning is # configured with MFA delete enabled. # # When performing the `DeleteObjects` operation on an MFA delete enabled # bucket, which attempts to delete the specified versioned objects, you # must include an MFA token. If you don't provide an MFA token, the # entire request will fail, even if there are non-versioned objects that # you are trying to delete. If you provide an invalid token, whether # there are versioned object keys in the request or not, the entire # Multi-Object Delete request will fail. For information about MFA # Delete, see [ MFA Delete][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [Boolean] :bypass_governance_retention # Specifies whether you want to delete this object even if it has a # Governance-type Object Lock in place. To use this header, you must # have the `s3:BypassGovernanceRetention` permission. 
# # This functionality is not supported for directory buckets. # # # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum-algorithm` or # `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request # with the HTTP status code `400 Bad Request`. # # For the `x-amz-checksum-algorithm` header, replace `algorithm` with # the supported algorithm from the following list: # # * CRC32 # # * CRC32C # # * SHA1 # # * SHA256 # # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # If the individual checksum value you provide through # `x-amz-checksum-algorithm` doesn't match the checksum algorithm you # set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter and uses the checksum algorithm # that matches the provided value in `x-amz-checksum-algorithm`. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [Types::DeleteObjectsOutput] def delete_objects(options = {}) options = options.merge(bucket: @name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.delete_objects(options) end resp.data end # @example Request syntax with placeholder values # # object = bucket.put_object({ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control # body: source_file, # cache_control: "CacheControl", # content_disposition: "ContentDisposition", # content_encoding: "ContentEncoding", # content_language: "ContentLanguage", # content_length: 1, # content_md5: "ContentMD5", # content_type: "ContentType", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # checksum_crc32: "ChecksumCRC32", # checksum_crc32c: "ChecksumCRC32C", # checksum_sha1: "ChecksumSHA1", # checksum_sha256: "ChecksumSHA256", # expires: Time.now, # grant_full_control: "GrantFullControl", # grant_read: "GrantRead", # grant_read_acp: "GrantReadACP", # grant_write_acp: "GrantWriteACP", # key: "ObjectKey", # required # metadata: { # "MetadataKey" => "MetadataValue", # }, # server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE # website_redirect_location: "WebsiteRedirectLocation", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # ssekms_key_id: "SSEKMSKeyId", # ssekms_encryption_context: "SSEKMSEncryptionContext", # bucket_key_enabled: false, # request_payer: "requester", # accepts requester # tagging: "TaggingHeader", # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE # object_lock_retain_until_date: Time.now, # object_lock_legal_hold_status: "ON", # accepts ON, OFF #
expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :acl # The canned ACL to apply to the object. For more information, see # [Canned ACL][1] in the *Amazon S3 User Guide*. # # When adding a new object, you can use headers to grant ACL-based # permissions to individual Amazon Web Services accounts or to # predefined groups defined by Amazon S3. These permissions are then # added to the ACL on the object. By default, all objects are private. # Only the owner has full access control. For more information, see # [Access Control List (ACL) Overview][2] and [Managing ACLs Using the # REST API][3] in the *Amazon S3 User Guide*. # # If the bucket that you're uploading objects to uses the bucket owner # enforced setting for S3 Object Ownership, ACLs are disabled and no # longer affect permissions. Buckets that use this setting only accept # PUT requests that don't specify an ACL or PUT requests that specify # bucket owner full control ACLs, such as the # `bucket-owner-full-control` canned ACL or an equivalent form of this # ACL expressed in the XML format. PUT requests that contain other ACLs # (for example, custom grants to certain Amazon Web Services accounts) # fail and return a `400` error with the error code # `AccessControlListNotSupported`. For more information, see [ # Controlling ownership of objects and disabling ACLs][4] in the *Amazon # S3 User Guide*. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # @option options [String, StringIO, File] :body # Object data. # @option options [String] :cache_control # Can be used to specify caching behavior along the request/reply chain. # For more information, see # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9][1]. # # # # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 # @option options [String] :content_disposition # Specifies presentational information for the object. For more # information, see # [https://www.rfc-editor.org/rfc/rfc6266#section-4][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc6266#section-4 # @option options [String] :content_encoding # Specifies what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the media-type # referenced by the Content-Type header field. For more information, see # [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding # @option options [String] :content_language # The language the content is in. # @option options [Integer] :content_length # Size of the body in bytes. This parameter is useful when the size of # the body cannot be determined automatically. For more information, see # [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length # @option options [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the message (without the # headers) according to RFC 1864. 
This header can be used as a message # integrity check to verify that the data is the same data that was # originally sent. Although it is optional, we recommend using the # Content-MD5 mechanism as an end-to-end integrity check. For more # information about REST request authentication, see [REST # Authentication][1]. # # The `Content-MD5` header is required for any request to upload an # object with a retention period configured using Amazon S3 Object Lock. # For more information about Amazon S3 Object Lock, see [Amazon S3 # Object Lock Overview][2] in the *Amazon S3 User Guide*. # # # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html # @option options [String] :content_type # A standard MIME type describing the format of the contents. For more # information, see # [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum-algorithm` or # `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request # with the HTTP status code `400 Bad Request`. # # For the `x-amz-checksum-algorithm` header, replace `algorithm` with # the supported algorithm from the following list: # # * CRC32 # # * CRC32C # # * SHA1 # # * SHA256 # # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # If the individual checksum value you provide through # `x-amz-checksum-algorithm` doesn't match the checksum algorithm you # set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter and uses the checksum algorithm # that matches the provided value in `x-amz-checksum-algorithm`. # # For directory buckets, when you use Amazon Web Services SDKs, `CRC32` # is the default checksum algorithm that's used for performance. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_crc32 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32 checksum of the object. For # more information, see [Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_crc32c # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32C checksum of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_sha1 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 160-bit SHA-1 digest of the object.
For # more information, see [Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :checksum_sha256 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 256-bit SHA-256 digest of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [Time,DateTime,Date,Integer,String] :expires # The date and time at which the object is no longer cacheable. For more # information, see # [https://www.rfc-editor.org/rfc/rfc7234#section-5.3][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3 # @option options [String] :grant_full_control # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the # object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [String] :grant_read # Allows grantee to read the object data and its metadata. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [String] :grant_read_acp # Allows grantee to read the object ACL. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [String] :grant_write_acp # Allows grantee to write the ACL for the applicable object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @option options [required, String] :key # Object key for which the PUT action was initiated. # @option options [Hash] :metadata # A map of metadata to store with the object in S3. # @option options [String] :server_side_encryption # The server-side encryption algorithm that was used when you store this # object in Amazon S3 (for example, `AES256`, `aws:kms`, # `aws:kms:dsse`). # # General purpose buckets - You have four mutually exclusive # options to protect data using server-side encryption in Amazon S3, # depending on how you choose to manage the encryption keys. # Specifically, the encryption key options are Amazon S3 managed keys # (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and # customer-provided keys (SSE-C). Amazon S3 encrypts data with # server-side encryption by using Amazon S3 managed keys (SSE-S3) by # default. You can optionally tell Amazon S3 to encrypt data at rest by # using server-side encryption with other key options. For more # information, see [Using Server-Side Encryption][1] in the *Amazon S3 # User Guide*. # # Directory buckets - For directory buckets, only the # server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) # value is supported. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html # @option options [String] :storage_class # By default, Amazon S3 uses the STANDARD Storage Class to store newly # created objects. The STANDARD storage class provides high durability # and high availability. Depending on performance needs, you can specify # a different Storage Class. 
For more information, see [Storage # Classes][1] in the *Amazon S3 User Guide*. # # * For directory buckets, only the S3 Express One Zone storage class is # supported to store newly created objects. # # * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # @option options [String] :website_redirect_location # If the bucket is configured as a website, redirects requests for this # object to another object in the same bucket or to an external URL. # Amazon S3 stores the value of this header in the object metadata. For # information about object metadata, see [Object Key and Metadata][1] in # the *Amazon S3 User Guide*. # # In the following example, the request header sets the redirect to an # object (anotherPage.html) in the same bucket: # # `x-amz-website-redirect-location: /anotherPage.html` # # In the following example, the request header sets the object redirect # to another website: # # `x-amz-website-redirect-location: http://www.example.com/` # # For more information about website hosting in Amazon S3, see [Hosting # Websites on Amazon S3][2] and [How to Configure Website Page # Redirects][3] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html # @option options [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, `AES256`). # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded; Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported for directory buckets. # # # @option options [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported for directory buckets. # # # @option options [String] :ssekms_key_id # If `x-amz-server-side-encryption` has a valid value of `aws:kms` or # `aws:kms:dsse`, this header specifies the ID (Key ID, Key ARN, or Key # Alias) of the Key Management Service (KMS) symmetric encryption # customer managed key that was used for the object. If you specify # `x-amz-server-side-encryption:aws:kms` or # `x-amz-server-side-encryption:aws:kms:dsse`, but do not provide # `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the # Amazon Web Services managed key (`aws/s3`) to protect the data. If the # KMS key does not exist in the same account that's issuing the # command, you must use the full ARN and not just the ID. # # This functionality is not supported for directory buckets. # # # @option options [String] :ssekms_encryption_context # Specifies the Amazon Web Services KMS Encryption Context to use for # object encryption. The value of this header is a base64-encoded UTF-8 # string holding JSON with the encryption context key-value pairs.
This # value is stored as object metadata and automatically gets passed on to # Amazon Web Services KMS for future `GetObject` or `CopyObject` # operations on this object. This value must be explicitly added during # `CopyObject` operations. # # This functionality is not supported for directory buckets. # # # @option options [Boolean] :bucket_key_enabled # Specifies whether Amazon S3 should use an S3 Bucket Key for object # encryption with server-side encryption using Key Management Service # (KMS) keys (SSE-KMS). Setting this header to `true` causes Amazon S3 # to use an S3 Bucket Key for object encryption with SSE-KMS. # # Specifying this header with a PUT action doesn’t affect bucket-level # settings for S3 Bucket Key. # # This functionality is not supported for directory buckets. # # # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [String] :tagging # The tag-set for the object. The tag-set must be encoded as URL Query # parameters. (For example, "Key1=Value1") # # This functionality is not supported for directory buckets. # # # @option options [String] :object_lock_mode # The Object Lock mode that you want to apply to this object. # # This functionality is not supported for directory buckets. # # # @option options [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date # The date and time when you want this object's Object Lock to expire. # Must be formatted as a timestamp parameter. # # This functionality is not supported for directory buckets. # # # @option options [String] :object_lock_legal_hold_status # Specifies whether a legal hold will be applied to this object. For # more information about S3 Object Lock, see [Object Lock][1] in the # *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). 
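    # @example Basic upload (a sketch; the key, body, and metadata shown are
    #   assumed placeholder values, not taken from this gem)
    #
    #   # put_object issues a single PutObject call and returns an
    #   # Aws::S3::Object handle for the newly written key.
    #   obj = bucket.put_object(
    #     key: "docs/hello.txt",
    #     body: "Hello, S3!",
    #     content_type: "text/plain",
    #     metadata: { "uploaded-by" => "example" }
    #   )
    #   obj.key #=> "docs/hello.txt"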
# @return [Object] def put_object(options = {}) options = options.merge(bucket: @name) Aws::Plugins::UserAgent.feature('resource') do @client.put_object(options) end Object.new( bucket_name: @name, key: options[:key], client: @client ) end # @!group Associations # @return [BucketAcl] def acl BucketAcl.new( bucket_name: @name, client: @client ) end # @return [BucketCors] def cors BucketCors.new( bucket_name: @name, client: @client ) end # @return [BucketLifecycle] def lifecycle BucketLifecycle.new( bucket_name: @name, client: @client ) end # @return [BucketLifecycleConfiguration] def lifecycle_configuration BucketLifecycleConfiguration.new( bucket_name: @name, client: @client ) end # @return [BucketLogging] def logging BucketLogging.new( bucket_name: @name, client: @client ) end # @example Request syntax with placeholder values # # multipart_uploads = bucket.multipart_uploads({ # delimiter: "Delimiter", # encoding_type: "url", # accepts url # key_marker: "KeyMarker", # prefix: "Prefix", # upload_id_marker: "UploadIdMarker", # expected_bucket_owner: "AccountId", # request_payer: "requester", # accepts requester # }) # @param [Hash] options ({}) # @option options [String] :delimiter # Character you use to group keys. # # All keys that contain the same string between the prefix, if # specified, and the first occurrence of the delimiter after the prefix # are grouped under a single result element, `CommonPrefixes`. If you # don't specify the prefix parameter, then the substring starts at the # beginning of the key. The keys that are grouped under `CommonPrefixes` # result element are not returned elsewhere in the response. # # **Directory buckets** - For directory buckets, `/` is the only # supported delimiter. # # # @option options [String] :encoding_type # Requests Amazon S3 to encode the object keys in the response and # specifies the encoding method to use. An object key can contain any # Unicode character; however, the XML 1.0 parser cannot parse some # characters, such as characters with an ASCII value from 0 to 10. For # characters that are not supported in XML 1.0, you can add this # parameter to request that Amazon S3 encode the keys in the response. # @option options [String] :key_marker # Specifies the multipart upload after which listing should begin. # # * **General purpose buckets** - For general purpose buckets, # `key-marker` is an object key. Together with `upload-id-marker`, # this parameter specifies the multipart upload after which listing # should begin. # # If `upload-id-marker` is not specified, only the keys # lexicographically greater than the specified `key-marker` will be # included in the list. # # If `upload-id-marker` is specified, any multipart uploads for a key # equal to the `key-marker` might also be included, provided those # multipart uploads have upload IDs lexicographically greater than the # specified `upload-id-marker`. # # * **Directory buckets** - For directory buckets, `key-marker` is # obfuscated and isn't a real object key. The `upload-id-marker` # parameter isn't supported by directory buckets. To list the # additional multipart uploads, you only need to set the value of # `key-marker` to the `NextKeyMarker` value from the previous # response. # # In the `ListMultipartUploads` response, the multipart uploads # aren't sorted lexicographically based on the object keys. # # # @option options [String] :prefix # Lists in-progress uploads only for those keys that begin with the # specified prefix. 
You can use prefixes to separate a bucket into # different groupings of keys. (You can think of using `prefix` to make # groups in the same way that you'd use a folder in a file system.) # # **Directory buckets** - For directory buckets, only prefixes that end # in a delimiter (`/`) are supported. # # # @option options [String] :upload_id_marker # Together with key-marker, specifies the multipart upload after which # listing should begin. If key-marker is not specified, the # upload-id-marker parameter is ignored. Otherwise, any multipart # uploads for a key equal to the key-marker might be included in the # list only if they have an upload ID lexicographically greater than the # specified `upload-id-marker`. # # This functionality is not supported for directory buckets. # # # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [MultipartUpload::Collection] def multipart_uploads(options = {}) batches = Enumerator.new do |y| options = options.merge(bucket: @name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.list_multipart_uploads(options) end resp.each_page do |page| batch = [] page.data.uploads.each do |u| batch << MultipartUpload.new( bucket_name: @name, object_key: u.key, id: u.upload_id, data: u, client: @client ) end y.yield(batch) end end MultipartUpload::Collection.new(batches) end # @return [BucketNotification] def notification BucketNotification.new( bucket_name: @name, client: @client ) end # @param [String] key # @return [Object] def object(key) Object.new( bucket_name: @name, key: key, client: @client ) end # @example Request syntax with placeholder values # # object_versions = bucket.object_versions({ # delimiter: "Delimiter", # encoding_type: "url", # accepts url # key_marker: "KeyMarker", # prefix: "Prefix", # version_id_marker: "VersionIdMarker", # expected_bucket_owner: "AccountId", # request_payer: "requester", # accepts requester # optional_object_attributes: ["RestoreStatus"], # accepts RestoreStatus # }) # @param [Hash] options ({}) # @option options [String] :delimiter # A delimiter is a character that you specify to group keys. All keys # that contain the same string between the `prefix` and the first # occurrence of the delimiter are grouped under a single result element # in `CommonPrefixes`. These groups are counted as one result against # the `max-keys` limitation. These keys are not returned elsewhere in # the response. # @option options [String] :encoding_type # Requests Amazon S3 to encode the object keys in the response and # specifies the encoding method to use.
An object key can contain any # Unicode character; however, the XML 1.0 parser cannot parse some # characters, such as characters with an ASCII value from 0 to 10. For # characters that are not supported in XML 1.0, you can add this # parameter to request that Amazon S3 encode the keys in the response. # @option options [String] :key_marker # Specifies the key to start with when listing objects in a bucket. # @option options [String] :prefix # Use this parameter to select only those keys that begin with the # specified prefix. You can use prefixes to separate a bucket into # different groupings of keys. (You can think of using `prefix` to make # groups in the same way that you'd use a folder in a file system.) You # can use `prefix` with `delimiter` to roll up numerous objects into a # single result under `CommonPrefixes`. # @option options [String] :version_id_marker # Specifies the object version you want to start listing from. # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @option options [Array] :optional_object_attributes # Specifies the optional fields that you want returned in the response. # Fields that you do not specify are not returned. # @return [ObjectVersion::Collection] def object_versions(options = {}) batches = Enumerator.new do |y| options = options.merge(bucket: @name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.list_object_versions(options) end resp.each_page do |page| batch = [] page.data.versions_delete_markers.each do |v| batch << ObjectVersion.new( bucket_name: @name, object_key: v.key, id: v.version_id, data: v, client: @client ) end y.yield(batch) end end ObjectVersion::Collection.new(batches) end # @example Request syntax with placeholder values # # objects = bucket.objects({ # delimiter: "Delimiter", # encoding_type: "url", # accepts url # prefix: "Prefix", # fetch_owner: false, # start_after: "StartAfter", # request_payer: "requester", # accepts requester # expected_bucket_owner: "AccountId", # optional_object_attributes: ["RestoreStatus"], # accepts RestoreStatus # }) # @param [Hash] options ({}) # @option options [String] :delimiter # A delimiter is a character that you use to group keys. # # * **Directory buckets** - For directory buckets, `/` is the only # supported delimiter. # # * Directory buckets - When you query `ListObjectsV2` with a # delimiter during in-progress multipart uploads, the `CommonPrefixes` # response parameter contains the prefixes that are associated with # the in-progress multipart uploads. For more information about # multipart uploads, see [Multipart Upload Overview][1] in the *Amazon # S3 User Guide*. 
# # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html # @option options [String] :encoding_type # Encoding type used by Amazon S3 to encode object keys in the response. # @option options [String] :prefix # Limits the response to keys that begin with the specified prefix. # # **Directory buckets** - For directory buckets, only prefixes that end # in a delimiter (`/`) are supported. # # # @option options [Boolean] :fetch_owner # The owner field is not present in `ListObjectsV2` by default. If you # want to return the owner field with each key in the result, then set # the `FetchOwner` field to `true`. # # **Directory buckets** - For directory buckets, the bucket owner is # returned as the object owner for all objects. # # # @option options [String] :start_after # StartAfter is where you want Amazon S3 to start listing from. Amazon # S3 starts listing after this specified key. StartAfter can be any key # in the bucket. # # This functionality is not supported for directory buckets. # # # @option options [String] :request_payer # Confirms that the requester knows that they will be charged for the # `ListObjectsV2` request. Bucket owners need not specify this # parameter in their requests. # # This functionality is not supported for directory buckets. # # # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @option options [Array] :optional_object_attributes # Specifies the optional fields that you want returned in the response. # Fields that you do not specify are not returned. # # This functionality is not supported for directory buckets.
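    # @example Enumerating keys under a prefix (a sketch; the bucket contents
    #   and prefix are assumed placeholders)
    #
    #   # #objects pages through ListObjectsV2 results lazily; #limit stops
    #   # the enumeration after the given number of summaries.
    #   bucket.objects(prefix: "logs/2024/").limit(25).each do |summary|
    #     puts summary.key
    #   end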
# # # @return [ObjectSummary::Collection] def objects(options = {}) batches = Enumerator.new do |y| options = options.merge(bucket: @name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.list_objects_v2(options) end resp.each_page do |page| batch = [] page.data.contents.each do |c| batch << ObjectSummary.new( bucket_name: @name, key: c.key, data: c, client: @client ) end y.yield(batch) end end ObjectSummary::Collection.new(batches) end # @return [BucketPolicy] def policy BucketPolicy.new( bucket_name: @name, client: @client ) end # @return [BucketRequestPayment] def request_payment BucketRequestPayment.new( bucket_name: @name, client: @client ) end # @return [BucketTagging] def tagging BucketTagging.new( bucket_name: @name, client: @client ) end # @return [BucketVersioning] def versioning BucketVersioning.new( bucket_name: @name, client: @client ) end # @return [BucketWebsite] def website BucketWebsite.new( bucket_name: @name, client: @client ) end # @deprecated # @api private def identifiers { name: @name } end deprecated(:identifiers) private def extract_name(args, options) value = args[0] || options.delete(:name) case value when String then value when nil then raise ArgumentError, "missing required option :name" else msg = "expected :name to be a String, got #{value.class}" raise ArgumentError, msg end end def yield_waiter_and_warn(waiter, &block) if !@waiter_block_warned msg = "pass options to configure the waiter; "\ "yielding the waiter is deprecated" warn(msg) @waiter_block_warned = true end yield(waiter.waiter) end def separate_params_and_options(options) opts = Set.new( [:client, :max_attempts, :delay, :before_attempt, :before_wait] ) waiter_opts = {} waiter_params = {} options.each_pair do |key, value| if opts.include?(key) waiter_opts[key] = value else waiter_params[key] = value end end waiter_opts[:client] ||= @client [waiter_opts, waiter_params] end class Collection < Aws::Resources::Collection; end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/bucket_acl.rb0000644000004100000410000002411114563445240020675 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 class BucketAcl extend Aws::Deprecations # @overload def initialize(bucket_name, options = {}) # @param [String] bucket_name # @option options [Client] :client # @overload def initialize(options = {}) # @option options [required, String] :bucket_name # @option options [Client] :client def initialize(*args) options = Hash === args.last ? args.pop.dup : {} @bucket_name = extract_bucket_name(args, options) @data = options.delete(:data) @client = options.delete(:client) || Client.new(options) @waiter_block_warned = false end # @!group Read-Only Attributes # @return [String] def bucket_name @bucket_name end # Container for the bucket owner's display name and ID. # @return [Types::Owner] def owner data[:owner] end # A list of grants. # @return [Array] def grants data[:grants] end # @!endgroup # @return [Client] def client @client end # Loads, or reloads {#data} for the current {BucketAcl}. # Returns `self` making it possible to chain methods. 
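    # For example (a sketch; `bucket_acl` is assumed to be an existing
    # {BucketAcl} resource):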
# # bucket_acl.reload.data # # @return [self] def load resp = Aws::Plugins::UserAgent.feature('resource') do @client.get_bucket_acl(bucket: @bucket_name) end @data = resp.data self end alias :reload :load # @return [Types::GetBucketAclOutput] # Returns the data for this {BucketAcl}. Calls # {Client#get_bucket_acl} if {#data_loaded?} is `false`. def data load unless @data @data end # @return [Boolean] # Returns `true` if this resource is loaded. Accessing attributes or # {#data} on an unloaded resource will trigger a call to {#load}. def data_loaded? !!@data end # @deprecated Use [Aws::S3::Client] #wait_until instead # # Waiter polls an API operation until a resource enters a desired # state. # # @note The waiting operation is performed on a copy. The original resource # remains unchanged. # # ## Basic Usage # # The waiter polls until it is successful, until it fails by # entering a terminal state, or until a maximum number of attempts # are made. # # # polls in a loop until condition is true # resource.wait_until(options) {|resource| condition} # # ## Example # # instance.wait_until(max_attempts:10, delay:5) do |instance| # instance.state.name == 'running' # end # # ## Configuration # # You can configure the maximum number of polling attempts, and the # delay (in seconds) between each polling attempt. The waiting condition is # set by passing a block to {#wait_until}: # # # poll for ~25 seconds # resource.wait_until(max_attempts:5,delay:5) {|resource|...} # # ## Callbacks # # You can be notified before each polling attempt and before each # delay. If you throw `:success` or `:failure` from these callbacks, # it will terminate the waiter. # # started_at = Time.now # # poll for 1 hour, instead of a number of attempts # proc = Proc.new do |attempts, response| # throw :failure if Time.now - started_at > 3600 # end # # # disable max attempts # instance.wait_until(before_wait:proc, max_attempts:nil) {...} # # ## Handling Errors # # When a waiter is successful, it returns the Resource. When a waiter # fails, it raises an error. # # begin # resource.wait_until(...) # rescue Aws::Waiters::Errors::WaiterFailed # # resource did not enter the desired state in time # end # # @yieldparam [Resource] resource to be used in the waiting condition. # # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter # terminates because the waiter has entered a state that it will not # transition out of, preventing success. # # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the # configured maximum number of attempts have been made, and the waiter # is not yet successful. # # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is # encountered while polling for a resource that is not expected.
# # @raise [NotImplementedError] Raised when the resource does not # support waiters. # # @option options [Integer] :max_attempts (10) Maximum number of # attempts # @option options [Integer] :delay (10) Delay between each # attempt in seconds # @option options [Proc] :before_attempt (nil) Callback # invoked before each attempt # @option options [Proc] :before_wait (nil) Callback # invoked before each wait # @return [Resource] if the waiter was successful def wait_until(options = {}, &block) self_copy = self.dup attempts = 0 options[:max_attempts] = 10 unless options.key?(:max_attempts) options[:delay] ||= 10 options[:poller] = Proc.new do attempts += 1 if block.call(self_copy) [:success, self_copy] else self_copy.reload unless attempts == options[:max_attempts] :retry end end Aws::Plugins::UserAgent.feature('resource') do Aws::Waiters::Waiter.new(options).wait({}) end end # @!group Actions # @example Request syntax with placeholder values # # bucket_acl.put({ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read # access_control_policy: { # grants: [ # { # grantee: { # display_name: "DisplayName", # email_address: "EmailAddress", # id: "ID", # type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group # uri: "URI", # }, # permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP # }, # ], # owner: { # display_name: "DisplayName", # id: "ID", # }, # }, # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # grant_full_control: "GrantFullControl", # grant_read: "GrantRead", # grant_read_acp: "GrantReadACP", # grant_write: "GrantWrite", # grant_write_acp: "GrantWriteACP", # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :acl # The canned ACL to apply to the bucket. # @option options [Types::AccessControlPolicy] :access_control_policy # Contains the elements that set the ACL permissions for an object per # grantee. # @option options [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the data. This header must be # used as a message integrity check to verify that the request body was # not corrupted in transit. For more information, see [RFC 1864][1]. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :grant_full_control # Allows grantee the read, write, read ACP, and write ACP permissions on # the bucket. # @option options [String] :grant_read # Allows grantee to list the objects in the bucket. # @option options [String] :grant_read_acp # Allows grantee to read the bucket ACL.
# @option options [String] :grant_write # Allows grantee to create new objects in the bucket. # # For the bucket and object owners of existing objects, also allows # deletions and overwrites of those objects. # @option options [String] :grant_write_acp # Allows grantee to write the ACL for the applicable bucket. # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [EmptyStructure] def put(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_bucket_acl(options) end resp.data end # @!group Associations # @return [Bucket] def bucket Bucket.new( name: @bucket_name, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end class Collection < Aws::Resources::Collection; end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/client_api.rb0000644000004100000410000121734614563445240020727 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 # @api private module ClientApi include Seahorse::Model AbortDate = Shapes::TimestampShape.new(name: 'AbortDate') AbortIncompleteMultipartUpload = Shapes::StructureShape.new(name: 'AbortIncompleteMultipartUpload') AbortMultipartUploadOutput = Shapes::StructureShape.new(name: 'AbortMultipartUploadOutput') AbortMultipartUploadRequest = Shapes::StructureShape.new(name: 'AbortMultipartUploadRequest') AbortRuleId = Shapes::StringShape.new(name: 'AbortRuleId') AccelerateConfiguration = Shapes::StructureShape.new(name: 'AccelerateConfiguration') AcceptRanges = Shapes::StringShape.new(name: 'AcceptRanges') AccessControlPolicy = Shapes::StructureShape.new(name: 'AccessControlPolicy') AccessControlTranslation = Shapes::StructureShape.new(name: 'AccessControlTranslation') AccessKeyIdValue = Shapes::StringShape.new(name: 'AccessKeyIdValue') AccessPointAlias = Shapes::BooleanShape.new(name: 'AccessPointAlias') AccessPointArn = Shapes::StringShape.new(name: 'AccessPointArn') AccountId = Shapes::StringShape.new(name: 'AccountId') AllowQuotedRecordDelimiter = Shapes::BooleanShape.new(name: 'AllowQuotedRecordDelimiter') AllowedHeader = Shapes::StringShape.new(name: 'AllowedHeader') AllowedHeaders = Shapes::ListShape.new(name: 'AllowedHeaders', flattened: true) AllowedMethod = Shapes::StringShape.new(name: 'AllowedMethod') AllowedMethods = Shapes::ListShape.new(name: 'AllowedMethods', flattened: true) AllowedOrigin = Shapes::StringShape.new(name: 'AllowedOrigin') AllowedOrigins = Shapes::ListShape.new(name: 'AllowedOrigins', flattened: true) AnalyticsAndOperator = Shapes::StructureShape.new(name: 'AnalyticsAndOperator') AnalyticsConfiguration = Shapes::StructureShape.new(name: 'AnalyticsConfiguration') AnalyticsConfigurationList = Shapes::ListShape.new(name: 'AnalyticsConfigurationList', flattened: true) 
AnalyticsExportDestination = Shapes::StructureShape.new(name: 'AnalyticsExportDestination') AnalyticsFilter = Shapes::StructureShape.new(name: 'AnalyticsFilter') AnalyticsId = Shapes::StringShape.new(name: 'AnalyticsId') AnalyticsS3BucketDestination = Shapes::StructureShape.new(name: 'AnalyticsS3BucketDestination') AnalyticsS3ExportFileFormat = Shapes::StringShape.new(name: 'AnalyticsS3ExportFileFormat') ArchiveStatus = Shapes::StringShape.new(name: 'ArchiveStatus') Body = Shapes::BlobShape.new(name: 'Body') Bucket = Shapes::StructureShape.new(name: 'Bucket') BucketAccelerateStatus = Shapes::StringShape.new(name: 'BucketAccelerateStatus') BucketAlreadyExists = Shapes::StructureShape.new(name: 'BucketAlreadyExists') BucketAlreadyOwnedByYou = Shapes::StructureShape.new(name: 'BucketAlreadyOwnedByYou') BucketCannedACL = Shapes::StringShape.new(name: 'BucketCannedACL') BucketInfo = Shapes::StructureShape.new(name: 'BucketInfo') BucketKeyEnabled = Shapes::BooleanShape.new(name: 'BucketKeyEnabled') BucketLifecycleConfiguration = Shapes::StructureShape.new(name: 'BucketLifecycleConfiguration') BucketLocationConstraint = Shapes::StringShape.new(name: 'BucketLocationConstraint') BucketLocationName = Shapes::StringShape.new(name: 'BucketLocationName') BucketLoggingStatus = Shapes::StructureShape.new(name: 'BucketLoggingStatus') BucketLogsPermission = Shapes::StringShape.new(name: 'BucketLogsPermission') BucketName = Shapes::StringShape.new(name: 'BucketName') BucketType = Shapes::StringShape.new(name: 'BucketType') BucketVersioningStatus = Shapes::StringShape.new(name: 'BucketVersioningStatus') Buckets = Shapes::ListShape.new(name: 'Buckets') BypassGovernanceRetention = Shapes::BooleanShape.new(name: 'BypassGovernanceRetention') BytesProcessed = Shapes::IntegerShape.new(name: 'BytesProcessed') BytesReturned = Shapes::IntegerShape.new(name: 'BytesReturned') BytesScanned = Shapes::IntegerShape.new(name: 'BytesScanned') CORSConfiguration = Shapes::StructureShape.new(name: 'CORSConfiguration') CORSRule = Shapes::StructureShape.new(name: 'CORSRule') CORSRules = Shapes::ListShape.new(name: 'CORSRules', flattened: true) CSVInput = Shapes::StructureShape.new(name: 'CSVInput') CSVOutput = Shapes::StructureShape.new(name: 'CSVOutput') CacheControl = Shapes::StringShape.new(name: 'CacheControl') Checksum = Shapes::StructureShape.new(name: 'Checksum') ChecksumAlgorithm = Shapes::StringShape.new(name: 'ChecksumAlgorithm') ChecksumAlgorithmList = Shapes::ListShape.new(name: 'ChecksumAlgorithmList', flattened: true) ChecksumCRC32 = Shapes::StringShape.new(name: 'ChecksumCRC32') ChecksumCRC32C = Shapes::StringShape.new(name: 'ChecksumCRC32C') ChecksumMode = Shapes::StringShape.new(name: 'ChecksumMode') ChecksumSHA1 = Shapes::StringShape.new(name: 'ChecksumSHA1') ChecksumSHA256 = Shapes::StringShape.new(name: 'ChecksumSHA256') CloudFunction = Shapes::StringShape.new(name: 'CloudFunction') CloudFunctionConfiguration = Shapes::StructureShape.new(name: 'CloudFunctionConfiguration') CloudFunctionInvocationRole = Shapes::StringShape.new(name: 'CloudFunctionInvocationRole') Code = Shapes::StringShape.new(name: 'Code') Comments = Shapes::StringShape.new(name: 'Comments') CommonPrefix = Shapes::StructureShape.new(name: 'CommonPrefix') CommonPrefixList = Shapes::ListShape.new(name: 'CommonPrefixList', flattened: true) CompleteMultipartUploadOutput = Shapes::StructureShape.new(name: 'CompleteMultipartUploadOutput') CompleteMultipartUploadRequest = Shapes::StructureShape.new(name: 'CompleteMultipartUploadRequest') 
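    # NOTE: The constants above and below only register named Seahorse shapes;
    # the members that tie them together are defined later in this generated
    # file (outside this excerpt). As a rough sketch of the pattern the
    # aws-sdk-ruby generator uses (the member shown is illustrative, not
    # copied from this file):
    #
    #   CompletedPart.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag"))
    #   CompletedPartList.member = Shapes::ShapeRef.new(shape: CompletedPart)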
CompletedMultipartUpload = Shapes::StructureShape.new(name: 'CompletedMultipartUpload') CompletedPart = Shapes::StructureShape.new(name: 'CompletedPart') CompletedPartList = Shapes::ListShape.new(name: 'CompletedPartList', flattened: true) CompressionType = Shapes::StringShape.new(name: 'CompressionType') Condition = Shapes::StructureShape.new(name: 'Condition') ConfirmRemoveSelfBucketAccess = Shapes::BooleanShape.new(name: 'ConfirmRemoveSelfBucketAccess') ContentDisposition = Shapes::StringShape.new(name: 'ContentDisposition') ContentEncoding = Shapes::StringShape.new(name: 'ContentEncoding') ContentLanguage = Shapes::StringShape.new(name: 'ContentLanguage') ContentLength = Shapes::IntegerShape.new(name: 'ContentLength') ContentMD5 = Shapes::StringShape.new(name: 'ContentMD5') ContentRange = Shapes::StringShape.new(name: 'ContentRange') ContentType = Shapes::StringShape.new(name: 'ContentType') ContinuationEvent = Shapes::StructureShape.new(name: 'ContinuationEvent') CopyObjectOutput = Shapes::StructureShape.new(name: 'CopyObjectOutput') CopyObjectRequest = Shapes::StructureShape.new(name: 'CopyObjectRequest') CopyObjectResult = Shapes::StructureShape.new(name: 'CopyObjectResult') CopyPartResult = Shapes::StructureShape.new(name: 'CopyPartResult') CopySource = Shapes::StringShape.new(name: 'CopySource') CopySourceIfMatch = Shapes::StringShape.new(name: 'CopySourceIfMatch') CopySourceIfModifiedSince = Shapes::TimestampShape.new(name: 'CopySourceIfModifiedSince') CopySourceIfNoneMatch = Shapes::StringShape.new(name: 'CopySourceIfNoneMatch') CopySourceIfUnmodifiedSince = Shapes::TimestampShape.new(name: 'CopySourceIfUnmodifiedSince') CopySourceRange = Shapes::StringShape.new(name: 'CopySourceRange') CopySourceSSECustomerAlgorithm = Shapes::StringShape.new(name: 'CopySourceSSECustomerAlgorithm') CopySourceSSECustomerKey = Shapes::StringShape.new(name: 'CopySourceSSECustomerKey') CopySourceSSECustomerKeyMD5 = Shapes::StringShape.new(name: 'CopySourceSSECustomerKeyMD5') CopySourceVersionId = Shapes::StringShape.new(name: 'CopySourceVersionId') CreateBucketConfiguration = Shapes::StructureShape.new(name: 'CreateBucketConfiguration') CreateBucketOutput = Shapes::StructureShape.new(name: 'CreateBucketOutput') CreateBucketRequest = Shapes::StructureShape.new(name: 'CreateBucketRequest') CreateMultipartUploadOutput = Shapes::StructureShape.new(name: 'CreateMultipartUploadOutput') CreateMultipartUploadRequest = Shapes::StructureShape.new(name: 'CreateMultipartUploadRequest') CreateSessionOutput = Shapes::StructureShape.new(name: 'CreateSessionOutput') CreateSessionRequest = Shapes::StructureShape.new(name: 'CreateSessionRequest') CreationDate = Shapes::TimestampShape.new(name: 'CreationDate') DataRedundancy = Shapes::StringShape.new(name: 'DataRedundancy') Date = Shapes::TimestampShape.new(name: 'Date', timestampFormat: "iso8601") Days = Shapes::IntegerShape.new(name: 'Days') DaysAfterInitiation = Shapes::IntegerShape.new(name: 'DaysAfterInitiation') DefaultRetention = Shapes::StructureShape.new(name: 'DefaultRetention') Delete = Shapes::StructureShape.new(name: 'Delete') DeleteBucketAnalyticsConfigurationRequest = Shapes::StructureShape.new(name: 'DeleteBucketAnalyticsConfigurationRequest') DeleteBucketCorsRequest = Shapes::StructureShape.new(name: 'DeleteBucketCorsRequest') DeleteBucketEncryptionRequest = Shapes::StructureShape.new(name: 'DeleteBucketEncryptionRequest') DeleteBucketIntelligentTieringConfigurationRequest = Shapes::StructureShape.new(name: 
'DeleteBucketIntelligentTieringConfigurationRequest') DeleteBucketInventoryConfigurationRequest = Shapes::StructureShape.new(name: 'DeleteBucketInventoryConfigurationRequest') DeleteBucketLifecycleRequest = Shapes::StructureShape.new(name: 'DeleteBucketLifecycleRequest') DeleteBucketMetricsConfigurationRequest = Shapes::StructureShape.new(name: 'DeleteBucketMetricsConfigurationRequest') DeleteBucketOwnershipControlsRequest = Shapes::StructureShape.new(name: 'DeleteBucketOwnershipControlsRequest') DeleteBucketPolicyRequest = Shapes::StructureShape.new(name: 'DeleteBucketPolicyRequest') DeleteBucketReplicationRequest = Shapes::StructureShape.new(name: 'DeleteBucketReplicationRequest') DeleteBucketRequest = Shapes::StructureShape.new(name: 'DeleteBucketRequest') DeleteBucketTaggingRequest = Shapes::StructureShape.new(name: 'DeleteBucketTaggingRequest') DeleteBucketWebsiteRequest = Shapes::StructureShape.new(name: 'DeleteBucketWebsiteRequest') DeleteMarker = Shapes::BooleanShape.new(name: 'DeleteMarker') DeleteMarkerEntry = Shapes::StructureShape.new(name: 'DeleteMarkerEntry') DeleteMarkerReplication = Shapes::StructureShape.new(name: 'DeleteMarkerReplication') DeleteMarkerReplicationStatus = Shapes::StringShape.new(name: 'DeleteMarkerReplicationStatus') DeleteMarkerVersionId = Shapes::StringShape.new(name: 'DeleteMarkerVersionId') DeleteMarkers = Shapes::ListShape.new(name: 'DeleteMarkers', flattened: true) DeleteObjectOutput = Shapes::StructureShape.new(name: 'DeleteObjectOutput') DeleteObjectRequest = Shapes::StructureShape.new(name: 'DeleteObjectRequest') DeleteObjectTaggingOutput = Shapes::StructureShape.new(name: 'DeleteObjectTaggingOutput') DeleteObjectTaggingRequest = Shapes::StructureShape.new(name: 'DeleteObjectTaggingRequest') DeleteObjectsOutput = Shapes::StructureShape.new(name: 'DeleteObjectsOutput') DeleteObjectsRequest = Shapes::StructureShape.new(name: 'DeleteObjectsRequest') DeletePublicAccessBlockRequest = Shapes::StructureShape.new(name: 'DeletePublicAccessBlockRequest') DeletedObject = Shapes::StructureShape.new(name: 'DeletedObject') DeletedObjects = Shapes::ListShape.new(name: 'DeletedObjects', flattened: true) Delimiter = Shapes::StringShape.new(name: 'Delimiter') Description = Shapes::StringShape.new(name: 'Description') Destination = Shapes::StructureShape.new(name: 'Destination') DirectoryBucketToken = Shapes::StringShape.new(name: 'DirectoryBucketToken') DisplayName = Shapes::StringShape.new(name: 'DisplayName') ETag = Shapes::StringShape.new(name: 'ETag') EmailAddress = Shapes::StringShape.new(name: 'EmailAddress') EnableRequestProgress = Shapes::BooleanShape.new(name: 'EnableRequestProgress') EncodingType = Shapes::StringShape.new(name: 'EncodingType') Encryption = Shapes::StructureShape.new(name: 'Encryption') EncryptionConfiguration = Shapes::StructureShape.new(name: 'EncryptionConfiguration') End = Shapes::IntegerShape.new(name: 'End') EndEvent = Shapes::StructureShape.new(name: 'EndEvent') Error = Shapes::StructureShape.new(name: 'Error') ErrorCode = Shapes::StringShape.new(name: 'ErrorCode') ErrorDocument = Shapes::StructureShape.new(name: 'ErrorDocument') ErrorMessage = Shapes::StringShape.new(name: 'ErrorMessage') Errors = Shapes::ListShape.new(name: 'Errors', flattened: true) Event = Shapes::StringShape.new(name: 'Event') EventBridgeConfiguration = Shapes::StructureShape.new(name: 'EventBridgeConfiguration') EventList = Shapes::ListShape.new(name: 'EventList', flattened: true) ExistingObjectReplication = Shapes::StructureShape.new(name: 
ExistingObjectReplicationStatus = Shapes::StringShape.new(name: 'ExistingObjectReplicationStatus')
Expiration = Shapes::StringShape.new(name: 'Expiration')
ExpirationStatus = Shapes::StringShape.new(name: 'ExpirationStatus')
ExpiredObjectDeleteMarker = Shapes::BooleanShape.new(name: 'ExpiredObjectDeleteMarker')
Expires = Shapes::TimestampShape.new(name: 'Expires')
ExpiresString = Shapes::StringShape.new(name: 'ExpiresString')
ExposeHeader = Shapes::StringShape.new(name: 'ExposeHeader')
ExposeHeaders = Shapes::ListShape.new(name: 'ExposeHeaders', flattened: true)
Expression = Shapes::StringShape.new(name: 'Expression')
ExpressionType = Shapes::StringShape.new(name: 'ExpressionType')
FetchOwner = Shapes::BooleanShape.new(name: 'FetchOwner')
FieldDelimiter = Shapes::StringShape.new(name: 'FieldDelimiter')
FileHeaderInfo = Shapes::StringShape.new(name: 'FileHeaderInfo')
FilterRule = Shapes::StructureShape.new(name: 'FilterRule')
FilterRuleList = Shapes::ListShape.new(name: 'FilterRuleList', flattened: true)
FilterRuleName = Shapes::StringShape.new(name: 'FilterRuleName')
FilterRuleValue = Shapes::StringShape.new(name: 'FilterRuleValue')
GetBucketAccelerateConfigurationOutput = Shapes::StructureShape.new(name: 'GetBucketAccelerateConfigurationOutput')
GetBucketAccelerateConfigurationRequest = Shapes::StructureShape.new(name: 'GetBucketAccelerateConfigurationRequest')
GetBucketAclOutput = Shapes::StructureShape.new(name: 'GetBucketAclOutput')
GetBucketAclRequest = Shapes::StructureShape.new(name: 'GetBucketAclRequest')
GetBucketAnalyticsConfigurationOutput = Shapes::StructureShape.new(name: 'GetBucketAnalyticsConfigurationOutput')
GetBucketAnalyticsConfigurationRequest = Shapes::StructureShape.new(name: 'GetBucketAnalyticsConfigurationRequest')
GetBucketCorsOutput = Shapes::StructureShape.new(name: 'GetBucketCorsOutput')
GetBucketCorsRequest = Shapes::StructureShape.new(name: 'GetBucketCorsRequest')
GetBucketEncryptionOutput = Shapes::StructureShape.new(name: 'GetBucketEncryptionOutput')
GetBucketEncryptionRequest = Shapes::StructureShape.new(name: 'GetBucketEncryptionRequest')
GetBucketIntelligentTieringConfigurationOutput = Shapes::StructureShape.new(name: 'GetBucketIntelligentTieringConfigurationOutput')
GetBucketIntelligentTieringConfigurationRequest = Shapes::StructureShape.new(name: 'GetBucketIntelligentTieringConfigurationRequest')
GetBucketInventoryConfigurationOutput = Shapes::StructureShape.new(name: 'GetBucketInventoryConfigurationOutput')
GetBucketInventoryConfigurationRequest = Shapes::StructureShape.new(name: 'GetBucketInventoryConfigurationRequest')
GetBucketLifecycleConfigurationOutput = Shapes::StructureShape.new(name: 'GetBucketLifecycleConfigurationOutput')
GetBucketLifecycleConfigurationRequest = Shapes::StructureShape.new(name: 'GetBucketLifecycleConfigurationRequest')
GetBucketLifecycleOutput = Shapes::StructureShape.new(name: 'GetBucketLifecycleOutput')
GetBucketLifecycleRequest = Shapes::StructureShape.new(name: 'GetBucketLifecycleRequest')
GetBucketLocationOutput = Shapes::StructureShape.new(name: 'GetBucketLocationOutput')
GetBucketLocationRequest = Shapes::StructureShape.new(name: 'GetBucketLocationRequest')
GetBucketLoggingOutput = Shapes::StructureShape.new(name: 'GetBucketLoggingOutput')
GetBucketLoggingRequest = Shapes::StructureShape.new(name: 'GetBucketLoggingRequest')
GetBucketMetricsConfigurationOutput = Shapes::StructureShape.new(name: 'GetBucketMetricsConfigurationOutput')
GetBucketMetricsConfigurationRequest = Shapes::StructureShape.new(name: 'GetBucketMetricsConfigurationRequest')
GetBucketNotificationConfigurationRequest = Shapes::StructureShape.new(name: 'GetBucketNotificationConfigurationRequest')
GetBucketOwnershipControlsOutput = Shapes::StructureShape.new(name: 'GetBucketOwnershipControlsOutput')
GetBucketOwnershipControlsRequest = Shapes::StructureShape.new(name: 'GetBucketOwnershipControlsRequest')
GetBucketPolicyOutput = Shapes::StructureShape.new(name: 'GetBucketPolicyOutput')
GetBucketPolicyRequest = Shapes::StructureShape.new(name: 'GetBucketPolicyRequest')
GetBucketPolicyStatusOutput = Shapes::StructureShape.new(name: 'GetBucketPolicyStatusOutput')
GetBucketPolicyStatusRequest = Shapes::StructureShape.new(name: 'GetBucketPolicyStatusRequest')
GetBucketReplicationOutput = Shapes::StructureShape.new(name: 'GetBucketReplicationOutput')
GetBucketReplicationRequest = Shapes::StructureShape.new(name: 'GetBucketReplicationRequest')
GetBucketRequestPaymentOutput = Shapes::StructureShape.new(name: 'GetBucketRequestPaymentOutput')
GetBucketRequestPaymentRequest = Shapes::StructureShape.new(name: 'GetBucketRequestPaymentRequest')
GetBucketTaggingOutput = Shapes::StructureShape.new(name: 'GetBucketTaggingOutput')
GetBucketTaggingRequest = Shapes::StructureShape.new(name: 'GetBucketTaggingRequest')
GetBucketVersioningOutput = Shapes::StructureShape.new(name: 'GetBucketVersioningOutput')
GetBucketVersioningRequest = Shapes::StructureShape.new(name: 'GetBucketVersioningRequest')
GetBucketWebsiteOutput = Shapes::StructureShape.new(name: 'GetBucketWebsiteOutput')
GetBucketWebsiteRequest = Shapes::StructureShape.new(name: 'GetBucketWebsiteRequest')
GetObjectAclOutput = Shapes::StructureShape.new(name: 'GetObjectAclOutput')
GetObjectAclRequest = Shapes::StructureShape.new(name: 'GetObjectAclRequest')
GetObjectAttributesOutput = Shapes::StructureShape.new(name: 'GetObjectAttributesOutput')
GetObjectAttributesParts = Shapes::StructureShape.new(name: 'GetObjectAttributesParts')
GetObjectAttributesRequest = Shapes::StructureShape.new(name: 'GetObjectAttributesRequest')
GetObjectLegalHoldOutput = Shapes::StructureShape.new(name: 'GetObjectLegalHoldOutput')
GetObjectLegalHoldRequest = Shapes::StructureShape.new(name: 'GetObjectLegalHoldRequest')
GetObjectLockConfigurationOutput = Shapes::StructureShape.new(name: 'GetObjectLockConfigurationOutput')
GetObjectLockConfigurationRequest = Shapes::StructureShape.new(name: 'GetObjectLockConfigurationRequest')
GetObjectOutput = Shapes::StructureShape.new(name: 'GetObjectOutput')
GetObjectRequest = Shapes::StructureShape.new(name: 'GetObjectRequest')
GetObjectResponseStatusCode = Shapes::IntegerShape.new(name: 'GetObjectResponseStatusCode')
GetObjectRetentionOutput = Shapes::StructureShape.new(name: 'GetObjectRetentionOutput')
GetObjectRetentionRequest = Shapes::StructureShape.new(name: 'GetObjectRetentionRequest')
GetObjectTaggingOutput = Shapes::StructureShape.new(name: 'GetObjectTaggingOutput')
GetObjectTaggingRequest = Shapes::StructureShape.new(name: 'GetObjectTaggingRequest')
GetObjectTorrentOutput = Shapes::StructureShape.new(name: 'GetObjectTorrentOutput')
GetObjectTorrentRequest = Shapes::StructureShape.new(name: 'GetObjectTorrentRequest')
GetPublicAccessBlockOutput = Shapes::StructureShape.new(name: 'GetPublicAccessBlockOutput')
GetPublicAccessBlockRequest = Shapes::StructureShape.new(name: 'GetPublicAccessBlockRequest')
GlacierJobParameters = Shapes::StructureShape.new(name: 'GlacierJobParameters')
Grant = Shapes::StructureShape.new(name: 'Grant')
GrantFullControl = Shapes::StringShape.new(name: 'GrantFullControl')
GrantRead = Shapes::StringShape.new(name: 'GrantRead')
GrantReadACP = Shapes::StringShape.new(name: 'GrantReadACP')
GrantWrite = Shapes::StringShape.new(name: 'GrantWrite')
GrantWriteACP = Shapes::StringShape.new(name: 'GrantWriteACP')
Grantee = Shapes::StructureShape.new(name: 'Grantee', xmlNamespace: {"prefix"=>"xsi", "uri"=>"http://www.w3.org/2001/XMLSchema-instance"})
Grants = Shapes::ListShape.new(name: 'Grants')
HeadBucketOutput = Shapes::StructureShape.new(name: 'HeadBucketOutput')
HeadBucketRequest = Shapes::StructureShape.new(name: 'HeadBucketRequest')
HeadObjectOutput = Shapes::StructureShape.new(name: 'HeadObjectOutput')
HeadObjectRequest = Shapes::StructureShape.new(name: 'HeadObjectRequest')
HostName = Shapes::StringShape.new(name: 'HostName')
HttpErrorCodeReturnedEquals = Shapes::StringShape.new(name: 'HttpErrorCodeReturnedEquals')
HttpRedirectCode = Shapes::StringShape.new(name: 'HttpRedirectCode')
ID = Shapes::StringShape.new(name: 'ID')
IfMatch = Shapes::StringShape.new(name: 'IfMatch')
IfModifiedSince = Shapes::TimestampShape.new(name: 'IfModifiedSince')
IfNoneMatch = Shapes::StringShape.new(name: 'IfNoneMatch')
IfUnmodifiedSince = Shapes::TimestampShape.new(name: 'IfUnmodifiedSince')
IndexDocument = Shapes::StructureShape.new(name: 'IndexDocument')
Initiated = Shapes::TimestampShape.new(name: 'Initiated')
Initiator = Shapes::StructureShape.new(name: 'Initiator')
InputSerialization = Shapes::StructureShape.new(name: 'InputSerialization')
IntelligentTieringAccessTier = Shapes::StringShape.new(name: 'IntelligentTieringAccessTier')
IntelligentTieringAndOperator = Shapes::StructureShape.new(name: 'IntelligentTieringAndOperator')
IntelligentTieringConfiguration = Shapes::StructureShape.new(name: 'IntelligentTieringConfiguration')
IntelligentTieringConfigurationList = Shapes::ListShape.new(name: 'IntelligentTieringConfigurationList', flattened: true)
IntelligentTieringDays = Shapes::IntegerShape.new(name: 'IntelligentTieringDays')
IntelligentTieringFilter = Shapes::StructureShape.new(name: 'IntelligentTieringFilter')
IntelligentTieringId = Shapes::StringShape.new(name: 'IntelligentTieringId')
IntelligentTieringStatus = Shapes::StringShape.new(name: 'IntelligentTieringStatus')
InvalidObjectState = Shapes::StructureShape.new(name: 'InvalidObjectState')
InventoryConfiguration = Shapes::StructureShape.new(name: 'InventoryConfiguration')
InventoryConfigurationList = Shapes::ListShape.new(name: 'InventoryConfigurationList', flattened: true)
InventoryDestination = Shapes::StructureShape.new(name: 'InventoryDestination')
InventoryEncryption = Shapes::StructureShape.new(name: 'InventoryEncryption')
InventoryFilter = Shapes::StructureShape.new(name: 'InventoryFilter')
InventoryFormat = Shapes::StringShape.new(name: 'InventoryFormat')
InventoryFrequency = Shapes::StringShape.new(name: 'InventoryFrequency')
InventoryId = Shapes::StringShape.new(name: 'InventoryId')
InventoryIncludedObjectVersions = Shapes::StringShape.new(name: 'InventoryIncludedObjectVersions')
InventoryOptionalField = Shapes::StringShape.new(name: 'InventoryOptionalField')
InventoryOptionalFields = Shapes::ListShape.new(name: 'InventoryOptionalFields')
InventoryS3BucketDestination = Shapes::StructureShape.new(name: 'InventoryS3BucketDestination')
InventorySchedule = Shapes::StructureShape.new(name: 'InventorySchedule')
IsEnabled = Shapes::BooleanShape.new(name: 'IsEnabled')
IsLatest = Shapes::BooleanShape.new(name: 'IsLatest')
IsPublic = Shapes::BooleanShape.new(name: 'IsPublic')
IsRestoreInProgress = Shapes::BooleanShape.new(name: 'IsRestoreInProgress')
IsTruncated = Shapes::BooleanShape.new(name: 'IsTruncated')
JSONInput = Shapes::StructureShape.new(name: 'JSONInput')
JSONOutput = Shapes::StructureShape.new(name: 'JSONOutput')
JSONType = Shapes::StringShape.new(name: 'JSONType')
KMSContext = Shapes::StringShape.new(name: 'KMSContext')
KeyCount = Shapes::IntegerShape.new(name: 'KeyCount')
KeyMarker = Shapes::StringShape.new(name: 'KeyMarker')
KeyPrefixEquals = Shapes::StringShape.new(name: 'KeyPrefixEquals')
LambdaFunctionArn = Shapes::StringShape.new(name: 'LambdaFunctionArn')
LambdaFunctionConfiguration = Shapes::StructureShape.new(name: 'LambdaFunctionConfiguration')
LambdaFunctionConfigurationList = Shapes::ListShape.new(name: 'LambdaFunctionConfigurationList', flattened: true)
LastModified = Shapes::TimestampShape.new(name: 'LastModified')
LifecycleConfiguration = Shapes::StructureShape.new(name: 'LifecycleConfiguration')
LifecycleExpiration = Shapes::StructureShape.new(name: 'LifecycleExpiration')
LifecycleRule = Shapes::StructureShape.new(name: 'LifecycleRule')
LifecycleRuleAndOperator = Shapes::StructureShape.new(name: 'LifecycleRuleAndOperator')
LifecycleRuleFilter = Shapes::StructureShape.new(name: 'LifecycleRuleFilter')
LifecycleRules = Shapes::ListShape.new(name: 'LifecycleRules', flattened: true)
ListBucketAnalyticsConfigurationsOutput = Shapes::StructureShape.new(name: 'ListBucketAnalyticsConfigurationsOutput')
ListBucketAnalyticsConfigurationsRequest = Shapes::StructureShape.new(name: 'ListBucketAnalyticsConfigurationsRequest')
ListBucketIntelligentTieringConfigurationsOutput = Shapes::StructureShape.new(name: 'ListBucketIntelligentTieringConfigurationsOutput')
ListBucketIntelligentTieringConfigurationsRequest = Shapes::StructureShape.new(name: 'ListBucketIntelligentTieringConfigurationsRequest')
ListBucketInventoryConfigurationsOutput = Shapes::StructureShape.new(name: 'ListBucketInventoryConfigurationsOutput')
ListBucketInventoryConfigurationsRequest = Shapes::StructureShape.new(name: 'ListBucketInventoryConfigurationsRequest')
ListBucketMetricsConfigurationsOutput = Shapes::StructureShape.new(name: 'ListBucketMetricsConfigurationsOutput')
ListBucketMetricsConfigurationsRequest = Shapes::StructureShape.new(name: 'ListBucketMetricsConfigurationsRequest')
ListBucketsOutput = Shapes::StructureShape.new(name: 'ListBucketsOutput')
ListDirectoryBucketsOutput = Shapes::StructureShape.new(name: 'ListDirectoryBucketsOutput')
ListDirectoryBucketsRequest = Shapes::StructureShape.new(name: 'ListDirectoryBucketsRequest')
ListMultipartUploadsOutput = Shapes::StructureShape.new(name: 'ListMultipartUploadsOutput')
ListMultipartUploadsRequest = Shapes::StructureShape.new(name: 'ListMultipartUploadsRequest')
ListObjectVersionsOutput = Shapes::StructureShape.new(name: 'ListObjectVersionsOutput')
ListObjectVersionsRequest = Shapes::StructureShape.new(name: 'ListObjectVersionsRequest')
ListObjectsOutput = Shapes::StructureShape.new(name: 'ListObjectsOutput')
ListObjectsRequest = Shapes::StructureShape.new(name: 'ListObjectsRequest')
ListObjectsV2Output = Shapes::StructureShape.new(name: 'ListObjectsV2Output')
ListObjectsV2Request = Shapes::StructureShape.new(name: 'ListObjectsV2Request')
ListPartsOutput = Shapes::StructureShape.new(name: 'ListPartsOutput')
ListPartsRequest = Shapes::StructureShape.new(name: 'ListPartsRequest')
Location = Shapes::StringShape.new(name: 'Location')
LocationInfo = Shapes::StructureShape.new(name: 'LocationInfo')
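# List shapes declared with `flattened: true` (LifecycleRules, DeleteMarkers,
# and similar above) serialize as repeated XML elements with no wrapping
# container, while unflattened lists such as Grants nest their entries under
# a single list element; roughly, a flattened LifecycleRules emits one <Rule>
# element per entry rather than a <Rules> wrapper.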
LocationNameAsString = Shapes::StringShape.new(name: 'LocationNameAsString')
LocationPrefix = Shapes::StringShape.new(name: 'LocationPrefix')
LocationType = Shapes::StringShape.new(name: 'LocationType')
LoggingEnabled = Shapes::StructureShape.new(name: 'LoggingEnabled')
MFA = Shapes::StringShape.new(name: 'MFA')
MFADelete = Shapes::StringShape.new(name: 'MFADelete')
MFADeleteStatus = Shapes::StringShape.new(name: 'MFADeleteStatus')
Marker = Shapes::StringShape.new(name: 'Marker')
MaxAgeSeconds = Shapes::IntegerShape.new(name: 'MaxAgeSeconds')
MaxDirectoryBuckets = Shapes::IntegerShape.new(name: 'MaxDirectoryBuckets')
MaxKeys = Shapes::IntegerShape.new(name: 'MaxKeys')
MaxParts = Shapes::IntegerShape.new(name: 'MaxParts')
MaxUploads = Shapes::IntegerShape.new(name: 'MaxUploads')
Message = Shapes::StringShape.new(name: 'Message')
Metadata = Shapes::MapShape.new(name: 'Metadata')
MetadataDirective = Shapes::StringShape.new(name: 'MetadataDirective')
MetadataEntry = Shapes::StructureShape.new(name: 'MetadataEntry')
MetadataKey = Shapes::StringShape.new(name: 'MetadataKey')
MetadataValue = Shapes::StringShape.new(name: 'MetadataValue')
Metrics = Shapes::StructureShape.new(name: 'Metrics')
MetricsAndOperator = Shapes::StructureShape.new(name: 'MetricsAndOperator')
MetricsConfiguration = Shapes::StructureShape.new(name: 'MetricsConfiguration')
MetricsConfigurationList = Shapes::ListShape.new(name: 'MetricsConfigurationList', flattened: true)
MetricsFilter = Shapes::StructureShape.new(name: 'MetricsFilter')
MetricsId = Shapes::StringShape.new(name: 'MetricsId')
MetricsStatus = Shapes::StringShape.new(name: 'MetricsStatus')
Minutes = Shapes::IntegerShape.new(name: 'Minutes')
MissingMeta = Shapes::IntegerShape.new(name: 'MissingMeta')
MultipartUpload = Shapes::StructureShape.new(name: 'MultipartUpload')
MultipartUploadId = Shapes::StringShape.new(name: 'MultipartUploadId')
MultipartUploadList = Shapes::ListShape.new(name: 'MultipartUploadList', flattened: true)
NextKeyMarker = Shapes::StringShape.new(name: 'NextKeyMarker')
NextMarker = Shapes::StringShape.new(name: 'NextMarker')
NextPartNumberMarker = Shapes::IntegerShape.new(name: 'NextPartNumberMarker')
NextToken = Shapes::StringShape.new(name: 'NextToken')
NextUploadIdMarker = Shapes::StringShape.new(name: 'NextUploadIdMarker')
NextVersionIdMarker = Shapes::StringShape.new(name: 'NextVersionIdMarker')
NoSuchBucket = Shapes::StructureShape.new(name: 'NoSuchBucket')
NoSuchKey = Shapes::StructureShape.new(name: 'NoSuchKey')
NoSuchUpload = Shapes::StructureShape.new(name: 'NoSuchUpload')
NoncurrentVersionExpiration = Shapes::StructureShape.new(name: 'NoncurrentVersionExpiration')
NoncurrentVersionTransition = Shapes::StructureShape.new(name: 'NoncurrentVersionTransition')
NoncurrentVersionTransitionList = Shapes::ListShape.new(name: 'NoncurrentVersionTransitionList', flattened: true)
NotificationConfiguration = Shapes::StructureShape.new(name: 'NotificationConfiguration')
NotificationConfigurationDeprecated = Shapes::StructureShape.new(name: 'NotificationConfigurationDeprecated')
NotificationConfigurationFilter = Shapes::StructureShape.new(name: 'NotificationConfigurationFilter')
NotificationId = Shapes::StringShape.new(name: 'NotificationId')
Object = Shapes::StructureShape.new(name: 'Object')
ObjectAlreadyInActiveTierError = Shapes::StructureShape.new(name: 'ObjectAlreadyInActiveTierError')
ObjectAttributes = Shapes::StringShape.new(name: 'ObjectAttributes')
ObjectAttributesList = Shapes::ListShape.new(name: 'ObjectAttributesList')
ObjectCannedACL = Shapes::StringShape.new(name: 'ObjectCannedACL')
ObjectIdentifier = Shapes::StructureShape.new(name: 'ObjectIdentifier')
ObjectIdentifierList = Shapes::ListShape.new(name: 'ObjectIdentifierList', flattened: true)
ObjectKey = Shapes::StringShape.new(name: 'ObjectKey')
ObjectList = Shapes::ListShape.new(name: 'ObjectList', flattened: true)
ObjectLockConfiguration = Shapes::StructureShape.new(name: 'ObjectLockConfiguration')
ObjectLockEnabled = Shapes::StringShape.new(name: 'ObjectLockEnabled')
ObjectLockEnabledForBucket = Shapes::BooleanShape.new(name: 'ObjectLockEnabledForBucket')
ObjectLockLegalHold = Shapes::StructureShape.new(name: 'ObjectLockLegalHold')
ObjectLockLegalHoldStatus = Shapes::StringShape.new(name: 'ObjectLockLegalHoldStatus')
ObjectLockMode = Shapes::StringShape.new(name: 'ObjectLockMode')
ObjectLockRetainUntilDate = Shapes::TimestampShape.new(name: 'ObjectLockRetainUntilDate', timestampFormat: "iso8601")
ObjectLockRetention = Shapes::StructureShape.new(name: 'ObjectLockRetention')
ObjectLockRetentionMode = Shapes::StringShape.new(name: 'ObjectLockRetentionMode')
ObjectLockRule = Shapes::StructureShape.new(name: 'ObjectLockRule')
ObjectLockToken = Shapes::StringShape.new(name: 'ObjectLockToken')
ObjectNotInActiveTierError = Shapes::StructureShape.new(name: 'ObjectNotInActiveTierError')
ObjectOwnership = Shapes::StringShape.new(name: 'ObjectOwnership')
ObjectPart = Shapes::StructureShape.new(name: 'ObjectPart')
ObjectSize = Shapes::IntegerShape.new(name: 'ObjectSize')
ObjectSizeGreaterThanBytes = Shapes::IntegerShape.new(name: 'ObjectSizeGreaterThanBytes')
ObjectSizeLessThanBytes = Shapes::IntegerShape.new(name: 'ObjectSizeLessThanBytes')
ObjectStorageClass = Shapes::StringShape.new(name: 'ObjectStorageClass')
ObjectVersion = Shapes::StructureShape.new(name: 'ObjectVersion')
ObjectVersionId = Shapes::StringShape.new(name: 'ObjectVersionId')
ObjectVersionList = Shapes::ListShape.new(name: 'ObjectVersionList', flattened: true)
ObjectVersionStorageClass = Shapes::StringShape.new(name: 'ObjectVersionStorageClass')
OptionalObjectAttributes = Shapes::StringShape.new(name: 'OptionalObjectAttributes')
OptionalObjectAttributesList = Shapes::ListShape.new(name: 'OptionalObjectAttributesList')
OutputLocation = Shapes::StructureShape.new(name: 'OutputLocation')
OutputSerialization = Shapes::StructureShape.new(name: 'OutputSerialization')
Owner = Shapes::StructureShape.new(name: 'Owner')
OwnerOverride = Shapes::StringShape.new(name: 'OwnerOverride')
OwnershipControls = Shapes::StructureShape.new(name: 'OwnershipControls')
OwnershipControlsRule = Shapes::StructureShape.new(name: 'OwnershipControlsRule')
OwnershipControlsRules = Shapes::ListShape.new(name: 'OwnershipControlsRules', flattened: true)
ParquetInput = Shapes::StructureShape.new(name: 'ParquetInput')
Part = Shapes::StructureShape.new(name: 'Part')
PartNumber = Shapes::IntegerShape.new(name: 'PartNumber')
PartNumberMarker = Shapes::IntegerShape.new(name: 'PartNumberMarker')
PartitionDateSource = Shapes::StringShape.new(name: 'PartitionDateSource')
PartitionedPrefix = Shapes::StructureShape.new(name: 'PartitionedPrefix')
Parts = Shapes::ListShape.new(name: 'Parts', flattened: true)
PartsCount = Shapes::IntegerShape.new(name: 'PartsCount')
PartsList = Shapes::ListShape.new(name: 'PartsList', flattened: true)
Payer = Shapes::StringShape.new(name: 'Payer')
Permission = Shapes::StringShape.new(name: 'Permission')
Policy = Shapes::StringShape.new(name: 'Policy')
PolicyStatus = Shapes::StructureShape.new(name: 'PolicyStatus')
Prefix = Shapes::StringShape.new(name: 'Prefix')
Priority = Shapes::IntegerShape.new(name: 'Priority')
Progress = Shapes::StructureShape.new(name: 'Progress')
ProgressEvent = Shapes::StructureShape.new(name: 'ProgressEvent')
Protocol = Shapes::StringShape.new(name: 'Protocol')
PublicAccessBlockConfiguration = Shapes::StructureShape.new(name: 'PublicAccessBlockConfiguration')
PutBucketAccelerateConfigurationRequest = Shapes::StructureShape.new(name: 'PutBucketAccelerateConfigurationRequest')
PutBucketAclRequest = Shapes::StructureShape.new(name: 'PutBucketAclRequest')
PutBucketAnalyticsConfigurationRequest = Shapes::StructureShape.new(name: 'PutBucketAnalyticsConfigurationRequest')
PutBucketCorsRequest = Shapes::StructureShape.new(name: 'PutBucketCorsRequest')
PutBucketEncryptionRequest = Shapes::StructureShape.new(name: 'PutBucketEncryptionRequest')
PutBucketIntelligentTieringConfigurationRequest = Shapes::StructureShape.new(name: 'PutBucketIntelligentTieringConfigurationRequest')
PutBucketInventoryConfigurationRequest = Shapes::StructureShape.new(name: 'PutBucketInventoryConfigurationRequest')
PutBucketLifecycleConfigurationRequest = Shapes::StructureShape.new(name: 'PutBucketLifecycleConfigurationRequest')
PutBucketLifecycleRequest = Shapes::StructureShape.new(name: 'PutBucketLifecycleRequest')
PutBucketLoggingRequest = Shapes::StructureShape.new(name: 'PutBucketLoggingRequest')
PutBucketMetricsConfigurationRequest = Shapes::StructureShape.new(name: 'PutBucketMetricsConfigurationRequest')
PutBucketNotificationConfigurationRequest = Shapes::StructureShape.new(name: 'PutBucketNotificationConfigurationRequest')
PutBucketNotificationRequest = Shapes::StructureShape.new(name: 'PutBucketNotificationRequest')
PutBucketOwnershipControlsRequest = Shapes::StructureShape.new(name: 'PutBucketOwnershipControlsRequest')
PutBucketPolicyRequest = Shapes::StructureShape.new(name: 'PutBucketPolicyRequest')
PutBucketReplicationRequest = Shapes::StructureShape.new(name: 'PutBucketReplicationRequest')
PutBucketRequestPaymentRequest = Shapes::StructureShape.new(name: 'PutBucketRequestPaymentRequest')
PutBucketTaggingRequest = Shapes::StructureShape.new(name: 'PutBucketTaggingRequest')
PutBucketVersioningRequest = Shapes::StructureShape.new(name: 'PutBucketVersioningRequest')
PutBucketWebsiteRequest = Shapes::StructureShape.new(name: 'PutBucketWebsiteRequest')
PutObjectAclOutput = Shapes::StructureShape.new(name: 'PutObjectAclOutput')
PutObjectAclRequest = Shapes::StructureShape.new(name: 'PutObjectAclRequest')
PutObjectLegalHoldOutput = Shapes::StructureShape.new(name: 'PutObjectLegalHoldOutput')
PutObjectLegalHoldRequest = Shapes::StructureShape.new(name: 'PutObjectLegalHoldRequest')
PutObjectLockConfigurationOutput = Shapes::StructureShape.new(name: 'PutObjectLockConfigurationOutput')
PutObjectLockConfigurationRequest = Shapes::StructureShape.new(name: 'PutObjectLockConfigurationRequest')
PutObjectOutput = Shapes::StructureShape.new(name: 'PutObjectOutput')
PutObjectRequest = Shapes::StructureShape.new(name: 'PutObjectRequest')
PutObjectRetentionOutput = Shapes::StructureShape.new(name: 'PutObjectRetentionOutput')
PutObjectRetentionRequest = Shapes::StructureShape.new(name: 'PutObjectRetentionRequest')
PutObjectTaggingOutput = Shapes::StructureShape.new(name: 'PutObjectTaggingOutput')
PutObjectTaggingRequest = Shapes::StructureShape.new(name: 'PutObjectTaggingRequest')
PutPublicAccessBlockRequest = Shapes::StructureShape.new(name: 'PutPublicAccessBlockRequest')
QueueArn = Shapes::StringShape.new(name: 'QueueArn')
QueueConfiguration = Shapes::StructureShape.new(name: 'QueueConfiguration')
QueueConfigurationDeprecated = Shapes::StructureShape.new(name: 'QueueConfigurationDeprecated')
QueueConfigurationList = Shapes::ListShape.new(name: 'QueueConfigurationList', flattened: true)
Quiet = Shapes::BooleanShape.new(name: 'Quiet')
QuoteCharacter = Shapes::StringShape.new(name: 'QuoteCharacter')
QuoteEscapeCharacter = Shapes::StringShape.new(name: 'QuoteEscapeCharacter')
QuoteFields = Shapes::StringShape.new(name: 'QuoteFields')
Range = Shapes::StringShape.new(name: 'Range')
RecordDelimiter = Shapes::StringShape.new(name: 'RecordDelimiter')
RecordsEvent = Shapes::StructureShape.new(name: 'RecordsEvent')
Redirect = Shapes::StructureShape.new(name: 'Redirect')
RedirectAllRequestsTo = Shapes::StructureShape.new(name: 'RedirectAllRequestsTo')
Region = Shapes::StringShape.new(name: 'Region')
ReplaceKeyPrefixWith = Shapes::StringShape.new(name: 'ReplaceKeyPrefixWith')
ReplaceKeyWith = Shapes::StringShape.new(name: 'ReplaceKeyWith')
ReplicaKmsKeyID = Shapes::StringShape.new(name: 'ReplicaKmsKeyID')
ReplicaModifications = Shapes::StructureShape.new(name: 'ReplicaModifications')
ReplicaModificationsStatus = Shapes::StringShape.new(name: 'ReplicaModificationsStatus')
ReplicationConfiguration = Shapes::StructureShape.new(name: 'ReplicationConfiguration')
ReplicationRule = Shapes::StructureShape.new(name: 'ReplicationRule')
ReplicationRuleAndOperator = Shapes::StructureShape.new(name: 'ReplicationRuleAndOperator')
ReplicationRuleFilter = Shapes::StructureShape.new(name: 'ReplicationRuleFilter')
ReplicationRuleStatus = Shapes::StringShape.new(name: 'ReplicationRuleStatus')
ReplicationRules = Shapes::ListShape.new(name: 'ReplicationRules', flattened: true)
ReplicationStatus = Shapes::StringShape.new(name: 'ReplicationStatus')
ReplicationTime = Shapes::StructureShape.new(name: 'ReplicationTime')
ReplicationTimeStatus = Shapes::StringShape.new(name: 'ReplicationTimeStatus')
ReplicationTimeValue = Shapes::StructureShape.new(name: 'ReplicationTimeValue')
RequestCharged = Shapes::StringShape.new(name: 'RequestCharged')
RequestPayer = Shapes::StringShape.new(name: 'RequestPayer')
RequestPaymentConfiguration = Shapes::StructureShape.new(name: 'RequestPaymentConfiguration')
RequestProgress = Shapes::StructureShape.new(name: 'RequestProgress')
RequestRoute = Shapes::StringShape.new(name: 'RequestRoute')
RequestToken = Shapes::StringShape.new(name: 'RequestToken')
ResponseCacheControl = Shapes::StringShape.new(name: 'ResponseCacheControl')
ResponseContentDisposition = Shapes::StringShape.new(name: 'ResponseContentDisposition')
ResponseContentEncoding = Shapes::StringShape.new(name: 'ResponseContentEncoding')
ResponseContentLanguage = Shapes::StringShape.new(name: 'ResponseContentLanguage')
ResponseContentType = Shapes::StringShape.new(name: 'ResponseContentType')
ResponseExpires = Shapes::TimestampShape.new(name: 'ResponseExpires', timestampFormat: "rfc822")
Restore = Shapes::StringShape.new(name: 'Restore')
RestoreExpiryDate = Shapes::TimestampShape.new(name: 'RestoreExpiryDate')
RestoreObjectOutput = Shapes::StructureShape.new(name: 'RestoreObjectOutput')
RestoreObjectRequest = Shapes::StructureShape.new(name: 'RestoreObjectRequest')
RestoreOutputPath = Shapes::StringShape.new(name: 'RestoreOutputPath')
RestoreRequest = Shapes::StructureShape.new(name: 'RestoreRequest')
RestoreRequestType = Shapes::StringShape.new(name: 'RestoreRequestType')
RestoreStatus = Shapes::StructureShape.new(name: 'RestoreStatus')
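# Timestamp shapes use the protocol's default wire encoding unless a shape
# pins one explicitly, as with `timestampFormat: "rfc822"` on ResponseExpires
# above and `timestampFormat: "iso8601"` on Date and ObjectLockRetainUntilDate
# earlier in this file.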
Role = Shapes::StringShape.new(name: 'Role')
RoutingRule = Shapes::StructureShape.new(name: 'RoutingRule')
RoutingRules = Shapes::ListShape.new(name: 'RoutingRules')
Rule = Shapes::StructureShape.new(name: 'Rule')
Rules = Shapes::ListShape.new(name: 'Rules', flattened: true)
S3KeyFilter = Shapes::StructureShape.new(name: 'S3KeyFilter')
S3Location = Shapes::StructureShape.new(name: 'S3Location')
SSECustomerAlgorithm = Shapes::StringShape.new(name: 'SSECustomerAlgorithm')
SSECustomerKey = Shapes::StringShape.new(name: 'SSECustomerKey')
SSECustomerKeyMD5 = Shapes::StringShape.new(name: 'SSECustomerKeyMD5')
SSEKMS = Shapes::StructureShape.new(name: 'SSEKMS')
SSEKMSEncryptionContext = Shapes::StringShape.new(name: 'SSEKMSEncryptionContext')
SSEKMSKeyId = Shapes::StringShape.new(name: 'SSEKMSKeyId')
SSES3 = Shapes::StructureShape.new(name: 'SSES3')
ScanRange = Shapes::StructureShape.new(name: 'ScanRange')
SelectObjectContentEventStream = Shapes::StructureShape.new(name: 'SelectObjectContentEventStream')
SelectObjectContentOutput = Shapes::StructureShape.new(name: 'SelectObjectContentOutput')
SelectObjectContentRequest = Shapes::StructureShape.new(name: 'SelectObjectContentRequest')
SelectParameters = Shapes::StructureShape.new(name: 'SelectParameters')
ServerSideEncryption = Shapes::StringShape.new(name: 'ServerSideEncryption')
ServerSideEncryptionByDefault = Shapes::StructureShape.new(name: 'ServerSideEncryptionByDefault')
ServerSideEncryptionConfiguration = Shapes::StructureShape.new(name: 'ServerSideEncryptionConfiguration')
ServerSideEncryptionRule = Shapes::StructureShape.new(name: 'ServerSideEncryptionRule')
ServerSideEncryptionRules = Shapes::ListShape.new(name: 'ServerSideEncryptionRules', flattened: true)
SessionCredentialValue = Shapes::StringShape.new(name: 'SessionCredentialValue')
SessionCredentials = Shapes::StructureShape.new(name: 'SessionCredentials')
SessionExpiration = Shapes::TimestampShape.new(name: 'SessionExpiration')
SessionMode = Shapes::StringShape.new(name: 'SessionMode')
Setting = Shapes::BooleanShape.new(name: 'Setting')
SimplePrefix = Shapes::StructureShape.new(name: 'SimplePrefix')
Size = Shapes::IntegerShape.new(name: 'Size')
SkipValidation = Shapes::BooleanShape.new(name: 'SkipValidation')
SourceSelectionCriteria = Shapes::StructureShape.new(name: 'SourceSelectionCriteria')
SseKmsEncryptedObjects = Shapes::StructureShape.new(name: 'SseKmsEncryptedObjects')
SseKmsEncryptedObjectsStatus = Shapes::StringShape.new(name: 'SseKmsEncryptedObjectsStatus')
Start = Shapes::IntegerShape.new(name: 'Start')
StartAfter = Shapes::StringShape.new(name: 'StartAfter')
Stats = Shapes::StructureShape.new(name: 'Stats')
StatsEvent = Shapes::StructureShape.new(name: 'StatsEvent')
StorageClass = Shapes::StringShape.new(name: 'StorageClass')
StorageClassAnalysis = Shapes::StructureShape.new(name: 'StorageClassAnalysis')
StorageClassAnalysisDataExport = Shapes::StructureShape.new(name: 'StorageClassAnalysisDataExport')
StorageClassAnalysisSchemaVersion = Shapes::StringShape.new(name: 'StorageClassAnalysisSchemaVersion')
Suffix = Shapes::StringShape.new(name: 'Suffix')
Tag = Shapes::StructureShape.new(name: 'Tag')
TagCount = Shapes::IntegerShape.new(name: 'TagCount')
TagSet = Shapes::ListShape.new(name: 'TagSet')
Tagging = Shapes::StructureShape.new(name: 'Tagging')
TaggingDirective = Shapes::StringShape.new(name: 'TaggingDirective')
TaggingHeader = Shapes::StringShape.new(name: 'TaggingHeader')
TargetBucket = Shapes::StringShape.new(name: 'TargetBucket')
TargetGrant = Shapes::StructureShape.new(name: 'TargetGrant')
TargetGrants = Shapes::ListShape.new(name: 'TargetGrants')
TargetObjectKeyFormat = Shapes::StructureShape.new(name: 'TargetObjectKeyFormat')
TargetPrefix = Shapes::StringShape.new(name: 'TargetPrefix')
Tier = Shapes::StringShape.new(name: 'Tier')
Tiering = Shapes::StructureShape.new(name: 'Tiering')
TieringList = Shapes::ListShape.new(name: 'TieringList', flattened: true)
Token = Shapes::StringShape.new(name: 'Token')
TopicArn = Shapes::StringShape.new(name: 'TopicArn')
TopicConfiguration = Shapes::StructureShape.new(name: 'TopicConfiguration')
TopicConfigurationDeprecated = Shapes::StructureShape.new(name: 'TopicConfigurationDeprecated')
TopicConfigurationList = Shapes::ListShape.new(name: 'TopicConfigurationList', flattened: true)
Transition = Shapes::StructureShape.new(name: 'Transition')
TransitionList = Shapes::ListShape.new(name: 'TransitionList', flattened: true)
TransitionStorageClass = Shapes::StringShape.new(name: 'TransitionStorageClass')
Type = Shapes::StringShape.new(name: 'Type')
URI = Shapes::StringShape.new(name: 'URI')
UploadIdMarker = Shapes::StringShape.new(name: 'UploadIdMarker')
UploadPartCopyOutput = Shapes::StructureShape.new(name: 'UploadPartCopyOutput')
UploadPartCopyRequest = Shapes::StructureShape.new(name: 'UploadPartCopyRequest')
UploadPartOutput = Shapes::StructureShape.new(name: 'UploadPartOutput')
UploadPartRequest = Shapes::StructureShape.new(name: 'UploadPartRequest')
UserMetadata = Shapes::ListShape.new(name: 'UserMetadata')
Value = Shapes::StringShape.new(name: 'Value')
VersionCount = Shapes::IntegerShape.new(name: 'VersionCount')
VersionIdMarker = Shapes::StringShape.new(name: 'VersionIdMarker')
VersioningConfiguration = Shapes::StructureShape.new(name: 'VersioningConfiguration')
WebsiteConfiguration = Shapes::StructureShape.new(name: 'WebsiteConfiguration')
WebsiteRedirectLocation = Shapes::StringShape.new(name: 'WebsiteRedirectLocation')
WriteGetObjectResponseRequest = Shapes::StructureShape.new(name: 'WriteGetObjectResponseRequest')
Years = Shapes::IntegerShape.new(name: 'Years')
AbortIncompleteMultipartUpload.add_member(:days_after_initiation, Shapes::ShapeRef.new(shape: DaysAfterInitiation, location_name: "DaysAfterInitiation"))
AbortIncompleteMultipartUpload.struct_class = Types::AbortIncompleteMultipartUpload
AbortMultipartUploadOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
AbortMultipartUploadOutput.struct_class = Types::AbortMultipartUploadOutput
AbortMultipartUploadRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
AbortMultipartUploadRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key", metadata: {"contextParam"=>{"name"=>"Key"}}))
AbortMultipartUploadRequest.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, required: true, location: "querystring", location_name: "uploadId"))
AbortMultipartUploadRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
AbortMultipartUploadRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
AbortMultipartUploadRequest.struct_class = Types::AbortMultipartUploadRequest
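# The ShapeRefs wired above determine where each request member is
# serialized: `location: "uri"` members fill URI placeholders, "querystring"
# members become query parameters, and "header"/"headers" members become HTTP
# headers. As an illustration (hypothetical bucket, key, and upload id), a
# call such as:
#
#   s3 = Aws::S3::Client.new
#   s3.abort_multipart_upload(bucket: 'example-bucket', key: 'example-key', upload_id: 'example-id')
#
# sends :bucket and :key in the request URI and :upload_id as the `uploadId`
# querystring parameter, while :request_payer and :expected_bucket_owner, if
# given, travel as x-amz-* headers, per AbortMultipartUploadRequest above.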
AccelerateConfiguration.add_member(:status, Shapes::ShapeRef.new(shape: BucketAccelerateStatus, location_name: "Status"))
AccelerateConfiguration.struct_class = Types::AccelerateConfiguration
AccessControlPolicy.add_member(:grants, Shapes::ShapeRef.new(shape: Grants, location_name: "AccessControlList"))
AccessControlPolicy.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner"))
AccessControlPolicy.struct_class = Types::AccessControlPolicy
AccessControlTranslation.add_member(:owner, Shapes::ShapeRef.new(shape: OwnerOverride, required: true, location_name: "Owner"))
AccessControlTranslation.struct_class = Types::AccessControlTranslation
AllowedHeaders.member = Shapes::ShapeRef.new(shape: AllowedHeader)
AllowedMethods.member = Shapes::ShapeRef.new(shape: AllowedMethod)
AllowedOrigins.member = Shapes::ShapeRef.new(shape: AllowedOrigin)
AnalyticsAndOperator.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
AnalyticsAndOperator.add_member(:tags, Shapes::ShapeRef.new(shape: TagSet, location_name: "Tag", metadata: {"flattened"=>true}))
AnalyticsAndOperator.struct_class = Types::AnalyticsAndOperator
AnalyticsConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: AnalyticsId, required: true, location_name: "Id"))
AnalyticsConfiguration.add_member(:filter, Shapes::ShapeRef.new(shape: AnalyticsFilter, location_name: "Filter"))
AnalyticsConfiguration.add_member(:storage_class_analysis, Shapes::ShapeRef.new(shape: StorageClassAnalysis, required: true, location_name: "StorageClassAnalysis"))
AnalyticsConfiguration.struct_class = Types::AnalyticsConfiguration
AnalyticsConfigurationList.member = Shapes::ShapeRef.new(shape: AnalyticsConfiguration)
AnalyticsExportDestination.add_member(:s3_bucket_destination, Shapes::ShapeRef.new(shape: AnalyticsS3BucketDestination, required: true, location_name: "S3BucketDestination"))
AnalyticsExportDestination.struct_class = Types::AnalyticsExportDestination
AnalyticsFilter.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
AnalyticsFilter.add_member(:tag, Shapes::ShapeRef.new(shape: Tag, location_name: "Tag"))
AnalyticsFilter.add_member(:and, Shapes::ShapeRef.new(shape: AnalyticsAndOperator, location_name: "And"))
AnalyticsFilter.struct_class = Types::AnalyticsFilter
AnalyticsS3BucketDestination.add_member(:format, Shapes::ShapeRef.new(shape: AnalyticsS3ExportFileFormat, required: true, location_name: "Format"))
AnalyticsS3BucketDestination.add_member(:bucket_account_id, Shapes::ShapeRef.new(shape: AccountId, location_name: "BucketAccountId"))
AnalyticsS3BucketDestination.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location_name: "Bucket"))
AnalyticsS3BucketDestination.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
AnalyticsS3BucketDestination.struct_class = Types::AnalyticsS3BucketDestination
Bucket.add_member(:name, Shapes::ShapeRef.new(shape: BucketName, location_name: "Name"))
Bucket.add_member(:creation_date, Shapes::ShapeRef.new(shape: CreationDate, location_name: "CreationDate"))
Bucket.struct_class = Types::Bucket
BucketAlreadyExists.struct_class = Types::BucketAlreadyExists
BucketAlreadyOwnedByYou.struct_class = Types::BucketAlreadyOwnedByYou
BucketInfo.add_member(:data_redundancy, Shapes::ShapeRef.new(shape: DataRedundancy, location_name: "DataRedundancy"))
BucketInfo.add_member(:type, Shapes::ShapeRef.new(shape: BucketType, location_name: "Type"))
BucketInfo.struct_class = Types::BucketInfo
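# Each `.struct_class =` assignment binds a structure shape to the plain Ruby
# struct under Aws::S3::Types that client code actually sees; for example,
# buckets in a list_buckets response surface as Types::Bucket instances built
# from the Bucket shape wired above.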
BucketLifecycleConfiguration.add_member(:rules, Shapes::ShapeRef.new(shape: LifecycleRules, required: true, location_name: "Rule"))
BucketLifecycleConfiguration.struct_class = Types::BucketLifecycleConfiguration
BucketLoggingStatus.add_member(:logging_enabled, Shapes::ShapeRef.new(shape: LoggingEnabled, location_name: "LoggingEnabled"))
BucketLoggingStatus.struct_class = Types::BucketLoggingStatus
Buckets.member = Shapes::ShapeRef.new(shape: Bucket, location_name: "Bucket")
CORSConfiguration.add_member(:cors_rules, Shapes::ShapeRef.new(shape: CORSRules, required: true, location_name: "CORSRule"))
CORSConfiguration.struct_class = Types::CORSConfiguration
CORSRule.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID"))
CORSRule.add_member(:allowed_headers, Shapes::ShapeRef.new(shape: AllowedHeaders, location_name: "AllowedHeader"))
CORSRule.add_member(:allowed_methods, Shapes::ShapeRef.new(shape: AllowedMethods, required: true, location_name: "AllowedMethod"))
CORSRule.add_member(:allowed_origins, Shapes::ShapeRef.new(shape: AllowedOrigins, required: true, location_name: "AllowedOrigin"))
CORSRule.add_member(:expose_headers, Shapes::ShapeRef.new(shape: ExposeHeaders, location_name: "ExposeHeader"))
CORSRule.add_member(:max_age_seconds, Shapes::ShapeRef.new(shape: MaxAgeSeconds, location_name: "MaxAgeSeconds"))
CORSRule.struct_class = Types::CORSRule
CORSRules.member = Shapes::ShapeRef.new(shape: CORSRule)
CSVInput.add_member(:file_header_info, Shapes::ShapeRef.new(shape: FileHeaderInfo, location_name: "FileHeaderInfo"))
CSVInput.add_member(:comments, Shapes::ShapeRef.new(shape: Comments, location_name: "Comments"))
CSVInput.add_member(:quote_escape_character, Shapes::ShapeRef.new(shape: QuoteEscapeCharacter, location_name: "QuoteEscapeCharacter"))
CSVInput.add_member(:record_delimiter, Shapes::ShapeRef.new(shape: RecordDelimiter, location_name: "RecordDelimiter"))
CSVInput.add_member(:field_delimiter, Shapes::ShapeRef.new(shape: FieldDelimiter, location_name: "FieldDelimiter"))
CSVInput.add_member(:quote_character, Shapes::ShapeRef.new(shape: QuoteCharacter, location_name: "QuoteCharacter"))
CSVInput.add_member(:allow_quoted_record_delimiter, Shapes::ShapeRef.new(shape: AllowQuotedRecordDelimiter, location_name: "AllowQuotedRecordDelimiter"))
CSVInput.struct_class = Types::CSVInput
CSVOutput.add_member(:quote_fields, Shapes::ShapeRef.new(shape: QuoteFields, location_name: "QuoteFields"))
CSVOutput.add_member(:quote_escape_character, Shapes::ShapeRef.new(shape: QuoteEscapeCharacter, location_name: "QuoteEscapeCharacter"))
CSVOutput.add_member(:record_delimiter, Shapes::ShapeRef.new(shape: RecordDelimiter, location_name: "RecordDelimiter"))
CSVOutput.add_member(:field_delimiter, Shapes::ShapeRef.new(shape: FieldDelimiter, location_name: "FieldDelimiter"))
CSVOutput.add_member(:quote_character, Shapes::ShapeRef.new(shape: QuoteCharacter, location_name: "QuoteCharacter"))
CSVOutput.struct_class = Types::CSVOutput
Checksum.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location_name: "ChecksumCRC32"))
Checksum.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location_name: "ChecksumCRC32C"))
Checksum.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location_name: "ChecksumSHA1"))
Checksum.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location_name: "ChecksumSHA256"))
Checksum.struct_class = Types::Checksum
ChecksumAlgorithmList.member = Shapes::ShapeRef.new(shape: ChecksumAlgorithm)
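# For list shapes, `.member =` declares the element shape (Buckets holds
# Bucket entries, CORSRules holds CORSRule entries, and so on). In the wiring
# below, `[:payload]` / `[:payload_member]` entries mark the member that
# forms the HTTP body; CompleteMultipartUploadRequest, for instance, sends
# its :multipart_upload member as an XML document shaped roughly like
# (hypothetical part values):
#
#   <CompleteMultipartUpload>
#     <Part><ETag>"etag-1"</ETag><PartNumber>1</PartNumber></Part>
#   </CompleteMultipartUpload>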
CloudFunctionConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: NotificationId, location_name: "Id"))
CloudFunctionConfiguration.add_member(:event, Shapes::ShapeRef.new(shape: Event, deprecated: true, location_name: "Event"))
CloudFunctionConfiguration.add_member(:events, Shapes::ShapeRef.new(shape: EventList, location_name: "Event"))
CloudFunctionConfiguration.add_member(:cloud_function, Shapes::ShapeRef.new(shape: CloudFunction, location_name: "CloudFunction"))
CloudFunctionConfiguration.add_member(:invocation_role, Shapes::ShapeRef.new(shape: CloudFunctionInvocationRole, location_name: "InvocationRole"))
CloudFunctionConfiguration.struct_class = Types::CloudFunctionConfiguration
CommonPrefix.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
CommonPrefix.struct_class = Types::CommonPrefix
CommonPrefixList.member = Shapes::ShapeRef.new(shape: CommonPrefix)
CompleteMultipartUploadOutput.add_member(:location, Shapes::ShapeRef.new(shape: Location, location_name: "Location"))
CompleteMultipartUploadOutput.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, location_name: "Bucket"))
CompleteMultipartUploadOutput.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key"))
CompleteMultipartUploadOutput.add_member(:expiration, Shapes::ShapeRef.new(shape: Expiration, location: "header", location_name: "x-amz-expiration"))
CompleteMultipartUploadOutput.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag"))
CompleteMultipartUploadOutput.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location_name: "ChecksumCRC32"))
CompleteMultipartUploadOutput.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location_name: "ChecksumCRC32C"))
CompleteMultipartUploadOutput.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location_name: "ChecksumSHA1"))
CompleteMultipartUploadOutput.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location_name: "ChecksumSHA256"))
CompleteMultipartUploadOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption"))
CompleteMultipartUploadOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id"))
CompleteMultipartUploadOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id"))
CompleteMultipartUploadOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled"))
CompleteMultipartUploadOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
CompleteMultipartUploadOutput.struct_class = Types::CompleteMultipartUploadOutput
CompleteMultipartUploadRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
CompleteMultipartUploadRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key", metadata: {"contextParam"=>{"name"=>"Key"}}))
CompleteMultipartUploadRequest.add_member(:multipart_upload, Shapes::ShapeRef.new(shape: CompletedMultipartUpload, location_name: "CompleteMultipartUpload", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
"CompleteMultipartUpload", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) CompleteMultipartUploadRequest.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, required: true, location: "querystring", location_name: "uploadId")) CompleteMultipartUploadRequest.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-checksum-crc32")) CompleteMultipartUploadRequest.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-checksum-crc32c")) CompleteMultipartUploadRequest.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-checksum-sha1")) CompleteMultipartUploadRequest.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-checksum-sha256")) CompleteMultipartUploadRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) CompleteMultipartUploadRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) CompleteMultipartUploadRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) CompleteMultipartUploadRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key")) CompleteMultipartUploadRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) CompleteMultipartUploadRequest.struct_class = Types::CompleteMultipartUploadRequest CompleteMultipartUploadRequest[:payload] = :multipart_upload CompleteMultipartUploadRequest[:payload_member] = CompleteMultipartUploadRequest.member(:multipart_upload) CompletedMultipartUpload.add_member(:parts, Shapes::ShapeRef.new(shape: CompletedPartList, location_name: "Part")) CompletedMultipartUpload.struct_class = Types::CompletedMultipartUpload CompletedPart.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag")) CompletedPart.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location_name: "ChecksumCRC32")) CompletedPart.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location_name: "ChecksumCRC32C")) CompletedPart.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location_name: "ChecksumSHA1")) CompletedPart.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location_name: "ChecksumSHA256")) CompletedPart.add_member(:part_number, Shapes::ShapeRef.new(shape: PartNumber, location_name: "PartNumber")) CompletedPart.struct_class = Types::CompletedPart CompletedPartList.member = Shapes::ShapeRef.new(shape: CompletedPart) Condition.add_member(:http_error_code_returned_equals, Shapes::ShapeRef.new(shape: HttpErrorCodeReturnedEquals, location_name: "HttpErrorCodeReturnedEquals")) Condition.add_member(:key_prefix_equals, Shapes::ShapeRef.new(shape: KeyPrefixEquals, location_name: "KeyPrefixEquals")) Condition.struct_class = Types::Condition ContinuationEvent.struct_class = Types::ContinuationEvent CopyObjectOutput.add_member(:copy_object_result, Shapes::ShapeRef.new(shape: CopyObjectResult, 
location_name: "CopyObjectResult")) CopyObjectOutput.add_member(:expiration, Shapes::ShapeRef.new(shape: Expiration, location: "header", location_name: "x-amz-expiration")) CopyObjectOutput.add_member(:copy_source_version_id, Shapes::ShapeRef.new(shape: CopySourceVersionId, location: "header", location_name: "x-amz-copy-source-version-id")) CopyObjectOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id")) CopyObjectOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) CopyObjectOutput.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) CopyObjectOutput.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) CopyObjectOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) CopyObjectOutput.add_member(:ssekms_encryption_context, Shapes::ShapeRef.new(shape: SSEKMSEncryptionContext, location: "header", location_name: "x-amz-server-side-encryption-context")) CopyObjectOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled")) CopyObjectOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) CopyObjectOutput.struct_class = Types::CopyObjectOutput CopyObjectOutput[:payload] = :copy_object_result CopyObjectOutput[:payload_member] = CopyObjectOutput.member(:copy_object_result) CopyObjectRequest.add_member(:acl, Shapes::ShapeRef.new(shape: ObjectCannedACL, location: "header", location_name: "x-amz-acl")) CopyObjectRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) CopyObjectRequest.add_member(:cache_control, Shapes::ShapeRef.new(shape: CacheControl, location: "header", location_name: "Cache-Control")) CopyObjectRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-checksum-algorithm")) CopyObjectRequest.add_member(:content_disposition, Shapes::ShapeRef.new(shape: ContentDisposition, location: "header", location_name: "Content-Disposition")) CopyObjectRequest.add_member(:content_encoding, Shapes::ShapeRef.new(shape: ContentEncoding, location: "header", location_name: "Content-Encoding")) CopyObjectRequest.add_member(:content_language, Shapes::ShapeRef.new(shape: ContentLanguage, location: "header", location_name: "Content-Language")) CopyObjectRequest.add_member(:content_type, Shapes::ShapeRef.new(shape: ContentType, location: "header", location_name: "Content-Type")) CopyObjectRequest.add_member(:copy_source, Shapes::ShapeRef.new(shape: CopySource, required: true, location: "header", location_name: "x-amz-copy-source")) CopyObjectRequest.add_member(:copy_source_if_match, Shapes::ShapeRef.new(shape: CopySourceIfMatch, location: "header", location_name: "x-amz-copy-source-if-match")) CopyObjectRequest.add_member(:copy_source_if_modified_since, Shapes::ShapeRef.new(shape: CopySourceIfModifiedSince, 
location: "header", location_name: "x-amz-copy-source-if-modified-since")) CopyObjectRequest.add_member(:copy_source_if_none_match, Shapes::ShapeRef.new(shape: CopySourceIfNoneMatch, location: "header", location_name: "x-amz-copy-source-if-none-match")) CopyObjectRequest.add_member(:copy_source_if_unmodified_since, Shapes::ShapeRef.new(shape: CopySourceIfUnmodifiedSince, location: "header", location_name: "x-amz-copy-source-if-unmodified-since")) CopyObjectRequest.add_member(:expires, Shapes::ShapeRef.new(shape: Expires, location: "header", location_name: "Expires")) CopyObjectRequest.add_member(:grant_full_control, Shapes::ShapeRef.new(shape: GrantFullControl, location: "header", location_name: "x-amz-grant-full-control")) CopyObjectRequest.add_member(:grant_read, Shapes::ShapeRef.new(shape: GrantRead, location: "header", location_name: "x-amz-grant-read")) CopyObjectRequest.add_member(:grant_read_acp, Shapes::ShapeRef.new(shape: GrantReadACP, location: "header", location_name: "x-amz-grant-read-acp")) CopyObjectRequest.add_member(:grant_write_acp, Shapes::ShapeRef.new(shape: GrantWriteACP, location: "header", location_name: "x-amz-grant-write-acp")) CopyObjectRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) CopyObjectRequest.add_member(:metadata, Shapes::ShapeRef.new(shape: Metadata, location: "headers", location_name: "x-amz-meta-")) CopyObjectRequest.add_member(:metadata_directive, Shapes::ShapeRef.new(shape: MetadataDirective, location: "header", location_name: "x-amz-metadata-directive")) CopyObjectRequest.add_member(:tagging_directive, Shapes::ShapeRef.new(shape: TaggingDirective, location: "header", location_name: "x-amz-tagging-directive")) CopyObjectRequest.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) CopyObjectRequest.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location: "header", location_name: "x-amz-storage-class")) CopyObjectRequest.add_member(:website_redirect_location, Shapes::ShapeRef.new(shape: WebsiteRedirectLocation, location: "header", location_name: "x-amz-website-redirect-location")) CopyObjectRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) CopyObjectRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key")) CopyObjectRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) CopyObjectRequest.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) CopyObjectRequest.add_member(:ssekms_encryption_context, Shapes::ShapeRef.new(shape: SSEKMSEncryptionContext, location: "header", location_name: "x-amz-server-side-encryption-context")) CopyObjectRequest.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled")) CopyObjectRequest.add_member(:copy_source_sse_customer_algorithm, Shapes::ShapeRef.new(shape: CopySourceSSECustomerAlgorithm, location: "header", location_name: 
"x-amz-copy-source-server-side-encryption-customer-algorithm")) CopyObjectRequest.add_member(:copy_source_sse_customer_key, Shapes::ShapeRef.new(shape: CopySourceSSECustomerKey, location: "header", location_name: "x-amz-copy-source-server-side-encryption-customer-key")) CopyObjectRequest.add_member(:copy_source_sse_customer_key_md5, Shapes::ShapeRef.new(shape: CopySourceSSECustomerKeyMD5, location: "header", location_name: "x-amz-copy-source-server-side-encryption-customer-key-MD5")) CopyObjectRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) CopyObjectRequest.add_member(:tagging, Shapes::ShapeRef.new(shape: TaggingHeader, location: "header", location_name: "x-amz-tagging")) CopyObjectRequest.add_member(:object_lock_mode, Shapes::ShapeRef.new(shape: ObjectLockMode, location: "header", location_name: "x-amz-object-lock-mode")) CopyObjectRequest.add_member(:object_lock_retain_until_date, Shapes::ShapeRef.new(shape: ObjectLockRetainUntilDate, location: "header", location_name: "x-amz-object-lock-retain-until-date")) CopyObjectRequest.add_member(:object_lock_legal_hold_status, Shapes::ShapeRef.new(shape: ObjectLockLegalHoldStatus, location: "header", location_name: "x-amz-object-lock-legal-hold")) CopyObjectRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) CopyObjectRequest.add_member(:expected_source_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-source-expected-bucket-owner")) CopyObjectRequest.struct_class = Types::CopyObjectRequest CopyObjectResult.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag")) CopyObjectResult.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location_name: "LastModified")) CopyObjectResult.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location_name: "ChecksumCRC32")) CopyObjectResult.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location_name: "ChecksumCRC32C")) CopyObjectResult.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location_name: "ChecksumSHA1")) CopyObjectResult.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location_name: "ChecksumSHA256")) CopyObjectResult.struct_class = Types::CopyObjectResult CopyPartResult.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag")) CopyPartResult.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location_name: "LastModified")) CopyPartResult.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location_name: "ChecksumCRC32")) CopyPartResult.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location_name: "ChecksumCRC32C")) CopyPartResult.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location_name: "ChecksumSHA1")) CopyPartResult.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location_name: "ChecksumSHA256")) CopyPartResult.struct_class = Types::CopyPartResult CreateBucketConfiguration.add_member(:location_constraint, Shapes::ShapeRef.new(shape: BucketLocationConstraint, location_name: "LocationConstraint")) CreateBucketConfiguration.add_member(:location, Shapes::ShapeRef.new(shape: LocationInfo, location_name: "Location")) CreateBucketConfiguration.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketInfo, location_name: 
"Bucket")) CreateBucketConfiguration.struct_class = Types::CreateBucketConfiguration CreateBucketOutput.add_member(:location, Shapes::ShapeRef.new(shape: Location, location: "header", location_name: "Location")) CreateBucketOutput.struct_class = Types::CreateBucketOutput CreateBucketRequest.add_member(:acl, Shapes::ShapeRef.new(shape: BucketCannedACL, location: "header", location_name: "x-amz-acl")) CreateBucketRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) CreateBucketRequest.add_member(:create_bucket_configuration, Shapes::ShapeRef.new(shape: CreateBucketConfiguration, location_name: "CreateBucketConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}})) CreateBucketRequest.add_member(:grant_full_control, Shapes::ShapeRef.new(shape: GrantFullControl, location: "header", location_name: "x-amz-grant-full-control")) CreateBucketRequest.add_member(:grant_read, Shapes::ShapeRef.new(shape: GrantRead, location: "header", location_name: "x-amz-grant-read")) CreateBucketRequest.add_member(:grant_read_acp, Shapes::ShapeRef.new(shape: GrantReadACP, location: "header", location_name: "x-amz-grant-read-acp")) CreateBucketRequest.add_member(:grant_write, Shapes::ShapeRef.new(shape: GrantWrite, location: "header", location_name: "x-amz-grant-write")) CreateBucketRequest.add_member(:grant_write_acp, Shapes::ShapeRef.new(shape: GrantWriteACP, location: "header", location_name: "x-amz-grant-write-acp")) CreateBucketRequest.add_member(:object_lock_enabled_for_bucket, Shapes::ShapeRef.new(shape: ObjectLockEnabledForBucket, location: "header", location_name: "x-amz-bucket-object-lock-enabled")) CreateBucketRequest.add_member(:object_ownership, Shapes::ShapeRef.new(shape: ObjectOwnership, location: "header", location_name: "x-amz-object-ownership")) CreateBucketRequest.struct_class = Types::CreateBucketRequest CreateBucketRequest[:payload] = :create_bucket_configuration CreateBucketRequest[:payload_member] = CreateBucketRequest.member(:create_bucket_configuration) CreateMultipartUploadOutput.add_member(:abort_date, Shapes::ShapeRef.new(shape: AbortDate, location: "header", location_name: "x-amz-abort-date")) CreateMultipartUploadOutput.add_member(:abort_rule_id, Shapes::ShapeRef.new(shape: AbortRuleId, location: "header", location_name: "x-amz-abort-rule-id")) CreateMultipartUploadOutput.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, location_name: "Bucket")) CreateMultipartUploadOutput.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key")) CreateMultipartUploadOutput.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, location_name: "UploadId")) CreateMultipartUploadOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) CreateMultipartUploadOutput.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) CreateMultipartUploadOutput.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) CreateMultipartUploadOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) 
    CreateMultipartUploadOutput.add_member(:ssekms_encryption_context, Shapes::ShapeRef.new(shape: SSEKMSEncryptionContext, location: "header", location_name: "x-amz-server-side-encryption-context"))
    CreateMultipartUploadOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled"))
    CreateMultipartUploadOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
    CreateMultipartUploadOutput.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-checksum-algorithm"))
    CreateMultipartUploadOutput.struct_class = Types::CreateMultipartUploadOutput
    CreateMultipartUploadRequest.add_member(:acl, Shapes::ShapeRef.new(shape: ObjectCannedACL, location: "header", location_name: "x-amz-acl"))
    CreateMultipartUploadRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    CreateMultipartUploadRequest.add_member(:cache_control, Shapes::ShapeRef.new(shape: CacheControl, location: "header", location_name: "Cache-Control"))
    CreateMultipartUploadRequest.add_member(:content_disposition, Shapes::ShapeRef.new(shape: ContentDisposition, location: "header", location_name: "Content-Disposition"))
    CreateMultipartUploadRequest.add_member(:content_encoding, Shapes::ShapeRef.new(shape: ContentEncoding, location: "header", location_name: "Content-Encoding"))
    CreateMultipartUploadRequest.add_member(:content_language, Shapes::ShapeRef.new(shape: ContentLanguage, location: "header", location_name: "Content-Language"))
    CreateMultipartUploadRequest.add_member(:content_type, Shapes::ShapeRef.new(shape: ContentType, location: "header", location_name: "Content-Type"))
    CreateMultipartUploadRequest.add_member(:expires, Shapes::ShapeRef.new(shape: Expires, location: "header", location_name: "Expires"))
    CreateMultipartUploadRequest.add_member(:grant_full_control, Shapes::ShapeRef.new(shape: GrantFullControl, location: "header", location_name: "x-amz-grant-full-control"))
    CreateMultipartUploadRequest.add_member(:grant_read, Shapes::ShapeRef.new(shape: GrantRead, location: "header", location_name: "x-amz-grant-read"))
    CreateMultipartUploadRequest.add_member(:grant_read_acp, Shapes::ShapeRef.new(shape: GrantReadACP, location: "header", location_name: "x-amz-grant-read-acp"))
    CreateMultipartUploadRequest.add_member(:grant_write_acp, Shapes::ShapeRef.new(shape: GrantWriteACP, location: "header", location_name: "x-amz-grant-write-acp"))
    CreateMultipartUploadRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key", metadata: {"contextParam"=>{"name"=>"Key"}}))
    CreateMultipartUploadRequest.add_member(:metadata, Shapes::ShapeRef.new(shape: Metadata, location: "headers", location_name: "x-amz-meta-"))
    CreateMultipartUploadRequest.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption"))
    CreateMultipartUploadRequest.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location: "header", location_name: "x-amz-storage-class"))
    CreateMultipartUploadRequest.add_member(:website_redirect_location, Shapes::ShapeRef.new(shape: WebsiteRedirectLocation, location: "header", location_name: "x-amz-website-redirect-location"))
    CreateMultipartUploadRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm"))
    CreateMultipartUploadRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key"))
    CreateMultipartUploadRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5"))
    CreateMultipartUploadRequest.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id"))
    CreateMultipartUploadRequest.add_member(:ssekms_encryption_context, Shapes::ShapeRef.new(shape: SSEKMSEncryptionContext, location: "header", location_name: "x-amz-server-side-encryption-context"))
    CreateMultipartUploadRequest.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled"))
    CreateMultipartUploadRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
    CreateMultipartUploadRequest.add_member(:tagging, Shapes::ShapeRef.new(shape: TaggingHeader, location: "header", location_name: "x-amz-tagging"))
    CreateMultipartUploadRequest.add_member(:object_lock_mode, Shapes::ShapeRef.new(shape: ObjectLockMode, location: "header", location_name: "x-amz-object-lock-mode"))
    CreateMultipartUploadRequest.add_member(:object_lock_retain_until_date, Shapes::ShapeRef.new(shape: ObjectLockRetainUntilDate, location: "header", location_name: "x-amz-object-lock-retain-until-date"))
    CreateMultipartUploadRequest.add_member(:object_lock_legal_hold_status, Shapes::ShapeRef.new(shape: ObjectLockLegalHoldStatus, location: "header", location_name: "x-amz-object-lock-legal-hold"))
    CreateMultipartUploadRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    CreateMultipartUploadRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-checksum-algorithm"))
    CreateMultipartUploadRequest.struct_class = Types::CreateMultipartUploadRequest
    CreateSessionOutput.add_member(:credentials, Shapes::ShapeRef.new(shape: SessionCredentials, required: true, location_name: "Credentials"))
    CreateSessionOutput.struct_class = Types::CreateSessionOutput
    CreateSessionRequest.add_member(:session_mode, Shapes::ShapeRef.new(shape: SessionMode, location: "header", location_name: "x-amz-create-session-mode"))
    CreateSessionRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    CreateSessionRequest.struct_class = Types::CreateSessionRequest
    DefaultRetention.add_member(:mode, Shapes::ShapeRef.new(shape: ObjectLockRetentionMode, location_name: "Mode"))
    DefaultRetention.add_member(:days, Shapes::ShapeRef.new(shape: Days, location_name: "Days"))
    DefaultRetention.add_member(:years, Shapes::ShapeRef.new(shape: Years, location_name: "Years"))
    DefaultRetention.struct_class = Types::DefaultRetention
    Delete.add_member(:objects, Shapes::ShapeRef.new(shape: ObjectIdentifierList, required: true, location_name: "Object"))
    Delete.add_member(:quiet, Shapes::ShapeRef.new(shape: Quiet, location_name: "Quiet"))
    Delete.struct_class = Types::Delete
    DeleteBucketAnalyticsConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteBucketAnalyticsConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: AnalyticsId, required: true, location: "querystring", location_name: "id"))
    DeleteBucketAnalyticsConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeleteBucketAnalyticsConfigurationRequest.struct_class = Types::DeleteBucketAnalyticsConfigurationRequest
    DeleteBucketCorsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteBucketCorsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeleteBucketCorsRequest.struct_class = Types::DeleteBucketCorsRequest
    DeleteBucketEncryptionRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteBucketEncryptionRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeleteBucketEncryptionRequest.struct_class = Types::DeleteBucketEncryptionRequest
    DeleteBucketIntelligentTieringConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteBucketIntelligentTieringConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: IntelligentTieringId, required: true, location: "querystring", location_name: "id"))
    DeleteBucketIntelligentTieringConfigurationRequest.struct_class = Types::DeleteBucketIntelligentTieringConfigurationRequest
    DeleteBucketInventoryConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteBucketInventoryConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: InventoryId, required: true, location: "querystring", location_name: "id"))
    DeleteBucketInventoryConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeleteBucketInventoryConfigurationRequest.struct_class = Types::DeleteBucketInventoryConfigurationRequest
    DeleteBucketLifecycleRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteBucketLifecycleRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeleteBucketLifecycleRequest.struct_class = Types::DeleteBucketLifecycleRequest
    DeleteBucketMetricsConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteBucketMetricsConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: MetricsId, required: true, location: "querystring", location_name: "id"))
    DeleteBucketMetricsConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeleteBucketMetricsConfigurationRequest.struct_class = Types::DeleteBucketMetricsConfigurationRequest
    DeleteBucketOwnershipControlsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteBucketOwnershipControlsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeleteBucketOwnershipControlsRequest.struct_class = Types::DeleteBucketOwnershipControlsRequest
    DeleteBucketPolicyRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteBucketPolicyRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeleteBucketPolicyRequest.struct_class = Types::DeleteBucketPolicyRequest
    DeleteBucketReplicationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteBucketReplicationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeleteBucketReplicationRequest.struct_class = Types::DeleteBucketReplicationRequest
    DeleteBucketRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteBucketRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeleteBucketRequest.struct_class = Types::DeleteBucketRequest
    DeleteBucketTaggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteBucketTaggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeleteBucketTaggingRequest.struct_class = Types::DeleteBucketTaggingRequest
    DeleteBucketWebsiteRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteBucketWebsiteRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeleteBucketWebsiteRequest.struct_class = Types::DeleteBucketWebsiteRequest
    DeleteMarkerEntry.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner"))
    DeleteMarkerEntry.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key"))
    DeleteMarkerEntry.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location_name: "VersionId"))
    DeleteMarkerEntry.add_member(:is_latest, Shapes::ShapeRef.new(shape: IsLatest, location_name: "IsLatest"))
    DeleteMarkerEntry.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location_name: "LastModified"))
    DeleteMarkerEntry.struct_class = Types::DeleteMarkerEntry
    DeleteMarkerReplication.add_member(:status, Shapes::ShapeRef.new(shape: DeleteMarkerReplicationStatus, location_name: "Status"))
    DeleteMarkerReplication.struct_class = Types::DeleteMarkerReplication
    DeleteMarkers.member = Shapes::ShapeRef.new(shape: DeleteMarkerEntry)
    DeleteObjectOutput.add_member(:delete_marker, Shapes::ShapeRef.new(shape: DeleteMarker, location: "header", location_name: "x-amz-delete-marker"))
    DeleteObjectOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id"))
    DeleteObjectOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
    DeleteObjectOutput.struct_class = Types::DeleteObjectOutput
    DeleteObjectRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteObjectRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key", metadata: {"contextParam"=>{"name"=>"Key"}}))
    DeleteObjectRequest.add_member(:mfa, Shapes::ShapeRef.new(shape: MFA, location: "header", location_name: "x-amz-mfa"))
    DeleteObjectRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId"))
    DeleteObjectRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
    DeleteObjectRequest.add_member(:bypass_governance_retention, Shapes::ShapeRef.new(shape: BypassGovernanceRetention, location: "header", location_name: "x-amz-bypass-governance-retention"))
    DeleteObjectRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeleteObjectRequest.struct_class = Types::DeleteObjectRequest
    DeleteObjectTaggingOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id"))
    DeleteObjectTaggingOutput.struct_class = Types::DeleteObjectTaggingOutput
    DeleteObjectTaggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteObjectTaggingRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key"))
    DeleteObjectTaggingRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId"))
    DeleteObjectTaggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeleteObjectTaggingRequest.struct_class = Types::DeleteObjectTaggingRequest
    DeleteObjectsOutput.add_member(:deleted, Shapes::ShapeRef.new(shape: DeletedObjects, location_name: "Deleted"))
    DeleteObjectsOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
    DeleteObjectsOutput.add_member(:errors, Shapes::ShapeRef.new(shape: Errors, location_name: "Error"))
    DeleteObjectsOutput.struct_class = Types::DeleteObjectsOutput
    DeleteObjectsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeleteObjectsRequest.add_member(:delete, Shapes::ShapeRef.new(shape: Delete, required: true, location_name: "Delete", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
    DeleteObjectsRequest.add_member(:mfa, Shapes::ShapeRef.new(shape: MFA, location: "header", location_name: "x-amz-mfa"))
    DeleteObjectsRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
    DeleteObjectsRequest.add_member(:bypass_governance_retention, Shapes::ShapeRef.new(shape: BypassGovernanceRetention, location: "header", location_name: "x-amz-bypass-governance-retention"))
    DeleteObjectsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeleteObjectsRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
    DeleteObjectsRequest.struct_class = Types::DeleteObjectsRequest
    DeleteObjectsRequest[:payload] = :delete
    DeleteObjectsRequest[:payload_member] = DeleteObjectsRequest.member(:delete)
    DeletePublicAccessBlockRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    DeletePublicAccessBlockRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    DeletePublicAccessBlockRequest.struct_class = Types::DeletePublicAccessBlockRequest
    DeletedObject.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key"))
    DeletedObject.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location_name: "VersionId"))
    DeletedObject.add_member(:delete_marker, Shapes::ShapeRef.new(shape: DeleteMarker, location_name: "DeleteMarker"))
    DeletedObject.add_member(:delete_marker_version_id, Shapes::ShapeRef.new(shape: DeleteMarkerVersionId, location_name: "DeleteMarkerVersionId"))
    DeletedObject.struct_class = Types::DeletedObject
    DeletedObjects.member = Shapes::ShapeRef.new(shape: DeletedObject)
    Destination.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location_name: "Bucket"))
    Destination.add_member(:account, Shapes::ShapeRef.new(shape: AccountId, location_name: "Account"))
    Destination.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location_name: "StorageClass"))
    Destination.add_member(:access_control_translation, Shapes::ShapeRef.new(shape: AccessControlTranslation, location_name: "AccessControlTranslation"))
    Destination.add_member(:encryption_configuration, Shapes::ShapeRef.new(shape: EncryptionConfiguration, location_name: "EncryptionConfiguration"))
    Destination.add_member(:replication_time, Shapes::ShapeRef.new(shape: ReplicationTime, location_name: "ReplicationTime"))
    Destination.add_member(:metrics, Shapes::ShapeRef.new(shape: Metrics, location_name: "Metrics"))
    Destination.struct_class = Types::Destination
    Encryption.add_member(:encryption_type, Shapes::ShapeRef.new(shape: ServerSideEncryption, required: true, location_name: "EncryptionType"))
    Encryption.add_member(:kms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location_name: "KMSKeyId"))
    Encryption.add_member(:kms_context, Shapes::ShapeRef.new(shape: KMSContext, location_name: "KMSContext"))
    Encryption.struct_class = Types::Encryption
    EncryptionConfiguration.add_member(:replica_kms_key_id, Shapes::ShapeRef.new(shape: ReplicaKmsKeyID, location_name: "ReplicaKmsKeyID"))
    EncryptionConfiguration.struct_class = Types::EncryptionConfiguration
    EndEvent.struct_class = Types::EndEvent
    Error.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key"))
    Error.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location_name: "VersionId"))
    Error.add_member(:code, Shapes::ShapeRef.new(shape: Code, location_name: "Code"))
    Error.add_member(:message, Shapes::ShapeRef.new(shape: Message, location_name: "Message"))
    Error.struct_class = Types::Error
    ErrorDocument.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location_name: "Key"))
    ErrorDocument.struct_class = Types::ErrorDocument
    Errors.member = Shapes::ShapeRef.new(shape: Error)
    EventBridgeConfiguration.struct_class = Types::EventBridgeConfiguration
    EventList.member = Shapes::ShapeRef.new(shape: Event)
    ExistingObjectReplication.add_member(:status, Shapes::ShapeRef.new(shape: ExistingObjectReplicationStatus, required: true, location_name: "Status"))
    ExistingObjectReplication.struct_class = Types::ExistingObjectReplication
    ExposeHeaders.member = Shapes::ShapeRef.new(shape: ExposeHeader)
    FilterRule.add_member(:name, Shapes::ShapeRef.new(shape: FilterRuleName, location_name: "Name"))
    FilterRule.add_member(:value, Shapes::ShapeRef.new(shape: FilterRuleValue, location_name: "Value"))
    FilterRule.struct_class = Types::FilterRule
    FilterRuleList.member = Shapes::ShapeRef.new(shape: FilterRule)
    GetBucketAccelerateConfigurationOutput.add_member(:status, Shapes::ShapeRef.new(shape: BucketAccelerateStatus, location_name: "Status"))
    GetBucketAccelerateConfigurationOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
    GetBucketAccelerateConfigurationOutput.struct_class = Types::GetBucketAccelerateConfigurationOutput
    GetBucketAccelerateConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketAccelerateConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketAccelerateConfigurationRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
    GetBucketAccelerateConfigurationRequest.struct_class = Types::GetBucketAccelerateConfigurationRequest
    GetBucketAclOutput.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner"))
    GetBucketAclOutput.add_member(:grants, Shapes::ShapeRef.new(shape: Grants, location_name: "AccessControlList"))
    GetBucketAclOutput.struct_class = Types::GetBucketAclOutput
    GetBucketAclRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketAclRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketAclRequest.struct_class = Types::GetBucketAclRequest
    GetBucketAnalyticsConfigurationOutput.add_member(:analytics_configuration, Shapes::ShapeRef.new(shape: AnalyticsConfiguration, location_name: "AnalyticsConfiguration"))
    GetBucketAnalyticsConfigurationOutput.struct_class = Types::GetBucketAnalyticsConfigurationOutput
    GetBucketAnalyticsConfigurationOutput[:payload] = :analytics_configuration
    GetBucketAnalyticsConfigurationOutput[:payload_member] = GetBucketAnalyticsConfigurationOutput.member(:analytics_configuration)
    GetBucketAnalyticsConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketAnalyticsConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: AnalyticsId, required: true, location: "querystring", location_name: "id"))
    GetBucketAnalyticsConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketAnalyticsConfigurationRequest.struct_class = Types::GetBucketAnalyticsConfigurationRequest
    GetBucketCorsOutput.add_member(:cors_rules, Shapes::ShapeRef.new(shape: CORSRules, location_name: "CORSRule"))
    GetBucketCorsOutput.struct_class = Types::GetBucketCorsOutput
    GetBucketCorsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketCorsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketCorsRequest.struct_class = Types::GetBucketCorsRequest
    GetBucketEncryptionOutput.add_member(:server_side_encryption_configuration, Shapes::ShapeRef.new(shape: ServerSideEncryptionConfiguration, location_name: "ServerSideEncryptionConfiguration"))
    GetBucketEncryptionOutput.struct_class = Types::GetBucketEncryptionOutput
    GetBucketEncryptionOutput[:payload] = :server_side_encryption_configuration
    GetBucketEncryptionOutput[:payload_member] = GetBucketEncryptionOutput.member(:server_side_encryption_configuration)
    GetBucketEncryptionRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketEncryptionRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketEncryptionRequest.struct_class = Types::GetBucketEncryptionRequest
    GetBucketIntelligentTieringConfigurationOutput.add_member(:intelligent_tiering_configuration, Shapes::ShapeRef.new(shape: IntelligentTieringConfiguration, location_name: "IntelligentTieringConfiguration"))
    GetBucketIntelligentTieringConfigurationOutput.struct_class = Types::GetBucketIntelligentTieringConfigurationOutput
    GetBucketIntelligentTieringConfigurationOutput[:payload] = :intelligent_tiering_configuration
    GetBucketIntelligentTieringConfigurationOutput[:payload_member] = GetBucketIntelligentTieringConfigurationOutput.member(:intelligent_tiering_configuration)
    GetBucketIntelligentTieringConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketIntelligentTieringConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: IntelligentTieringId, required: true, location: "querystring", location_name: "id"))
    GetBucketIntelligentTieringConfigurationRequest.struct_class = Types::GetBucketIntelligentTieringConfigurationRequest
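    # Aside (hand-written comment, not generated output): the repeated
    # `Shape[:payload] = ...` / `Shape[:payload_member] = ...` pairs above mark
    # one member as the entire HTTP body, so the XML (de)serializer binds the
    # whole payload to that single member. A minimal usage sketch, assuming a
    # bucket named "example-bucket" with default encryption configured:
    #
    #   s3 = Aws::S3::Client.new
    #   resp = s3.get_bucket_encryption(bucket: "example-bucket")
    #   resp.server_side_encryption_configuration.rules.each do |rule|
    #     puts rule.apply_server_side_encryption_by_default&.sse_algorithm
    #   end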
    GetBucketInventoryConfigurationOutput.add_member(:inventory_configuration, Shapes::ShapeRef.new(shape: InventoryConfiguration, location_name: "InventoryConfiguration"))
    GetBucketInventoryConfigurationOutput.struct_class = Types::GetBucketInventoryConfigurationOutput
    GetBucketInventoryConfigurationOutput[:payload] = :inventory_configuration
    GetBucketInventoryConfigurationOutput[:payload_member] = GetBucketInventoryConfigurationOutput.member(:inventory_configuration)
    GetBucketInventoryConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketInventoryConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: InventoryId, required: true, location: "querystring", location_name: "id"))
    GetBucketInventoryConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketInventoryConfigurationRequest.struct_class = Types::GetBucketInventoryConfigurationRequest
    GetBucketLifecycleConfigurationOutput.add_member(:rules, Shapes::ShapeRef.new(shape: LifecycleRules, location_name: "Rule"))
    GetBucketLifecycleConfigurationOutput.struct_class = Types::GetBucketLifecycleConfigurationOutput
    GetBucketLifecycleConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketLifecycleConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketLifecycleConfigurationRequest.struct_class = Types::GetBucketLifecycleConfigurationRequest
    GetBucketLifecycleOutput.add_member(:rules, Shapes::ShapeRef.new(shape: Rules, location_name: "Rule"))
    GetBucketLifecycleOutput.struct_class = Types::GetBucketLifecycleOutput
    GetBucketLifecycleRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketLifecycleRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketLifecycleRequest.struct_class = Types::GetBucketLifecycleRequest
    GetBucketLocationOutput.add_member(:location_constraint, Shapes::ShapeRef.new(shape: BucketLocationConstraint, location_name: "LocationConstraint"))
    GetBucketLocationOutput.struct_class = Types::GetBucketLocationOutput
    GetBucketLocationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketLocationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketLocationRequest.struct_class = Types::GetBucketLocationRequest
    GetBucketLoggingOutput.add_member(:logging_enabled, Shapes::ShapeRef.new(shape: LoggingEnabled, location_name: "LoggingEnabled"))
    GetBucketLoggingOutput.struct_class = Types::GetBucketLoggingOutput
    GetBucketLoggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketLoggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketLoggingRequest.struct_class = Types::GetBucketLoggingRequest
    GetBucketMetricsConfigurationOutput.add_member(:metrics_configuration, Shapes::ShapeRef.new(shape: MetricsConfiguration, location_name: "MetricsConfiguration"))
    GetBucketMetricsConfigurationOutput.struct_class = Types::GetBucketMetricsConfigurationOutput
    GetBucketMetricsConfigurationOutput[:payload] = :metrics_configuration
    GetBucketMetricsConfigurationOutput[:payload_member] = GetBucketMetricsConfigurationOutput.member(:metrics_configuration)
    GetBucketMetricsConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketMetricsConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: MetricsId, required: true, location: "querystring", location_name: "id"))
    GetBucketMetricsConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketMetricsConfigurationRequest.struct_class = Types::GetBucketMetricsConfigurationRequest
    GetBucketNotificationConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketNotificationConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketNotificationConfigurationRequest.struct_class = Types::GetBucketNotificationConfigurationRequest
    GetBucketOwnershipControlsOutput.add_member(:ownership_controls, Shapes::ShapeRef.new(shape: OwnershipControls, location_name: "OwnershipControls"))
    GetBucketOwnershipControlsOutput.struct_class = Types::GetBucketOwnershipControlsOutput
    GetBucketOwnershipControlsOutput[:payload] = :ownership_controls
    GetBucketOwnershipControlsOutput[:payload_member] = GetBucketOwnershipControlsOutput.member(:ownership_controls)
    GetBucketOwnershipControlsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketOwnershipControlsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketOwnershipControlsRequest.struct_class = Types::GetBucketOwnershipControlsRequest
    GetBucketPolicyOutput.add_member(:policy, Shapes::ShapeRef.new(shape: Policy, location_name: "Policy"))
    GetBucketPolicyOutput.struct_class = Types::GetBucketPolicyOutput
    GetBucketPolicyOutput[:payload] = :policy
    GetBucketPolicyOutput[:payload_member] = GetBucketPolicyOutput.member(:policy)
    GetBucketPolicyRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketPolicyRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketPolicyRequest.struct_class = Types::GetBucketPolicyRequest
    GetBucketPolicyStatusOutput.add_member(:policy_status, Shapes::ShapeRef.new(shape: PolicyStatus, location_name: "PolicyStatus"))
    GetBucketPolicyStatusOutput.struct_class = Types::GetBucketPolicyStatusOutput
    GetBucketPolicyStatusOutput[:payload] = :policy_status
    GetBucketPolicyStatusOutput[:payload_member] = GetBucketPolicyStatusOutput.member(:policy_status)
    GetBucketPolicyStatusRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketPolicyStatusRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketPolicyStatusRequest.struct_class = Types::GetBucketPolicyStatusRequest
    GetBucketReplicationOutput.add_member(:replication_configuration, Shapes::ShapeRef.new(shape: ReplicationConfiguration, location_name: "ReplicationConfiguration"))
    GetBucketReplicationOutput.struct_class = Types::GetBucketReplicationOutput
    GetBucketReplicationOutput[:payload] = :replication_configuration
    GetBucketReplicationOutput[:payload_member] = GetBucketReplicationOutput.member(:replication_configuration)
    GetBucketReplicationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketReplicationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketReplicationRequest.struct_class = Types::GetBucketReplicationRequest
    GetBucketRequestPaymentOutput.add_member(:payer, Shapes::ShapeRef.new(shape: Payer, location_name: "Payer"))
    GetBucketRequestPaymentOutput.struct_class = Types::GetBucketRequestPaymentOutput
    GetBucketRequestPaymentRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketRequestPaymentRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketRequestPaymentRequest.struct_class = Types::GetBucketRequestPaymentRequest
    GetBucketTaggingOutput.add_member(:tag_set, Shapes::ShapeRef.new(shape: TagSet, required: true, location_name: "TagSet"))
    GetBucketTaggingOutput.struct_class = Types::GetBucketTaggingOutput
    GetBucketTaggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketTaggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketTaggingRequest.struct_class = Types::GetBucketTaggingRequest
    GetBucketVersioningOutput.add_member(:status, Shapes::ShapeRef.new(shape: BucketVersioningStatus, location_name: "Status"))
    GetBucketVersioningOutput.add_member(:mfa_delete, Shapes::ShapeRef.new(shape: MFADeleteStatus, location_name: "MfaDelete"))
    GetBucketVersioningOutput.struct_class = Types::GetBucketVersioningOutput
    GetBucketVersioningRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketVersioningRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketVersioningRequest.struct_class = Types::GetBucketVersioningRequest
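    # Aside (hand-written comment): GetBucketVersioningOutput maps the XML
    # elements Status and MfaDelete straight onto struct members, so callers
    # read them as plain attributes. Illustrative sketch, assuming a bucket
    # named "example-bucket" exists:
    #
    #   s3 = Aws::S3::Client.new
    #   resp = s3.get_bucket_versioning(bucket: "example-bucket")
    #   resp.status     # => "Enabled", "Suspended", or nil if never configured
    #   resp.mfa_delete # => "Enabled" or "Disabled" once MFA delete has been set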
    GetBucketWebsiteOutput.add_member(:redirect_all_requests_to, Shapes::ShapeRef.new(shape: RedirectAllRequestsTo, location_name: "RedirectAllRequestsTo"))
    GetBucketWebsiteOutput.add_member(:index_document, Shapes::ShapeRef.new(shape: IndexDocument, location_name: "IndexDocument"))
    GetBucketWebsiteOutput.add_member(:error_document, Shapes::ShapeRef.new(shape: ErrorDocument, location_name: "ErrorDocument"))
    GetBucketWebsiteOutput.add_member(:routing_rules, Shapes::ShapeRef.new(shape: RoutingRules, location_name: "RoutingRules"))
    GetBucketWebsiteOutput.struct_class = Types::GetBucketWebsiteOutput
    GetBucketWebsiteRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetBucketWebsiteRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetBucketWebsiteRequest.struct_class = Types::GetBucketWebsiteRequest
    GetObjectAclOutput.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner"))
    GetObjectAclOutput.add_member(:grants, Shapes::ShapeRef.new(shape: Grants, location_name: "AccessControlList"))
    GetObjectAclOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
    GetObjectAclOutput.struct_class = Types::GetObjectAclOutput
    GetObjectAclRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetObjectAclRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key", metadata: {"contextParam"=>{"name"=>"Key"}}))
    GetObjectAclRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId"))
    GetObjectAclRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
    GetObjectAclRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetObjectAclRequest.struct_class = Types::GetObjectAclRequest
    GetObjectAttributesOutput.add_member(:delete_marker, Shapes::ShapeRef.new(shape: DeleteMarker, location: "header", location_name: "x-amz-delete-marker"))
    GetObjectAttributesOutput.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location: "header", location_name: "Last-Modified"))
    GetObjectAttributesOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id"))
    GetObjectAttributesOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
    GetObjectAttributesOutput.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag"))
    GetObjectAttributesOutput.add_member(:checksum, Shapes::ShapeRef.new(shape: Checksum, location_name: "Checksum"))
    GetObjectAttributesOutput.add_member(:object_parts, Shapes::ShapeRef.new(shape: GetObjectAttributesParts, location_name: "ObjectParts"))
    GetObjectAttributesOutput.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location_name: "StorageClass"))
    GetObjectAttributesOutput.add_member(:object_size, Shapes::ShapeRef.new(shape: ObjectSize, location_name: "ObjectSize"))
    GetObjectAttributesOutput.struct_class = Types::GetObjectAttributesOutput
    GetObjectAttributesParts.add_member(:total_parts_count, Shapes::ShapeRef.new(shape: PartsCount, location_name: "PartsCount"))
    GetObjectAttributesParts.add_member(:part_number_marker, Shapes::ShapeRef.new(shape: PartNumberMarker, location_name: "PartNumberMarker"))
    GetObjectAttributesParts.add_member(:next_part_number_marker, Shapes::ShapeRef.new(shape: NextPartNumberMarker, location_name: "NextPartNumberMarker"))
    GetObjectAttributesParts.add_member(:max_parts, Shapes::ShapeRef.new(shape: MaxParts, location_name: "MaxParts"))
    GetObjectAttributesParts.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated"))
    GetObjectAttributesParts.add_member(:parts, Shapes::ShapeRef.new(shape: PartsList, location_name: "Part"))
    GetObjectAttributesParts.struct_class = Types::GetObjectAttributesParts
    GetObjectAttributesRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetObjectAttributesRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key"))
    GetObjectAttributesRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId"))
    GetObjectAttributesRequest.add_member(:max_parts, Shapes::ShapeRef.new(shape: MaxParts, location: "header", location_name: "x-amz-max-parts"))
    GetObjectAttributesRequest.add_member(:part_number_marker, Shapes::ShapeRef.new(shape: PartNumberMarker, location: "header", location_name: "x-amz-part-number-marker"))
    GetObjectAttributesRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm"))
    GetObjectAttributesRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key"))
    GetObjectAttributesRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5"))
    GetObjectAttributesRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
    GetObjectAttributesRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetObjectAttributesRequest.add_member(:object_attributes, Shapes::ShapeRef.new(shape: ObjectAttributesList, required: true, location: "header", location_name: "x-amz-object-attributes"))
    GetObjectAttributesRequest.struct_class = Types::GetObjectAttributesRequest
    GetObjectLegalHoldOutput.add_member(:legal_hold, Shapes::ShapeRef.new(shape: ObjectLockLegalHold, location_name: "LegalHold"))
    GetObjectLegalHoldOutput.struct_class = Types::GetObjectLegalHoldOutput
    GetObjectLegalHoldOutput[:payload] = :legal_hold
    GetObjectLegalHoldOutput[:payload_member] = GetObjectLegalHoldOutput.member(:legal_hold)
    GetObjectLegalHoldRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetObjectLegalHoldRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key"))
    GetObjectLegalHoldRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId"))
    GetObjectLegalHoldRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
    GetObjectLegalHoldRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetObjectLegalHoldRequest.struct_class = Types::GetObjectLegalHoldRequest
    GetObjectLockConfigurationOutput.add_member(:object_lock_configuration, Shapes::ShapeRef.new(shape: ObjectLockConfiguration, location_name: "ObjectLockConfiguration"))
    GetObjectLockConfigurationOutput.struct_class = Types::GetObjectLockConfigurationOutput
    GetObjectLockConfigurationOutput[:payload] = :object_lock_configuration
    GetObjectLockConfigurationOutput[:payload_member] = GetObjectLockConfigurationOutput.member(:object_lock_configuration)
    GetObjectLockConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
    GetObjectLockConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
    GetObjectLockConfigurationRequest.struct_class = Types::GetObjectLockConfigurationRequest
    GetObjectOutput.add_member(:body, Shapes::ShapeRef.new(shape: Body, location_name: "Body", metadata: {"streaming"=>true}))
    GetObjectOutput.add_member(:delete_marker, Shapes::ShapeRef.new(shape: DeleteMarker, location: "header", location_name: "x-amz-delete-marker"))
    GetObjectOutput.add_member(:accept_ranges, Shapes::ShapeRef.new(shape: AcceptRanges, location: "header", location_name: "accept-ranges"))
    GetObjectOutput.add_member(:expiration, Shapes::ShapeRef.new(shape: Expiration, location: "header", location_name: "x-amz-expiration"))
    GetObjectOutput.add_member(:restore, Shapes::ShapeRef.new(shape: Restore, location: "header", location_name: "x-amz-restore"))
    GetObjectOutput.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location: "header", location_name: "Last-Modified"))
    GetObjectOutput.add_member(:content_length, Shapes::ShapeRef.new(shape: ContentLength, location: "header", location_name: "Content-Length"))
    GetObjectOutput.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location: "header", location_name: "ETag"))
    GetObjectOutput.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-checksum-crc32"))
    GetObjectOutput.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-checksum-crc32c"))
    GetObjectOutput.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-checksum-sha1"))
    GetObjectOutput.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-checksum-sha256"))
    GetObjectOutput.add_member(:missing_meta, Shapes::ShapeRef.new(shape: MissingMeta, location: "header", location_name: "x-amz-missing-meta"))
    GetObjectOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id"))
    GetObjectOutput.add_member(:cache_control, Shapes::ShapeRef.new(shape: CacheControl, location: "header", location_name: "Cache-Control"))
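    # The GetObjectOutput members below continue the pattern established above:
    # each standard HTTP response header (Content-Disposition, Content-Range,
    # Expires, ...) and each x-amz-* header binds to one struct member via
    # location: "header", while :metadata uses location: "headers" with the
    # "x-amz-meta-" prefix to collect all user metadata headers into one hash.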
GetObjectOutput.add_member(:content_disposition, Shapes::ShapeRef.new(shape: ContentDisposition, location: "header", location_name: "Content-Disposition"))
GetObjectOutput.add_member(:content_encoding, Shapes::ShapeRef.new(shape: ContentEncoding, location: "header", location_name: "Content-Encoding"))
GetObjectOutput.add_member(:content_language, Shapes::ShapeRef.new(shape: ContentLanguage, location: "header", location_name: "Content-Language"))
GetObjectOutput.add_member(:content_range, Shapes::ShapeRef.new(shape: ContentRange, location: "header", location_name: "Content-Range"))
GetObjectOutput.add_member(:content_type, Shapes::ShapeRef.new(shape: ContentType, location: "header", location_name: "Content-Type"))
GetObjectOutput.add_member(:expires, Shapes::ShapeRef.new(shape: Expires, location: "header", location_name: "Expires"))
GetObjectOutput.add_member(:expires_string, Shapes::ShapeRef.new(shape: ExpiresString, location: "header", location_name: "Expires"))
GetObjectOutput.add_member(:website_redirect_location, Shapes::ShapeRef.new(shape: WebsiteRedirectLocation, location: "header", location_name: "x-amz-website-redirect-location"))
GetObjectOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption"))
GetObjectOutput.add_member(:metadata, Shapes::ShapeRef.new(shape: Metadata, location: "headers", location_name: "x-amz-meta-"))
GetObjectOutput.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm"))
GetObjectOutput.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5"))
GetObjectOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id"))
GetObjectOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled"))
GetObjectOutput.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location: "header", location_name: "x-amz-storage-class"))
GetObjectOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
GetObjectOutput.add_member(:replication_status, Shapes::ShapeRef.new(shape: ReplicationStatus, location: "header", location_name: "x-amz-replication-status"))
GetObjectOutput.add_member(:parts_count, Shapes::ShapeRef.new(shape: PartsCount, location: "header", location_name: "x-amz-mp-parts-count"))
GetObjectOutput.add_member(:tag_count, Shapes::ShapeRef.new(shape: TagCount, location: "header", location_name: "x-amz-tagging-count"))
GetObjectOutput.add_member(:object_lock_mode, Shapes::ShapeRef.new(shape: ObjectLockMode, location: "header", location_name: "x-amz-object-lock-mode"))
GetObjectOutput.add_member(:object_lock_retain_until_date, Shapes::ShapeRef.new(shape: ObjectLockRetainUntilDate, location: "header", location_name: "x-amz-object-lock-retain-until-date"))
GetObjectOutput.add_member(:object_lock_legal_hold_status, Shapes::ShapeRef.new(shape: ObjectLockLegalHoldStatus, location: "header", location_name: "x-amz-object-lock-legal-hold"))
GetObjectOutput.struct_class = Types::GetObjectOutput
GetObjectOutput[:payload] = :body
GetObjectOutput[:payload_member] = GetObjectOutput.member(:body)
GetObjectRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
GetObjectRequest.add_member(:if_match, Shapes::ShapeRef.new(shape: IfMatch, location: "header", location_name: "If-Match"))
GetObjectRequest.add_member(:if_modified_since, Shapes::ShapeRef.new(shape: IfModifiedSince, location: "header", location_name: "If-Modified-Since"))
GetObjectRequest.add_member(:if_none_match, Shapes::ShapeRef.new(shape: IfNoneMatch, location: "header", location_name: "If-None-Match"))
GetObjectRequest.add_member(:if_unmodified_since, Shapes::ShapeRef.new(shape: IfUnmodifiedSince, location: "header", location_name: "If-Unmodified-Since"))
GetObjectRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key", metadata: {"contextParam"=>{"name"=>"Key"}}))
GetObjectRequest.add_member(:range, Shapes::ShapeRef.new(shape: Range, location: "header", location_name: "Range"))
GetObjectRequest.add_member(:response_cache_control, Shapes::ShapeRef.new(shape: ResponseCacheControl, location: "querystring", location_name: "response-cache-control"))
GetObjectRequest.add_member(:response_content_disposition, Shapes::ShapeRef.new(shape: ResponseContentDisposition, location: "querystring", location_name: "response-content-disposition"))
GetObjectRequest.add_member(:response_content_encoding, Shapes::ShapeRef.new(shape: ResponseContentEncoding, location: "querystring", location_name: "response-content-encoding"))
GetObjectRequest.add_member(:response_content_language, Shapes::ShapeRef.new(shape: ResponseContentLanguage, location: "querystring", location_name: "response-content-language"))
GetObjectRequest.add_member(:response_content_type, Shapes::ShapeRef.new(shape: ResponseContentType, location: "querystring", location_name: "response-content-type"))
GetObjectRequest.add_member(:response_expires, Shapes::ShapeRef.new(shape: ResponseExpires, location: "querystring", location_name: "response-expires"))
GetObjectRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId"))
GetObjectRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm"))
GetObjectRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key"))
GetObjectRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5"))
GetObjectRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
GetObjectRequest.add_member(:part_number, Shapes::ShapeRef.new(shape: PartNumber, location: "querystring", location_name: "partNumber"))
GetObjectRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
GetObjectRequest.add_member(:checksum_mode, Shapes::ShapeRef.new(shape: ChecksumMode, location: "header", location_name: "x-amz-checksum-mode"))
GetObjectRequest.struct_class = Types::GetObjectRequest
GetObjectRetentionOutput.add_member(:retention, Shapes::ShapeRef.new(shape: ObjectLockRetention, location_name: "Retention"))
GetObjectRetentionOutput.struct_class = Types::GetObjectRetentionOutput
GetObjectRetentionOutput[:payload] = :retention
GetObjectRetentionOutput[:payload_member] = GetObjectRetentionOutput.member(:retention)
GetObjectRetentionRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
GetObjectRetentionRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key"))
GetObjectRetentionRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId"))
GetObjectRetentionRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
GetObjectRetentionRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
GetObjectRetentionRequest.struct_class = Types::GetObjectRetentionRequest
GetObjectTaggingOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id"))
GetObjectTaggingOutput.add_member(:tag_set, Shapes::ShapeRef.new(shape: TagSet, required: true, location_name: "TagSet"))
GetObjectTaggingOutput.struct_class = Types::GetObjectTaggingOutput
GetObjectTaggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
GetObjectTaggingRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key"))
GetObjectTaggingRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId"))
GetObjectTaggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
GetObjectTaggingRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
GetObjectTaggingRequest.struct_class = Types::GetObjectTaggingRequest
GetObjectTorrentOutput.add_member(:body, Shapes::ShapeRef.new(shape: Body, location_name: "Body", metadata: {"streaming"=>true}))
GetObjectTorrentOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
GetObjectTorrentOutput.struct_class = Types::GetObjectTorrentOutput
GetObjectTorrentOutput[:payload] = :body
GetObjectTorrentOutput[:payload_member] = GetObjectTorrentOutput.member(:body)
GetObjectTorrentRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
GetObjectTorrentRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key"))
GetObjectTorrentRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
GetObjectTorrentRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
GetObjectTorrentRequest.struct_class = Types::GetObjectTorrentRequest
GetPublicAccessBlockOutput.add_member(:public_access_block_configuration, Shapes::ShapeRef.new(shape: PublicAccessBlockConfiguration, location_name: "PublicAccessBlockConfiguration"))
GetPublicAccessBlockOutput.struct_class = Types::GetPublicAccessBlockOutput
GetPublicAccessBlockOutput[:payload] = :public_access_block_configuration
GetPublicAccessBlockOutput[:payload_member] = GetPublicAccessBlockOutput.member(:public_access_block_configuration)
GetPublicAccessBlockRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
GetPublicAccessBlockRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
GetPublicAccessBlockRequest.struct_class = Types::GetPublicAccessBlockRequest
GlacierJobParameters.add_member(:tier, Shapes::ShapeRef.new(shape: Tier, required: true, location_name: "Tier"))
GlacierJobParameters.struct_class = Types::GlacierJobParameters
Grant.add_member(:grantee, Shapes::ShapeRef.new(shape: Grantee, location_name: "Grantee"))
Grant.add_member(:permission, Shapes::ShapeRef.new(shape: Permission, location_name: "Permission"))
Grant.struct_class = Types::Grant
Grantee.add_member(:display_name, Shapes::ShapeRef.new(shape: DisplayName, location_name: "DisplayName"))
Grantee.add_member(:email_address, Shapes::ShapeRef.new(shape: EmailAddress, location_name: "EmailAddress"))
Grantee.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID"))
Grantee.add_member(:type, Shapes::ShapeRef.new(shape: Type, required: true, location_name: "xsi:type", metadata: {"xmlAttribute"=>true}))
Grantee.add_member(:uri, Shapes::ShapeRef.new(shape: URI, location_name: "URI"))
Grantee.struct_class = Types::Grantee
Grants.member = Shapes::ShapeRef.new(shape: Grant, location_name: "Grant")
HeadBucketOutput.add_member(:bucket_location_type, Shapes::ShapeRef.new(shape: LocationType, location: "header", location_name: "x-amz-bucket-location-type"))
HeadBucketOutput.add_member(:bucket_location_name, Shapes::ShapeRef.new(shape: BucketLocationName, location: "header", location_name: "x-amz-bucket-location-name"))
HeadBucketOutput.add_member(:bucket_region, Shapes::ShapeRef.new(shape: Region, location: "header", location_name: "x-amz-bucket-region"))
HeadBucketOutput.add_member(:access_point_alias, Shapes::ShapeRef.new(shape: AccessPointAlias, location: "header", location_name: "x-amz-access-point-alias"))
HeadBucketOutput.struct_class = Types::HeadBucketOutput
HeadBucketRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
HeadBucketRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
HeadBucketRequest.struct_class = Types::HeadBucketRequest
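# Editor's note: HeadBucketOutput above is populated entirely from response
# headers; HeadBucket has no body. A minimal sketch (bucket name is a
# placeholder; field availability depends on the bucket type):
#
#   resp = Aws::S3::Client.new.head_bucket(bucket: 'example-bucket')
#   resp.bucket_region  # deserialized from the x-amz-bucket-region header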
HeadObjectOutput.add_member(:delete_marker, Shapes::ShapeRef.new(shape: DeleteMarker, location: "header", location_name: "x-amz-delete-marker"))
HeadObjectOutput.add_member(:accept_ranges, Shapes::ShapeRef.new(shape: AcceptRanges, location: "header", location_name: "accept-ranges"))
HeadObjectOutput.add_member(:expiration, Shapes::ShapeRef.new(shape: Expiration, location: "header", location_name: "x-amz-expiration"))
HeadObjectOutput.add_member(:restore, Shapes::ShapeRef.new(shape: Restore, location: "header", location_name: "x-amz-restore"))
HeadObjectOutput.add_member(:archive_status, Shapes::ShapeRef.new(shape: ArchiveStatus, location: "header", location_name: "x-amz-archive-status"))
HeadObjectOutput.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location: "header", location_name: "Last-Modified"))
HeadObjectOutput.add_member(:content_length, Shapes::ShapeRef.new(shape: ContentLength, location: "header", location_name: "Content-Length"))
HeadObjectOutput.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-checksum-crc32"))
HeadObjectOutput.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-checksum-crc32c"))
HeadObjectOutput.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-checksum-sha1"))
HeadObjectOutput.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-checksum-sha256"))
HeadObjectOutput.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location: "header", location_name: "ETag"))
HeadObjectOutput.add_member(:missing_meta, Shapes::ShapeRef.new(shape: MissingMeta, location: "header", location_name: "x-amz-missing-meta"))
HeadObjectOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id"))
HeadObjectOutput.add_member(:cache_control, Shapes::ShapeRef.new(shape: CacheControl, location: "header", location_name: "Cache-Control"))
HeadObjectOutput.add_member(:content_disposition, Shapes::ShapeRef.new(shape: ContentDisposition, location: "header", location_name: "Content-Disposition"))
HeadObjectOutput.add_member(:content_encoding, Shapes::ShapeRef.new(shape: ContentEncoding, location: "header", location_name: "Content-Encoding"))
HeadObjectOutput.add_member(:content_language, Shapes::ShapeRef.new(shape: ContentLanguage, location: "header", location_name: "Content-Language"))
HeadObjectOutput.add_member(:content_type, Shapes::ShapeRef.new(shape: ContentType, location: "header", location_name: "Content-Type"))
HeadObjectOutput.add_member(:expires, Shapes::ShapeRef.new(shape: Expires, location: "header", location_name: "Expires"))
HeadObjectOutput.add_member(:expires_string, Shapes::ShapeRef.new(shape: ExpiresString, location: "header", location_name: "Expires"))
HeadObjectOutput.add_member(:website_redirect_location, Shapes::ShapeRef.new(shape: WebsiteRedirectLocation, location: "header", location_name: "x-amz-website-redirect-location"))
HeadObjectOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption"))
HeadObjectOutput.add_member(:metadata, Shapes::ShapeRef.new(shape: Metadata, location: "headers", location_name: "x-amz-meta-"))
HeadObjectOutput.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm"))
HeadObjectOutput.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5"))
HeadObjectOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id"))
HeadObjectOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled"))
HeadObjectOutput.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location: "header", location_name: "x-amz-storage-class"))
HeadObjectOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
HeadObjectOutput.add_member(:replication_status, Shapes::ShapeRef.new(shape: ReplicationStatus, location: "header", location_name: "x-amz-replication-status"))
HeadObjectOutput.add_member(:parts_count, Shapes::ShapeRef.new(shape: PartsCount, location: "header", location_name: "x-amz-mp-parts-count"))
HeadObjectOutput.add_member(:object_lock_mode, Shapes::ShapeRef.new(shape: ObjectLockMode, location: "header", location_name: "x-amz-object-lock-mode"))
HeadObjectOutput.add_member(:object_lock_retain_until_date, Shapes::ShapeRef.new(shape: ObjectLockRetainUntilDate, location: "header", location_name: "x-amz-object-lock-retain-until-date"))
HeadObjectOutput.add_member(:object_lock_legal_hold_status, Shapes::ShapeRef.new(shape: ObjectLockLegalHoldStatus, location: "header", location_name: "x-amz-object-lock-legal-hold"))
HeadObjectOutput.struct_class = Types::HeadObjectOutput
HeadObjectRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
HeadObjectRequest.add_member(:if_match, Shapes::ShapeRef.new(shape: IfMatch, location: "header", location_name: "If-Match"))
HeadObjectRequest.add_member(:if_modified_since, Shapes::ShapeRef.new(shape: IfModifiedSince, location: "header", location_name: "If-Modified-Since"))
HeadObjectRequest.add_member(:if_none_match, Shapes::ShapeRef.new(shape: IfNoneMatch, location: "header", location_name: "If-None-Match"))
HeadObjectRequest.add_member(:if_unmodified_since, Shapes::ShapeRef.new(shape: IfUnmodifiedSince, location: "header", location_name: "If-Unmodified-Since"))
HeadObjectRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key", metadata: {"contextParam"=>{"name"=>"Key"}}))
HeadObjectRequest.add_member(:range, Shapes::ShapeRef.new(shape: Range, location: "header", location_name: "Range"))
HeadObjectRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId"))
HeadObjectRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm"))
HeadObjectRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key"))
HeadObjectRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5"))
HeadObjectRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
HeadObjectRequest.add_member(:part_number, Shapes::ShapeRef.new(shape: PartNumber, location: "querystring", location_name: "partNumber"))
HeadObjectRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
HeadObjectRequest.add_member(:checksum_mode, Shapes::ShapeRef.new(shape: ChecksumMode, location: "header", location_name: "x-amz-checksum-mode"))
HeadObjectRequest.struct_class = Types::HeadObjectRequest
IndexDocument.add_member(:suffix, Shapes::ShapeRef.new(shape: Suffix, required: true, location_name: "Suffix"))
IndexDocument.struct_class = Types::IndexDocument
Initiator.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID"))
Initiator.add_member(:display_name, Shapes::ShapeRef.new(shape: DisplayName, location_name: "DisplayName"))
Initiator.struct_class = Types::Initiator
InputSerialization.add_member(:csv, Shapes::ShapeRef.new(shape: CSVInput, location_name: "CSV"))
InputSerialization.add_member(:compression_type, Shapes::ShapeRef.new(shape: CompressionType, location_name: "CompressionType"))
InputSerialization.add_member(:json, Shapes::ShapeRef.new(shape: JSONInput, location_name: "JSON"))
InputSerialization.add_member(:parquet, Shapes::ShapeRef.new(shape: ParquetInput, location_name: "Parquet"))
InputSerialization.struct_class = Types::InputSerialization
IntelligentTieringAndOperator.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
IntelligentTieringAndOperator.add_member(:tags, Shapes::ShapeRef.new(shape: TagSet, location_name: "Tag", metadata: {"flattened"=>true}))
IntelligentTieringAndOperator.struct_class = Types::IntelligentTieringAndOperator
IntelligentTieringConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: IntelligentTieringId, required: true, location_name: "Id"))
IntelligentTieringConfiguration.add_member(:filter, Shapes::ShapeRef.new(shape: IntelligentTieringFilter, location_name: "Filter"))
IntelligentTieringConfiguration.add_member(:status, Shapes::ShapeRef.new(shape: IntelligentTieringStatus, required: true, location_name: "Status"))
IntelligentTieringConfiguration.add_member(:tierings, Shapes::ShapeRef.new(shape: TieringList, required: true, location_name: "Tiering"))
IntelligentTieringConfiguration.struct_class = Types::IntelligentTieringConfiguration
IntelligentTieringConfigurationList.member = Shapes::ShapeRef.new(shape: IntelligentTieringConfiguration)
IntelligentTieringFilter.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
IntelligentTieringFilter.add_member(:tag, Shapes::ShapeRef.new(shape: Tag, location_name: "Tag"))
IntelligentTieringFilter.add_member(:and, Shapes::ShapeRef.new(shape: IntelligentTieringAndOperator, location_name: "And"))
IntelligentTieringFilter.struct_class = Types::IntelligentTieringFilter
InvalidObjectState.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location_name: "StorageClass"))
InvalidObjectState.add_member(:access_tier, Shapes::ShapeRef.new(shape: IntelligentTieringAccessTier, location_name: "AccessTier"))
InvalidObjectState.struct_class = Types::InvalidObjectState
InventoryConfiguration.add_member(:destination, Shapes::ShapeRef.new(shape: InventoryDestination, required: true, location_name: "Destination"))
InventoryConfiguration.add_member(:is_enabled, Shapes::ShapeRef.new(shape: IsEnabled, required: true, location_name: "IsEnabled"))
InventoryConfiguration.add_member(:filter, Shapes::ShapeRef.new(shape: InventoryFilter, location_name: "Filter"))
InventoryConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: InventoryId, required: true, location_name: "Id"))
InventoryConfiguration.add_member(:included_object_versions, Shapes::ShapeRef.new(shape: InventoryIncludedObjectVersions, required: true, location_name: "IncludedObjectVersions"))
InventoryConfiguration.add_member(:optional_fields, Shapes::ShapeRef.new(shape: InventoryOptionalFields, location_name: "OptionalFields"))
InventoryConfiguration.add_member(:schedule, Shapes::ShapeRef.new(shape: InventorySchedule, required: true, location_name: "Schedule"))
InventoryConfiguration.struct_class = Types::InventoryConfiguration
InventoryConfigurationList.member = Shapes::ShapeRef.new(shape: InventoryConfiguration)
InventoryDestination.add_member(:s3_bucket_destination, Shapes::ShapeRef.new(shape: InventoryS3BucketDestination, required: true, location_name: "S3BucketDestination"))
InventoryDestination.struct_class = Types::InventoryDestination
InventoryEncryption.add_member(:sses3, Shapes::ShapeRef.new(shape: SSES3, location_name: "SSE-S3"))
InventoryEncryption.add_member(:ssekms, Shapes::ShapeRef.new(shape: SSEKMS, location_name: "SSE-KMS"))
InventoryEncryption.struct_class = Types::InventoryEncryption
InventoryFilter.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, required: true, location_name: "Prefix"))
InventoryFilter.struct_class = Types::InventoryFilter
InventoryOptionalFields.member = Shapes::ShapeRef.new(shape: InventoryOptionalField, location_name: "Field")
InventoryS3BucketDestination.add_member(:account_id, Shapes::ShapeRef.new(shape: AccountId, location_name: "AccountId"))
InventoryS3BucketDestination.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location_name: "Bucket"))
InventoryS3BucketDestination.add_member(:format, Shapes::ShapeRef.new(shape: InventoryFormat, required: true, location_name: "Format"))
InventoryS3BucketDestination.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
InventoryS3BucketDestination.add_member(:encryption, Shapes::ShapeRef.new(shape: InventoryEncryption, location_name: "Encryption"))
InventoryS3BucketDestination.struct_class = Types::InventoryS3BucketDestination
InventorySchedule.add_member(:frequency, Shapes::ShapeRef.new(shape: InventoryFrequency, required: true, location_name: "Frequency"))
InventorySchedule.struct_class = Types::InventorySchedule
JSONInput.add_member(:type, Shapes::ShapeRef.new(shape: JSONType, location_name: "Type"))
JSONInput.struct_class = Types::JSONInput
JSONOutput.add_member(:record_delimiter, Shapes::ShapeRef.new(shape: RecordDelimiter, location_name: "RecordDelimiter"))
JSONOutput.struct_class = Types::JSONOutput
LambdaFunctionConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: NotificationId, location_name: "Id"))
LambdaFunctionConfiguration.add_member(:lambda_function_arn, Shapes::ShapeRef.new(shape: LambdaFunctionArn, required: true, location_name: "CloudFunction"))
LambdaFunctionConfiguration.add_member(:events, Shapes::ShapeRef.new(shape: EventList, required: true, location_name: "Event"))
LambdaFunctionConfiguration.add_member(:filter, Shapes::ShapeRef.new(shape: NotificationConfigurationFilter, location_name: "Filter"))
LambdaFunctionConfiguration.struct_class = Types::LambdaFunctionConfiguration
LambdaFunctionConfigurationList.member = Shapes::ShapeRef.new(shape: LambdaFunctionConfiguration)
LifecycleConfiguration.add_member(:rules, Shapes::ShapeRef.new(shape: Rules, required: true, location_name: "Rule"))
LifecycleConfiguration.struct_class = Types::LifecycleConfiguration
LifecycleExpiration.add_member(:date, Shapes::ShapeRef.new(shape: Date, location_name: "Date"))
LifecycleExpiration.add_member(:days, Shapes::ShapeRef.new(shape: Days, location_name: "Days"))
LifecycleExpiration.add_member(:expired_object_delete_marker, Shapes::ShapeRef.new(shape: ExpiredObjectDeleteMarker, location_name: "ExpiredObjectDeleteMarker"))
LifecycleExpiration.struct_class = Types::LifecycleExpiration
LifecycleRule.add_member(:expiration, Shapes::ShapeRef.new(shape: LifecycleExpiration, location_name: "Expiration"))
LifecycleRule.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID"))
LifecycleRule.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, deprecated: true, location_name: "Prefix"))
LifecycleRule.add_member(:filter, Shapes::ShapeRef.new(shape: LifecycleRuleFilter, location_name: "Filter"))
LifecycleRule.add_member(:status, Shapes::ShapeRef.new(shape: ExpirationStatus, required: true, location_name: "Status"))
LifecycleRule.add_member(:transitions, Shapes::ShapeRef.new(shape: TransitionList, location_name: "Transition"))
LifecycleRule.add_member(:noncurrent_version_transitions, Shapes::ShapeRef.new(shape: NoncurrentVersionTransitionList, location_name: "NoncurrentVersionTransition"))
LifecycleRule.add_member(:noncurrent_version_expiration, Shapes::ShapeRef.new(shape: NoncurrentVersionExpiration, location_name: "NoncurrentVersionExpiration"))
LifecycleRule.add_member(:abort_incomplete_multipart_upload, Shapes::ShapeRef.new(shape: AbortIncompleteMultipartUpload, location_name: "AbortIncompleteMultipartUpload"))
LifecycleRule.struct_class = Types::LifecycleRule
LifecycleRuleAndOperator.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
LifecycleRuleAndOperator.add_member(:tags, Shapes::ShapeRef.new(shape: TagSet, location_name: "Tag", metadata: {"flattened"=>true}))
LifecycleRuleAndOperator.add_member(:object_size_greater_than, Shapes::ShapeRef.new(shape: ObjectSizeGreaterThanBytes, location_name: "ObjectSizeGreaterThan"))
LifecycleRuleAndOperator.add_member(:object_size_less_than, Shapes::ShapeRef.new(shape: ObjectSizeLessThanBytes, location_name: "ObjectSizeLessThan"))
LifecycleRuleAndOperator.struct_class = Types::LifecycleRuleAndOperator
LifecycleRuleFilter.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
LifecycleRuleFilter.add_member(:tag, Shapes::ShapeRef.new(shape: Tag, location_name: "Tag"))
LifecycleRuleFilter.add_member(:object_size_greater_than, Shapes::ShapeRef.new(shape: ObjectSizeGreaterThanBytes, location_name: "ObjectSizeGreaterThan"))
LifecycleRuleFilter.add_member(:object_size_less_than, Shapes::ShapeRef.new(shape: ObjectSizeLessThanBytes, location_name: "ObjectSizeLessThan"))
LifecycleRuleFilter.add_member(:and, Shapes::ShapeRef.new(shape: LifecycleRuleAndOperator, location_name: "And"))
LifecycleRuleFilter.struct_class = Types::LifecycleRuleFilter
LifecycleRules.member = Shapes::ShapeRef.new(shape: LifecycleRule)
ListBucketAnalyticsConfigurationsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated"))
ListBucketAnalyticsConfigurationsOutput.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location_name: "ContinuationToken"))
ListBucketAnalyticsConfigurationsOutput.add_member(:next_continuation_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextContinuationToken"))
ListBucketAnalyticsConfigurationsOutput.add_member(:analytics_configuration_list, Shapes::ShapeRef.new(shape: AnalyticsConfigurationList, location_name: "AnalyticsConfiguration"))
ListBucketAnalyticsConfigurationsOutput.struct_class = Types::ListBucketAnalyticsConfigurationsOutput
ListBucketAnalyticsConfigurationsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
ListBucketAnalyticsConfigurationsRequest.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location: "querystring", location_name: "continuation-token"))
ListBucketAnalyticsConfigurationsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
ListBucketAnalyticsConfigurationsRequest.struct_class = Types::ListBucketAnalyticsConfigurationsRequest
ListBucketIntelligentTieringConfigurationsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated"))
ListBucketIntelligentTieringConfigurationsOutput.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location_name: "ContinuationToken"))
ListBucketIntelligentTieringConfigurationsOutput.add_member(:next_continuation_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextContinuationToken"))
ListBucketIntelligentTieringConfigurationsOutput.add_member(:intelligent_tiering_configuration_list, Shapes::ShapeRef.new(shape: IntelligentTieringConfigurationList, location_name: "IntelligentTieringConfiguration"))
ListBucketIntelligentTieringConfigurationsOutput.struct_class = Types::ListBucketIntelligentTieringConfigurationsOutput
ListBucketIntelligentTieringConfigurationsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
ListBucketIntelligentTieringConfigurationsRequest.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location: "querystring", location_name: "continuation-token"))
ListBucketIntelligentTieringConfigurationsRequest.struct_class = Types::ListBucketIntelligentTieringConfigurationsRequest
ListBucketInventoryConfigurationsOutput.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location_name: "ContinuationToken"))
ListBucketInventoryConfigurationsOutput.add_member(:inventory_configuration_list, Shapes::ShapeRef.new(shape: InventoryConfigurationList, location_name: "InventoryConfiguration"))
ListBucketInventoryConfigurationsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated"))
ListBucketInventoryConfigurationsOutput.add_member(:next_continuation_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextContinuationToken"))
ListBucketInventoryConfigurationsOutput.struct_class = Types::ListBucketInventoryConfigurationsOutput
ListBucketInventoryConfigurationsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
ListBucketInventoryConfigurationsRequest.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location: "querystring", location_name: "continuation-token"))
ListBucketInventoryConfigurationsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
ListBucketInventoryConfigurationsRequest.struct_class = Types::ListBucketInventoryConfigurationsRequest
ListBucketMetricsConfigurationsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated"))
ListBucketMetricsConfigurationsOutput.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location_name: "ContinuationToken"))
ListBucketMetricsConfigurationsOutput.add_member(:next_continuation_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextContinuationToken"))
ListBucketMetricsConfigurationsOutput.add_member(:metrics_configuration_list, Shapes::ShapeRef.new(shape: MetricsConfigurationList, location_name: "MetricsConfiguration"))
ListBucketMetricsConfigurationsOutput.struct_class = Types::ListBucketMetricsConfigurationsOutput
ListBucketMetricsConfigurationsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
ListBucketMetricsConfigurationsRequest.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location: "querystring", location_name: "continuation-token"))
ListBucketMetricsConfigurationsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
ListBucketMetricsConfigurationsRequest.struct_class = Types::ListBucketMetricsConfigurationsRequest
ListBucketsOutput.add_member(:buckets, Shapes::ShapeRef.new(shape: Buckets, location_name: "Buckets"))
ListBucketsOutput.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner"))
ListBucketsOutput.struct_class = Types::ListBucketsOutput
ListDirectoryBucketsOutput.add_member(:buckets, Shapes::ShapeRef.new(shape: Buckets, location_name: "Buckets"))
ListDirectoryBucketsOutput.add_member(:continuation_token, Shapes::ShapeRef.new(shape: DirectoryBucketToken, location_name: "ContinuationToken"))
ListDirectoryBucketsOutput.struct_class = Types::ListDirectoryBucketsOutput
ListDirectoryBucketsRequest.add_member(:continuation_token, Shapes::ShapeRef.new(shape: DirectoryBucketToken, location: "querystring", location_name: "continuation-token"))
ListDirectoryBucketsRequest.add_member(:max_directory_buckets, Shapes::ShapeRef.new(shape: MaxDirectoryBuckets, location: "querystring", location_name: "max-directory-buckets"))
ListDirectoryBucketsRequest.struct_class = Types::ListDirectoryBucketsRequest
ListMultipartUploadsOutput.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, location_name: "Bucket"))
ListMultipartUploadsOutput.add_member(:key_marker, Shapes::ShapeRef.new(shape: KeyMarker, location_name: "KeyMarker"))
ListMultipartUploadsOutput.add_member(:upload_id_marker, Shapes::ShapeRef.new(shape: UploadIdMarker, location_name: "UploadIdMarker"))
ListMultipartUploadsOutput.add_member(:next_key_marker, Shapes::ShapeRef.new(shape: NextKeyMarker, location_name: "NextKeyMarker"))
ListMultipartUploadsOutput.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
ListMultipartUploadsOutput.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location_name: "Delimiter"))
ListMultipartUploadsOutput.add_member(:next_upload_id_marker, Shapes::ShapeRef.new(shape: NextUploadIdMarker, location_name: "NextUploadIdMarker"))
ListMultipartUploadsOutput.add_member(:max_uploads, Shapes::ShapeRef.new(shape: MaxUploads, location_name: "MaxUploads"))
ListMultipartUploadsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated"))
ListMultipartUploadsOutput.add_member(:uploads, Shapes::ShapeRef.new(shape: MultipartUploadList, location_name: "Upload"))
ListMultipartUploadsOutput.add_member(:common_prefixes, Shapes::ShapeRef.new(shape: CommonPrefixList, location_name: "CommonPrefixes"))
ListMultipartUploadsOutput.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location_name: "EncodingType"))
ListMultipartUploadsOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
ListMultipartUploadsOutput.struct_class = Types::ListMultipartUploadsOutput
ListMultipartUploadsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
ListMultipartUploadsRequest.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location: "querystring", location_name: "delimiter"))
ListMultipartUploadsRequest.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location: "querystring", location_name: "encoding-type"))
ListMultipartUploadsRequest.add_member(:key_marker, Shapes::ShapeRef.new(shape: KeyMarker, location: "querystring", location_name: "key-marker"))
ListMultipartUploadsRequest.add_member(:max_uploads, Shapes::ShapeRef.new(shape: MaxUploads, location: "querystring", location_name: "max-uploads"))
ListMultipartUploadsRequest.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location: "querystring", location_name: "prefix", metadata: {"contextParam"=>{"name"=>"Prefix"}}))
ListMultipartUploadsRequest.add_member(:upload_id_marker, Shapes::ShapeRef.new(shape: UploadIdMarker, location: "querystring", location_name: "upload-id-marker"))
ListMultipartUploadsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
ListMultipartUploadsRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
ListMultipartUploadsRequest.struct_class = Types::ListMultipartUploadsRequest
ListObjectVersionsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated"))
ListObjectVersionsOutput.add_member(:key_marker, Shapes::ShapeRef.new(shape: KeyMarker, location_name: "KeyMarker"))
ListObjectVersionsOutput.add_member(:version_id_marker, Shapes::ShapeRef.new(shape: VersionIdMarker, location_name: "VersionIdMarker"))
ListObjectVersionsOutput.add_member(:next_key_marker, Shapes::ShapeRef.new(shape: NextKeyMarker, location_name: "NextKeyMarker"))
ListObjectVersionsOutput.add_member(:next_version_id_marker, Shapes::ShapeRef.new(shape: NextVersionIdMarker, location_name: "NextVersionIdMarker"))
ListObjectVersionsOutput.add_member(:versions, Shapes::ShapeRef.new(shape: ObjectVersionList, location_name: "Version"))
ListObjectVersionsOutput.add_member(:delete_markers, Shapes::ShapeRef.new(shape: DeleteMarkers, location_name: "DeleteMarker"))
ListObjectVersionsOutput.add_member(:name, Shapes::ShapeRef.new(shape: BucketName, location_name: "Name"))
ListObjectVersionsOutput.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
ListObjectVersionsOutput.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location_name: "Delimiter"))
ListObjectVersionsOutput.add_member(:max_keys, Shapes::ShapeRef.new(shape: MaxKeys, location_name: "MaxKeys"))
ListObjectVersionsOutput.add_member(:common_prefixes, Shapes::ShapeRef.new(shape: CommonPrefixList, location_name: "CommonPrefixes"))
ListObjectVersionsOutput.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location_name: "EncodingType"))
ListObjectVersionsOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
ListObjectVersionsOutput.struct_class = Types::ListObjectVersionsOutput
ListObjectVersionsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
ListObjectVersionsRequest.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location: "querystring", location_name: "delimiter"))
ListObjectVersionsRequest.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location: "querystring", location_name: "encoding-type"))
ListObjectVersionsRequest.add_member(:key_marker, Shapes::ShapeRef.new(shape: KeyMarker, location: "querystring", location_name: "key-marker"))
ListObjectVersionsRequest.add_member(:max_keys, Shapes::ShapeRef.new(shape: MaxKeys, location: "querystring", location_name: "max-keys"))
ListObjectVersionsRequest.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location: "querystring", location_name: "prefix", metadata: {"contextParam"=>{"name"=>"Prefix"}}))
ListObjectVersionsRequest.add_member(:version_id_marker, Shapes::ShapeRef.new(shape: VersionIdMarker, location: "querystring", location_name: "version-id-marker"))
ListObjectVersionsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
ListObjectVersionsRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
ListObjectVersionsRequest.add_member(:optional_object_attributes, Shapes::ShapeRef.new(shape: OptionalObjectAttributesList, location: "header", location_name: "x-amz-optional-object-attributes"))
ListObjectVersionsRequest.struct_class = Types::ListObjectVersionsRequest
ListObjectsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated"))
ListObjectsOutput.add_member(:marker, Shapes::ShapeRef.new(shape: Marker, location_name: "Marker"))
ListObjectsOutput.add_member(:next_marker, Shapes::ShapeRef.new(shape: NextMarker, location_name: "NextMarker"))
ListObjectsOutput.add_member(:contents, Shapes::ShapeRef.new(shape: ObjectList, location_name: "Contents"))
ListObjectsOutput.add_member(:name, Shapes::ShapeRef.new(shape: BucketName, location_name: "Name"))
ListObjectsOutput.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
ListObjectsOutput.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location_name: "Delimiter"))
ListObjectsOutput.add_member(:max_keys, Shapes::ShapeRef.new(shape: MaxKeys, location_name: "MaxKeys"))
ListObjectsOutput.add_member(:common_prefixes, Shapes::ShapeRef.new(shape: CommonPrefixList, location_name: "CommonPrefixes"))
ListObjectsOutput.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location_name: "EncodingType"))
ListObjectsOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
ListObjectsOutput.struct_class = Types::ListObjectsOutput
ListObjectsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
ListObjectsRequest.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location: "querystring", location_name: "delimiter"))
ListObjectsRequest.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location: "querystring", location_name: "encoding-type"))
ListObjectsRequest.add_member(:marker, Shapes::ShapeRef.new(shape: Marker, location: "querystring", location_name: "marker"))
ListObjectsRequest.add_member(:max_keys, Shapes::ShapeRef.new(shape: MaxKeys, location: "querystring", location_name: "max-keys"))
ListObjectsRequest.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location: "querystring", location_name: "prefix", metadata: {"contextParam"=>{"name"=>"Prefix"}}))
ListObjectsRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
ListObjectsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
ListObjectsRequest.add_member(:optional_object_attributes, Shapes::ShapeRef.new(shape: OptionalObjectAttributesList, location: "header", location_name: "x-amz-optional-object-attributes"))
ListObjectsRequest.struct_class = Types::ListObjectsRequest
ListObjectsV2Output.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated"))
ListObjectsV2Output.add_member(:contents, Shapes::ShapeRef.new(shape: ObjectList, location_name: "Contents"))
ListObjectsV2Output.add_member(:name, Shapes::ShapeRef.new(shape: BucketName, location_name: "Name"))
ListObjectsV2Output.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
ListObjectsV2Output.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location_name: "Delimiter"))
ListObjectsV2Output.add_member(:max_keys, Shapes::ShapeRef.new(shape: MaxKeys, location_name: "MaxKeys"))
ListObjectsV2Output.add_member(:common_prefixes, Shapes::ShapeRef.new(shape: CommonPrefixList, location_name: "CommonPrefixes"))
ListObjectsV2Output.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location_name: "EncodingType"))
ListObjectsV2Output.add_member(:key_count, Shapes::ShapeRef.new(shape: KeyCount, location_name: "KeyCount"))
ListObjectsV2Output.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location_name: "ContinuationToken"))
ListObjectsV2Output.add_member(:next_continuation_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextContinuationToken"))
ListObjectsV2Output.add_member(:start_after, Shapes::ShapeRef.new(shape: StartAfter, location_name: "StartAfter"))
ListObjectsV2Output.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
ListObjectsV2Output.struct_class = Types::ListObjectsV2Output
ListObjectsV2Request.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
ListObjectsV2Request.add_member(:delimiter, Shapes::ShapeRef.new(shape: Delimiter, location: "querystring", location_name: "delimiter"))
ListObjectsV2Request.add_member(:encoding_type, Shapes::ShapeRef.new(shape: EncodingType, location: "querystring", location_name: "encoding-type"))
ListObjectsV2Request.add_member(:max_keys, Shapes::ShapeRef.new(shape: MaxKeys, location: "querystring", location_name: "max-keys"))
ListObjectsV2Request.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location: "querystring", location_name: "prefix", metadata: {"contextParam"=>{"name"=>"Prefix"}}))
ListObjectsV2Request.add_member(:continuation_token, Shapes::ShapeRef.new(shape: Token, location: "querystring", location_name: "continuation-token"))
ListObjectsV2Request.add_member(:fetch_owner, Shapes::ShapeRef.new(shape: FetchOwner, location: "querystring", location_name: "fetch-owner"))
ListObjectsV2Request.add_member(:start_after, Shapes::ShapeRef.new(shape: StartAfter, location: "querystring", location_name: "start-after"))
ListObjectsV2Request.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
ListObjectsV2Request.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
ListObjectsV2Request.add_member(:optional_object_attributes, Shapes::ShapeRef.new(shape: OptionalObjectAttributesList, location: "header", location_name: "x-amz-optional-object-attributes"))
ListObjectsV2Request.struct_class = Types::ListObjectsV2Request
ListPartsOutput.add_member(:abort_date, Shapes::ShapeRef.new(shape: AbortDate, location: "header", location_name: "x-amz-abort-date"))
ListPartsOutput.add_member(:abort_rule_id, Shapes::ShapeRef.new(shape: AbortRuleId, location: "header", location_name: "x-amz-abort-rule-id"))
ListPartsOutput.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, location_name: "Bucket"))
ListPartsOutput.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key"))
ListPartsOutput.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, location_name: "UploadId"))
ListPartsOutput.add_member(:part_number_marker, Shapes::ShapeRef.new(shape: PartNumberMarker, location_name: "PartNumberMarker"))
ListPartsOutput.add_member(:next_part_number_marker, Shapes::ShapeRef.new(shape: NextPartNumberMarker, location_name: "NextPartNumberMarker"))
ListPartsOutput.add_member(:max_parts, Shapes::ShapeRef.new(shape: MaxParts, location_name: "MaxParts"))
ListPartsOutput.add_member(:is_truncated, Shapes::ShapeRef.new(shape: IsTruncated, location_name: "IsTruncated"))
ListPartsOutput.add_member(:parts, Shapes::ShapeRef.new(shape: Parts, location_name: "Part"))
ListPartsOutput.add_member(:initiator, Shapes::ShapeRef.new(shape: Initiator, location_name: "Initiator"))
ListPartsOutput.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner"))
ListPartsOutput.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location_name: "StorageClass"))
ListPartsOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
ListPartsOutput.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location_name: "ChecksumAlgorithm"))
ListPartsOutput.struct_class = Types::ListPartsOutput
ListPartsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
ListPartsRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key", metadata: {"contextParam"=>{"name"=>"Key"}}))
ListPartsRequest.add_member(:max_parts, Shapes::ShapeRef.new(shape: MaxParts, location: "querystring", location_name: "max-parts"))
ListPartsRequest.add_member(:part_number_marker, Shapes::ShapeRef.new(shape: PartNumberMarker, location: "querystring", location_name: "part-number-marker"))
ListPartsRequest.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, required: true, location: "querystring", location_name: "uploadId"))
ListPartsRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
ListPartsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
ListPartsRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm"))
ListPartsRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key"))
ListPartsRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5"))
ListPartsRequest.struct_class = Types::ListPartsRequest
LocationInfo.add_member(:type, Shapes::ShapeRef.new(shape: LocationType, location_name: "Type"))
LocationInfo.add_member(:name, Shapes::ShapeRef.new(shape: LocationNameAsString, location_name: "Name"))
LocationInfo.struct_class = Types::LocationInfo
LoggingEnabled.add_member(:target_bucket, Shapes::ShapeRef.new(shape: TargetBucket, required: true, location_name: "TargetBucket"))
LoggingEnabled.add_member(:target_grants, Shapes::ShapeRef.new(shape: TargetGrants, location_name: "TargetGrants"))
LoggingEnabled.add_member(:target_prefix, Shapes::ShapeRef.new(shape: TargetPrefix, required: true, location_name: "TargetPrefix"))
LoggingEnabled.add_member(:target_object_key_format, Shapes::ShapeRef.new(shape: TargetObjectKeyFormat, location_name: "TargetObjectKeyFormat"))
LoggingEnabled.struct_class = Types::LoggingEnabled
Metadata.key = Shapes::ShapeRef.new(shape: MetadataKey)
Metadata.value = Shapes::ShapeRef.new(shape: MetadataValue)
MetadataEntry.add_member(:name, Shapes::ShapeRef.new(shape: MetadataKey, location_name: "Name"))
MetadataEntry.add_member(:value, Shapes::ShapeRef.new(shape: MetadataValue, location_name: "Value"))
MetadataEntry.struct_class = Types::MetadataEntry
Metrics.add_member(:status, Shapes::ShapeRef.new(shape: MetricsStatus, required: true, location_name: "Status"))
Metrics.add_member(:event_threshold, Shapes::ShapeRef.new(shape: ReplicationTimeValue, location_name: "EventThreshold"))
Metrics.struct_class = Types::Metrics
MetricsAndOperator.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
MetricsAndOperator.add_member(:tags, Shapes::ShapeRef.new(shape: TagSet, location_name: "Tag", metadata: {"flattened"=>true}))
MetricsAndOperator.add_member(:access_point_arn, Shapes::ShapeRef.new(shape: AccessPointArn, location_name: "AccessPointArn"))
MetricsAndOperator.struct_class = Types::MetricsAndOperator
MetricsConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: MetricsId, required: true, location_name: "Id"))
MetricsConfiguration.add_member(:filter, Shapes::ShapeRef.new(shape: MetricsFilter, location_name: "Filter"))
MetricsConfiguration.struct_class = Types::MetricsConfiguration
MetricsConfigurationList.member = Shapes::ShapeRef.new(shape: MetricsConfiguration)
MetricsFilter.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
MetricsFilter.add_member(:tag, Shapes::ShapeRef.new(shape: Tag, location_name: "Tag"))
MetricsFilter.add_member(:access_point_arn, Shapes::ShapeRef.new(shape: AccessPointArn, location_name: "AccessPointArn"))
MetricsFilter.add_member(:and, Shapes::ShapeRef.new(shape: MetricsAndOperator, location_name: "And"))
MetricsFilter.struct_class = Types::MetricsFilter
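# Editor's note: MetricsFilter above accepts one of :prefix, :tag,
# :access_point_arn, or :and (the AND operator combining several criteria).
# A minimal sketch of the corresponding put call (bucket and id are
# placeholders):
#
#   Aws::S3::Client.new.put_bucket_metrics_configuration(
#     bucket: 'example-bucket',
#     id: 'requests-for-logs',
#     metrics_configuration: {
#       id: 'requests-for-logs',
#       filter: { prefix: 'logs/' }  # serialized as <Filter><Prefix>logs/</Prefix></Filter>
#     }
#   )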
MultipartUpload.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, location_name: "UploadId"))
MultipartUpload.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key"))
MultipartUpload.add_member(:initiated, Shapes::ShapeRef.new(shape: Initiated, location_name: "Initiated"))
MultipartUpload.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location_name: "StorageClass"))
MultipartUpload.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner"))
MultipartUpload.add_member(:initiator, Shapes::ShapeRef.new(shape: Initiator, location_name: "Initiator"))
MultipartUpload.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location_name: "ChecksumAlgorithm"))
MultipartUpload.struct_class = Types::MultipartUpload
MultipartUploadList.member = Shapes::ShapeRef.new(shape: MultipartUpload)
NoSuchBucket.struct_class = Types::NoSuchBucket
NoSuchKey.struct_class = Types::NoSuchKey
NoSuchUpload.struct_class = Types::NoSuchUpload
NoncurrentVersionExpiration.add_member(:noncurrent_days, Shapes::ShapeRef.new(shape: Days, location_name: "NoncurrentDays"))
NoncurrentVersionExpiration.add_member(:newer_noncurrent_versions, Shapes::ShapeRef.new(shape: VersionCount, location_name: "NewerNoncurrentVersions"))
NoncurrentVersionExpiration.struct_class = Types::NoncurrentVersionExpiration
NoncurrentVersionTransition.add_member(:noncurrent_days, Shapes::ShapeRef.new(shape: Days, location_name: "NoncurrentDays"))
NoncurrentVersionTransition.add_member(:storage_class, Shapes::ShapeRef.new(shape: TransitionStorageClass, location_name: "StorageClass"))
NoncurrentVersionTransition.add_member(:newer_noncurrent_versions, Shapes::ShapeRef.new(shape: VersionCount, location_name: "NewerNoncurrentVersions"))
NoncurrentVersionTransition.struct_class = Types::NoncurrentVersionTransition
NoncurrentVersionTransitionList.member = Shapes::ShapeRef.new(shape: NoncurrentVersionTransition)
NotificationConfiguration.add_member(:topic_configurations, Shapes::ShapeRef.new(shape: TopicConfigurationList, location_name: "TopicConfiguration"))
NotificationConfiguration.add_member(:queue_configurations, Shapes::ShapeRef.new(shape: QueueConfigurationList, location_name: "QueueConfiguration"))
NotificationConfiguration.add_member(:lambda_function_configurations, Shapes::ShapeRef.new(shape: LambdaFunctionConfigurationList, location_name: "CloudFunctionConfiguration"))
NotificationConfiguration.add_member(:event_bridge_configuration, Shapes::ShapeRef.new(shape: EventBridgeConfiguration, location_name: "EventBridgeConfiguration"))
NotificationConfiguration.struct_class = Types::NotificationConfiguration
NotificationConfigurationDeprecated.add_member(:topic_configuration, Shapes::ShapeRef.new(shape: TopicConfigurationDeprecated, location_name: "TopicConfiguration"))
NotificationConfigurationDeprecated.add_member(:queue_configuration, Shapes::ShapeRef.new(shape: QueueConfigurationDeprecated, location_name: "QueueConfiguration"))
NotificationConfigurationDeprecated.add_member(:cloud_function_configuration, Shapes::ShapeRef.new(shape: CloudFunctionConfiguration, location_name: "CloudFunctionConfiguration"))
NotificationConfigurationDeprecated.struct_class = Types::NotificationConfigurationDeprecated
NotificationConfigurationFilter.add_member(:key, Shapes::ShapeRef.new(shape: S3KeyFilter, location_name: "S3Key"))
NotificationConfigurationFilter.struct_class = Types::NotificationConfigurationFilter
Object.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key"))
Object.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location_name: "LastModified"))
Object.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag"))
Object.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithmList, location_name: "ChecksumAlgorithm"))
Object.add_member(:size, Shapes::ShapeRef.new(shape: Size, location_name: "Size"))
Object.add_member(:storage_class, Shapes::ShapeRef.new(shape: ObjectStorageClass, location_name: "StorageClass"))
Object.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner"))
Object.add_member(:restore_status, Shapes::ShapeRef.new(shape: RestoreStatus, location_name: "RestoreStatus"))
Object.struct_class = Types::Object
ObjectAlreadyInActiveTierError.struct_class = Types::ObjectAlreadyInActiveTierError
ObjectAttributesList.member = Shapes::ShapeRef.new(shape: ObjectAttributes)
ObjectIdentifier.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location_name: "Key"))
ObjectIdentifier.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location_name: "VersionId"))
ObjectIdentifier.struct_class = Types::ObjectIdentifier
ObjectIdentifierList.member = Shapes::ShapeRef.new(shape: ObjectIdentifier)
ObjectList.member = Shapes::ShapeRef.new(shape: Object)
ObjectLockConfiguration.add_member(:object_lock_enabled, Shapes::ShapeRef.new(shape: ObjectLockEnabled, location_name: "ObjectLockEnabled"))
ObjectLockConfiguration.add_member(:rule, Shapes::ShapeRef.new(shape: ObjectLockRule, location_name: "Rule"))
ObjectLockConfiguration.struct_class = Types::ObjectLockConfiguration
ObjectLockLegalHold.add_member(:status, Shapes::ShapeRef.new(shape: ObjectLockLegalHoldStatus, location_name: "Status"))
ObjectLockLegalHold.struct_class = Types::ObjectLockLegalHold
ObjectLockRetention.add_member(:mode, Shapes::ShapeRef.new(shape: ObjectLockRetentionMode, location_name: "Mode"))
ObjectLockRetention.add_member(:retain_until_date, Shapes::ShapeRef.new(shape: Date, location_name: "RetainUntilDate"))
ObjectLockRetention.struct_class = Types::ObjectLockRetention
ObjectLockRule.add_member(:default_retention, Shapes::ShapeRef.new(shape: DefaultRetention, location_name: "DefaultRetention"))
ObjectLockRule.struct_class = Types::ObjectLockRule
ObjectNotInActiveTierError.struct_class = Types::ObjectNotInActiveTierError
ObjectPart.add_member(:part_number, Shapes::ShapeRef.new(shape: PartNumber, location_name: "PartNumber"))
ObjectPart.add_member(:size, Shapes::ShapeRef.new(shape: Size, location_name: "Size"))
ObjectPart.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location_name: "ChecksumCRC32"))
ObjectPart.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location_name: "ChecksumCRC32C"))
ObjectPart.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location_name: "ChecksumSHA1"))
ObjectPart.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location_name: "ChecksumSHA256"))
ObjectPart.struct_class = Types::ObjectPart
ObjectVersion.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag"))
ObjectVersion.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithmList, location_name: "ChecksumAlgorithm"))
ObjectVersion.add_member(:size, Shapes::ShapeRef.new(shape: Size, location_name: "Size"))
ObjectVersion.add_member(:storage_class, Shapes::ShapeRef.new(shape: ObjectVersionStorageClass, location_name: "StorageClass"))
ObjectVersion.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, location_name: "Key"))
ObjectVersion.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location_name: "VersionId"))
ObjectVersion.add_member(:is_latest, Shapes::ShapeRef.new(shape: IsLatest, location_name: "IsLatest"))
ObjectVersion.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location_name: "LastModified"))
ObjectVersion.add_member(:owner, Shapes::ShapeRef.new(shape: Owner, location_name: "Owner"))
ObjectVersion.add_member(:restore_status, Shapes::ShapeRef.new(shape: RestoreStatus, location_name: "RestoreStatus"))
ObjectVersion.struct_class = Types::ObjectVersion
ObjectVersionList.member = Shapes::ShapeRef.new(shape: ObjectVersion)
OptionalObjectAttributesList.member = Shapes::ShapeRef.new(shape: OptionalObjectAttributes)
OutputLocation.add_member(:s3, Shapes::ShapeRef.new(shape: S3Location, location_name: "S3"))
OutputLocation.struct_class = Types::OutputLocation
OutputSerialization.add_member(:csv, Shapes::ShapeRef.new(shape: CSVOutput, location_name: "CSV"))
OutputSerialization.add_member(:json, Shapes::ShapeRef.new(shape: JSONOutput, location_name: "JSON"))
OutputSerialization.struct_class = Types::OutputSerialization
Owner.add_member(:display_name, Shapes::ShapeRef.new(shape: DisplayName, location_name: "DisplayName"))
Owner.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID"))
Owner.struct_class = Types::Owner
OwnershipControls.add_member(:rules, Shapes::ShapeRef.new(shape: OwnershipControlsRules, required: true, location_name: "Rule"))
OwnershipControls.struct_class = Types::OwnershipControls
OwnershipControlsRule.add_member(:object_ownership, Shapes::ShapeRef.new(shape: ObjectOwnership, required: true, location_name: "ObjectOwnership"))
OwnershipControlsRule.struct_class = Types::OwnershipControlsRule
OwnershipControlsRules.member = Shapes::ShapeRef.new(shape: OwnershipControlsRule)
ParquetInput.struct_class = Types::ParquetInput
Part.add_member(:part_number, Shapes::ShapeRef.new(shape: PartNumber, location_name: "PartNumber"))
Part.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location_name: "LastModified"))
Part.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location_name: "ETag"))
Part.add_member(:size, Shapes::ShapeRef.new(shape: Size, location_name: "Size"))
Part.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location_name: "ChecksumCRC32"))
Part.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location_name: "ChecksumCRC32C"))
Part.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location_name: "ChecksumSHA1"))
Part.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location_name: "ChecksumSHA256"))
Part.struct_class = Types::Part
PartitionedPrefix.add_member(:partition_date_source, Shapes::ShapeRef.new(shape: PartitionDateSource, location_name: "PartitionDateSource"))
PartitionedPrefix.struct_class = Types::PartitionedPrefix
Parts.member = Shapes::ShapeRef.new(shape: Part)
PartsList.member = Shapes::ShapeRef.new(shape: ObjectPart)
PolicyStatus.add_member(:is_public, Shapes::ShapeRef.new(shape: IsPublic, location_name: "IsPublic"))
PolicyStatus.struct_class = Types::PolicyStatus
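# Editor's note: PolicyStatus above carries the single IsPublic flag returned
# by GetBucketPolicyStatus. A minimal sketch (bucket name is a placeholder):
#
#   resp = Aws::S3::Client.new.get_bucket_policy_status(bucket: 'example-bucket')
#   resp.policy_status.is_public  # => true or false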
PublicAccessBlockConfiguration.add_member(:block_public_acls, Shapes::ShapeRef.new(shape: Setting, location_name: "BlockPublicAcls"))
PublicAccessBlockConfiguration.add_member(:ignore_public_acls, Shapes::ShapeRef.new(shape: Setting, location_name: "IgnorePublicAcls"))
PublicAccessBlockConfiguration.add_member(:block_public_policy, Shapes::ShapeRef.new(shape: Setting, location_name: "BlockPublicPolicy"))
PublicAccessBlockConfiguration.add_member(:restrict_public_buckets, Shapes::ShapeRef.new(shape: Setting, location_name: "RestrictPublicBuckets"))
PublicAccessBlockConfiguration.struct_class = Types::PublicAccessBlockConfiguration
PutBucketAccelerateConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketAccelerateConfigurationRequest.add_member(:accelerate_configuration, Shapes::ShapeRef.new(shape: AccelerateConfiguration, required: true, location_name: "AccelerateConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketAccelerateConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketAccelerateConfigurationRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutBucketAccelerateConfigurationRequest.struct_class = Types::PutBucketAccelerateConfigurationRequest
PutBucketAccelerateConfigurationRequest[:payload] = :accelerate_configuration
PutBucketAccelerateConfigurationRequest[:payload_member] = PutBucketAccelerateConfigurationRequest.member(:accelerate_configuration)
PutBucketAclRequest.add_member(:acl, Shapes::ShapeRef.new(shape: BucketCannedACL, location: "header", location_name: "x-amz-acl"))
PutBucketAclRequest.add_member(:access_control_policy, Shapes::ShapeRef.new(shape: AccessControlPolicy, location_name: "AccessControlPolicy", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketAclRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketAclRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutBucketAclRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutBucketAclRequest.add_member(:grant_full_control, Shapes::ShapeRef.new(shape: GrantFullControl, location: "header", location_name: "x-amz-grant-full-control"))
PutBucketAclRequest.add_member(:grant_read, Shapes::ShapeRef.new(shape: GrantRead, location: "header", location_name: "x-amz-grant-read"))
PutBucketAclRequest.add_member(:grant_read_acp, Shapes::ShapeRef.new(shape: GrantReadACP, location: "header", location_name: "x-amz-grant-read-acp"))
PutBucketAclRequest.add_member(:grant_write, Shapes::ShapeRef.new(shape: GrantWrite, location: "header", location_name: "x-amz-grant-write"))
PutBucketAclRequest.add_member(:grant_write_acp, Shapes::ShapeRef.new(shape: GrantWriteACP, location: "header", location_name: "x-amz-grant-write-acp"))
PutBucketAclRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketAclRequest.struct_class = Types::PutBucketAclRequest
PutBucketAclRequest[:payload] = :access_control_policy
PutBucketAclRequest[:payload_member] = PutBucketAclRequest.member(:access_control_policy)
PutBucketAnalyticsConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketAnalyticsConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: AnalyticsId, required: true, location: "querystring", location_name: "id"))
PutBucketAnalyticsConfigurationRequest.add_member(:analytics_configuration, Shapes::ShapeRef.new(shape: AnalyticsConfiguration, required: true, location_name: "AnalyticsConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketAnalyticsConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketAnalyticsConfigurationRequest.struct_class = Types::PutBucketAnalyticsConfigurationRequest
PutBucketAnalyticsConfigurationRequest[:payload] = :analytics_configuration
PutBucketAnalyticsConfigurationRequest[:payload_member] = PutBucketAnalyticsConfigurationRequest.member(:analytics_configuration)
PutBucketCorsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketCorsRequest.add_member(:cors_configuration, Shapes::ShapeRef.new(shape: CORSConfiguration, required: true, location_name: "CORSConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketCorsRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutBucketCorsRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutBucketCorsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketCorsRequest.struct_class = Types::PutBucketCorsRequest
PutBucketCorsRequest[:payload] = :cors_configuration
PutBucketCorsRequest[:payload_member] = PutBucketCorsRequest.member(:cors_configuration)
PutBucketEncryptionRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketEncryptionRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutBucketEncryptionRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutBucketEncryptionRequest.add_member(:server_side_encryption_configuration, Shapes::ShapeRef.new(shape: ServerSideEncryptionConfiguration, required: true, location_name: "ServerSideEncryptionConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketEncryptionRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketEncryptionRequest.struct_class = Types::PutBucketEncryptionRequest
PutBucketEncryptionRequest[:payload] = :server_side_encryption_configuration
PutBucketEncryptionRequest[:payload_member] = PutBucketEncryptionRequest.member(:server_side_encryption_configuration)
PutBucketIntelligentTieringConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketIntelligentTieringConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: IntelligentTieringId, required: true, location: "querystring", location_name: "id"))
PutBucketIntelligentTieringConfigurationRequest.add_member(:intelligent_tiering_configuration, Shapes::ShapeRef.new(shape: IntelligentTieringConfiguration, required: true, location_name: "IntelligentTieringConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketIntelligentTieringConfigurationRequest.struct_class = Types::PutBucketIntelligentTieringConfigurationRequest
PutBucketIntelligentTieringConfigurationRequest[:payload] = :intelligent_tiering_configuration
PutBucketIntelligentTieringConfigurationRequest[:payload_member] = PutBucketIntelligentTieringConfigurationRequest.member(:intelligent_tiering_configuration)
PutBucketInventoryConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketInventoryConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: InventoryId, required: true, location: "querystring", location_name: "id"))
PutBucketInventoryConfigurationRequest.add_member(:inventory_configuration, Shapes::ShapeRef.new(shape: InventoryConfiguration, required: true, location_name: "InventoryConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketInventoryConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketInventoryConfigurationRequest.struct_class = Types::PutBucketInventoryConfigurationRequest
PutBucketInventoryConfigurationRequest[:payload] = :inventory_configuration
PutBucketInventoryConfigurationRequest[:payload_member] = PutBucketInventoryConfigurationRequest.member(:inventory_configuration)
PutBucketLifecycleConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketLifecycleConfigurationRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutBucketLifecycleConfigurationRequest.add_member(:lifecycle_configuration, Shapes::ShapeRef.new(shape: BucketLifecycleConfiguration, location_name: "LifecycleConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketLifecycleConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketLifecycleConfigurationRequest.struct_class = Types::PutBucketLifecycleConfigurationRequest
PutBucketLifecycleConfigurationRequest[:payload] = :lifecycle_configuration
PutBucketLifecycleConfigurationRequest[:payload_member] = PutBucketLifecycleConfigurationRequest.member(:lifecycle_configuration)
PutBucketLifecycleRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketLifecycleRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutBucketLifecycleRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutBucketLifecycleRequest.add_member(:lifecycle_configuration, Shapes::ShapeRef.new(shape: LifecycleConfiguration, location_name: "LifecycleConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketLifecycleRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketLifecycleRequest.struct_class = Types::PutBucketLifecycleRequest
PutBucketLifecycleRequest[:payload] = :lifecycle_configuration
PutBucketLifecycleRequest[:payload_member] = PutBucketLifecycleRequest.member(:lifecycle_configuration)
PutBucketLoggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketLoggingRequest.add_member(:bucket_logging_status, Shapes::ShapeRef.new(shape: BucketLoggingStatus, required: true, location_name: "BucketLoggingStatus", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketLoggingRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutBucketLoggingRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutBucketLoggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketLoggingRequest.struct_class = Types::PutBucketLoggingRequest
PutBucketLoggingRequest[:payload] = :bucket_logging_status
PutBucketLoggingRequest[:payload_member] = PutBucketLoggingRequest.member(:bucket_logging_status)
PutBucketMetricsConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketMetricsConfigurationRequest.add_member(:id, Shapes::ShapeRef.new(shape: MetricsId, required: true, location: "querystring", location_name: "id"))
PutBucketMetricsConfigurationRequest.add_member(:metrics_configuration, Shapes::ShapeRef.new(shape: MetricsConfiguration, required: true, location_name: "MetricsConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketMetricsConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketMetricsConfigurationRequest.struct_class = Types::PutBucketMetricsConfigurationRequest
PutBucketMetricsConfigurationRequest[:payload] = :metrics_configuration
PutBucketMetricsConfigurationRequest[:payload_member] = PutBucketMetricsConfigurationRequest.member(:metrics_configuration)
PutBucketNotificationConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketNotificationConfigurationRequest.add_member(:notification_configuration, Shapes::ShapeRef.new(shape: NotificationConfiguration, required: true, location_name: "NotificationConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketNotificationConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketNotificationConfigurationRequest.add_member(:skip_destination_validation, Shapes::ShapeRef.new(shape: SkipValidation, location: "header", location_name: "x-amz-skip-destination-validation"))
PutBucketNotificationConfigurationRequest.struct_class = Types::PutBucketNotificationConfigurationRequest
PutBucketNotificationConfigurationRequest[:payload] = :notification_configuration
PutBucketNotificationConfigurationRequest[:payload_member] = PutBucketNotificationConfigurationRequest.member(:notification_configuration)
PutBucketNotificationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketNotificationRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutBucketNotificationRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutBucketNotificationRequest.add_member(:notification_configuration, Shapes::ShapeRef.new(shape: NotificationConfigurationDeprecated, required: true, location_name: "NotificationConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketNotificationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketNotificationRequest.struct_class = Types::PutBucketNotificationRequest
PutBucketNotificationRequest[:payload] = :notification_configuration
PutBucketNotificationRequest[:payload_member] = PutBucketNotificationRequest.member(:notification_configuration)
PutBucketOwnershipControlsRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketOwnershipControlsRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutBucketOwnershipControlsRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketOwnershipControlsRequest.add_member(:ownership_controls, Shapes::ShapeRef.new(shape: OwnershipControls, required: true, location_name: "OwnershipControls", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketOwnershipControlsRequest.struct_class = Types::PutBucketOwnershipControlsRequest
PutBucketOwnershipControlsRequest[:payload] = :ownership_controls
PutBucketOwnershipControlsRequest[:payload_member] = PutBucketOwnershipControlsRequest.member(:ownership_controls)
PutBucketPolicyRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketPolicyRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutBucketPolicyRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutBucketPolicyRequest.add_member(:confirm_remove_self_bucket_access, Shapes::ShapeRef.new(shape: ConfirmRemoveSelfBucketAccess, location: "header", location_name: "x-amz-confirm-remove-self-bucket-access"))
PutBucketPolicyRequest.add_member(:policy, Shapes::ShapeRef.new(shape: Policy, required: true, location_name: "Policy"))
PutBucketPolicyRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketPolicyRequest.struct_class = Types::PutBucketPolicyRequest
PutBucketPolicyRequest[:payload] = :policy
PutBucketPolicyRequest[:payload_member] = PutBucketPolicyRequest.member(:policy)
PutBucketReplicationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketReplicationRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutBucketReplicationRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutBucketReplicationRequest.add_member(:replication_configuration, Shapes::ShapeRef.new(shape: ReplicationConfiguration, required: true, location_name: "ReplicationConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketReplicationRequest.add_member(:token, Shapes::ShapeRef.new(shape: ObjectLockToken, location: "header", location_name: "x-amz-bucket-object-lock-token"))
PutBucketReplicationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketReplicationRequest.struct_class = Types::PutBucketReplicationRequest
PutBucketReplicationRequest[:payload] = :replication_configuration
PutBucketReplicationRequest[:payload_member] = PutBucketReplicationRequest.member(:replication_configuration)
PutBucketRequestPaymentRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketRequestPaymentRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutBucketRequestPaymentRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutBucketRequestPaymentRequest.add_member(:request_payment_configuration, Shapes::ShapeRef.new(shape: RequestPaymentConfiguration, required: true, location_name: "RequestPaymentConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketRequestPaymentRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketRequestPaymentRequest.struct_class = Types::PutBucketRequestPaymentRequest
PutBucketRequestPaymentRequest[:payload] = :request_payment_configuration
PutBucketRequestPaymentRequest[:payload_member] = PutBucketRequestPaymentRequest.member(:request_payment_configuration)
PutBucketTaggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketTaggingRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutBucketTaggingRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutBucketTaggingRequest.add_member(:tagging, Shapes::ShapeRef.new(shape: Tagging, required: true, location_name: "Tagging", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketTaggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketTaggingRequest.struct_class = Types::PutBucketTaggingRequest
PutBucketTaggingRequest[:payload] = :tagging
PutBucketTaggingRequest[:payload_member] = PutBucketTaggingRequest.member(:tagging)
PutBucketVersioningRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketVersioningRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutBucketVersioningRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutBucketVersioningRequest.add_member(:mfa, Shapes::ShapeRef.new(shape: MFA, location: "header", location_name: "x-amz-mfa"))
PutBucketVersioningRequest.add_member(:versioning_configuration, Shapes::ShapeRef.new(shape: VersioningConfiguration, required: true, location_name: "VersioningConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketVersioningRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketVersioningRequest.struct_class = Types::PutBucketVersioningRequest
PutBucketVersioningRequest[:payload] = :versioning_configuration
PutBucketVersioningRequest[:payload_member] = PutBucketVersioningRequest.member(:versioning_configuration)
PutBucketWebsiteRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutBucketWebsiteRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutBucketWebsiteRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutBucketWebsiteRequest.add_member(:website_configuration, Shapes::ShapeRef.new(shape: WebsiteConfiguration, required: true, location_name: "WebsiteConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutBucketWebsiteRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutBucketWebsiteRequest.struct_class = Types::PutBucketWebsiteRequest
PutBucketWebsiteRequest[:payload] = :website_configuration
PutBucketWebsiteRequest[:payload_member] = PutBucketWebsiteRequest.member(:website_configuration)
PutObjectAclOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
PutObjectAclOutput.struct_class = Types::PutObjectAclOutput
PutObjectAclRequest.add_member(:acl, Shapes::ShapeRef.new(shape: ObjectCannedACL, location: "header", location_name: "x-amz-acl"))
PutObjectAclRequest.add_member(:access_control_policy, Shapes::ShapeRef.new(shape: AccessControlPolicy, location_name: "AccessControlPolicy", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutObjectAclRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutObjectAclRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutObjectAclRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutObjectAclRequest.add_member(:grant_full_control, Shapes::ShapeRef.new(shape: GrantFullControl, location: "header", location_name: "x-amz-grant-full-control"))
PutObjectAclRequest.add_member(:grant_read, Shapes::ShapeRef.new(shape: GrantRead, location: "header", location_name: "x-amz-grant-read"))
PutObjectAclRequest.add_member(:grant_read_acp, Shapes::ShapeRef.new(shape: GrantReadACP, location: "header", location_name: "x-amz-grant-read-acp"))
PutObjectAclRequest.add_member(:grant_write, Shapes::ShapeRef.new(shape: GrantWrite, location: "header", location_name: "x-amz-grant-write"))
PutObjectAclRequest.add_member(:grant_write_acp, Shapes::ShapeRef.new(shape: GrantWriteACP, location: "header", location_name: "x-amz-grant-write-acp"))
PutObjectAclRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key", metadata: {"contextParam"=>{"name"=>"Key"}}))
PutObjectAclRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
PutObjectAclRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId"))
PutObjectAclRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutObjectAclRequest.struct_class = Types::PutObjectAclRequest
PutObjectAclRequest[:payload] = :access_control_policy
PutObjectAclRequest[:payload_member] = PutObjectAclRequest.member(:access_control_policy)
PutObjectLegalHoldOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
PutObjectLegalHoldOutput.struct_class = Types::PutObjectLegalHoldOutput
PutObjectLegalHoldRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutObjectLegalHoldRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key"))
PutObjectLegalHoldRequest.add_member(:legal_hold, Shapes::ShapeRef.new(shape: ObjectLockLegalHold, location_name: "LegalHold", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutObjectLegalHoldRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
PutObjectLegalHoldRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId"))
PutObjectLegalHoldRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutObjectLegalHoldRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutObjectLegalHoldRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutObjectLegalHoldRequest.struct_class = Types::PutObjectLegalHoldRequest
PutObjectLegalHoldRequest[:payload] = :legal_hold
PutObjectLegalHoldRequest[:payload_member] = PutObjectLegalHoldRequest.member(:legal_hold)
PutObjectLockConfigurationOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
PutObjectLockConfigurationOutput.struct_class = Types::PutObjectLockConfigurationOutput
PutObjectLockConfigurationRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutObjectLockConfigurationRequest.add_member(:object_lock_configuration, Shapes::ShapeRef.new(shape: ObjectLockConfiguration, location_name: "ObjectLockConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutObjectLockConfigurationRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
PutObjectLockConfigurationRequest.add_member(:token, Shapes::ShapeRef.new(shape: ObjectLockToken, location: "header", location_name: "x-amz-bucket-object-lock-token"))
PutObjectLockConfigurationRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutObjectLockConfigurationRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutObjectLockConfigurationRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutObjectLockConfigurationRequest.struct_class = Types::PutObjectLockConfigurationRequest
PutObjectLockConfigurationRequest[:payload] = :object_lock_configuration
PutObjectLockConfigurationRequest[:payload_member] = PutObjectLockConfigurationRequest.member(:object_lock_configuration)
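# PutObjectLockConfigurationRequest follows the same payload pattern: the
# lock document travels as the XML body while the request payer and the
# governance token are sent as x-amz-* headers. A minimal sketch, with s3
# an Aws::S3::Client and a hypothetical Object Lock enabled bucket:
#
#   s3.put_object_lock_configuration(
#     bucket: 'my-locked-bucket',
#     object_lock_configuration: {
#       object_lock_enabled: 'Enabled',
#       rule: { default_retention: { mode: 'COMPLIANCE', days: 30 } }
#     }
#   )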
PutObjectOutput.add_member(:expiration, Shapes::ShapeRef.new(shape: Expiration, location: "header", location_name: "x-amz-expiration"))
PutObjectOutput.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location: "header", location_name: "ETag"))
PutObjectOutput.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-checksum-crc32"))
PutObjectOutput.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-checksum-crc32c"))
PutObjectOutput.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-checksum-sha1"))
PutObjectOutput.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-checksum-sha256"))
PutObjectOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption"))
PutObjectOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id"))
PutObjectOutput.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm"))
PutObjectOutput.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5"))
PutObjectOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id"))
PutObjectOutput.add_member(:ssekms_encryption_context, Shapes::ShapeRef.new(shape: SSEKMSEncryptionContext, location: "header", location_name: "x-amz-server-side-encryption-context"))
PutObjectOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled"))
PutObjectOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
PutObjectOutput.struct_class = Types::PutObjectOutput
PutObjectRequest.add_member(:acl, Shapes::ShapeRef.new(shape: ObjectCannedACL, location: "header", location_name: "x-amz-acl"))
PutObjectRequest.add_member(:body, Shapes::ShapeRef.new(shape: Body, location_name: "Body", metadata: {"streaming"=>true}))
PutObjectRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutObjectRequest.add_member(:cache_control, Shapes::ShapeRef.new(shape: CacheControl, location: "header", location_name: "Cache-Control"))
PutObjectRequest.add_member(:content_disposition, Shapes::ShapeRef.new(shape: ContentDisposition, location: "header", location_name: "Content-Disposition"))
PutObjectRequest.add_member(:content_encoding, Shapes::ShapeRef.new(shape: ContentEncoding, location: "header", location_name: "Content-Encoding"))
PutObjectRequest.add_member(:content_language, Shapes::ShapeRef.new(shape: ContentLanguage, location: "header", location_name: "Content-Language"))
PutObjectRequest.add_member(:content_length, Shapes::ShapeRef.new(shape: ContentLength, location: "header", location_name: "Content-Length"))
PutObjectRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutObjectRequest.add_member(:content_type, Shapes::ShapeRef.new(shape: ContentType, location: "header", location_name: "Content-Type"))
PutObjectRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutObjectRequest.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-checksum-crc32"))
PutObjectRequest.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-checksum-crc32c"))
PutObjectRequest.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-checksum-sha1"))
PutObjectRequest.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-checksum-sha256"))
PutObjectRequest.add_member(:expires, Shapes::ShapeRef.new(shape: Expires, location: "header", location_name: "Expires"))
PutObjectRequest.add_member(:grant_full_control, Shapes::ShapeRef.new(shape: GrantFullControl, location: "header", location_name: "x-amz-grant-full-control"))
PutObjectRequest.add_member(:grant_read, Shapes::ShapeRef.new(shape: GrantRead, location: "header", location_name: "x-amz-grant-read"))
PutObjectRequest.add_member(:grant_read_acp, Shapes::ShapeRef.new(shape: GrantReadACP, location: "header", location_name: "x-amz-grant-read-acp"))
PutObjectRequest.add_member(:grant_write_acp, Shapes::ShapeRef.new(shape: GrantWriteACP, location: "header", location_name: "x-amz-grant-write-acp"))
PutObjectRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key", metadata: {"contextParam"=>{"name"=>"Key"}}))
PutObjectRequest.add_member(:metadata, Shapes::ShapeRef.new(shape: Metadata, location: "headers", location_name: "x-amz-meta-"))
PutObjectRequest.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption"))
PutObjectRequest.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location: "header", location_name: "x-amz-storage-class"))
PutObjectRequest.add_member(:website_redirect_location, Shapes::ShapeRef.new(shape: WebsiteRedirectLocation, location: "header", location_name: "x-amz-website-redirect-location"))
PutObjectRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm"))
PutObjectRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key"))
PutObjectRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5"))
PutObjectRequest.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id"))
PutObjectRequest.add_member(:ssekms_encryption_context, Shapes::ShapeRef.new(shape: SSEKMSEncryptionContext, location: "header", location_name: "x-amz-server-side-encryption-context"))
PutObjectRequest.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled"))
PutObjectRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
PutObjectRequest.add_member(:tagging, Shapes::ShapeRef.new(shape: TaggingHeader, location: "header", location_name: "x-amz-tagging"))
PutObjectRequest.add_member(:object_lock_mode, Shapes::ShapeRef.new(shape: ObjectLockMode, location: "header", location_name: "x-amz-object-lock-mode"))
PutObjectRequest.add_member(:object_lock_retain_until_date, Shapes::ShapeRef.new(shape: ObjectLockRetainUntilDate, location: "header", location_name: "x-amz-object-lock-retain-until-date"))
PutObjectRequest.add_member(:object_lock_legal_hold_status, Shapes::ShapeRef.new(shape: ObjectLockLegalHoldStatus, location: "header", location_name: "x-amz-object-lock-legal-hold"))
PutObjectRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutObjectRequest.struct_class = Types::PutObjectRequest
PutObjectRequest[:payload] = :body
PutObjectRequest[:payload_member] = PutObjectRequest.member(:body)
PutObjectRetentionOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
PutObjectRetentionOutput.struct_class = Types::PutObjectRetentionOutput
PutObjectRetentionRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutObjectRetentionRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key"))
PutObjectRetentionRequest.add_member(:retention, Shapes::ShapeRef.new(shape: ObjectLockRetention, location_name: "Retention", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutObjectRetentionRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
PutObjectRetentionRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId"))
PutObjectRetentionRequest.add_member(:bypass_governance_retention, Shapes::ShapeRef.new(shape: BypassGovernanceRetention, location: "header", location_name: "x-amz-bypass-governance-retention"))
PutObjectRetentionRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutObjectRetentionRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutObjectRetentionRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutObjectRetentionRequest.struct_class = Types::PutObjectRetentionRequest
PutObjectRetentionRequest[:payload] = :retention
PutObjectRetentionRequest[:payload_member] = PutObjectRetentionRequest.member(:retention)
PutObjectTaggingOutput.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-version-id"))
PutObjectTaggingOutput.struct_class = Types::PutObjectTaggingOutput
PutObjectTaggingRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutObjectTaggingRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key"))
PutObjectTaggingRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId"))
PutObjectTaggingRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutObjectTaggingRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutObjectTaggingRequest.add_member(:tagging, Shapes::ShapeRef.new(shape: Tagging, required: true, location_name: "Tagging", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutObjectTaggingRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutObjectTaggingRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
PutObjectTaggingRequest.struct_class = Types::PutObjectTaggingRequest
PutObjectTaggingRequest[:payload] = :tagging
PutObjectTaggingRequest[:payload_member] = PutObjectTaggingRequest.member(:tagging)
PutPublicAccessBlockRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
PutPublicAccessBlockRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5"))
PutPublicAccessBlockRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
PutPublicAccessBlockRequest.add_member(:public_access_block_configuration, Shapes::ShapeRef.new(shape: PublicAccessBlockConfiguration, required: true, location_name: "PublicAccessBlockConfiguration", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
PutPublicAccessBlockRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
PutPublicAccessBlockRequest.struct_class = Types::PutPublicAccessBlockRequest
PutPublicAccessBlockRequest[:payload] = :public_access_block_configuration
PutPublicAccessBlockRequest[:payload_member] = PutPublicAccessBlockRequest.member(:public_access_block_configuration)
QueueConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: NotificationId, location_name: "Id"))
QueueConfiguration.add_member(:queue_arn, Shapes::ShapeRef.new(shape: QueueArn, required: true, location_name: "Queue"))
QueueConfiguration.add_member(:events, Shapes::ShapeRef.new(shape: EventList, required: true, location_name: "Event"))
QueueConfiguration.add_member(:filter, Shapes::ShapeRef.new(shape: NotificationConfigurationFilter, location_name: "Filter"))
QueueConfiguration.struct_class = Types::QueueConfiguration
QueueConfigurationDeprecated.add_member(:id, Shapes::ShapeRef.new(shape: NotificationId, location_name: "Id"))
QueueConfigurationDeprecated.add_member(:event, Shapes::ShapeRef.new(shape: Event, deprecated: true, location_name: "Event"))
QueueConfigurationDeprecated.add_member(:events, Shapes::ShapeRef.new(shape: EventList, location_name: "Event"))
QueueConfigurationDeprecated.add_member(:queue, Shapes::ShapeRef.new(shape: QueueArn, location_name: "Queue"))
QueueConfigurationDeprecated.struct_class = Types::QueueConfigurationDeprecated
QueueConfigurationList.member = Shapes::ShapeRef.new(shape: QueueConfiguration)
RecordsEvent.add_member(:payload, Shapes::ShapeRef.new(shape: Body, eventpayload: true, eventpayload_type: 'blob', location_name: "Payload", metadata: {"eventpayload"=>true}))
RecordsEvent.struct_class = Types::RecordsEvent
Redirect.add_member(:host_name, Shapes::ShapeRef.new(shape: HostName, location_name: "HostName"))
Redirect.add_member(:http_redirect_code, Shapes::ShapeRef.new(shape: HttpRedirectCode, location_name: "HttpRedirectCode"))
Redirect.add_member(:protocol, Shapes::ShapeRef.new(shape: Protocol, location_name: "Protocol"))
Redirect.add_member(:replace_key_prefix_with, Shapes::ShapeRef.new(shape: ReplaceKeyPrefixWith, location_name: "ReplaceKeyPrefixWith"))
Redirect.add_member(:replace_key_with, Shapes::ShapeRef.new(shape: ReplaceKeyWith, location_name: "ReplaceKeyWith"))
Redirect.struct_class = Types::Redirect
RedirectAllRequestsTo.add_member(:host_name, Shapes::ShapeRef.new(shape: HostName, required: true, location_name: "HostName"))
RedirectAllRequestsTo.add_member(:protocol, Shapes::ShapeRef.new(shape: Protocol, location_name: "Protocol"))
RedirectAllRequestsTo.struct_class = Types::RedirectAllRequestsTo
ReplicaModifications.add_member(:status, Shapes::ShapeRef.new(shape: ReplicaModificationsStatus, required: true, location_name: "Status"))
ReplicaModifications.struct_class = Types::ReplicaModifications
ReplicationConfiguration.add_member(:role, Shapes::ShapeRef.new(shape: Role, required: true, location_name: "Role"))
ReplicationConfiguration.add_member(:rules, Shapes::ShapeRef.new(shape: ReplicationRules, required: true, location_name: "Rule"))
ReplicationConfiguration.struct_class = Types::ReplicationConfiguration
ReplicationRule.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID"))
ReplicationRule.add_member(:priority, Shapes::ShapeRef.new(shape: Priority, location_name: "Priority"))
ReplicationRule.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, deprecated: true, location_name: "Prefix"))
ReplicationRule.add_member(:filter, Shapes::ShapeRef.new(shape: ReplicationRuleFilter, location_name: "Filter"))
ReplicationRule.add_member(:status, Shapes::ShapeRef.new(shape: ReplicationRuleStatus, required: true, location_name: "Status"))
ReplicationRule.add_member(:source_selection_criteria, Shapes::ShapeRef.new(shape: SourceSelectionCriteria, location_name: "SourceSelectionCriteria"))
ReplicationRule.add_member(:existing_object_replication, Shapes::ShapeRef.new(shape: ExistingObjectReplication, location_name: "ExistingObjectReplication"))
ReplicationRule.add_member(:destination, Shapes::ShapeRef.new(shape: Destination, required: true, location_name: "Destination"))
ReplicationRule.add_member(:delete_marker_replication, Shapes::ShapeRef.new(shape: DeleteMarkerReplication, location_name: "DeleteMarkerReplication"))
ReplicationRule.struct_class = Types::ReplicationRule
ReplicationRuleAndOperator.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
ReplicationRuleAndOperator.add_member(:tags, Shapes::ShapeRef.new(shape: TagSet, location_name: "Tag", metadata: {"flattened"=>true}))
ReplicationRuleAndOperator.struct_class = Types::ReplicationRuleAndOperator
ReplicationRuleFilter.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, location_name: "Prefix"))
ReplicationRuleFilter.add_member(:tag, Shapes::ShapeRef.new(shape: Tag, location_name: "Tag"))
ReplicationRuleFilter.add_member(:and, Shapes::ShapeRef.new(shape: ReplicationRuleAndOperator, location_name: "And"))
ReplicationRuleFilter.struct_class = Types::ReplicationRuleFilter
ReplicationRules.member = Shapes::ShapeRef.new(shape: ReplicationRule)
ReplicationTime.add_member(:status, Shapes::ShapeRef.new(shape: ReplicationTimeStatus, required: true, location_name: "Status"))
ReplicationTime.add_member(:time, Shapes::ShapeRef.new(shape: ReplicationTimeValue, required: true, location_name: "Time"))
ReplicationTime.struct_class = Types::ReplicationTime
ReplicationTimeValue.add_member(:minutes, Shapes::ShapeRef.new(shape: Minutes, location_name: "Minutes"))
ReplicationTimeValue.struct_class = Types::ReplicationTimeValue
RequestPaymentConfiguration.add_member(:payer, Shapes::ShapeRef.new(shape: Payer, required: true, location_name: "Payer"))
RequestPaymentConfiguration.struct_class = Types::RequestPaymentConfiguration
RequestProgress.add_member(:enabled, Shapes::ShapeRef.new(shape: EnableRequestProgress, location_name: "Enabled"))
RequestProgress.struct_class = Types::RequestProgress
RestoreObjectOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged"))
RestoreObjectOutput.add_member(:restore_output_path, Shapes::ShapeRef.new(shape: RestoreOutputPath, location: "header", location_name: "x-amz-restore-output-path"))
RestoreObjectOutput.struct_class = Types::RestoreObjectOutput
RestoreObjectRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
RestoreObjectRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key"))
RestoreObjectRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "querystring", location_name: "versionId"))
RestoreObjectRequest.add_member(:restore_request, Shapes::ShapeRef.new(shape: RestoreRequest, location_name: "RestoreRequest", metadata: {"xmlNamespace"=>{"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"}}))
RestoreObjectRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer"))
RestoreObjectRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm"))
RestoreObjectRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
RestoreObjectRequest.struct_class = Types::RestoreObjectRequest
RestoreObjectRequest[:payload] = :restore_request
RestoreObjectRequest[:payload_member] = RestoreObjectRequest.member(:restore_request)
RestoreRequest.add_member(:days, Shapes::ShapeRef.new(shape: Days, location_name: "Days"))
RestoreRequest.add_member(:glacier_job_parameters, Shapes::ShapeRef.new(shape: GlacierJobParameters, location_name: "GlacierJobParameters"))
RestoreRequest.add_member(:type, Shapes::ShapeRef.new(shape: RestoreRequestType, location_name: "Type"))
RestoreRequest.add_member(:tier, Shapes::ShapeRef.new(shape: Tier, location_name: "Tier"))
RestoreRequest.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "Description"))
RestoreRequest.add_member(:select_parameters, Shapes::ShapeRef.new(shape: SelectParameters, location_name: "SelectParameters"))
RestoreRequest.add_member(:output_location, Shapes::ShapeRef.new(shape: OutputLocation, location_name: "OutputLocation"))
RestoreRequest.struct_class = Types::RestoreRequest
RestoreStatus.add_member(:is_restore_in_progress, Shapes::ShapeRef.new(shape: IsRestoreInProgress, location_name: "IsRestoreInProgress"))
RestoreStatus.add_member(:restore_expiry_date, Shapes::ShapeRef.new(shape: RestoreExpiryDate, location_name: "RestoreExpiryDate"))
RestoreStatus.struct_class = Types::RestoreStatus
RoutingRule.add_member(:condition, Shapes::ShapeRef.new(shape: Condition, location_name: "Condition"))
RoutingRule.add_member(:redirect, Shapes::ShapeRef.new(shape: Redirect, required: true, location_name: "Redirect"))
RoutingRule.struct_class = Types::RoutingRule
RoutingRules.member = Shapes::ShapeRef.new(shape: RoutingRule, location_name: "RoutingRule")
Rule.add_member(:expiration, Shapes::ShapeRef.new(shape: LifecycleExpiration, location_name: "Expiration"))
Rule.add_member(:id, Shapes::ShapeRef.new(shape: ID, location_name: "ID"))
Rule.add_member(:prefix, Shapes::ShapeRef.new(shape: Prefix, required: true, location_name: "Prefix"))
Rule.add_member(:status, Shapes::ShapeRef.new(shape: ExpirationStatus, required: true, location_name: "Status"))
Rule.add_member(:transition, Shapes::ShapeRef.new(shape: Transition, location_name: "Transition"))
Rule.add_member(:noncurrent_version_transition, Shapes::ShapeRef.new(shape: NoncurrentVersionTransition, location_name: "NoncurrentVersionTransition"))
Rule.add_member(:noncurrent_version_expiration, Shapes::ShapeRef.new(shape: NoncurrentVersionExpiration, location_name: "NoncurrentVersionExpiration"))
Rule.add_member(:abort_incomplete_multipart_upload, Shapes::ShapeRef.new(shape: AbortIncompleteMultipartUpload, location_name: "AbortIncompleteMultipartUpload"))
Rule.struct_class = Types::Rule
Rules.member = Shapes::ShapeRef.new(shape: Rule)
S3KeyFilter.add_member(:filter_rules, Shapes::ShapeRef.new(shape: FilterRuleList, location_name: "FilterRule"))
S3KeyFilter.struct_class = Types::S3KeyFilter
S3Location.add_member(:bucket_name, Shapes::ShapeRef.new(shape: BucketName, required: true, location_name: "BucketName"))
S3Location.add_member(:prefix, Shapes::ShapeRef.new(shape: LocationPrefix, required: true, location_name: "Prefix"))
S3Location.add_member(:encryption, Shapes::ShapeRef.new(shape: Encryption, location_name: "Encryption"))
S3Location.add_member(:canned_acl, Shapes::ShapeRef.new(shape: ObjectCannedACL, location_name: "CannedACL"))
S3Location.add_member(:access_control_list, Shapes::ShapeRef.new(shape: Grants, location_name: "AccessControlList"))
S3Location.add_member(:tagging, Shapes::ShapeRef.new(shape: Tagging, location_name: "Tagging"))
S3Location.add_member(:user_metadata, Shapes::ShapeRef.new(shape: UserMetadata, location_name: "UserMetadata"))
S3Location.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location_name: "StorageClass"))
S3Location.struct_class = Types::S3Location
SSEKMS.add_member(:key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, required: true, location_name: "KeyId"))
SSEKMS.struct_class = Types::SSEKMS
SSES3.struct_class = Types::SSES3
ScanRange.add_member(:start, Shapes::ShapeRef.new(shape: Start, location_name: "Start"))
ScanRange.add_member(:end, Shapes::ShapeRef.new(shape: End, location_name: "End"))
ScanRange.struct_class = Types::ScanRange
SelectObjectContentEventStream.add_member(:records, Shapes::ShapeRef.new(shape: RecordsEvent, event: true, location_name: "Records"))
SelectObjectContentEventStream.add_member(:stats, Shapes::ShapeRef.new(shape: StatsEvent, event: true, location_name: "Stats"))
SelectObjectContentEventStream.add_member(:progress, Shapes::ShapeRef.new(shape: ProgressEvent, event: true, location_name: "Progress"))
SelectObjectContentEventStream.add_member(:cont, Shapes::ShapeRef.new(shape: ContinuationEvent, event: true, location_name: "Cont"))
SelectObjectContentEventStream.add_member(:end, Shapes::ShapeRef.new(shape: EndEvent, event: true, location_name: "End"))
SelectObjectContentEventStream.struct_class = Types::SelectObjectContentEventStream
SelectObjectContentOutput.add_member(:payload, Shapes::ShapeRef.new(shape: SelectObjectContentEventStream, eventstream: true, location_name: "Payload"))
SelectObjectContentOutput.struct_class = Types::SelectObjectContentOutput
SelectObjectContentOutput[:payload] = :payload
SelectObjectContentOutput[:payload_member] = SelectObjectContentOutput.member(:payload)
SelectObjectContentRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}}))
SelectObjectContentRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key"))
SelectObjectContentRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm"))
SelectObjectContentRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key"))
SelectObjectContentRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5"))
SelectObjectContentRequest.add_member(:expression, Shapes::ShapeRef.new(shape: Expression, required: true, location_name: "Expression"))
SelectObjectContentRequest.add_member(:expression_type, Shapes::ShapeRef.new(shape: ExpressionType, required: true, location_name: "ExpressionType"))
SelectObjectContentRequest.add_member(:request_progress, Shapes::ShapeRef.new(shape: RequestProgress, location_name: "RequestProgress"))
SelectObjectContentRequest.add_member(:input_serialization, Shapes::ShapeRef.new(shape: InputSerialization, required: true, location_name: "InputSerialization"))
SelectObjectContentRequest.add_member(:output_serialization, Shapes::ShapeRef.new(shape: OutputSerialization, required: true, location_name: "OutputSerialization"))
SelectObjectContentRequest.add_member(:scan_range, Shapes::ShapeRef.new(shape: ScanRange, location_name: "ScanRange"))
SelectObjectContentRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner"))
SelectObjectContentRequest.struct_class = Types::SelectObjectContentRequest
SelectParameters.add_member(:input_serialization, Shapes::ShapeRef.new(shape: InputSerialization, required: true, location_name: "InputSerialization"))
SelectParameters.add_member(:expression_type, Shapes::ShapeRef.new(shape: ExpressionType, required: true, location_name: "ExpressionType"))
SelectParameters.add_member(:expression, Shapes::ShapeRef.new(shape: Expression, required: true, location_name: "Expression"))
SelectParameters.add_member(:output_serialization, Shapes::ShapeRef.new(shape: OutputSerialization, required: true, location_name: "OutputSerialization"))
SelectParameters.struct_class = Types::SelectParameters
ServerSideEncryptionByDefault.add_member(:sse_algorithm, Shapes::ShapeRef.new(shape: ServerSideEncryption, required: true, location_name: "SSEAlgorithm"))
ServerSideEncryptionByDefault.add_member(:kms_master_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location_name: "KMSMasterKeyID"))
ServerSideEncryptionByDefault.struct_class = Types::ServerSideEncryptionByDefault
ServerSideEncryptionConfiguration.add_member(:rules, Shapes::ShapeRef.new(shape: ServerSideEncryptionRules, required: true, location_name: "Rule"))
ServerSideEncryptionConfiguration.struct_class = Types::ServerSideEncryptionConfiguration
ServerSideEncryptionRule.add_member(:apply_server_side_encryption_by_default, Shapes::ShapeRef.new(shape: ServerSideEncryptionByDefault, location_name: "ApplyServerSideEncryptionByDefault")) ServerSideEncryptionRule.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location_name: "BucketKeyEnabled")) ServerSideEncryptionRule.struct_class = Types::ServerSideEncryptionRule ServerSideEncryptionRules.member = Shapes::ShapeRef.new(shape: ServerSideEncryptionRule) SessionCredentials.add_member(:access_key_id, Shapes::ShapeRef.new(shape: AccessKeyIdValue, required: true, location_name: "AccessKeyId")) SessionCredentials.add_member(:secret_access_key, Shapes::ShapeRef.new(shape: SessionCredentialValue, required: true, location_name: "SecretAccessKey")) SessionCredentials.add_member(:session_token, Shapes::ShapeRef.new(shape: SessionCredentialValue, required: true, location_name: "SessionToken")) SessionCredentials.add_member(:expiration, Shapes::ShapeRef.new(shape: SessionExpiration, required: true, location_name: "Expiration")) SessionCredentials.struct_class = Types::SessionCredentials SimplePrefix.struct_class = Types::SimplePrefix SourceSelectionCriteria.add_member(:sse_kms_encrypted_objects, Shapes::ShapeRef.new(shape: SseKmsEncryptedObjects, location_name: "SseKmsEncryptedObjects")) SourceSelectionCriteria.add_member(:replica_modifications, Shapes::ShapeRef.new(shape: ReplicaModifications, location_name: "ReplicaModifications")) SourceSelectionCriteria.struct_class = Types::SourceSelectionCriteria SseKmsEncryptedObjects.add_member(:status, Shapes::ShapeRef.new(shape: SseKmsEncryptedObjectsStatus, required: true, location_name: "Status")) SseKmsEncryptedObjects.struct_class = Types::SseKmsEncryptedObjects Stats.add_member(:bytes_scanned, Shapes::ShapeRef.new(shape: BytesScanned, location_name: "BytesScanned")) Stats.add_member(:bytes_processed, Shapes::ShapeRef.new(shape: BytesProcessed, location_name: "BytesProcessed")) Stats.add_member(:bytes_returned, Shapes::ShapeRef.new(shape: BytesReturned, location_name: "BytesReturned")) Stats.struct_class = Types::Stats StatsEvent.add_member(:details, Shapes::ShapeRef.new(shape: Stats, eventpayload: true, eventpayload_type: 'structure', location_name: "Details", metadata: {"eventpayload"=>true})) StatsEvent.struct_class = Types::StatsEvent StorageClassAnalysis.add_member(:data_export, Shapes::ShapeRef.new(shape: StorageClassAnalysisDataExport, location_name: "DataExport")) StorageClassAnalysis.struct_class = Types::StorageClassAnalysis StorageClassAnalysisDataExport.add_member(:output_schema_version, Shapes::ShapeRef.new(shape: StorageClassAnalysisSchemaVersion, required: true, location_name: "OutputSchemaVersion")) StorageClassAnalysisDataExport.add_member(:destination, Shapes::ShapeRef.new(shape: AnalyticsExportDestination, required: true, location_name: "Destination")) StorageClassAnalysisDataExport.struct_class = Types::StorageClassAnalysisDataExport Tag.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location_name: "Key")) Tag.add_member(:value, Shapes::ShapeRef.new(shape: Value, required: true, location_name: "Value")) Tag.struct_class = Types::Tag TagSet.member = Shapes::ShapeRef.new(shape: Tag, location_name: "Tag") Tagging.add_member(:tag_set, Shapes::ShapeRef.new(shape: TagSet, required: true, location_name: "TagSet")) Tagging.struct_class = Types::Tagging TargetGrant.add_member(:grantee, Shapes::ShapeRef.new(shape: Grantee, location_name: "Grantee")) TargetGrant.add_member(:permission, 
Shapes::ShapeRef.new(shape: BucketLogsPermission, location_name: "Permission")) TargetGrant.struct_class = Types::TargetGrant TargetGrants.member = Shapes::ShapeRef.new(shape: TargetGrant, location_name: "Grant") TargetObjectKeyFormat.add_member(:simple_prefix, Shapes::ShapeRef.new(shape: SimplePrefix, location_name: "SimplePrefix")) TargetObjectKeyFormat.add_member(:partitioned_prefix, Shapes::ShapeRef.new(shape: PartitionedPrefix, location_name: "PartitionedPrefix")) TargetObjectKeyFormat.struct_class = Types::TargetObjectKeyFormat Tiering.add_member(:days, Shapes::ShapeRef.new(shape: IntelligentTieringDays, required: true, location_name: "Days")) Tiering.add_member(:access_tier, Shapes::ShapeRef.new(shape: IntelligentTieringAccessTier, required: true, location_name: "AccessTier")) Tiering.struct_class = Types::Tiering TieringList.member = Shapes::ShapeRef.new(shape: Tiering) TopicConfiguration.add_member(:id, Shapes::ShapeRef.new(shape: NotificationId, location_name: "Id")) TopicConfiguration.add_member(:topic_arn, Shapes::ShapeRef.new(shape: TopicArn, required: true, location_name: "Topic")) TopicConfiguration.add_member(:events, Shapes::ShapeRef.new(shape: EventList, required: true, location_name: "Event")) TopicConfiguration.add_member(:filter, Shapes::ShapeRef.new(shape: NotificationConfigurationFilter, location_name: "Filter")) TopicConfiguration.struct_class = Types::TopicConfiguration TopicConfigurationDeprecated.add_member(:id, Shapes::ShapeRef.new(shape: NotificationId, location_name: "Id")) TopicConfigurationDeprecated.add_member(:events, Shapes::ShapeRef.new(shape: EventList, location_name: "Event")) TopicConfigurationDeprecated.add_member(:event, Shapes::ShapeRef.new(shape: Event, deprecated: true, location_name: "Event")) TopicConfigurationDeprecated.add_member(:topic, Shapes::ShapeRef.new(shape: TopicArn, location_name: "Topic")) TopicConfigurationDeprecated.struct_class = Types::TopicConfigurationDeprecated TopicConfigurationList.member = Shapes::ShapeRef.new(shape: TopicConfiguration) Transition.add_member(:date, Shapes::ShapeRef.new(shape: Date, location_name: "Date")) Transition.add_member(:days, Shapes::ShapeRef.new(shape: Days, location_name: "Days")) Transition.add_member(:storage_class, Shapes::ShapeRef.new(shape: TransitionStorageClass, location_name: "StorageClass")) Transition.struct_class = Types::Transition TransitionList.member = Shapes::ShapeRef.new(shape: Transition) UploadPartCopyOutput.add_member(:copy_source_version_id, Shapes::ShapeRef.new(shape: CopySourceVersionId, location: "header", location_name: "x-amz-copy-source-version-id")) UploadPartCopyOutput.add_member(:copy_part_result, Shapes::ShapeRef.new(shape: CopyPartResult, location_name: "CopyPartResult")) UploadPartCopyOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) UploadPartCopyOutput.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) UploadPartCopyOutput.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) UploadPartCopyOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) UploadPartCopyOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: 
BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled")) UploadPartCopyOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) UploadPartCopyOutput.struct_class = Types::UploadPartCopyOutput UploadPartCopyOutput[:payload] = :copy_part_result UploadPartCopyOutput[:payload_member] = UploadPartCopyOutput.member(:copy_part_result) UploadPartCopyRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) UploadPartCopyRequest.add_member(:copy_source, Shapes::ShapeRef.new(shape: CopySource, required: true, location: "header", location_name: "x-amz-copy-source")) UploadPartCopyRequest.add_member(:copy_source_if_match, Shapes::ShapeRef.new(shape: CopySourceIfMatch, location: "header", location_name: "x-amz-copy-source-if-match")) UploadPartCopyRequest.add_member(:copy_source_if_modified_since, Shapes::ShapeRef.new(shape: CopySourceIfModifiedSince, location: "header", location_name: "x-amz-copy-source-if-modified-since")) UploadPartCopyRequest.add_member(:copy_source_if_none_match, Shapes::ShapeRef.new(shape: CopySourceIfNoneMatch, location: "header", location_name: "x-amz-copy-source-if-none-match")) UploadPartCopyRequest.add_member(:copy_source_if_unmodified_since, Shapes::ShapeRef.new(shape: CopySourceIfUnmodifiedSince, location: "header", location_name: "x-amz-copy-source-if-unmodified-since")) UploadPartCopyRequest.add_member(:copy_source_range, Shapes::ShapeRef.new(shape: CopySourceRange, location: "header", location_name: "x-amz-copy-source-range")) UploadPartCopyRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key")) UploadPartCopyRequest.add_member(:part_number, Shapes::ShapeRef.new(shape: PartNumber, required: true, location: "querystring", location_name: "partNumber")) UploadPartCopyRequest.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, required: true, location: "querystring", location_name: "uploadId")) UploadPartCopyRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) UploadPartCopyRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key")) UploadPartCopyRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) UploadPartCopyRequest.add_member(:copy_source_sse_customer_algorithm, Shapes::ShapeRef.new(shape: CopySourceSSECustomerAlgorithm, location: "header", location_name: "x-amz-copy-source-server-side-encryption-customer-algorithm")) UploadPartCopyRequest.add_member(:copy_source_sse_customer_key, Shapes::ShapeRef.new(shape: CopySourceSSECustomerKey, location: "header", location_name: "x-amz-copy-source-server-side-encryption-customer-key")) UploadPartCopyRequest.add_member(:copy_source_sse_customer_key_md5, Shapes::ShapeRef.new(shape: CopySourceSSECustomerKeyMD5, location: "header", location_name: "x-amz-copy-source-server-side-encryption-customer-key-MD5")) UploadPartCopyRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) 
UploadPartCopyRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) UploadPartCopyRequest.add_member(:expected_source_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-source-expected-bucket-owner")) UploadPartCopyRequest.struct_class = Types::UploadPartCopyRequest UploadPartOutput.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-server-side-encryption")) UploadPartOutput.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location: "header", location_name: "ETag")) UploadPartOutput.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-checksum-crc32")) UploadPartOutput.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-checksum-crc32c")) UploadPartOutput.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-checksum-sha1")) UploadPartOutput.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-checksum-sha256")) UploadPartOutput.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) UploadPartOutput.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) UploadPartOutput.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-server-side-encryption-aws-kms-key-id")) UploadPartOutput.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-server-side-encryption-bucket-key-enabled")) UploadPartOutput.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-request-charged")) UploadPartOutput.struct_class = Types::UploadPartOutput UploadPartRequest.add_member(:body, Shapes::ShapeRef.new(shape: Body, location_name: "Body", metadata: {"streaming"=>true})) UploadPartRequest.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location: "uri", location_name: "Bucket", metadata: {"contextParam"=>{"name"=>"Bucket"}})) UploadPartRequest.add_member(:content_length, Shapes::ShapeRef.new(shape: ContentLength, location: "header", location_name: "Content-Length")) UploadPartRequest.add_member(:content_md5, Shapes::ShapeRef.new(shape: ContentMD5, location: "header", location_name: "Content-MD5")) UploadPartRequest.add_member(:checksum_algorithm, Shapes::ShapeRef.new(shape: ChecksumAlgorithm, location: "header", location_name: "x-amz-sdk-checksum-algorithm")) UploadPartRequest.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-checksum-crc32")) UploadPartRequest.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-checksum-crc32c")) UploadPartRequest.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-checksum-sha1")) UploadPartRequest.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: 
"x-amz-checksum-sha256")) UploadPartRequest.add_member(:key, Shapes::ShapeRef.new(shape: ObjectKey, required: true, location: "uri", location_name: "Key", metadata: {"contextParam"=>{"name"=>"Key"}})) UploadPartRequest.add_member(:part_number, Shapes::ShapeRef.new(shape: PartNumber, required: true, location: "querystring", location_name: "partNumber")) UploadPartRequest.add_member(:upload_id, Shapes::ShapeRef.new(shape: MultipartUploadId, required: true, location: "querystring", location_name: "uploadId")) UploadPartRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-server-side-encryption-customer-algorithm")) UploadPartRequest.add_member(:sse_customer_key, Shapes::ShapeRef.new(shape: SSECustomerKey, location: "header", location_name: "x-amz-server-side-encryption-customer-key")) UploadPartRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-server-side-encryption-customer-key-MD5")) UploadPartRequest.add_member(:request_payer, Shapes::ShapeRef.new(shape: RequestPayer, location: "header", location_name: "x-amz-request-payer")) UploadPartRequest.add_member(:expected_bucket_owner, Shapes::ShapeRef.new(shape: AccountId, location: "header", location_name: "x-amz-expected-bucket-owner")) UploadPartRequest.struct_class = Types::UploadPartRequest UploadPartRequest[:payload] = :body UploadPartRequest[:payload_member] = UploadPartRequest.member(:body) UserMetadata.member = Shapes::ShapeRef.new(shape: MetadataEntry, location_name: "MetadataEntry") VersioningConfiguration.add_member(:mfa_delete, Shapes::ShapeRef.new(shape: MFADelete, location_name: "MfaDelete")) VersioningConfiguration.add_member(:status, Shapes::ShapeRef.new(shape: BucketVersioningStatus, location_name: "Status")) VersioningConfiguration.struct_class = Types::VersioningConfiguration WebsiteConfiguration.add_member(:error_document, Shapes::ShapeRef.new(shape: ErrorDocument, location_name: "ErrorDocument")) WebsiteConfiguration.add_member(:index_document, Shapes::ShapeRef.new(shape: IndexDocument, location_name: "IndexDocument")) WebsiteConfiguration.add_member(:redirect_all_requests_to, Shapes::ShapeRef.new(shape: RedirectAllRequestsTo, location_name: "RedirectAllRequestsTo")) WebsiteConfiguration.add_member(:routing_rules, Shapes::ShapeRef.new(shape: RoutingRules, location_name: "RoutingRules")) WebsiteConfiguration.struct_class = Types::WebsiteConfiguration WriteGetObjectResponseRequest.add_member(:request_route, Shapes::ShapeRef.new(shape: RequestRoute, required: true, location: "header", location_name: "x-amz-request-route", metadata: {"hostLabel"=>true, "hostLabelName"=>"RequestRoute"})) WriteGetObjectResponseRequest.add_member(:request_token, Shapes::ShapeRef.new(shape: RequestToken, required: true, location: "header", location_name: "x-amz-request-token")) WriteGetObjectResponseRequest.add_member(:body, Shapes::ShapeRef.new(shape: Body, location_name: "Body", metadata: {"streaming"=>true})) WriteGetObjectResponseRequest.add_member(:status_code, Shapes::ShapeRef.new(shape: GetObjectResponseStatusCode, location: "header", location_name: "x-amz-fwd-status")) WriteGetObjectResponseRequest.add_member(:error_code, Shapes::ShapeRef.new(shape: ErrorCode, location: "header", location_name: "x-amz-fwd-error-code")) WriteGetObjectResponseRequest.add_member(:error_message, Shapes::ShapeRef.new(shape: ErrorMessage, location: "header", location_name: "x-amz-fwd-error-message")) 
WriteGetObjectResponseRequest.add_member(:accept_ranges, Shapes::ShapeRef.new(shape: AcceptRanges, location: "header", location_name: "x-amz-fwd-header-accept-ranges")) WriteGetObjectResponseRequest.add_member(:cache_control, Shapes::ShapeRef.new(shape: CacheControl, location: "header", location_name: "x-amz-fwd-header-Cache-Control")) WriteGetObjectResponseRequest.add_member(:content_disposition, Shapes::ShapeRef.new(shape: ContentDisposition, location: "header", location_name: "x-amz-fwd-header-Content-Disposition")) WriteGetObjectResponseRequest.add_member(:content_encoding, Shapes::ShapeRef.new(shape: ContentEncoding, location: "header", location_name: "x-amz-fwd-header-Content-Encoding")) WriteGetObjectResponseRequest.add_member(:content_language, Shapes::ShapeRef.new(shape: ContentLanguage, location: "header", location_name: "x-amz-fwd-header-Content-Language")) WriteGetObjectResponseRequest.add_member(:content_length, Shapes::ShapeRef.new(shape: ContentLength, location: "header", location_name: "Content-Length")) WriteGetObjectResponseRequest.add_member(:content_range, Shapes::ShapeRef.new(shape: ContentRange, location: "header", location_name: "x-amz-fwd-header-Content-Range")) WriteGetObjectResponseRequest.add_member(:content_type, Shapes::ShapeRef.new(shape: ContentType, location: "header", location_name: "x-amz-fwd-header-Content-Type")) WriteGetObjectResponseRequest.add_member(:checksum_crc32, Shapes::ShapeRef.new(shape: ChecksumCRC32, location: "header", location_name: "x-amz-fwd-header-x-amz-checksum-crc32")) WriteGetObjectResponseRequest.add_member(:checksum_crc32c, Shapes::ShapeRef.new(shape: ChecksumCRC32C, location: "header", location_name: "x-amz-fwd-header-x-amz-checksum-crc32c")) WriteGetObjectResponseRequest.add_member(:checksum_sha1, Shapes::ShapeRef.new(shape: ChecksumSHA1, location: "header", location_name: "x-amz-fwd-header-x-amz-checksum-sha1")) WriteGetObjectResponseRequest.add_member(:checksum_sha256, Shapes::ShapeRef.new(shape: ChecksumSHA256, location: "header", location_name: "x-amz-fwd-header-x-amz-checksum-sha256")) WriteGetObjectResponseRequest.add_member(:delete_marker, Shapes::ShapeRef.new(shape: DeleteMarker, location: "header", location_name: "x-amz-fwd-header-x-amz-delete-marker")) WriteGetObjectResponseRequest.add_member(:etag, Shapes::ShapeRef.new(shape: ETag, location: "header", location_name: "x-amz-fwd-header-ETag")) WriteGetObjectResponseRequest.add_member(:expires, Shapes::ShapeRef.new(shape: Expires, location: "header", location_name: "x-amz-fwd-header-Expires")) WriteGetObjectResponseRequest.add_member(:expiration, Shapes::ShapeRef.new(shape: Expiration, location: "header", location_name: "x-amz-fwd-header-x-amz-expiration")) WriteGetObjectResponseRequest.add_member(:last_modified, Shapes::ShapeRef.new(shape: LastModified, location: "header", location_name: "x-amz-fwd-header-Last-Modified")) WriteGetObjectResponseRequest.add_member(:missing_meta, Shapes::ShapeRef.new(shape: MissingMeta, location: "header", location_name: "x-amz-fwd-header-x-amz-missing-meta")) WriteGetObjectResponseRequest.add_member(:metadata, Shapes::ShapeRef.new(shape: Metadata, location: "headers", location_name: "x-amz-meta-")) WriteGetObjectResponseRequest.add_member(:object_lock_mode, Shapes::ShapeRef.new(shape: ObjectLockMode, location: "header", location_name: "x-amz-fwd-header-x-amz-object-lock-mode")) WriteGetObjectResponseRequest.add_member(:object_lock_legal_hold_status, Shapes::ShapeRef.new(shape: ObjectLockLegalHoldStatus, location: "header", location_name: 
"x-amz-fwd-header-x-amz-object-lock-legal-hold")) WriteGetObjectResponseRequest.add_member(:object_lock_retain_until_date, Shapes::ShapeRef.new(shape: ObjectLockRetainUntilDate, location: "header", location_name: "x-amz-fwd-header-x-amz-object-lock-retain-until-date")) WriteGetObjectResponseRequest.add_member(:parts_count, Shapes::ShapeRef.new(shape: PartsCount, location: "header", location_name: "x-amz-fwd-header-x-amz-mp-parts-count")) WriteGetObjectResponseRequest.add_member(:replication_status, Shapes::ShapeRef.new(shape: ReplicationStatus, location: "header", location_name: "x-amz-fwd-header-x-amz-replication-status")) WriteGetObjectResponseRequest.add_member(:request_charged, Shapes::ShapeRef.new(shape: RequestCharged, location: "header", location_name: "x-amz-fwd-header-x-amz-request-charged")) WriteGetObjectResponseRequest.add_member(:restore, Shapes::ShapeRef.new(shape: Restore, location: "header", location_name: "x-amz-fwd-header-x-amz-restore")) WriteGetObjectResponseRequest.add_member(:server_side_encryption, Shapes::ShapeRef.new(shape: ServerSideEncryption, location: "header", location_name: "x-amz-fwd-header-x-amz-server-side-encryption")) WriteGetObjectResponseRequest.add_member(:sse_customer_algorithm, Shapes::ShapeRef.new(shape: SSECustomerAlgorithm, location: "header", location_name: "x-amz-fwd-header-x-amz-server-side-encryption-customer-algorithm")) WriteGetObjectResponseRequest.add_member(:ssekms_key_id, Shapes::ShapeRef.new(shape: SSEKMSKeyId, location: "header", location_name: "x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id")) WriteGetObjectResponseRequest.add_member(:sse_customer_key_md5, Shapes::ShapeRef.new(shape: SSECustomerKeyMD5, location: "header", location_name: "x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5")) WriteGetObjectResponseRequest.add_member(:storage_class, Shapes::ShapeRef.new(shape: StorageClass, location: "header", location_name: "x-amz-fwd-header-x-amz-storage-class")) WriteGetObjectResponseRequest.add_member(:tag_count, Shapes::ShapeRef.new(shape: TagCount, location: "header", location_name: "x-amz-fwd-header-x-amz-tagging-count")) WriteGetObjectResponseRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: ObjectVersionId, location: "header", location_name: "x-amz-fwd-header-x-amz-version-id")) WriteGetObjectResponseRequest.add_member(:bucket_key_enabled, Shapes::ShapeRef.new(shape: BucketKeyEnabled, location: "header", location_name: "x-amz-fwd-header-x-amz-server-side-encryption-bucket-key-enabled")) WriteGetObjectResponseRequest.struct_class = Types::WriteGetObjectResponseRequest WriteGetObjectResponseRequest[:payload] = :body WriteGetObjectResponseRequest[:payload_member] = WriteGetObjectResponseRequest.member(:body) # @api private API = Seahorse::Model::Api.new.tap do |api| api.version = "2006-03-01" api.metadata = { "apiVersion" => "2006-03-01", "checksumFormat" => "md5", "endpointPrefix" => "s3", "globalEndpoint" => "s3.amazonaws.com", "protocol" => "rest-xml", "serviceAbbreviation" => "Amazon S3", "serviceFullName" => "Amazon Simple Storage Service", "serviceId" => "S3", "uid" => "s3-2006-03-01", } api.add_operation(:abort_multipart_upload, Seahorse::Model::Operation.new.tap do |o| o.name = "AbortMultipartUpload" o.http_method = "DELETE" o.http_request_uri = "/{Key+}" o.input = Shapes::ShapeRef.new(shape: AbortMultipartUploadRequest) o.output = Shapes::ShapeRef.new(shape: AbortMultipartUploadOutput) o.errors << Shapes::ShapeRef.new(shape: NoSuchUpload) end) 
api.add_operation(:complete_multipart_upload, Seahorse::Model::Operation.new.tap do |o| o.name = "CompleteMultipartUpload" o.http_method = "POST" o.http_request_uri = "/{Key+}" o.input = Shapes::ShapeRef.new(shape: CompleteMultipartUploadRequest) o.output = Shapes::ShapeRef.new(shape: CompleteMultipartUploadOutput) end) api.add_operation(:copy_object, Seahorse::Model::Operation.new.tap do |o| o.name = "CopyObject" o.http_method = "PUT" o.http_request_uri = "/{Key+}" o.input = Shapes::ShapeRef.new(shape: CopyObjectRequest) o.output = Shapes::ShapeRef.new(shape: CopyObjectOutput) o.errors << Shapes::ShapeRef.new(shape: ObjectNotInActiveTierError) end) api.add_operation(:create_bucket, Seahorse::Model::Operation.new.tap do |o| o.name = "CreateBucket" o.http_method = "PUT" o.http_request_uri = "/" o.input = Shapes::ShapeRef.new(shape: CreateBucketRequest) o.output = Shapes::ShapeRef.new(shape: CreateBucketOutput) o.errors << Shapes::ShapeRef.new(shape: BucketAlreadyExists) o.errors << Shapes::ShapeRef.new(shape: BucketAlreadyOwnedByYou) end) api.add_operation(:create_multipart_upload, Seahorse::Model::Operation.new.tap do |o| o.name = "CreateMultipartUpload" o.http_method = "POST" o.http_request_uri = "/{Key+}?uploads" o.input = Shapes::ShapeRef.new(shape: CreateMultipartUploadRequest) o.output = Shapes::ShapeRef.new(shape: CreateMultipartUploadOutput) end) api.add_operation(:create_session, Seahorse::Model::Operation.new.tap do |o| o.name = "CreateSession" o.http_method = "GET" o.http_request_uri = "/?session" o.input = Shapes::ShapeRef.new(shape: CreateSessionRequest) o.output = Shapes::ShapeRef.new(shape: CreateSessionOutput) o.errors << Shapes::ShapeRef.new(shape: NoSuchBucket) end) api.add_operation(:delete_bucket, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteBucket" o.http_method = "DELETE" o.http_request_uri = "/" o.input = Shapes::ShapeRef.new(shape: DeleteBucketRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:delete_bucket_analytics_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteBucketAnalyticsConfiguration" o.http_method = "DELETE" o.http_request_uri = "/?analytics" o.input = Shapes::ShapeRef.new(shape: DeleteBucketAnalyticsConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:delete_bucket_cors, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteBucketCors" o.http_method = "DELETE" o.http_request_uri = "/?cors" o.input = Shapes::ShapeRef.new(shape: DeleteBucketCorsRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:delete_bucket_encryption, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteBucketEncryption" o.http_method = "DELETE" o.http_request_uri = "/?encryption" o.input = Shapes::ShapeRef.new(shape: DeleteBucketEncryptionRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:delete_bucket_intelligent_tiering_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteBucketIntelligentTieringConfiguration" o.http_method = "DELETE" o.http_request_uri = "/?intelligent-tiering" o.input = Shapes::ShapeRef.new(shape: DeleteBucketIntelligentTieringConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) 
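# Sketch: the error ShapeRefs attached to operations above are raised as typed
# classes under Aws::S3::Errors, so CreateBucket's BucketAlreadyOwnedByYou can
# be rescued by name (bucket name is a placeholder):
#
#   begin
#     s3.create_bucket(bucket: "example-bucket")
#   rescue Aws::S3::Errors::BucketAlreadyOwnedByYou
#     # The bucket already exists in this account; safe to continue.
#   end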
api.add_operation(:delete_bucket_inventory_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteBucketInventoryConfiguration" o.http_method = "DELETE" o.http_request_uri = "/?inventory" o.input = Shapes::ShapeRef.new(shape: DeleteBucketInventoryConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:delete_bucket_lifecycle, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteBucketLifecycle" o.http_method = "DELETE" o.http_request_uri = "/?lifecycle" o.input = Shapes::ShapeRef.new(shape: DeleteBucketLifecycleRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:delete_bucket_metrics_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteBucketMetricsConfiguration" o.http_method = "DELETE" o.http_request_uri = "/?metrics" o.input = Shapes::ShapeRef.new(shape: DeleteBucketMetricsConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:delete_bucket_ownership_controls, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteBucketOwnershipControls" o.http_method = "DELETE" o.http_request_uri = "/?ownershipControls" o.input = Shapes::ShapeRef.new(shape: DeleteBucketOwnershipControlsRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:delete_bucket_policy, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteBucketPolicy" o.http_method = "DELETE" o.http_request_uri = "/?policy" o.input = Shapes::ShapeRef.new(shape: DeleteBucketPolicyRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:delete_bucket_replication, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteBucketReplication" o.http_method = "DELETE" o.http_request_uri = "/?replication" o.input = Shapes::ShapeRef.new(shape: DeleteBucketReplicationRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:delete_bucket_tagging, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteBucketTagging" o.http_method = "DELETE" o.http_request_uri = "/?tagging" o.input = Shapes::ShapeRef.new(shape: DeleteBucketTaggingRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:delete_bucket_website, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteBucketWebsite" o.http_method = "DELETE" o.http_request_uri = "/?website" o.input = Shapes::ShapeRef.new(shape: DeleteBucketWebsiteRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:delete_object, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteObject" o.http_method = "DELETE" o.http_request_uri = "/{Key+}" o.input = Shapes::ShapeRef.new(shape: DeleteObjectRequest) o.output = Shapes::ShapeRef.new(shape: DeleteObjectOutput) end) api.add_operation(:delete_object_tagging, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteObjectTagging" o.http_method = "DELETE" o.http_request_uri = "/{Key+}?tagging" o.input = Shapes::ShapeRef.new(shape: DeleteObjectTaggingRequest) o.output = Shapes::ShapeRef.new(shape: DeleteObjectTaggingOutput) end) api.add_operation(:delete_objects, 
Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteObjects" o.http_method = "POST" o.http_request_uri = "/?delete" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: DeleteObjectsRequest) o.output = Shapes::ShapeRef.new(shape: DeleteObjectsOutput) end) api.add_operation(:delete_public_access_block, Seahorse::Model::Operation.new.tap do |o| o.name = "DeletePublicAccessBlock" o.http_method = "DELETE" o.http_request_uri = "/?publicAccessBlock" o.input = Shapes::ShapeRef.new(shape: DeletePublicAccessBlockRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:get_bucket_accelerate_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketAccelerateConfiguration" o.http_method = "GET" o.http_request_uri = "/?accelerate" o.input = Shapes::ShapeRef.new(shape: GetBucketAccelerateConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketAccelerateConfigurationOutput) end) api.add_operation(:get_bucket_acl, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketAcl" o.http_method = "GET" o.http_request_uri = "/?acl" o.input = Shapes::ShapeRef.new(shape: GetBucketAclRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketAclOutput) end) api.add_operation(:get_bucket_analytics_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketAnalyticsConfiguration" o.http_method = "GET" o.http_request_uri = "/?analytics" o.input = Shapes::ShapeRef.new(shape: GetBucketAnalyticsConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketAnalyticsConfigurationOutput) end) api.add_operation(:get_bucket_cors, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketCors" o.http_method = "GET" o.http_request_uri = "/?cors" o.input = Shapes::ShapeRef.new(shape: GetBucketCorsRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketCorsOutput) end) api.add_operation(:get_bucket_encryption, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketEncryption" o.http_method = "GET" o.http_request_uri = "/?encryption" o.input = Shapes::ShapeRef.new(shape: GetBucketEncryptionRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketEncryptionOutput) end) api.add_operation(:get_bucket_intelligent_tiering_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketIntelligentTieringConfiguration" o.http_method = "GET" o.http_request_uri = "/?intelligent-tiering" o.input = Shapes::ShapeRef.new(shape: GetBucketIntelligentTieringConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketIntelligentTieringConfigurationOutput) end) api.add_operation(:get_bucket_inventory_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketInventoryConfiguration" o.http_method = "GET" o.http_request_uri = "/?inventory" o.input = Shapes::ShapeRef.new(shape: GetBucketInventoryConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketInventoryConfigurationOutput) end) api.add_operation(:get_bucket_lifecycle, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketLifecycle" o.http_method = "GET" o.http_request_uri = "/?lifecycle" o.deprecated = true o.input = Shapes::ShapeRef.new(shape: GetBucketLifecycleRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketLifecycleOutput) end) 
api.add_operation(:get_bucket_lifecycle_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketLifecycleConfiguration" o.http_method = "GET" o.http_request_uri = "/?lifecycle" o.input = Shapes::ShapeRef.new(shape: GetBucketLifecycleConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketLifecycleConfigurationOutput) end) api.add_operation(:get_bucket_location, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketLocation" o.http_method = "GET" o.http_request_uri = "/?location" o.input = Shapes::ShapeRef.new(shape: GetBucketLocationRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketLocationOutput) end) api.add_operation(:get_bucket_logging, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketLogging" o.http_method = "GET" o.http_request_uri = "/?logging" o.input = Shapes::ShapeRef.new(shape: GetBucketLoggingRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketLoggingOutput) end) api.add_operation(:get_bucket_metrics_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketMetricsConfiguration" o.http_method = "GET" o.http_request_uri = "/?metrics" o.input = Shapes::ShapeRef.new(shape: GetBucketMetricsConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketMetricsConfigurationOutput) end) api.add_operation(:get_bucket_notification, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketNotification" o.http_method = "GET" o.http_request_uri = "/?notification" o.deprecated = true o.input = Shapes::ShapeRef.new(shape: GetBucketNotificationConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: NotificationConfigurationDeprecated) end) api.add_operation(:get_bucket_notification_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketNotificationConfiguration" o.http_method = "GET" o.http_request_uri = "/?notification" o.input = Shapes::ShapeRef.new(shape: GetBucketNotificationConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: NotificationConfiguration) end) api.add_operation(:get_bucket_ownership_controls, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketOwnershipControls" o.http_method = "GET" o.http_request_uri = "/?ownershipControls" o.input = Shapes::ShapeRef.new(shape: GetBucketOwnershipControlsRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketOwnershipControlsOutput) end) api.add_operation(:get_bucket_policy, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketPolicy" o.http_method = "GET" o.http_request_uri = "/?policy" o.input = Shapes::ShapeRef.new(shape: GetBucketPolicyRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketPolicyOutput) end) api.add_operation(:get_bucket_policy_status, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketPolicyStatus" o.http_method = "GET" o.http_request_uri = "/?policyStatus" o.input = Shapes::ShapeRef.new(shape: GetBucketPolicyStatusRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketPolicyStatusOutput) end) api.add_operation(:get_bucket_replication, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketReplication" o.http_method = "GET" o.http_request_uri = "/?replication" o.input = Shapes::ShapeRef.new(shape: GetBucketReplicationRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketReplicationOutput) end) api.add_operation(:get_bucket_request_payment, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketRequestPayment" o.http_method = "GET" o.http_request_uri = "/?requestPayment" o.input = Shapes::ShapeRef.new(shape: GetBucketRequestPaymentRequest) 
o.output = Shapes::ShapeRef.new(shape: GetBucketRequestPaymentOutput) end) api.add_operation(:get_bucket_tagging, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketTagging" o.http_method = "GET" o.http_request_uri = "/?tagging" o.input = Shapes::ShapeRef.new(shape: GetBucketTaggingRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketTaggingOutput) end) api.add_operation(:get_bucket_versioning, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketVersioning" o.http_method = "GET" o.http_request_uri = "/?versioning" o.input = Shapes::ShapeRef.new(shape: GetBucketVersioningRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketVersioningOutput) end) api.add_operation(:get_bucket_website, Seahorse::Model::Operation.new.tap do |o| o.name = "GetBucketWebsite" o.http_method = "GET" o.http_request_uri = "/?website" o.input = Shapes::ShapeRef.new(shape: GetBucketWebsiteRequest) o.output = Shapes::ShapeRef.new(shape: GetBucketWebsiteOutput) end) api.add_operation(:get_object, Seahorse::Model::Operation.new.tap do |o| o.name = "GetObject" o.http_method = "GET" o.http_request_uri = "/{Key+}" o.http_checksum = { "requestValidationModeMember" => "checksum_mode", "responseAlgorithms" => ["CRC32", "CRC32C", "SHA256", "SHA1"], } o.input = Shapes::ShapeRef.new(shape: GetObjectRequest) o.output = Shapes::ShapeRef.new(shape: GetObjectOutput) o.errors << Shapes::ShapeRef.new(shape: NoSuchKey) o.errors << Shapes::ShapeRef.new(shape: InvalidObjectState) end) api.add_operation(:get_object_acl, Seahorse::Model::Operation.new.tap do |o| o.name = "GetObjectAcl" o.http_method = "GET" o.http_request_uri = "/{Key+}?acl" o.input = Shapes::ShapeRef.new(shape: GetObjectAclRequest) o.output = Shapes::ShapeRef.new(shape: GetObjectAclOutput) o.errors << Shapes::ShapeRef.new(shape: NoSuchKey) end) api.add_operation(:get_object_attributes, Seahorse::Model::Operation.new.tap do |o| o.name = "GetObjectAttributes" o.http_method = "GET" o.http_request_uri = "/{Key+}?attributes" o.input = Shapes::ShapeRef.new(shape: GetObjectAttributesRequest) o.output = Shapes::ShapeRef.new(shape: GetObjectAttributesOutput) o.errors << Shapes::ShapeRef.new(shape: NoSuchKey) end) api.add_operation(:get_object_legal_hold, Seahorse::Model::Operation.new.tap do |o| o.name = "GetObjectLegalHold" o.http_method = "GET" o.http_request_uri = "/{Key+}?legal-hold" o.input = Shapes::ShapeRef.new(shape: GetObjectLegalHoldRequest) o.output = Shapes::ShapeRef.new(shape: GetObjectLegalHoldOutput) end) api.add_operation(:get_object_lock_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "GetObjectLockConfiguration" o.http_method = "GET" o.http_request_uri = "/?object-lock" o.input = Shapes::ShapeRef.new(shape: GetObjectLockConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: GetObjectLockConfigurationOutput) end) api.add_operation(:get_object_retention, Seahorse::Model::Operation.new.tap do |o| o.name = "GetObjectRetention" o.http_method = "GET" o.http_request_uri = "/{Key+}?retention" o.input = Shapes::ShapeRef.new(shape: GetObjectRetentionRequest) o.output = Shapes::ShapeRef.new(shape: GetObjectRetentionOutput) end) api.add_operation(:get_object_tagging, Seahorse::Model::Operation.new.tap do |o| o.name = "GetObjectTagging" o.http_method = "GET" o.http_request_uri = "/{Key+}?tagging" o.input = Shapes::ShapeRef.new(shape: GetObjectTaggingRequest) o.output = Shapes::ShapeRef.new(shape:
GetObjectTaggingOutput) end) api.add_operation(:get_object_torrent, Seahorse::Model::Operation.new.tap do |o| o.name = "GetObjectTorrent" o.http_method = "GET" o.http_request_uri = "/{Key+}?torrent" o.input = Shapes::ShapeRef.new(shape: GetObjectTorrentRequest) o.output = Shapes::ShapeRef.new(shape: GetObjectTorrentOutput) end) api.add_operation(:get_public_access_block, Seahorse::Model::Operation.new.tap do |o| o.name = "GetPublicAccessBlock" o.http_method = "GET" o.http_request_uri = "/?publicAccessBlock" o.input = Shapes::ShapeRef.new(shape: GetPublicAccessBlockRequest) o.output = Shapes::ShapeRef.new(shape: GetPublicAccessBlockOutput) end) api.add_operation(:head_bucket, Seahorse::Model::Operation.new.tap do |o| o.name = "HeadBucket" o.http_method = "HEAD" o.http_request_uri = "/" o.input = Shapes::ShapeRef.new(shape: HeadBucketRequest) o.output = Shapes::ShapeRef.new(shape: HeadBucketOutput) o.errors << Shapes::ShapeRef.new(shape: NoSuchBucket) end) api.add_operation(:head_object, Seahorse::Model::Operation.new.tap do |o| o.name = "HeadObject" o.http_method = "HEAD" o.http_request_uri = "/{Key+}" o.input = Shapes::ShapeRef.new(shape: HeadObjectRequest) o.output = Shapes::ShapeRef.new(shape: HeadObjectOutput) o.errors << Shapes::ShapeRef.new(shape: NoSuchKey) end) api.add_operation(:list_bucket_analytics_configurations, Seahorse::Model::Operation.new.tap do |o| o.name = "ListBucketAnalyticsConfigurations" o.http_method = "GET" o.http_request_uri = "/?analytics" o.input = Shapes::ShapeRef.new(shape: ListBucketAnalyticsConfigurationsRequest) o.output = Shapes::ShapeRef.new(shape: ListBucketAnalyticsConfigurationsOutput) end) api.add_operation(:list_bucket_intelligent_tiering_configurations, Seahorse::Model::Operation.new.tap do |o| o.name = "ListBucketIntelligentTieringConfigurations" o.http_method = "GET" o.http_request_uri = "/?intelligent-tiering" o.input = Shapes::ShapeRef.new(shape: ListBucketIntelligentTieringConfigurationsRequest) o.output = Shapes::ShapeRef.new(shape: ListBucketIntelligentTieringConfigurationsOutput) end) api.add_operation(:list_bucket_inventory_configurations, Seahorse::Model::Operation.new.tap do |o| o.name = "ListBucketInventoryConfigurations" o.http_method = "GET" o.http_request_uri = "/?inventory" o.input = Shapes::ShapeRef.new(shape: ListBucketInventoryConfigurationsRequest) o.output = Shapes::ShapeRef.new(shape: ListBucketInventoryConfigurationsOutput) end) api.add_operation(:list_bucket_metrics_configurations, Seahorse::Model::Operation.new.tap do |o| o.name = "ListBucketMetricsConfigurations" o.http_method = "GET" o.http_request_uri = "/?metrics" o.input = Shapes::ShapeRef.new(shape: ListBucketMetricsConfigurationsRequest) o.output = Shapes::ShapeRef.new(shape: ListBucketMetricsConfigurationsOutput) end) api.add_operation(:list_buckets, Seahorse::Model::Operation.new.tap do |o| o.name = "ListBuckets" o.http_method = "GET" o.http_request_uri = "/" o.input = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) o.output = Shapes::ShapeRef.new(shape: ListBucketsOutput) end) api.add_operation(:list_directory_buckets, Seahorse::Model::Operation.new.tap do |o| o.name = "ListDirectoryBuckets" o.http_method = "GET" o.http_request_uri = "/" o.input = Shapes::ShapeRef.new(shape: ListDirectoryBucketsRequest) o.output = Shapes::ShapeRef.new(shape: ListDirectoryBucketsOutput) o[:pager] = Aws::Pager.new( limit_key: "max_directory_buckets", tokens: { "continuation_token" => "continuation_token" } ) end) 
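# Sketch: the Aws::Pager attached to ListDirectoryBuckets above (and to the
# list operations defined next) is what drives response pagination; with a
# hypothetical bucket this looks like:
#
#   resp = s3.list_objects_v2(bucket: "example-bucket", max_keys: 100)
#   resp.each_page do |page|
#     page.contents.each { |obj| puts obj.key }
#   end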
api.add_operation(:list_multipart_uploads, Seahorse::Model::Operation.new.tap do |o| o.name = "ListMultipartUploads" o.http_method = "GET" o.http_request_uri = "/?uploads" o.input = Shapes::ShapeRef.new(shape: ListMultipartUploadsRequest) o.output = Shapes::ShapeRef.new(shape: ListMultipartUploadsOutput) o[:pager] = Aws::Pager.new( more_results: "is_truncated", limit_key: "max_uploads", tokens: { "next_key_marker" => "key_marker", "next_upload_id_marker" => "upload_id_marker" } ) end) api.add_operation(:list_object_versions, Seahorse::Model::Operation.new.tap do |o| o.name = "ListObjectVersions" o.http_method = "GET" o.http_request_uri = "/?versions" o.input = Shapes::ShapeRef.new(shape: ListObjectVersionsRequest) o.output = Shapes::ShapeRef.new(shape: ListObjectVersionsOutput) o[:pager] = Aws::Pager.new( more_results: "is_truncated", limit_key: "max_keys", tokens: { "next_key_marker" => "key_marker", "next_version_id_marker" => "version_id_marker" } ) end) api.add_operation(:list_objects, Seahorse::Model::Operation.new.tap do |o| o.name = "ListObjects" o.http_method = "GET" o.http_request_uri = "/" o.input = Shapes::ShapeRef.new(shape: ListObjectsRequest) o.output = Shapes::ShapeRef.new(shape: ListObjectsOutput) o.errors << Shapes::ShapeRef.new(shape: NoSuchBucket) o[:pager] = Aws::Pager.new( more_results: "is_truncated", limit_key: "max_keys", tokens: { "next_marker || contents[-1].key" => "marker" } ) end) api.add_operation(:list_objects_v2, Seahorse::Model::Operation.new.tap do |o| o.name = "ListObjectsV2" o.http_method = "GET" o.http_request_uri = "/?list-type=2" o.input = Shapes::ShapeRef.new(shape: ListObjectsV2Request) o.output = Shapes::ShapeRef.new(shape: ListObjectsV2Output) o.errors << Shapes::ShapeRef.new(shape: NoSuchBucket) o[:pager] = Aws::Pager.new( limit_key: "max_keys", tokens: { "next_continuation_token" => "continuation_token" } ) end) api.add_operation(:list_parts, Seahorse::Model::Operation.new.tap do |o| o.name = "ListParts" o.http_method = "GET" o.http_request_uri = "/{Key+}" o.input = Shapes::ShapeRef.new(shape: ListPartsRequest) o.output = Shapes::ShapeRef.new(shape: ListPartsOutput) o[:pager] = Aws::Pager.new( more_results: "is_truncated", limit_key: "max_parts", tokens: { "next_part_number_marker" => "part_number_marker" } ) end) api.add_operation(:put_bucket_accelerate_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketAccelerateConfiguration" o.http_method = "PUT" o.http_request_uri = "/?accelerate" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => false, } o.input = Shapes::ShapeRef.new(shape: PutBucketAccelerateConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_acl, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketAcl" o.http_method = "PUT" o.http_request_uri = "/?acl" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutBucketAclRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_analytics_configuration, Seahorse::Model::Operation.new.tap do |o| o.name =
"PutBucketAnalyticsConfiguration" o.http_method = "PUT" o.http_request_uri = "/?analytics" o.input = Shapes::ShapeRef.new(shape: PutBucketAnalyticsConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_cors, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketCors" o.http_method = "PUT" o.http_request_uri = "/?cors" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutBucketCorsRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_encryption, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketEncryption" o.http_method = "PUT" o.http_request_uri = "/?encryption" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutBucketEncryptionRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_intelligent_tiering_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketIntelligentTieringConfiguration" o.http_method = "PUT" o.http_request_uri = "/?intelligent-tiering" o.input = Shapes::ShapeRef.new(shape: PutBucketIntelligentTieringConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_inventory_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketInventoryConfiguration" o.http_method = "PUT" o.http_request_uri = "/?inventory" o.input = Shapes::ShapeRef.new(shape: PutBucketInventoryConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_lifecycle, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketLifecycle" o.http_method = "PUT" o.http_request_uri = "/?lifecycle" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.deprecated = true o.input = Shapes::ShapeRef.new(shape: PutBucketLifecycleRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_lifecycle_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketLifecycleConfiguration" o.http_method = "PUT" o.http_request_uri = "/?lifecycle" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutBucketLifecycleConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_logging, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketLogging" o.http_method = "PUT" o.http_request_uri = "/?logging" o.http_checksum = { "requestAlgorithmMember" 
=> "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutBucketLoggingRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_metrics_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketMetricsConfiguration" o.http_method = "PUT" o.http_request_uri = "/?metrics" o.input = Shapes::ShapeRef.new(shape: PutBucketMetricsConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_notification, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketNotification" o.http_method = "PUT" o.http_request_uri = "/?notification" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.deprecated = true o.input = Shapes::ShapeRef.new(shape: PutBucketNotificationRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_notification_configuration, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketNotificationConfiguration" o.http_method = "PUT" o.http_request_uri = "/?notification" o.input = Shapes::ShapeRef.new(shape: PutBucketNotificationConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_ownership_controls, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketOwnershipControls" o.http_method = "PUT" o.http_request_uri = "/?ownershipControls" o.http_checksum = { "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutBucketOwnershipControlsRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_policy, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketPolicy" o.http_method = "PUT" o.http_request_uri = "/?policy" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutBucketPolicyRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_replication, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketReplication" o.http_method = "PUT" o.http_request_uri = "/?replication" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutBucketReplicationRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_request_payment, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketRequestPayment" o.http_method = "PUT" o.http_request_uri = "/?requestPayment" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { 
"requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutBucketRequestPaymentRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_tagging, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketTagging" o.http_method = "PUT" o.http_request_uri = "/?tagging" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutBucketTaggingRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_versioning, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketVersioning" o.http_method = "PUT" o.http_request_uri = "/?versioning" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutBucketVersioningRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_bucket_website, Seahorse::Model::Operation.new.tap do |o| o.name = "PutBucketWebsite" o.http_method = "PUT" o.http_request_uri = "/?website" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutBucketWebsiteRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:put_object, Seahorse::Model::Operation.new.tap do |o| o.name = "PutObject" o.http_method = "PUT" o.http_request_uri = "/{Key+}" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => false, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => false, } o.input = Shapes::ShapeRef.new(shape: PutObjectRequest) o.output = Shapes::ShapeRef.new(shape: PutObjectOutput) end) api.add_operation(:put_object_acl, Seahorse::Model::Operation.new.tap do |o| o.name = "PutObjectAcl" o.http_method = "PUT" o.http_request_uri = "/{Key+}?acl" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutObjectAclRequest) o.output = Shapes::ShapeRef.new(shape: PutObjectAclOutput) o.errors << Shapes::ShapeRef.new(shape: NoSuchKey) end) api.add_operation(:put_object_legal_hold, Seahorse::Model::Operation.new.tap do |o| o.name = "PutObjectLegalHold" o.http_method = "PUT" o.http_request_uri = "/{Key+}?legal-hold" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutObjectLegalHoldRequest) o.output = Shapes::ShapeRef.new(shape: PutObjectLegalHoldOutput) end) api.add_operation(:put_object_lock_configuration, 
Seahorse::Model::Operation.new.tap do |o| o.name = "PutObjectLockConfiguration" o.http_method = "PUT" o.http_request_uri = "/?object-lock" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutObjectLockConfigurationRequest) o.output = Shapes::ShapeRef.new(shape: PutObjectLockConfigurationOutput) end) api.add_operation(:put_object_retention, Seahorse::Model::Operation.new.tap do |o| o.name = "PutObjectRetention" o.http_method = "PUT" o.http_request_uri = "/{Key+}?retention" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutObjectRetentionRequest) o.output = Shapes::ShapeRef.new(shape: PutObjectRetentionOutput) end) api.add_operation(:put_object_tagging, Seahorse::Model::Operation.new.tap do |o| o.name = "PutObjectTagging" o.http_method = "PUT" o.http_request_uri = "/{Key+}?tagging" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutObjectTaggingRequest) o.output = Shapes::ShapeRef.new(shape: PutObjectTaggingOutput) end) api.add_operation(:put_public_access_block, Seahorse::Model::Operation.new.tap do |o| o.name = "PutPublicAccessBlock" o.http_method = "PUT" o.http_request_uri = "/?publicAccessBlock" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => true, } o.input = Shapes::ShapeRef.new(shape: PutPublicAccessBlockRequest) o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) end) api.add_operation(:restore_object, Seahorse::Model::Operation.new.tap do |o| o.name = "RestoreObject" o.http_method = "POST" o.http_request_uri = "/{Key+}?restore" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => false, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => false, } o.input = Shapes::ShapeRef.new(shape: RestoreObjectRequest) o.output = Shapes::ShapeRef.new(shape: RestoreObjectOutput) o.errors << Shapes::ShapeRef.new(shape: ObjectAlreadyInActiveTierError) end) api.add_operation(:select_object_content, Seahorse::Model::Operation.new.tap do |o| o.name = "SelectObjectContent" o.http_method = "POST" o.http_request_uri = "/{Key+}?select&select-type=2" o.input = Shapes::ShapeRef.new(shape: SelectObjectContentRequest, location_name: "SelectObjectContentRequest", metadata: { "xmlNamespace" => {"uri"=>"http://s3.amazonaws.com/doc/2006-03-01/"} } ) o.output = Shapes::ShapeRef.new(shape: SelectObjectContentOutput) end) api.add_operation(:upload_part, Seahorse::Model::Operation.new.tap do |o| o.name = "UploadPart" o.http_method = "PUT" o.http_request_uri = "/{Key+}" o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => false, } o.http_checksum = { "requestAlgorithmMember" => "checksum_algorithm", "requestChecksumRequired" => false, } o.input = Shapes::ShapeRef.new(shape: 
      api.add_operation(:upload_part, Seahorse::Model::Operation.new.tap do |o|
        o.name = "UploadPart"
        o.http_method = "PUT"
        o.http_request_uri = "/{Key+}"
        o.http_checksum = {
          "requestAlgorithmMember" => "checksum_algorithm",
          "requestChecksumRequired" => false,
        }
        o.input = Shapes::ShapeRef.new(shape: UploadPartRequest)
        o.output = Shapes::ShapeRef.new(shape: UploadPartOutput)
      end)
      api.add_operation(:upload_part_copy, Seahorse::Model::Operation.new.tap do |o|
        o.name = "UploadPartCopy"
        o.http_method = "PUT"
        o.http_request_uri = "/{Key+}"
        o.input = Shapes::ShapeRef.new(shape: UploadPartCopyRequest)
        o.output = Shapes::ShapeRef.new(shape: UploadPartCopyOutput)
      end)
      api.add_operation(:write_get_object_response, Seahorse::Model::Operation.new.tap do |o|
        o.name = "WriteGetObjectResponse"
        o.http_method = "POST"
        o.http_request_uri = "/WriteGetObjectResponse"
        o['authtype'] = "v4-unsigned-body"
        o.endpoint_pattern = {
          "hostPrefix" => "{RequestRoute}.",
        }
        o.input = Shapes::ShapeRef.new(shape: WriteGetObjectResponseRequest)
        o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure))
      end)
    end
  end
end
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/endpoint_provider.rb0000644000004100000410000023647314563445240022343 0ustar www-datawww-data
# frozen_string_literal: true

# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE

module Aws::S3
  class EndpointProvider
    def resolve_endpoint(parameters)
      bucket = parameters.bucket
      region = parameters.region
      use_fips = parameters.use_fips
      use_dual_stack = parameters.use_dual_stack
      endpoint = parameters.endpoint
      force_path_style = parameters.force_path_style
      accelerate = parameters.accelerate
      use_global_endpoint = parameters.use_global_endpoint
      use_object_lambda_endpoint = parameters.use_object_lambda_endpoint
      key = parameters.key
      prefix = parameters.prefix
      disable_access_points = parameters.disable_access_points
      disable_multi_region_access_points = parameters.disable_multi_region_access_points
      use_arn_region = parameters.use_arn_region
      use_s3_express_control_endpoint = parameters.use_s3_express_control_endpoint
      disable_s3_express_session_auth = parameters.disable_s3_express_session_auth
      if Aws::Endpoints::Matchers.set?(region)
        if Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
          raise ArgumentError, "Accelerate cannot be used with FIPS"
        end
        if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.set?(endpoint)
          raise ArgumentError, "Cannot set dual-stack in combination with a custom endpoint."
        end
        if Aws::Endpoints::Matchers.set?(endpoint) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
          raise ArgumentError, "A custom endpoint cannot be combined with FIPS"
        end
        if Aws::Endpoints::Matchers.set?(endpoint) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true)
          raise ArgumentError, "A custom endpoint cannot be combined with S3 Accelerate"
        end
        if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) && Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(partition_result, "name"), "aws-cn")
          raise ArgumentError, "Partition does not support FIPS"
        end
        if Aws::Endpoints::Matchers.set?(bucket) && (bucket_suffix = Aws::Endpoints::Matchers.substring(bucket, 0, 6, true)) && Aws::Endpoints::Matchers.string_equals?(bucket_suffix, "--x-s3")
          if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true)
            raise ArgumentError, "S3Express does not support Dual-stack."
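            # Note (editorial, not generated code): this S3 Express branch was
            # entered because substring(bucket, 0, 6, true) -- with reverse=true,
            # i.e. reading from the end of the string -- matched the trailing
            # "--x-s3" suffix that directory bucket names such as the
            # hypothetical "mybucket--usw2-az1--x-s3" carry.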
          end
          if Aws::Endpoints::Matchers.boolean_equals?(accelerate, true)
            raise ArgumentError, "S3Express does not support S3 Accelerate."
          end
          if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint))
            if Aws::Endpoints::Matchers.set?(disable_s3_express_session_auth) && Aws::Endpoints::Matchers.boolean_equals?(disable_s3_express_session_auth, true)
              if Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), true)
                if (uri_encoded_bucket = Aws::Endpoints::Matchers.uri_encode(bucket))
                  return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}/#{uri_encoded_bucket}#{url['path']}", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
                end
              end
              if Aws::Endpoints::Matchers.aws_virtual_hostable_s3_bucket?(bucket, false)
                return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{bucket}.#{url['authority']}#{url['path']}", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
              end
              raise ArgumentError, "S3Express bucket name is not a valid virtual hostable name."
            end
            if Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), true)
              if (uri_encoded_bucket = Aws::Endpoints::Matchers.uri_encode(bucket))
                return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}/#{uri_encoded_bucket}#{url['path']}", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4-s3express", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
              end
            end
            if Aws::Endpoints::Matchers.aws_virtual_hostable_s3_bucket?(bucket, false)
              return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{bucket}.#{url['authority']}#{url['path']}", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4-s3express", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
            end
            raise ArgumentError, "S3Express bucket name is not a valid virtual hostable name."
          end
          if Aws::Endpoints::Matchers.set?(use_s3_express_control_endpoint) && Aws::Endpoints::Matchers.boolean_equals?(use_s3_express_control_endpoint, true)
            if (uri_encoded_bucket = Aws::Endpoints::Matchers.uri_encode(bucket)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint))
              if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
                return Aws::Endpoints::Endpoint.new(url: "https://s3express-control-fips.#{region}.amazonaws.com/#{uri_encoded_bucket}", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
              end
              return Aws::Endpoints::Endpoint.new(url: "https://s3express-control.#{region}.amazonaws.com/#{uri_encoded_bucket}", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
            end
          end
          if Aws::Endpoints::Matchers.aws_virtual_hostable_s3_bucket?(bucket, false)
            if Aws::Endpoints::Matchers.set?(disable_s3_express_session_auth) && Aws::Endpoints::Matchers.boolean_equals?(disable_s3_express_session_auth, true)
              if (s3express_availability_zone_id = Aws::Endpoints::Matchers.substring(bucket, 6, 14, true)) && (s3express_availability_zone_delim = Aws::Endpoints::Matchers.substring(bucket, 14, 16, true)) && Aws::Endpoints::Matchers.string_equals?(s3express_availability_zone_delim, "--")
                if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
                  return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-fips-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
                end
                return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
              end
              if (s3express_availability_zone_id = Aws::Endpoints::Matchers.substring(bucket, 6, 15, true)) && (s3express_availability_zone_delim = Aws::Endpoints::Matchers.substring(bucket, 15, 17, true)) && Aws::Endpoints::Matchers.string_equals?(s3express_availability_zone_delim, "--")
                if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
                  return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-fips-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
                end
                return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
              end
              raise ArgumentError, "Unrecognized S3Express bucket name format."
            end
            if (s3express_availability_zone_id = Aws::Endpoints::Matchers.substring(bucket, 6, 14, true)) && (s3express_availability_zone_delim = Aws::Endpoints::Matchers.substring(bucket, 14, 16, true)) && Aws::Endpoints::Matchers.string_equals?(s3express_availability_zone_delim, "--")
              if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
                return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-fips-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4-s3express", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
              end
              return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4-s3express", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
            end
            if (s3express_availability_zone_id = Aws::Endpoints::Matchers.substring(bucket, 6, 15, true)) && (s3express_availability_zone_delim = Aws::Endpoints::Matchers.substring(bucket, 15, 17, true)) && Aws::Endpoints::Matchers.string_equals?(s3express_availability_zone_delim, "--")
              if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
                return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-fips-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4-s3express", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
              end
              return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3express-#{s3express_availability_zone_id}.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4-s3express", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
            end
            raise ArgumentError, "Unrecognized S3Express bucket name format."
          end
          raise ArgumentError, "S3Express bucket name is not a valid virtual hostable name."
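          # Note (editorial, not generated code): the paired substring probes
          # above cut the availability-zone id out of the tail of a directory
          # bucket name. For a hypothetical "mybucket--usw2-az1--x-s3",
          # substring(bucket, 6, 14, true) yields "usw2-az1" and
          # substring(bucket, 14, 16, true) yields the "--" delimiter; the
          # (6, 15) / (15, 17) pair covers zone ids one character longer.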
        end
        if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(bucket)) && Aws::Endpoints::Matchers.set?(use_s3_express_control_endpoint) && Aws::Endpoints::Matchers.boolean_equals?(use_s3_express_control_endpoint, true)
          if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint))
            return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
          end
          if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
            return Aws::Endpoints::Endpoint.new(url: "https://s3express-control-fips.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
          end
          return Aws::Endpoints::Endpoint.new(url: "https://s3express-control.#{region}.amazonaws.com", headers: {}, properties: {"backend"=>"S3Express", "authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3express", "signingRegion"=>"#{region}"}]})
        end
        if Aws::Endpoints::Matchers.set?(bucket) && (hardware_type = Aws::Endpoints::Matchers.substring(bucket, 49, 50, true)) && (region_prefix = Aws::Endpoints::Matchers.substring(bucket, 8, 12, true)) && (bucket_alias_suffix = Aws::Endpoints::Matchers.substring(bucket, 0, 7, true)) && (outpost_id = Aws::Endpoints::Matchers.substring(bucket, 32, 49, true)) && (region_partition = Aws::Endpoints::Matchers.aws_partition(region)) && Aws::Endpoints::Matchers.string_equals?(bucket_alias_suffix, "--op-s3")
          if Aws::Endpoints::Matchers.valid_host_label?(outpost_id, false)
            if Aws::Endpoints::Matchers.string_equals?(hardware_type, "e")
              if Aws::Endpoints::Matchers.string_equals?(region_prefix, "beta")
                if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint))
                  raise ArgumentError, "Expected an endpoint to be specified but no endpoint was found"
                end
                if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint))
                  return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.ec2.#{url['authority']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-outposts", "signingRegion"=>"#{region}"}]})
                end
              end
              return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.ec2.s3-outposts.#{region}.#{region_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-outposts", "signingRegion"=>"#{region}"}]})
            end
            if Aws::Endpoints::Matchers.string_equals?(hardware_type, "o")
              if Aws::Endpoints::Matchers.string_equals?(region_prefix, "beta")
                if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint))
                  raise ArgumentError, "Expected an endpoint to be specified but no endpoint was found"
                end
                if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint))
                  return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.op-#{outpost_id}.#{url['authority']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-outposts", "signingRegion"=>"#{region}"}]})
                end
              end
              return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.op-#{outpost_id}.s3-outposts.#{region}.#{region_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-outposts", "signingRegion"=>"#{region}"}]})
            end
            raise ArgumentError, "Unrecognized hardware type: \"Expected hardware type o or e but got #{hardware_type}\""
          end
          raise ArgumentError, "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`."
        end
        if Aws::Endpoints::Matchers.set?(bucket)
          if Aws::Endpoints::Matchers.set?(endpoint) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(Aws::Endpoints::Matchers.parse_url(endpoint)))
            raise ArgumentError, "Custom endpoint `#{endpoint}` was not a valid URI"
          end
          if Aws::Endpoints::Matchers.boolean_equals?(force_path_style, false) && Aws::Endpoints::Matchers.aws_virtual_hostable_s3_bucket?(bucket, false)
            if (partition_result = Aws::Endpoints::Matchers.aws_partition(region))
              if Aws::Endpoints::Matchers.valid_host_label?(region, false)
                if Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(partition_result, "name"), "aws-cn")
                  raise ArgumentError, "S3 Accelerate cannot be used in this region"
                end
                if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global")
                  return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-fips.dualstack.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]})
                end
                if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true)
                  return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-fips.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]})
                end
                if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false)
                  return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-fips.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]})
                end
                if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global")
                  return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-fips.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties:
{"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-accelerate.dualstack.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-accelerate.dualstack.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-accelerate.dualstack.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if 
Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.dualstack.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{bucket}.#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && 
Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{bucket}.#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{bucket}.#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(url, "isIp"), false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: 
"#{url['scheme']}://#{bucket}.#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-accelerate.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-accelerate.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-accelerate.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3-accelerate.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) if Aws::Endpoints::Matchers.string_equals?(region, 
"us-east-1") return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: "https://#{bucket}.s3.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end end raise ArgumentError, "Invalid region: region was not a valid DNS name." end end if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(url, "scheme"), "http") && Aws::Endpoints::Matchers.aws_virtual_hostable_s3_bucket?(bucket, true) && Aws::Endpoints::Matchers.boolean_equals?(force_path_style, false) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) if Aws::Endpoints::Matchers.valid_host_label?(region, false) return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{bucket}.#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end raise ArgumentError, "Invalid region: region was not a valid DNS name." 
end end if Aws::Endpoints::Matchers.boolean_equals?(force_path_style, false) && (bucket_arn = Aws::Endpoints::Matchers.aws_parse_arn(bucket)) if (arn_type = Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[0]")) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(arn_type, "")) if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "service"), "s3-object-lambda") if Aws::Endpoints::Matchers.string_equals?(arn_type, "accesspoint") if (access_point_name = Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[1]")) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(access_point_name, "")) if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) raise ArgumentError, "S3 Object Lambda does not support Dual-stack" end if Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) raise ArgumentError, "S3 Object Lambda does not support S3 Accelerate" end if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), "")) if Aws::Endpoints::Matchers.set?(disable_access_points) && Aws::Endpoints::Matchers.boolean_equals?(disable_access_points, true) raise ArgumentError, "Access points are not supported for this operation" end if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[2]"))) if Aws::Endpoints::Matchers.set?(use_arn_region) && Aws::Endpoints::Matchers.boolean_equals?(use_arn_region, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), "#{region}")) raise ArgumentError, "Invalid configuration: region from ARN `#{bucket_arn['region']}` does not match client region `#{region}` and UseArnRegion is `false`" end if (bucket_partition = Aws::Endpoints::Matchers.aws_partition(Aws::Endpoints::Matchers.attr(bucket_arn, "region"))) if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_partition, "name"), Aws::Endpoints::Matchers.attr(partition_result, "name")) if Aws::Endpoints::Matchers.valid_host_label?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), true) if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "accountId"), "") raise ArgumentError, "Invalid ARN: Missing account id" end if Aws::Endpoints::Matchers.valid_host_label?(Aws::Endpoints::Matchers.attr(bucket_arn, "accountId"), false) if Aws::Endpoints::Matchers.valid_host_label?(access_point_name, false) if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{access_point_name}-#{bucket_arn['accountId']}.#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-object-lambda", "signingRegion"=>"#{bucket_arn['region']}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) return Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}-#{bucket_arn['accountId']}.s3-object-lambda-fips.#{bucket_arn['region']}.#{bucket_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-object-lambda", "signingRegion"=>"#{bucket_arn['region']}"}]}) end return Aws::Endpoints::Endpoint.new(url: 
"https://#{access_point_name}-#{bucket_arn['accountId']}.s3-object-lambda.#{bucket_arn['region']}.#{bucket_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-object-lambda", "signingRegion"=>"#{bucket_arn['region']}"}]}) end raise ArgumentError, "Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `#{access_point_name}`" end raise ArgumentError, "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `#{bucket_arn['accountId']}`" end raise ArgumentError, "Invalid region in ARN: `#{bucket_arn['region']}` (invalid DNS name)" end raise ArgumentError, "Client was configured for partition `#{partition_result['name']}` but ARN (`#{bucket}`) has `#{bucket_partition['name']}`" end end end raise ArgumentError, "Invalid ARN: The ARN may only contain a single resource component after `accesspoint`." end raise ArgumentError, "Invalid ARN: bucket ARN is missing a region" end raise ArgumentError, "Invalid ARN: Expected a resource of the format `accesspoint:` but no name was provided" end raise ArgumentError, "Invalid ARN: Object Lambda ARNs only support `accesspoint` arn types, but found: `#{arn_type}`" end if Aws::Endpoints::Matchers.string_equals?(arn_type, "accesspoint") if (access_point_name = Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[1]")) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(access_point_name, "")) if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), "")) if Aws::Endpoints::Matchers.string_equals?(arn_type, "accesspoint") if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), "")) if Aws::Endpoints::Matchers.set?(disable_access_points) && Aws::Endpoints::Matchers.boolean_equals?(disable_access_points, true) raise ArgumentError, "Access points are not supported for this operation" end if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[2]"))) if Aws::Endpoints::Matchers.set?(use_arn_region) && Aws::Endpoints::Matchers.boolean_equals?(use_arn_region, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), "#{region}")) raise ArgumentError, "Invalid configuration: region from ARN `#{bucket_arn['region']}` does not match client region `#{region}` and UseArnRegion is `false`" end if (bucket_partition = Aws::Endpoints::Matchers.aws_partition(Aws::Endpoints::Matchers.attr(bucket_arn, "region"))) if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_partition, "name"), "#{partition_result['name']}") if Aws::Endpoints::Matchers.valid_host_label?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), true) if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "service"), "s3") if Aws::Endpoints::Matchers.valid_host_label?(Aws::Endpoints::Matchers.attr(bucket_arn, "accountId"), false) if Aws::Endpoints::Matchers.valid_host_label?(access_point_name, false) if Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) raise ArgumentError, "Access Points do not support S3 Accelerate" end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) return 
Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}-#{bucket_arn['accountId']}.s3-accesspoint-fips.dualstack.#{bucket_arn['region']}.#{bucket_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{bucket_arn['region']}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) return Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}-#{bucket_arn['accountId']}.s3-accesspoint-fips.#{bucket_arn['region']}.#{bucket_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{bucket_arn['region']}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) return Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}-#{bucket_arn['accountId']}.s3-accesspoint.dualstack.#{bucket_arn['region']}.#{bucket_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{bucket_arn['region']}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{access_point_name}-#{bucket_arn['accountId']}.#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{bucket_arn['region']}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) return Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}-#{bucket_arn['accountId']}.s3-accesspoint.#{bucket_arn['region']}.#{bucket_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{bucket_arn['region']}"}]}) end end raise ArgumentError, "Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `#{access_point_name}`" end raise ArgumentError, "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `#{bucket_arn['accountId']}`" end raise ArgumentError, "Invalid ARN: The ARN was not for the S3 service, found: #{bucket_arn['service']}" end raise ArgumentError, "Invalid region in ARN: `#{bucket_arn['region']}` (invalid DNS name)" end raise ArgumentError, "Client was configured for partition `#{partition_result['name']}` but ARN (`#{bucket}`) has `#{bucket_partition['name']}`" end end end raise ArgumentError, "Invalid ARN: The ARN may only contain a single resource component after `accesspoint`." 
end end end if Aws::Endpoints::Matchers.valid_host_label?(access_point_name, true) if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) raise ArgumentError, "S3 MRAP does not support dual-stack" end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) raise ArgumentError, "S3 MRAP does not support FIPS" end if Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) raise ArgumentError, "S3 MRAP does not support S3 Accelerate" end if Aws::Endpoints::Matchers.boolean_equals?(disable_multi_region_access_points, true) raise ArgumentError, "Invalid configuration: Multi-Region Access Point ARNs are disabled." end if (mrap_partition = Aws::Endpoints::Matchers.aws_partition(region)) if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(mrap_partition, "name"), Aws::Endpoints::Matchers.attr(bucket_arn, "partition")) return Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}.accesspoint.s3-global.#{mrap_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4a", "signingName"=>"s3", "signingRegionSet"=>["*"]}]}) end raise ArgumentError, "Client was configured for partition `#{mrap_partition['name']}` but bucket referred to partition `#{bucket_arn['partition']}`" end end raise ArgumentError, "Invalid Access Point Name" end raise ArgumentError, "Invalid ARN: Expected a resource of the format `accesspoint:` but no name was provided" end if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "service"), "s3-outposts") if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) raise ArgumentError, "S3 Outposts does not support Dual-stack" end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) raise ArgumentError, "S3 Outposts does not support FIPS" end if Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) raise ArgumentError, "S3 Outposts does not support S3 Accelerate" end if Aws::Endpoints::Matchers.set?(Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[4]")) raise ArgumentError, "Invalid Arn: Outpost Access Point ARN contains sub resources" end if (outpost_id = Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[1]")) if Aws::Endpoints::Matchers.valid_host_label?(outpost_id, false) if Aws::Endpoints::Matchers.set?(use_arn_region) && Aws::Endpoints::Matchers.boolean_equals?(use_arn_region, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), "#{region}")) raise ArgumentError, "Invalid configuration: region from ARN `#{bucket_arn['region']}` does not match client region `#{region}` and UseArnRegion is `false`" end if (bucket_partition = Aws::Endpoints::Matchers.aws_partition(Aws::Endpoints::Matchers.attr(bucket_arn, "region"))) if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) if Aws::Endpoints::Matchers.string_equals?(Aws::Endpoints::Matchers.attr(bucket_partition, "name"), Aws::Endpoints::Matchers.attr(partition_result, "name")) if Aws::Endpoints::Matchers.valid_host_label?(Aws::Endpoints::Matchers.attr(bucket_arn, "region"), true) if Aws::Endpoints::Matchers.valid_host_label?(Aws::Endpoints::Matchers.attr(bucket_arn, "accountId"), false) if (outpost_type = Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[2]")) if (access_point_name = Aws::Endpoints::Matchers.attr(bucket_arn, "resourceId[3]")) if Aws::Endpoints::Matchers.string_equals?(outpost_type, "accesspoint") if Aws::Endpoints::Matchers.set?(endpoint) && (url = 
Aws::Endpoints::Matchers.parse_url(endpoint)) return Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}-#{bucket_arn['accountId']}.#{outpost_id}.#{url['authority']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-outposts", "signingRegion"=>"#{bucket_arn['region']}"}]}) end return Aws::Endpoints::Endpoint.new(url: "https://#{access_point_name}-#{bucket_arn['accountId']}.#{outpost_id}.s3-outposts.#{bucket_arn['region']}.#{bucket_partition['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-outposts", "signingRegion"=>"#{bucket_arn['region']}"}]}) end raise ArgumentError, "Expected an outpost type `accesspoint`, found #{outpost_type}" end raise ArgumentError, "Invalid ARN: expected an access point name" end raise ArgumentError, "Invalid ARN: Expected a 4-component resource" end raise ArgumentError, "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `#{bucket_arn['accountId']}`" end raise ArgumentError, "Invalid region in ARN: `#{bucket_arn['region']}` (invalid DNS name)" end raise ArgumentError, "Client was configured for partition `#{partition_result['name']}` but ARN (`#{bucket}`) has `#{bucket_partition['name']}`" end end end raise ArgumentError, "Invalid ARN: The outpost Id may only contain a-z, A-Z, 0-9 and `-`. Found: `#{outpost_id}`" end raise ArgumentError, "Invalid ARN: The Outpost Id was not set" end raise ArgumentError, "Invalid ARN: Unrecognized format: #{bucket} (type: #{arn_type})" end raise ArgumentError, "Invalid ARN: No ARN type specified" end if (arn_prefix = Aws::Endpoints::Matchers.substring(bucket, 0, 4, false)) && Aws::Endpoints::Matchers.string_equals?(arn_prefix, "arn:") && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(Aws::Endpoints::Matchers.aws_parse_arn(bucket))) raise ArgumentError, "Invalid ARN: `#{bucket}` was not a valid ARN" end if Aws::Endpoints::Matchers.boolean_equals?(force_path_style, true) && Aws::Endpoints::Matchers.aws_parse_arn(bucket) raise ArgumentError, "Path-style addressing cannot be used with ARN buckets" end if (uri_encoded_bucket = Aws::Endpoints::Matchers.uri_encode(bucket)) if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) if Aws::Endpoints::Matchers.boolean_equals?(accelerate, false) if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.us-east-1.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", 
"signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.us-east-1.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.us-east-1.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", 
headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['normalizedPath']}#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "https://s3.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", 
"signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") return Aws::Endpoints::Endpoint.new(url: "https://s3.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end return Aws::Endpoints::Endpoint.new(url: "https://s3.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: "https://s3.#{region}.#{partition_result['dnsSuffix']}/#{uri_encoded_bucket}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end end raise ArgumentError, "Path-style addressing cannot be used with S3 Accelerate" end end end if Aws::Endpoints::Matchers.set?(use_object_lambda_endpoint) && Aws::Endpoints::Matchers.boolean_equals?(use_object_lambda_endpoint, true) if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) if Aws::Endpoints::Matchers.valid_host_label?(region, true) if Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) raise ArgumentError, "S3 Object Lambda does not support Dual-stack" end if Aws::Endpoints::Matchers.boolean_equals?(accelerate, true) raise ArgumentError, "S3 Object Lambda does not support S3 Accelerate" end if Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-object-lambda", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) return Aws::Endpoints::Endpoint.new(url: "https://s3-object-lambda-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-object-lambda", "signingRegion"=>"#{region}"}]}) end return Aws::Endpoints::Endpoint.new(url: "https://s3-object-lambda.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3-object-lambda", "signingRegion"=>"#{region}"}]}) end raise ArgumentError, "Invalid region: region was not a valid DNS name." 
end end if Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(bucket)) if (partition_result = Aws::Endpoints::Matchers.aws_partition(region)) if Aws::Endpoints::Matchers.valid_host_label?(region, true) if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) return Aws::Endpoints::Endpoint.new(url: "https://s3-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: 
"https://s3-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.us-east-1.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, true) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: "https://s3.dualstack.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && 
Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.set?(endpoint) && (url = Aws::Endpoints::Matchers.parse_url(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: "#{url['scheme']}://#{url['authority']}#{url['path']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.string_equals?(region, "aws-global") return Aws::Endpoints::Endpoint.new(url: "https://s3.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"us-east-1"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, true) if Aws::Endpoints::Matchers.string_equals?(region, "us-east-1") return Aws::Endpoints::Endpoint.new(url: "https://s3.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end return Aws::Endpoints::Endpoint.new(url: "https://s3.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, false) && Aws::Endpoints::Matchers.boolean_equals?(use_dual_stack, false) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.set?(endpoint)) && Aws::Endpoints::Matchers.not(Aws::Endpoints::Matchers.string_equals?(region, "aws-global")) && Aws::Endpoints::Matchers.boolean_equals?(use_global_endpoint, false) return Aws::Endpoints::Endpoint.new(url: "https://s3.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {"authSchemes"=>[{"disableDoubleEncoding"=>true, "name"=>"sigv4", "signingName"=>"s3", "signingRegion"=>"#{region}"}]}) end end raise ArgumentError, "Invalid region: region was not a valid DNS name." end end end raise ArgumentError, "A region must be set when sending requests to S3." 
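# Every branch above either returns an Endpoint or raises; falling through to
# here means no rule matched. A minimal sketch of driving this resolver
# directly (assumes the generated EndpointParameters and EndpointProvider
# interfaces in this gem; the resolved URL shown is illustrative of the
# path-style rules above):
#
#   params = Aws::S3::EndpointParameters.new(
#     region: 'us-west-2', bucket: 'my-bucket', force_path_style: true,
#     use_fips: false, use_dual_stack: false, accelerate: false,
#     use_global_endpoint: false
#   )
#   Aws::S3::EndpointProvider.new.resolve_endpoint(params).url
#   #=> "https://s3.us-west-2.amazonaws.com/my-bucket"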
raise ArgumentError, 'No endpoint could be resolved' end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/presigner.rb0000644000004100000410000002312014563445240020576 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 class Presigner # @api private ONE_WEEK = 60 * 60 * 24 * 7 # @api private FIFTEEN_MINUTES = 60 * 15 # @api private BLACKLISTED_HEADERS = [ 'accept', 'amz-sdk-request', 'cache-control', 'content-length', # due to an ELB bug 'expect', 'from', 'if-match', 'if-none-match', 'if-modified-since', 'if-unmodified-since', 'if-range', 'max-forwards', 'pragma', 'proxy-authorization', 'referer', 'te', 'user-agent' ].freeze # @option options [Client] :client Optionally provide an existing # S3 client def initialize(options = {}) @client = options[:client] || Aws::S3::Client.new end # Create presigned URLs for S3 operations. # # @example # signer = Aws::S3::Presigner.new # url = signer.presigned_url(:get_object, bucket: "bucket", key: "key") # # @param [Symbol] method Symbolized method name of the operation you want # to presign. # # @option params [Integer] :expires_in (900) The number of seconds # before the presigned URL expires. Defaults to 15 minutes. As signature # version 4 has a maximum expiry time of one week for presigned URLs, # attempts to set this value to greater than one week (604800) will # raise an exception. The min value of this option and the credentials # expiration time is used in the presigned URL. # # @option params [Time] :time (Time.now) The starting time for when the # presigned url becomes active. # # @option params [Boolean] :secure (true) When `false`, an HTTP URL # is returned instead of the default HTTPS URL. # # @option params [Boolean] :virtual_host (false) When `true`, the # bucket name will be used as the hostname. # # @option params [Boolean] :use_accelerate_endpoint (false) When `true`, # Presigner will attempt to use accelerated endpoint. # # @option params [Array] :whitelist_headers ([]) Additional # headers to be included for the signed request. Certain headers beyond # the authorization header could, in theory, be changed for various # reasons (including but not limited to proxies) while in transit and # after signing. This would lead to signature errors being returned, # despite no actual problems with signing. (see BLACKLISTED_HEADERS) # # @raise [ArgumentError] Raises an ArgumentError if `:expires_in` # exceeds one week. # # @return [String] a presigned url def presigned_url(method, params = {}) url, _headers = _presigned_request(method, params) url end # Allows you to create presigned URL requests for S3 operations. This # method returns a tuple containing the URL and the signed X-amz-* headers # to be used with the presigned url. # # @example # signer = Aws::S3::Presigner.new # url, headers = signer.presigned_request( # :get_object, bucket: "bucket", key: "key" # ) # # @param [Symbol] method Symbolized method name of the operation you want # to presign. # # @option params [Integer] :expires_in (900) The number of seconds # before the presigned URL expires. Defaults to 15 minutes. As signature # version 4 has a maximum expiry time of one week for presigned URLs, # attempts to set this value to greater than one week (604800) will # raise an exception. The min value of this option and the credentials # expiration time is used in the presigned URL. # # @option params [Time] :time (Time.now) The starting time for when the # presigned url becomes active. 
# # @option params [Boolean] :secure (true) When `false`, an HTTP URL # is returned instead of the default HTTPS URL. # # @option params [Boolean] :virtual_host (false) When `true`, the # bucket name will be used as the hostname. This will cause # the returned URL to be 'http' and not 'https'. # # @option params [Boolean] :use_accelerate_endpoint (false) When `true`, # Presigner will attempt to use accelerated endpoint. # # @option params [Array] :whitelist_headers ([]) Additional # headers to be included for the signed request. Certain headers beyond # the authorization header could, in theory, be changed for various # reasons (including but not limited to proxies) while in transit and # after signing. This would lead to signature errors being returned, # despite no actual problems with signing. (see BLACKLISTED_HEADERS) # # @raise [ArgumentError] Raises an ArgumentError if `:expires_in` # exceeds one week. # # @return [String, Hash] A tuple with a presigned URL and headers that # should be included with the request. def presigned_request(method, params = {}) _presigned_request(method, params, false) end private def _presigned_request(method, params, hoist = true) virtual_host = params.delete(:virtual_host) time = params.delete(:time) unsigned_headers = unsigned_headers(params) secure = params.delete(:secure) != false expires_in = expires_in(params) req = @client.build_request(method, params) use_bucket_as_hostname(req) if virtual_host handle_presigned_url_context(req) x_amz_headers = sign_but_dont_send( req, expires_in, secure, time, unsigned_headers, hoist ) [req.send_request.data, x_amz_headers] end def unsigned_headers(params) whitelist_headers = params.delete(:whitelist_headers) || [] BLACKLISTED_HEADERS - whitelist_headers end def expires_in(params) if (expires_in = params.delete(:expires_in)) if expires_in > ONE_WEEK raise ArgumentError, "expires_in value of #{expires_in} exceeds one-week maximum." elsif expires_in <= 0 raise ArgumentError, "expires_in value of #{expires_in} cannot be 0 or less." end expires_in else FIFTEEN_MINUTES end end def use_bucket_as_hostname(req) req.handle(priority: 35) do |context| uri = context.http_request.endpoint uri.host = context.params[:bucket] uri.path.sub!("/#{context.params[:bucket]}", '') @handler.call(context) end end # Used for excluding presigned_urls from API request count. # # Store context information as early as possible, to allow # handlers to make decisions based on this flag if needed. def handle_presigned_url_context(req) req.handle(step: :initialize, priority: 98) do |context| context[:presigned_url] = true @handler.call(context) end end # @param [Seahorse::Client::Request] req def sign_but_dont_send( req, expires_in, secure, time, unsigned_headers, hoist = true ) x_amz_headers = {} http_req = req.context.http_request req.handlers.remove(Aws::S3::Plugins::S3Signer::LegacyHandler) req.handlers.remove(Aws::Plugins::Sign::Handler) req.handlers.remove(Seahorse::Client::Plugins::ContentLength::Handler) req.handle(step: :send) do |context| # if an endpoint was not provided, force secure or insecure if context.config.regional_endpoint http_req.endpoint.scheme = secure ? 'https' : 'http' http_req.endpoint.port = secure ? 443 : 80 end query = http_req.endpoint.query ? 
http_req.endpoint.query.split('&') : [] http_req.headers.each do |key, value| next unless key =~ /^x-amz/i if hoist value = Aws::Sigv4::Signer.uri_escape(value) key = Aws::Sigv4::Signer.uri_escape(key) # hoist x-amz-* headers to the querystring http_req.headers.delete(key) query << "#{key}=#{value}" else x_amz_headers[key] = value end end http_req.endpoint.query = query.join('&') unless query.empty? auth_scheme = context[:auth_scheme] scheme_name = auth_scheme['name'] region = if scheme_name == 'sigv4a' auth_scheme['signingRegionSet'].first else auth_scheme['signingRegion'] end signer = Aws::Sigv4::Signer.new( service: auth_scheme['signingName'] || 's3', region: context[:sigv4_region] || region || context.config.region, credentials_provider: context[:sigv4_credentials] || context.config.credentials, signing_algorithm: scheme_name.to_sym, uri_escape_path: !!!auth_scheme['disableDoubleEncoding'], unsigned_headers: unsigned_headers, apply_checksum_header: false ) url = signer.presign_url( http_method: http_req.http_method, url: http_req.endpoint, headers: http_req.headers, body_digest: 'UNSIGNED-PAYLOAD', expires_in: expires_in, time: time ).to_s Seahorse::Client::Response.new(context: context, data: url) end # Return the headers x_amz_headers end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryptionV2/0000755000004100000410000000000014563445240020657 5ustar www-datawww-dataaws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryptionV2/io_decrypter.rb0000644000004100000410000000156314563445240023701 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module EncryptionV2 # @api private class IODecrypter # @param [OpenSSL::Cipher] cipher # @param [IO#write] io An IO-like object that responds to `#write`. def initialize(cipher, io) @cipher = cipher # Ensure that IO is reset between retries @io = io.tap { |io| io.truncate(0) if io.respond_to?(:truncate) } @cipher_buffer = String.new end # @return [#write] attr_reader :io def write(chunk) # decrypt and write if @cipher.method(:update).arity == 1 @io.write(@cipher.update(chunk)) else @io.write(@cipher.update(chunk, @cipher_buffer)) end end def finalize @io.write(@cipher.final) end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryptionV2/io_encrypter.rb0000644000004100000410000000351514563445240023712 0ustar www-datawww-data# frozen_string_literal: true require 'stringio' require 'tempfile' module Aws module S3 module EncryptionV2 # Provides an IO wrapper encrypting a stream of data. # @api private class IOEncrypter # @api private ONE_MEGABYTE = 1024 * 1024 def initialize(cipher, io) @encrypted = io.size <= ONE_MEGABYTE ? encrypt_to_stringio(cipher, io.read) : encrypt_to_tempfile(cipher, io) @size = @encrypted.size end # @return [Integer] attr_reader :size def read(bytes = nil, output_buffer = nil) if @encrypted.is_a?(Tempfile) && @encrypted.closed? @encrypted.open @encrypted.binmode end @encrypted.read(bytes, output_buffer) end def rewind @encrypted.rewind end # @api private def close @encrypted.close if @encrypted.is_a?(Tempfile) end private def encrypt_to_stringio(cipher, plain_text) if plain_text.empty? 
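# With AES-GCM, an empty plaintext produces no `update` output, so the
# encrypted body is just the final block (empty for GCM) plus the
# 16-byte auth tag.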
StringIO.new(cipher.final + cipher.auth_tag) else StringIO.new(cipher.update(plain_text) + cipher.final + cipher.auth_tag) end end def encrypt_to_tempfile(cipher, io) encrypted = Tempfile.new(self.object_id.to_s) encrypted.binmode while chunk = io.read(ONE_MEGABYTE, read_buffer ||= String.new) if cipher.method(:update).arity == 1 encrypted.write(cipher.update(chunk)) else encrypted.write(cipher.update(chunk, cipher_buffer ||= String.new)) end end encrypted.write(cipher.final) encrypted.write(cipher.auth_tag) encrypted.rewind encrypted end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryptionV2/encrypt_handler.rb0000644000004100000410000000420114563445240024362 0ustar www-datawww-data# frozen_string_literal: true require 'base64' module Aws module S3 module EncryptionV2 # @api private class EncryptHandler < Seahorse::Client::Handler def call(context) envelope, cipher = context[:encryption][:cipher_provider] .encryption_cipher( kms_encryption_context: context[:encryption][:kms_encryption_context] ) context[:encryption][:cipher] = cipher apply_encryption_envelope(context, envelope) apply_encryption_cipher(context, cipher) apply_cse_user_agent(context) @handler.call(context) end private def apply_encryption_envelope(context, envelope) if context[:encryption][:envelope_location] == :instruction_file suffix = context[:encryption][:instruction_file_suffix] context.client.put_object( bucket: context.params[:bucket], key: context.params[:key] + suffix, body: Json.dump(envelope) ) else # :metadata context.params[:metadata] ||= {} context.params[:metadata].update(envelope) end end def apply_encryption_cipher(context, cipher) io = context.params[:body] || '' io = StringIO.new(io) if io.is_a? String context.params[:body] = IOEncrypter.new(cipher, io) context.params[:metadata] ||= {} context.params[:metadata]['x-amz-unencrypted-content-length'] = io.size if context.params.delete(:content_md5) raise ArgumentError, 'Setting content_md5 on client side '\ 'encrypted objects is deprecated.' end context.http_response.on_headers do context.params[:body].close end end def apply_cse_user_agent(context) if context.config.user_agent_suffix.nil? context.config.user_agent_suffix = EC_USER_AGENT elsif !context.config.user_agent_suffix.include? EC_USER_AGENT context.config.user_agent_suffix += " #{EC_USER_AGENT}" end end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryptionV2/materials.rb0000644000004100000410000000327614563445240023175 0ustar www-datawww-data# frozen_string_literal: true require 'base64' module Aws module S3 module EncryptionV2 class Materials # @option options [required, OpenSSL::PKey::RSA, String] :key # The master key to use for encrypting/decrypting all objects. # # @option options [String] :description ('{}') # The encryption materials description. This must be # a JSON document string. # def initialize(options = {}) @key = validate_key(options[:key]) @description = validate_desc(options[:description]) end # @return [OpenSSL::PKey::RSA, String] attr_reader :key # @return [String] attr_reader :description private def validate_key(key) case key when OpenSSL::PKey::RSA then key when String if [32, 24, 16].include?(key.bytesize) key else msg = 'invalid key, symmetric key required to be 16, 24, or '\ '32 bytes in length, saw length ' + key.bytesize.to_s raise ArgumentError, msg end else msg = 'invalid encryption key, expected an OpenSSL::PKey::RSA key '\ '(for asymmetric encryption) or a String (for symmetric '\ 'encryption).'
raise ArgumentError, msg end end def validate_desc(description) Json.load(description) description rescue Json::ParseError, EncodingError msg = 'expected description to be a valid JSON document string' raise ArgumentError, msg end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryptionV2/default_key_provider.rb0000644000004100000410000000224414563445240025414 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module EncryptionV2 # The default key provider is constructed with a single key # that is used for both encryption and decryption, ignoring # the possible per-object envelope encryption materials description. # @api private class DefaultKeyProvider include KeyProvider # @option options [required, OpenSSL::PKey::RSA, String] :encryption_key # The master key to use for encrypting objects. # @option options [String] :materials_description ('{}') # A description of the encryption key. def initialize(options = {}) @encryption_materials = Materials.new( key: options[:encryption_key], description: options[:materials_description] || '{}' ) end # @return [Materials] def encryption_materials @encryption_materials end # @param [String] materials_description # @return Returns the key given in the constructor. def key_for(materials_description) @encryption_materials.key end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryptionV2/decrypt_handler.rb0000644000004100000410000001662014563445240024360 0ustar www-datawww-data# frozen_string_literal: true require 'base64' module Aws module S3 module EncryptionV2 # @api private class DecryptHandler < Seahorse::Client::Handler @@warned_response_target_proc = false V1_ENVELOPE_KEYS = %w( x-amz-key x-amz-iv x-amz-matdesc ) V2_ENVELOPE_KEYS = %w( x-amz-key-v2 x-amz-iv x-amz-cek-alg x-amz-wrap-alg x-amz-matdesc ) V2_OPTIONAL_KEYS = %w(x-amz-tag-len) POSSIBLE_ENVELOPE_KEYS = (V1_ENVELOPE_KEYS + V2_ENVELOPE_KEYS + V2_OPTIONAL_KEYS).uniq POSSIBLE_WRAPPING_FORMATS = %w( AES/GCM kms kms+context RSA-OAEP-SHA1 ) POSSIBLE_ENCRYPTION_FORMATS = %w( AES/GCM/NoPadding AES/CBC/PKCS5Padding AES/CBC/PKCS7Padding ) AUTH_REQUIRED_CEK_ALGS = %w(AES/GCM/NoPadding) def call(context) attach_http_event_listeners(context) apply_cse_user_agent(context) if context[:response_target].is_a?(Proc) && !@@warned_response_target_proc @@warned_response_target_proc = true warn(':response_target is a Proc, or a block was provided. ' \ 'Read the entire object to the ' \ 'end before you start using the decrypted data. This is to ' \ 'verify that the object has not been modified since it ' \ 'was encrypted.') end @handler.call(context) end private def attach_http_event_listeners(context) context.http_response.on_headers(200) do cipher, envelope = decryption_cipher(context) decrypter = body_contains_auth_tag?(envelope) ? 
authenticated_decrypter(context, cipher, envelope) : IODecrypter.new(cipher, context.http_response.body) context.http_response.body = decrypter end context.http_response.on_success(200) do decrypter = context.http_response.body decrypter.finalize decrypter.io.rewind if decrypter.io.respond_to?(:rewind) context.http_response.body = decrypter.io end context.http_response.on_error do if context.http_response.body.respond_to?(:io) context.http_response.body = context.http_response.body.io end end end def decryption_cipher(context) if (envelope = get_encryption_envelope(context)) cipher = context[:encryption][:cipher_provider] .decryption_cipher( envelope, context[:encryption] ) [cipher, envelope] else raise Errors::DecryptionError, "unable to locate encryption envelope" end end def get_encryption_envelope(context) if context[:encryption][:envelope_location] == :metadata envelope_from_metadata(context) || envelope_from_instr_file(context) else envelope_from_instr_file(context) || envelope_from_metadata(context) end end def envelope_from_metadata(context) possible_envelope = {} POSSIBLE_ENVELOPE_KEYS.each do |suffix| if value = context.http_response.headers["x-amz-meta-#{suffix}"] possible_envelope[suffix] = value end end extract_envelope(possible_envelope) end def envelope_from_instr_file(context) suffix = context[:encryption][:instruction_file_suffix] possible_envelope = Json.load(context.client.get_object( bucket: context.params[:bucket], key: context.params[:key] + suffix ).body.read) extract_envelope(possible_envelope) rescue S3::Errors::ServiceError, Json::ParseError nil end def extract_envelope(hash) return nil unless hash return v1_envelope(hash) if hash.key?('x-amz-key') return v2_envelope(hash) if hash.key?('x-amz-key-v2') if hash.keys.any? { |key| key.match(/^x-amz-key-(.+)$/) } msg = "unsupported envelope encryption version #{$1}" raise Errors::DecryptionError, msg end end def v1_envelope(envelope) envelope end def v2_envelope(envelope) unless POSSIBLE_ENCRYPTION_FORMATS.include? envelope['x-amz-cek-alg'] alg = envelope['x-amz-cek-alg'].inspect msg = "unsupported content encrypting key (cek) format: #{alg}" raise Errors::DecryptionError, msg end unless POSSIBLE_WRAPPING_FORMATS.include? envelope['x-amz-wrap-alg'] alg = envelope['x-amz-wrap-alg'].inspect msg = "unsupported key wrapping algorithm: #{alg}" raise Errors::DecryptionError, msg end unless (missing_keys = V2_ENVELOPE_KEYS - envelope.keys).empty? msg = "incomplete v2 encryption envelope:\n" msg += " missing: #{missing_keys.join(',')}\n" raise Errors::DecryptionError, msg end envelope end # This method fetches the tag from the end of the object by # making a GET Object w/range request. This auth tag is used # to initialize the cipher, and the decrypter truncates the # auth tag from the body when writing the final bytes. def authenticated_decrypter(context, cipher, envelope) http_resp = context.http_response content_length = http_resp.headers['content-length'].to_i auth_tag_length = auth_tag_length(envelope) auth_tag = context.client.get_object( bucket: context.params[:bucket], key: context.params[:key], version_id: context.params[:version_id], range: "bytes=-#{auth_tag_length}" ).body.read cipher.auth_tag = auth_tag cipher.auth_data = '' # The encrypted object contains both the cipher text # plus a trailing auth tag. 
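# For example, with a 16-byte tag and a content-length of 1_048_592,
# the decrypter below consumes bytes 0...1_048_576 as ciphertext and
# discards the trailing 16 bytes already consumed as the auth tag above.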
IOAuthDecrypter.new( io: http_resp.body, encrypted_content_length: content_length - auth_tag_length, cipher: cipher) end def body_contains_auth_tag?(envelope) AUTH_REQUIRED_CEK_ALGS.include?(envelope['x-amz-cek-alg']) end # Determine the auth tag length from the algorithm # Validate it against the value provided in the x-amz-tag-len # Return the tag length in bytes def auth_tag_length(envelope) tag_length = case envelope['x-amz-cek-alg'] when 'AES/GCM/NoPadding' then AES_GCM_TAG_LEN_BYTES else raise ArgumentError, 'Unsupported cek-alg: ' \ "#{envelope['x-amz-cek-alg']}" end if (tag_length * 8) != envelope['x-amz-tag-len'].to_i raise Errors::DecryptionError, 'x-amz-tag-len does not match expected' end tag_length end def apply_cse_user_agent(context) if context.config.user_agent_suffix.nil? context.config.user_agent_suffix = EC_USER_AGENT elsif !context.config.user_agent_suffix.include? EC_USER_AGENT context.config.user_agent_suffix += " #{EC_USER_AGENT}" end end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryptionV2/utils.rb0000644000004100000410000000724214563445240022351 0ustar www-datawww-data# frozen_string_literal: true require 'openssl' module Aws module S3 module EncryptionV2 # @api private module Utils class << self def encrypt_aes_gcm(key, data, auth_data) cipher = aes_encryption_cipher(:GCM, key) cipher.iv = (iv = cipher.random_iv) cipher.auth_data = auth_data iv + cipher.update(data) + cipher.final + cipher.auth_tag end def encrypt_rsa(key, data, auth_data) # Plaintext must be KeyLengthInBytes (1 Byte) + DataKey + AuthData buf = [data.bytesize] + data.unpack('C*') + auth_data.unpack('C*') key.public_encrypt(buf.pack('C*'), OpenSSL::PKey::RSA::PKCS1_OAEP_PADDING) end def decrypt(key, data) begin case key when OpenSSL::PKey::RSA # asymmetric decryption key.private_decrypt(data) when String # symmetric Decryption cipher = aes_cipher(:decrypt, :ECB, key, nil) cipher.update(data) + cipher.final end rescue OpenSSL::Cipher::CipherError msg = 'decryption failed, possible incorrect key' raise Errors::DecryptionError, msg end end def decrypt_aes_gcm(key, data, auth_data) # data is iv (12B) + key + tag (16B) buf = data.unpack('C*') iv = buf[0,12].pack('C*') # iv will always be 12 bytes tag = buf[-16, 16].pack('C*') # tag is 16 bytes enc_key = buf[12, buf.size - (12+16)].pack('C*') cipher = aes_cipher(:decrypt, :GCM, key, iv) cipher.auth_tag = tag cipher.auth_data = auth_data cipher.update(enc_key) + cipher.final end # returns the decrypted data + auth_data def decrypt_rsa(key, enc_data) # Plaintext must be KeyLengthInBytes (1 Byte) + DataKey + AuthData buf = key.private_decrypt(enc_data, OpenSSL::PKey::RSA::PKCS1_OAEP_PADDING).unpack('C*') key_length = buf[0] data = buf[1, key_length].pack('C*') auth_data = buf[key_length+1, buf.length - key_length].pack('C*') [data, auth_data] end # @param [String] block_mode "CBC" or "ECB" # @param [OpenSSL::PKey::RSA, String, nil] key # @param [String, nil] iv The initialization vector def aes_encryption_cipher(block_mode, key = nil, iv = nil) aes_cipher(:encrypt, block_mode, key, iv) end # @param [String] block_mode "CBC" or "ECB" # @param [OpenSSL::PKey::RSA, String, nil] key # @param [String, nil] iv The initialization vector def aes_decryption_cipher(block_mode, key = nil, iv = nil) aes_cipher(:decrypt, block_mode, key, iv) end # @param [String] mode "encrypt" or "decrypt" # @param [String] block_mode "CBC" or "ECB" # @param [OpenSSL::PKey::RSA, String, nil] key # @param [String, nil] iv The initialization vector def aes_cipher(mode, 
block_mode, key, iv) cipher = key ? OpenSSL::Cipher.new("aes-#{cipher_size(key)}-#{block_mode.downcase}") : OpenSSL::Cipher.new("aes-256-#{block_mode.downcase}") cipher.send(mode) # encrypt or decrypt cipher.key = key if key cipher.iv = iv if iv cipher end # @param [String] key # @return [Integer] # @raise ArgumentError def cipher_size(key) key.bytesize * 8 end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryptionV2/io_auth_decrypter.rb0000644000004100000410000000324414563445240024720 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module EncryptionV2 # @api private class IOAuthDecrypter # @option options [required, IO#write] :io # An IO-like object that responds to {#write}. # @option options [required, Integer] :encrypted_content_length # The number of bytes to decrypt from the `:io` object. # This should be the total size of `:io` minus the length of # the cipher auth tag. # @option options [required, OpenSSL::Cipher] :cipher An initialized # cipher that can be used to decrypt the bytes as they are # written to the `:io` object. The cipher should already have # its `#auth_tag` set. def initialize(options = {}) @decrypter = IODecrypter.new(options[:cipher], options[:io]) @max_bytes = options[:encrypted_content_length] @bytes_written = 0 end def write(chunk) chunk = truncate_chunk(chunk) if chunk.bytesize > 0 @bytes_written += chunk.bytesize @decrypter.write(chunk) end end def finalize @decrypter.finalize end def io @decrypter.io end private def truncate_chunk(chunk) if chunk.bytesize + @bytes_written <= @max_bytes chunk elsif @bytes_written < @max_bytes chunk[0..(@max_bytes - @bytes_written - 1)] else # If the tag was sent over after the full body has been read, # we don't want to accidentally append it. "" end end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryptionV2/default_cipher_provider.rb0000644000004100000410000001424514563445240026102 0ustar www-datawww-data# frozen_string_literal: true require 'base64' module Aws module S3 module EncryptionV2 # @api private class DefaultCipherProvider def initialize(options = {}) @key_provider = options[:key_provider] @key_wrap_schema = validate_key_wrap( options[:key_wrap_schema], @key_provider.encryption_materials.key ) @content_encryption_schema = validate_cek( options[:content_encryption_schema] ) end # @return [Array] Creates and returns a new encryption # envelope and encryption cipher. def encryption_cipher(options = {}) validate_options(options) cipher = Utils.aes_encryption_cipher(:GCM) if @key_provider.encryption_materials.key.is_a? OpenSSL::PKey::RSA enc_key = encode64( encrypt_rsa(envelope_key(cipher), @content_encryption_schema) ) else enc_key = encode64( encrypt_aes_gcm(envelope_key(cipher), @content_encryption_schema) ) end envelope = { 'x-amz-key-v2' => enc_key, 'x-amz-cek-alg' => @content_encryption_schema, 'x-amz-tag-len' => (AES_GCM_TAG_LEN_BYTES * 8).to_s, 'x-amz-wrap-alg' => @key_wrap_schema, 'x-amz-iv' => encode64(envelope_iv(cipher)), 'x-amz-matdesc' => materials_description } cipher.auth_data = '' # auth_data must be set after key and iv [envelope, cipher] end # @return [Cipher] Given an encryption envelope, returns a # decryption cipher. def decryption_cipher(envelope, options = {}) validate_options(options) master_key = @key_provider.key_for(envelope['x-amz-matdesc']) if envelope.key? 
'x-amz-key' unless options[:security_profile] == :v2_and_legacy raise Errors::LegacyDecryptionError end # Support for decryption of legacy objects key = Utils.decrypt(master_key, decode64(envelope['x-amz-key'])) iv = decode64(envelope['x-amz-iv']) Utils.aes_decryption_cipher(:CBC, key, iv) else if envelope['x-amz-cek-alg'] != 'AES/GCM/NoPadding' raise ArgumentError, 'Unsupported cek-alg: ' \ "#{envelope['x-amz-cek-alg']}" end key = case envelope['x-amz-wrap-alg'] when 'AES/GCM' if master_key.is_a? OpenSSL::PKey::RSA raise ArgumentError, 'Key mismatch - Client is configured' \ ' with an RSA key and the x-amz-wrap-alg is AES/GCM.' end Utils.decrypt_aes_gcm(master_key, decode64(envelope['x-amz-key-v2']), envelope['x-amz-cek-alg']) when 'RSA-OAEP-SHA1' unless master_key.is_a? OpenSSL::PKey::RSA raise ArgumentError, 'Key mismatch - Client is configured' \ ' with an AES key and the x-amz-wrap-alg is RSA-OAEP-SHA1.' end key, cek_alg = Utils.decrypt_rsa(master_key, decode64(envelope['x-amz-key-v2'])) raise Errors::CEKAlgMismatchError unless cek_alg == envelope['x-amz-cek-alg'] key when 'kms+context' raise ArgumentError, 'Key mismatch - Client is configured' \ ' with a user provided key and the x-amz-wrap-alg is' \ ' kms+context. Please configure the client with the' \ ' required kms_key_id' else raise ArgumentError, 'Unsupported wrap-alg: ' \ "#{envelope['x-amz-wrap-alg']}" end iv = decode64(envelope['x-amz-iv']) Utils.aes_decryption_cipher(:GCM, key, iv) end end private # Validate that the key_wrap_schema # is valid, supported and matches the provided key. # Returns the string version for the x-amz-key-wrap-alg def validate_key_wrap(key_wrap_schema, key) if key.is_a? OpenSSL::PKey::RSA unless key_wrap_schema == :rsa_oaep_sha1 raise ArgumentError, ':key_wrap_schema must be set to :rsa_oaep_sha1 for RSA keys.' end else unless key_wrap_schema == :aes_gcm raise ArgumentError, ':key_wrap_schema must be set to :aes_gcm for AES keys.' end end case key_wrap_schema when :rsa_oaep_sha1 then 'RSA-OAEP-SHA1' when :aes_gcm then 'AES/GCM' when :kms_context raise ArgumentError, 'A kms_key_id is required when using :kms_context.' else raise ArgumentError, "Unsupported key_wrap_schema: #{key_wrap_schema}" end end def validate_cek(content_encryption_schema) case content_encryption_schema when :aes_gcm_no_padding "AES/GCM/NoPadding" else raise ArgumentError, "Unsupported content_encryption_schema: #{content_encryption_schema}" end end def envelope_key(cipher) cipher.key = cipher.random_key end def envelope_iv(cipher) cipher.iv = cipher.random_iv end def encrypt_aes_gcm(data, auth_data) Utils.encrypt_aes_gcm(@key_provider.encryption_materials.key, data, auth_data) end def encrypt_rsa(data, auth_data) Utils.encrypt_rsa(@key_provider.encryption_materials.key, data, auth_data) end def materials_description @key_provider.encryption_materials.description end def encode64(str) Base64.encode64(str).split("\n") * '' end def decode64(str) Base64.decode64(str) end def validate_options(options) if !options[:kms_encryption_context].nil? raise ArgumentError, 'Cannot provide :kms_encryption_context ' \ 'with non KMS client.' 
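# A hedged construction sketch for this provider (normally the
# EncryptionV2::Client wires this up for you; the 32-byte random key
# below is an illustrative assumption, not a recommended key source):
#
#   provider = DefaultCipherProvider.new(
#     key_provider: DefaultKeyProvider.new(
#       encryption_key: OpenSSL::Cipher.new('aes-256-gcm').random_key
#     ),
#     key_wrap_schema: :aes_gcm,
#     content_encryption_schema: :aes_gcm_no_padding
#   )
#   envelope, cipher = provider.encryption_cipher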
end end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryptionV2/kms_cipher_provider.rb0000644000004100000410000001433114563445240025244 0ustar www-datawww-data# frozen_string_literal: true require 'base64' module Aws module S3 module EncryptionV2 # @api private class KmsCipherProvider def initialize(options = {}) @kms_key_id = validate_kms_key(options[:kms_key_id]) @kms_client = options[:kms_client] @key_wrap_schema = validate_key_wrap( options[:key_wrap_schema] ) @content_encryption_schema = validate_cek( options[:content_encryption_schema] ) end # @return [Array] Creates and returns a new encryption # envelope and encryption cipher. def encryption_cipher(options = {}) validate_key_for_encryption encryption_context = build_encryption_context(@content_encryption_schema, options) key_data = Aws::Plugins::UserAgent.feature('S3CryptoV2') do @kms_client.generate_data_key( key_id: @kms_key_id, encryption_context: encryption_context, key_spec: 'AES_256' ) end cipher = Utils.aes_encryption_cipher(:GCM) cipher.key = key_data.plaintext envelope = { 'x-amz-key-v2' => encode64(key_data.ciphertext_blob), 'x-amz-iv' => encode64(cipher.iv = cipher.random_iv), 'x-amz-cek-alg' => @content_encryption_schema, 'x-amz-tag-len' => (AES_GCM_TAG_LEN_BYTES * 8).to_s, 'x-amz-wrap-alg' => @key_wrap_schema, 'x-amz-matdesc' => Json.dump(encryption_context) } cipher.auth_data = '' # auth_data must be set after key and iv [envelope, cipher] end # @return [Cipher] Given an encryption envelope, returns a # decryption cipher. def decryption_cipher(envelope, options = {}) encryption_context = Json.load(envelope['x-amz-matdesc']) cek_alg = envelope['x-amz-cek-alg'] case envelope['x-amz-wrap-alg'] when 'kms' unless options[:security_profile] == :v2_and_legacy raise Errors::LegacyDecryptionError end when 'kms+context' if cek_alg != encryption_context['aws:x-amz-cek-alg'] raise Errors::CEKAlgMismatchError end if encryption_context != build_encryption_context(cek_alg, options) raise Errors::DecryptionError, 'Value of encryption context from'\ ' envelope does not match the provided encryption context' end when 'AES/GCM' raise ArgumentError, 'Key mismatch - Client is configured' \ ' with a KMS key and the x-amz-wrap-alg is AES/GCM.' when 'RSA-OAEP-SHA1' raise ArgumentError, 'Key mismatch - Client is configured' \ ' with a KMS key and the x-amz-wrap-alg is RSA-OAEP-SHA1.' 
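# Only 'kms' (legacy) and 'kms+context' can actually be unwrapped by
# this provider; the AES/GCM and RSA-OAEP-SHA1 branches above exist to
# raise a clearer key-mismatch error than the generic one below.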
else raise ArgumentError, 'Unsupported wrap-alg: ' \ "#{envelope['x-amz-wrap-alg']}" end any_cmk_mode = false || options[:kms_allow_decrypt_with_any_cmk] decrypt_options = { ciphertext_blob: decode64(envelope['x-amz-key-v2']), encryption_context: encryption_context } unless any_cmk_mode decrypt_options[:key_id] = @kms_key_id end key = Aws::Plugins::UserAgent.feature('S3CryptoV2') do @kms_client.decrypt(decrypt_options).plaintext end iv = decode64(envelope['x-amz-iv']) block_mode = case cek_alg when 'AES/CBC/PKCS5Padding' :CBC when 'AES/CBC/PKCS7Padding' :CBC when 'AES/GCM/NoPadding' :GCM else type = envelope['x-amz-cek-alg'].inspect msg = "unsupported content encrypting key (cek) format: #{type}" raise Errors::DecryptionError, msg end Utils.aes_decryption_cipher(block_mode, key, iv) end private def validate_key_wrap(key_wrap_schema) case key_wrap_schema when :kms_context then 'kms+context' else raise ArgumentError, "Unsupported key_wrap_schema: #{key_wrap_schema}" end end def validate_cek(content_encryption_schema) case content_encryption_schema when :aes_gcm_no_padding "AES/GCM/NoPadding" else raise ArgumentError, "Unsupported content_encryption_schema: #{content_encryption_schema}" end end def validate_kms_key(kms_key_id) if kms_key_id.nil? || kms_key_id.length.zero? raise ArgumentError, 'KMS CMK ID was not specified. ' \ 'Please specify a CMK ID, ' \ 'or set kms_key_id: :kms_allow_decrypt_with_any_cmk to use ' \ 'any valid CMK from the object.' end if kms_key_id.is_a?(Symbol) && kms_key_id != :kms_allow_decrypt_with_any_cmk raise ArgumentError, 'kms_key_id must be a valid KMS CMK or be ' \ 'set to :kms_allow_decrypt_with_any_cmk' end kms_key_id end def build_encryption_context(cek_alg, options = {}) kms_context = (options[:kms_encryption_context] || {}) .each_with_object({}) { |(k, v), h| h[k.to_s] = v } if kms_context.include? 'aws:x-amz-cek-alg' raise ArgumentError, 'Conflict in reserved KMS Encryption Context ' \ 'key aws:x-amz-cek-alg. This value is reserved for the S3 ' \ 'Encryption Client and cannot be set by the user.' end { 'aws:x-amz-cek-alg' => cek_alg }.merge(kms_context) end def encode64(str) Base64.encode64(str).split("\n") * "" end def decode64(str) Base64.decode64(str) end def validate_key_for_encryption if @kms_key_id == :kms_allow_decrypt_with_any_cmk raise ArgumentError, 'Unable to encrypt/write objects with '\ 'kms_key_id = :kms_allow_decrypt_with_any_cmk. Provide ' \ 'a valid kms_key_id on client construction.' end end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryptionV2/client.rb0000644000004100000410000005730714563445240022476 0ustar www-datawww-data# frozen_string_literal: true require 'forwardable' module Aws module S3 REQUIRED_PARAMS = [:key_wrap_schema, :content_encryption_schema, :security_profile] SUPPORTED_SECURITY_PROFILES = [:v2, :v2_and_legacy] # Provides an encryption client that encrypts and decrypts data client-side, # storing the encrypted data in Amazon S3. The `EncryptionV2::Client` (V2 Client) # provides improved security over the `Encryption::Client` (V1 Client) # by using more modern and secure algorithms. You can use the V2 Client # to continue decrypting objects encrypted using deprecated algorithms # by setting security_profile: :v2_and_legacy. The latest V1 Client also # supports reading and decrypting objects encrypted by the V2 Client. # # This client uses a process called "envelope encryption". Your private # encryption keys and your data's plain-text are **never** sent to # Amazon S3. 
**If you lose your encryption keys, you will not be able to # decrypt your data.** # # ## Envelope Encryption Overview # # The goal of envelope encryption is to combine the performance of # fast symmetric encryption while maintaining the secure key management # that asymmetric keys provide. # # A one-time-use symmetric key (envelope key) is generated client-side. # This is used to encrypt the data client-side. This key is then # encrypted by your master key and stored alongside your data in Amazon # S3. # # When accessing your encrypted data with the encryption client, # the encrypted envelope key is retrieved and decrypted client-side # with your master key. The envelope key is then used to decrypt the # data client-side. # # One of the benefits of envelope encryption is that if your master key # is compromised, you have the option of just re-encrypting the stored # envelope symmetric keys, instead of re-encrypting all of the # data in your account. # # ## Basic Usage # # The encryption client requires an {Aws::S3::Client}. If you do not # provide a `:client`, then a client will be constructed for you. # # require 'openssl' # key = OpenSSL::PKey::RSA.new(1024) # # # encryption client # s3 = Aws::S3::EncryptionV2::Client.new( # encryption_key: key, # key_wrap_schema: :rsa_oaep_sha1, # the key_wrap_schema must be rsa_oaep_sha1 for asymmetric keys # content_encryption_schema: :aes_gcm_no_padding, # security_profile: :v2 # use :v2_and_legacy to allow reading/decrypting objects encrypted by the V1 encryption client # ) # # # round-trip an object, encrypted/decrypted locally # s3.put_object(bucket:'aws-sdk', key:'secret', body:'handshake') # s3.get_object(bucket:'aws-sdk', key:'secret').body.read # #=> 'handshake' # # # reading an encrypted object without the encryption client # # results in getting the cipher text # Aws::S3::Client.new.get_object(bucket:'aws-sdk', key:'secret').body.read # #=> "... cipher text ..." # # ## Required Configuration # # You must configure all of the following: # # * a key or key provider - See the Keys section below. The key provided determines # the key wrapping schema(s) supported for both encryption and decryption. # * `key_wrap_schema` - The key wrapping schema. It must match the type of key configured. # * `content_encryption_schema` - The only supported value currently is `:aes_gcm_no_padding`. # More options will be added in future releases. # * `security_profile` - Determines the support for reading objects written # using older key wrap or content encryption schemas. If you need to read # legacy objects encrypted by an existing V1 Client, then set this to `:v2_and_legacy`. # Otherwise, set it to `:v2`. # # ## Keys # # For client-side encryption to work, you must provide one of the following: # # * An encryption key # * A {KeyProvider} # * A KMS encryption key id # # Additionally, the key wrapping schema must agree with the type of the key: # * :aes_gcm: An AES encryption key or a key provider. # * :rsa_oaep_sha1: An RSA encryption key or key provider. # * :kms_context: A KMS encryption key id # # ### An Encryption Key # # You can pass a single encryption key. This is used as a master key # for encrypting and decrypting all object keys. 
# # key = OpenSSL::Cipher.new("AES-256-ECB").random_key # symmetric key - used with `key_wrap_schema: :aes_gcm` # key = OpenSSL::PKey::RSA.new(1024) # asymmetric key pair - used with `key_wrap_schema: :rsa_oaep_sha1` # # s3 = Aws::S3::EncryptionV2::Client.new( # encryption_key: key, # key_wrap_schema: :aes_gcm, # or :rsa_oaep_sha1 if using RSA # content_encryption_schema: :aes_gcm_no_padding, # security_profile: :v2 # ) # # ### Key Provider # # Alternatively, you can use a {KeyProvider}. A key provider makes # it easy to work with multiple keys and simplifies key rotation. # # ### KMS Encryption Key Id # # If you pass the id of an AWS Key Management Service (KMS) key and # use :kms_context for the key_wrap_schema, then KMS will be used to # generate, encrypt and decrypt object keys. # # # keep track of the kms key id # kms = Aws::KMS::Client.new # key_id = kms.create_key.key_metadata.key_id # # Aws::S3::EncryptionV2::Client.new( # kms_key_id: key_id, # kms_client: kms, # key_wrap_schema: :kms_context, # content_encryption_schema: :aes_gcm_no_padding, # security_profile: :v2 # ) # # ## Custom Key Providers # # A {KeyProvider} is any object that responds to: # # * `#encryption_materials` # * `#key_for(materials_description)` # # Here is a trivial implementation of an in-memory key provider. # This is provided as a demonstration of the key provider interface, # and should not be used in production: # # class KeyProvider # # def initialize(default_key_name, keys) # @keys = keys # @encryption_materials = Aws::S3::EncryptionV2::Materials.new( # key: @keys[default_key_name], # description: JSON.dump(key: default_key_name), # ) # end # # attr_reader :encryption_materials # # def key_for(matdesc) # key_name = JSON.parse(matdesc)['key'] # if key = @keys[key_name] # key # else # raise "encryption key not found for: #{matdesc.inspect}" # end # end # end # # Given the above key provider, you can create an encryption client that # chooses the key to use based on the materials description stored with # the encrypted object. This makes it possible to use multiple keys # and simplifies key rotation. # # # uses "new-key" for encrypting objects, uses either for decrypting # keys = KeyProvider.new('new-key', { # "old-key" => Base64.decode64("kM5UVbhE/4rtMZJfsadYEdm2vaKFsmV2f5+URSeUCV4="), # "new-key" => Base64.decode64("w1WLio3agRWRTSJK/Ouh8NHoqRQ6fn5WbSXDTHjXMSo="), # }) # # # chooses the key based on the materials description stored # # with the encrypted object # s3 = Aws::S3::EncryptionV2::Client.new( # key_provider: keys, # key_wrap_schema: ..., # content_encryption_schema: :aes_gcm_no_padding, # security_profile: :v2 # ) # # ## Materials Description # # A materials description is a JSON document string that is stored # in the metadata (or instruction file) of an encrypted object. # The {DefaultKeyProvider} uses the empty JSON document `"{}"`. # # When building a key provider, you are free to store whatever # information you need to identify the master key that was used # to encrypt the object. # # ## Envelope Location # # By default, the encryption client stores the encryption envelope # with the object, as metadata. You can choose to have the envelope # stored in a separate "instruction file". An instruction file # is an object with the key of the encrypted object, suffixed with # `".instruction"`. # # Specify the `:envelope_location` option as `:instruction_file` to # use an instruction file for storing the envelope. 
# # # default behavior # s3 = Aws::S3::EncryptionV2::Client.new( # key_provider: ..., # envelope_location: :metadata, # ) # # # store envelope in a separate object # s3 = Aws::S3::EncryptionV2::Client.new( # key_provider: ..., # envelope_location: :instruction_file, # instruction_file_suffix: '.instruction', # default # key_wrap_schema: ..., # content_encryption_schema: :aes_gcm_no_padding, # security_profile: :v2 # ) # # When using an instruction file, multiple requests are made when # putting and getting the object. **This may cause issues if you are # issuing concurrent PUT and GET requests to an encrypted object.** # module EncryptionV2 class Client extend Deprecations extend Forwardable def_delegators :@client, :config, :delete_object, :head_object, :build_request # Creates a new encryption client. You must configure all of the following: # # * a key or key provider - The key provided also determines the key wrapping # schema(s) supported for both encryption and decryption. # * `key_wrap_schema` - The key wrapping schema. It must match the type of key configured. # * `content_encryption_schema` - The only supported value currently is `:aes_gcm_no_padding`. # More options will be added in future releases. # * `security_profile` - Determines the support for reading objects written # using older key wrap or content encryption schemas. If you need to read # legacy objects encrypted by an existing V1 Client, then set this to `:v2_and_legacy`. # Otherwise, set it to `:v2`. # # To configure the key you must provide one of the following sets of options: # # * `:encryption_key` # * `:kms_key_id` # * `:key_provider` # # You may also pass any other options accepted by `Client#initialize`. # # @option options [S3::Client] :client A basic S3 client that is used # to make API calls. If a `:client` is not provided, a new {S3::Client} # will be constructed. # # @option options [OpenSSL::PKey::RSA, String] :encryption_key The master # key to use for encrypting/decrypting all objects. # # @option options [String] :kms_key_id When you provide a `:kms_key_id`, # then AWS Key Management Service (KMS) will be used to manage the # object encryption keys. By default a {KMS::Client} will be # constructed for KMS API calls. Alternatively, you can provide # your own via `:kms_client`. To only support decryption/reads, you may # provide `:allow_decrypt_with_any_cmk` which will use # the implicit CMK associated with the data during reads but will # not allow you to encrypt/write objects with this client. # # @option options [#key_for] :key_provider Any object that responds # to `#key_for`. This method should accept a materials description # JSON document string and return an encryption key. # # @option options [required, Symbol] :key_wrap_schema The key wrapping # schema to be used. It must match the type of key configured. # Must be one of the following: # # * :kms_context (Must provide kms_key_id) # * :aes_gcm (Must provide an AES (string) key) # * :rsa_oaep_sha1 (Must provide an RSA key) # # @option options [required, Symbol] :content_encryption_schema # Must be one of the following: # # * :aes_gcm_no_padding # # @option options [required, Symbol] :security_profile # Determines the support for reading objects written using older # key wrap or content encryption schemas. # Must be one of the following: # # * :v2 - Reads of legacy (v1) objects are NOT allowed # * :v2_and_legacy - Enables reading of legacy (V1) schemas.
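# # For example, a client configured to migrate away from V1-encrypted objects (a minimal sketch; `key` stands in for one of the key configurations shown above and is not part of the original documentation): # # # :v2_and_legacy permits decrypting objects written by the V1 client; # # with :v2, those reads raise Aws::S3::EncryptionV2::Errors::LegacyDecryptionError # s3 = Aws::S3::EncryptionV2::Client.new( # encryption_key: key, # key_wrap_schema: :aes_gcm, # content_encryption_schema: :aes_gcm_no_padding, # security_profile: :v2_and_legacy # ) #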
# # @option options [Symbol] :envelope_location (:metadata) Where to # store the envelope encryption keys. By default, the envelope is # stored with the encrypted object. If you pass `:instruction_file`, # then the envelope is stored in a separate object in Amazon S3. # # @option options [String] :instruction_file_suffix ('.instruction') # When `:envelope_location` is `:instruction_file` then the # instruction file uses the object key with this suffix appended. # # @option options [KMS::Client] :kms_client A default {KMS::Client} # is constructed when using KMS to manage encryption keys. # def initialize(options = {}) validate_params(options) @client = extract_client(options) @cipher_provider = cipher_provider(options) @envelope_location = extract_location(options) @instruction_file_suffix = extract_suffix(options) @kms_allow_decrypt_with_any_cmk = options[:kms_key_id] == :kms_allow_decrypt_with_any_cmk @security_profile = extract_security_profile(options) end # @return [S3::Client] attr_reader :client # @return [KeyProvider, nil] Returns `nil` if you are using # AWS Key Management Service (KMS). attr_reader :key_provider # @return [Symbol] Determines the support for reading objects written # using older key wrap or content encryption schemas. attr_reader :security_profile # @return [Boolean] If true the provided KMS key_id will not be used # during decrypt, allowing decryption with the key_id from the object. attr_reader :kms_allow_decrypt_with_any_cmk # @return [Symbol<:metadata, :instruction_file>] attr_reader :envelope_location # @return [String] When {#envelope_location} is `:instruction_file`, # the envelope is stored in the object with the object key suffixed # by this string. attr_reader :instruction_file_suffix # Uploads an object to Amazon S3, encrypting data client-side. # See {S3::Client#put_object} for documentation on accepted # request parameters. # @option params [Hash] :kms_encryption_context Additional encryption # context to use with KMS. Applies only when KMS is used. In order # to decrypt the object you will need to provide the identical # :kms_encryption_context to `get_object`. # @option (see S3::Client#put_object) # @return (see S3::Client#put_object) # @see S3::Client#put_object def put_object(params = {}) kms_encryption_context = params.delete(:kms_encryption_context) req = @client.build_request(:put_object, params) req.handlers.add(EncryptHandler, priority: 95) req.context[:encryption] = { cipher_provider: @cipher_provider, envelope_location: @envelope_location, instruction_file_suffix: @instruction_file_suffix, kms_encryption_context: kms_encryption_context } Aws::Plugins::UserAgent.feature('S3CryptoV2') do req.send_request end end # Gets an object from Amazon S3, decrypting data locally. # See {S3::Client#get_object} for documentation on accepted # request parameters. # Warning: If you provide a block to get_object or set the request # parameter :response_target to a Proc, then read the entire object to the # end before you start using the decrypted data. This is to verify that # the object has not been modified since it was encrypted. # # @option options [Symbol] :security_profile # Determines the support for reading objects written using older # key wrap or content encryption schemas. Overrides the value set # on client construction if provided. # Must be one of the following: # # * :v2 - Reads of legacy (v1) objects are NOT allowed # * :v2_and_legacy - Enables reading of legacy (V1) schemas. 
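# # For example (a minimal sketch; the bucket and object key names are placeholders, not part of the original documentation): # # # permit a one-off legacy read without relaxing the security # # profile configured on the client # s3.get_object( # bucket: 'aws-sdk', # key: 'legacy-object', # security_profile: :v2_and_legacy # ) #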
# @option params [String] :instruction_file_suffix The suffix # used to find the instruction file containing the encryption # envelope. You should not set this option when the envelope # is stored in the object metadata. Defaults to # {#instruction_file_suffix}. # @option params [Hash] :kms_encryption_context Additional encryption # context to use with KMS. Applies only when KMS is used. # @option options [Boolean] :kms_allow_decrypt_with_any_cmk (false) # By default the KMS CMK ID (kms_key_id) will be used during decrypt # and will fail if there is a mismatch. Setting this to true # will use the implicit CMK associated with the data. # @option (see S3::Client#get_object) # @return (see S3::Client#get_object) # @see S3::Client#get_object # @note The `:range` request parameter is not supported. def get_object(params = {}, &block) if params[:range] raise NotImplementedError, '#get_object with :range not supported' end envelope_location, instruction_file_suffix = envelope_options(params) kms_encryption_context = params.delete(:kms_encryption_context) kms_any_cmk_mode = kms_any_cmk_mode(params) security_profile = security_profile_from_params(params) req = @client.build_request(:get_object, params) req.handlers.add(DecryptHandler) req.context[:encryption] = { cipher_provider: @cipher_provider, envelope_location: envelope_location, instruction_file_suffix: instruction_file_suffix, kms_encryption_context: kms_encryption_context, kms_allow_decrypt_with_any_cmk: kms_any_cmk_mode, security_profile: security_profile } Aws::Plugins::UserAgent.feature('S3CryptoV2') do req.send_request(target: block) end end private # Validate required parameters exist and don't conflict. # The cek_alg and wrap_alg are passed on to the CipherProviders # and further validated there def validate_params(options) unless (missing_params = REQUIRED_PARAMS - options.keys).empty? 
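# NOTE: REQUIRED_PARAMS is defined elsewhere in this class; per the # documentation above it is expected to cover :key_wrap_schema, # :content_encryption_schema and :security_profile. The key configuration # itself is cross-checked against the wrap algorithm in the case # statement that follows.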
raise ArgumentError, "Missing required parameter(s): "\ "#{missing_params.map{ |s| ":#{s}" }.join(', ')}" end wrap_alg = options[:key_wrap_schema] # validate that the wrap alg matches the type of key given case wrap_alg when :kms_context unless options[:kms_key_id] raise ArgumentError, 'You must provide :kms_key_id to use :kms_context' end end end def extract_client(options) options[:client] || begin options = options.dup options.delete(:kms_key_id) options.delete(:kms_client) options.delete(:key_provider) options.delete(:encryption_key) options.delete(:envelope_location) options.delete(:instruction_file_suffix) REQUIRED_PARAMS.each { |p| options.delete(p) } S3::Client.new(options) end end def kms_client(options) options[:kms_client] || begin KMS::Client.new( region: @client.config.region, credentials: @client.config.credentials, ) end end def cipher_provider(options) if options[:kms_key_id] KmsCipherProvider.new( kms_key_id: options[:kms_key_id], kms_client: kms_client(options), key_wrap_schema: options[:key_wrap_schema], content_encryption_schema: options[:content_encryption_schema] ) else @key_provider = extract_key_provider(options) DefaultCipherProvider.new( key_provider: @key_provider, key_wrap_schema: options[:key_wrap_schema], content_encryption_schema: options[:content_encryption_schema] ) end end def extract_key_provider(options) if options[:key_provider] options[:key_provider] elsif options[:encryption_key] DefaultKeyProvider.new(options) else msg = 'you must pass a :kms_key_id, :key_provider, or :encryption_key' raise ArgumentError, msg end end def envelope_options(params) location = params.delete(:envelope_location) || @envelope_location suffix = params.delete(:instruction_file_suffix) if suffix [:instruction_file, suffix] else [location, @instruction_file_suffix] end end def extract_location(options) location = options[:envelope_location] || :metadata if [:metadata, :instruction_file].include?(location) location else msg = ':envelope_location must be :metadata or :instruction_file '\ "got #{location.inspect}" raise ArgumentError, msg end end def extract_suffix(options) suffix = options[:instruction_file_suffix] || '.instruction' if suffix.is_a? String suffix else msg = ':instruction_file_suffix must be a String' raise ArgumentError, msg end end def kms_any_cmk_mode(params) if !params[:kms_allow_decrypt_with_any_cmk].nil? params.delete(:kms_allow_decrypt_with_any_cmk) else @kms_allow_decrypt_with_any_cmk end end def extract_security_profile(options) validate_security_profile(options[:security_profile]) end def security_profile_from_params(params) security_profile = if !params[:security_profile].nil? params.delete(:security_profile) else @security_profile end validate_security_profile(security_profile) end def validate_security_profile(security_profile) unless SUPPORTED_SECURITY_PROFILES.include? security_profile raise ArgumentError, "Unsupported security profile: :#{security_profile}. " \ "Please provide one of: #{SUPPORTED_SECURITY_PROFILES.map { |s| ":#{s}" }.join(', ')}" end if security_profile == :v2_and_legacy && !@warned_about_legacy @warned_about_legacy = true warn( 'The S3 Encryption Client is configured to read encrypted objects ' \ "with legacy encryption modes. If you don't have objects " \ 'encrypted with these legacy modes, you should disable support ' \ 'for them to enhance security.' 
) end security_profile end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryptionV2/errors.rb0000644000004100000410000000217414563445240022524 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module EncryptionV2 module Errors # Generic DecryptionError class DecryptionError < RuntimeError; end class EncryptionError < RuntimeError; end # Raised when attempting to decrypt a legacy (V1) encrypted object # when using a security_profile that does not support it. class LegacyDecryptionError < DecryptionError def initialize(*args) msg = 'The requested object is ' \ 'encrypted with V1 encryption schemas that have been disabled ' \ 'by client configuration security_profile = :v2. Retry with ' \ ':v2_and_legacy or re-encrypt the object.' super(msg) end end class CEKAlgMismatchError < DecryptionError def initialize(*args) msg = 'The content encryption algorithm used at encryption time ' \ 'does not match the algorithm stored for decryption time. ' \ 'The object may be altered or corrupted.' super(msg) end end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/encryptionV2/key_provider.rb0000644000004100000410000000157414563445240023715 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 module EncryptionV2 # This module defines the interface required for a {Client#key_provider}. # A key provider is any object that: # # * Responds to {#encryption_materials} with an {Materials} object. # # * Responds to {#key_for}, receiving a JSON document String, # returning an encryption key. The returned encryption key # must be one of: # # * `OpenSSL::PKey::RSA` - for asymmetric encryption # * `String` - 32, 24, or 16 bytes long, for symmetric encryption # module KeyProvider # @return [Materials] def encryption_materials; end # @param [String] materials_description # @return [OpenSSL::PKey::RSA, String] encryption_key def key_for(materials_description); end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/bucket_notification.rb0000644000004100000410000003015714563445240022633 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 class BucketNotification extend Aws::Deprecations # @overload def initialize(bucket_name, options = {}) # @param [String] bucket_name # @option options [Client] :client # @overload def initialize(options = {}) # @option options [required, String] :bucket_name # @option options [Client] :client def initialize(*args) options = Hash === args.last ? args.pop.dup : {} @bucket_name = extract_bucket_name(args, options) @data = options.delete(:data) @client = options.delete(:client) || Client.new(options) @waiter_block_warned = false end # @!group Read-Only Attributes # @return [String] def bucket_name @bucket_name end # The topic to which notifications are sent and the events for which # notifications are generated. # @return [Array] def topic_configurations data[:topic_configurations] end # The Amazon Simple Queue Service queues to publish messages to and the # events for which to publish messages. # @return [Array] def queue_configurations data[:queue_configurations] end # Describes the Lambda functions to invoke and the events for which to # invoke them. # @return [Array] def lambda_function_configurations data[:lambda_function_configurations] end # Enables delivery of events to Amazon EventBridge. 
# @return [Types::EventBridgeConfiguration] def event_bridge_configuration data[:event_bridge_configuration] end # @!endgroup # @return [Client] def client @client end # Loads, or reloads {#data} for the current {BucketNotification}. # Returns `self` making it possible to chain methods. # # bucket_notification.reload.data # # @return [self] def load resp = Aws::Plugins::UserAgent.feature('resource') do @client.get_bucket_notification_configuration(bucket: @bucket_name) end @data = resp.data self end alias :reload :load # @return [Types::NotificationConfiguration] # Returns the data for this {BucketNotification}. Calls # {Client#get_bucket_notification_configuration} if {#data_loaded?} is `false`. def data load unless @data @data end # @return [Boolean] # Returns `true` if this resource is loaded. Accessing attributes or # {#data} on an unloaded resource will trigger a call to {#load}. def data_loaded? !!@data end # @deprecated Use [Aws::S3::Client] #wait_until instead # # Waiter polls an API operation until a resource enters a desired # state. # # @note The waiting operation is performed on a copy. The original resource # remains unchanged. # # ## Basic Usage # # The waiter polls until it is successful, until it fails by # entering a terminal state, or until a maximum number of attempts # are made. # # # polls in a loop until condition is true # resource.wait_until(options) {|resource| condition} # # ## Example # # instance.wait_until(max_attempts:10, delay:5) do |instance| # instance.state.name == 'running' # end # # ## Configuration # # You can configure the maximum number of polling attempts, and the # delay (in seconds) between each polling attempt. The waiting condition is # set by passing a block to {#wait_until}: # # # poll for ~25 seconds # resource.wait_until(max_attempts:5,delay:5) {|resource|...} # # ## Callbacks # # You can be notified before each polling attempt and before each # delay. If you throw `:success` or `:failure` from these callbacks, # it will terminate the waiter. # # started_at = Time.now # # poll for 1 hour, instead of a number of attempts # proc = Proc.new do |attempts, response| # throw :failure if Time.now - started_at > 3600 # end # # # disable max attempts # instance.wait_until(before_wait:proc, max_attempts:nil) {...} # # ## Handling Errors # # When a waiter is successful, it returns the Resource. When a waiter # fails, it raises an error. # # begin # resource.wait_until(...) # rescue Aws::Waiters::Errors::WaiterFailed # # resource did not enter the desired state in time # end # # @yieldparam [Resource] resource to be used in the waiting condition. # # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter # terminates because the waiter has entered a state that it will not # transition out of, preventing success. # # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the # configured maximum number of attempts have been made, and the # waiter is not yet successful. # # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is # encountered while polling for a resource that is not expected.
# # @raise [NotImplementedError] Raised when the resource does not # # @option options [Integer] :max_attempts (10) Maximum number of # attempts # @option options [Integer] :delay (10) Delay between each # attempt in seconds # @option options [Proc] :before_attempt (nil) Callback # invoked before each attempt # @option options [Proc] :before_wait (nil) Callback # invoked before each wait # @return [Resource] if the waiter was successful def wait_until(options = {}, &block) self_copy = self.dup attempts = 0 options[:max_attempts] = 10 unless options.key?(:max_attempts) options[:delay] ||= 10 options[:poller] = Proc.new do attempts += 1 if block.call(self_copy) [:success, self_copy] else self_copy.reload unless attempts == options[:max_attempts] :retry end end Aws::Plugins::UserAgent.feature('resource') do Aws::Waiters::Waiter.new(options).wait({}) end end # @!group Actions # @example Request syntax with placeholder values # # bucket_notification.put({ # notification_configuration: { # required # topic_configurations: [ # { # id: "NotificationId", # topic_arn: "TopicArn", # required # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete # filter: { # key: { # filter_rules: [ # { # name: "prefix", # accepts prefix, suffix # value: "FilterRuleValue", # }, # ], # }, # }, # }, # ], # queue_configurations: [ # { # id: "NotificationId", # queue_arn: "QueueArn", # required # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete # filter: { # key: { # filter_rules: [ # { # name: "prefix", # accepts prefix, suffix # value: "FilterRuleValue", # }, # ], # }, # }, # }, # ], # lambda_function_configurations: [ # { # id: "NotificationId", # lambda_function_arn: "LambdaFunctionArn", # required # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, 
s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete # filter: { # key: { # filter_rules: [ # { # name: "prefix", # accepts prefix, suffix # value: "FilterRuleValue", # }, # ], # }, # }, # }, # ], # event_bridge_configuration: { # }, # }, # expected_bucket_owner: "AccountId", # skip_destination_validation: false, # }) # @param [Hash] options ({}) # @option options [required, Types::NotificationConfiguration] :notification_configuration # A container for specifying the notification configuration of the # bucket. If this element is empty, notifications are turned off for the # bucket. # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @option options [Boolean] :skip_destination_validation # Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. # True or false value. # @return [EmptyStructure] def put(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_bucket_notification_configuration(options) end resp.data end # @!group Associations # @return [Bucket] def bucket Bucket.new( name: @bucket_name, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end class Collection < Aws::Resources::Collection; end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/bucket_logging.rb0000644000004100000410000002243614563445240021574 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 class BucketLogging extend Aws::Deprecations # @overload def initialize(bucket_name, options = {}) # @param [String] bucket_name # @option options [Client] :client # @overload def initialize(options = {}) # @option options [required, String] :bucket_name # @option options [Client] :client def initialize(*args) options = Hash === args.last ? args.pop.dup : {} @bucket_name = extract_bucket_name(args, options) @data = options.delete(:data) @client = options.delete(:client) || Client.new(options) @waiter_block_warned = false end # @!group Read-Only Attributes # @return [String] def bucket_name @bucket_name end # Describes where logs are stored and the prefix that Amazon S3 assigns # to all log object keys for a bucket. For more information, see [PUT # Bucket logging][1] in the *Amazon S3 API Reference*. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html # @return [Types::LoggingEnabled] def logging_enabled data[:logging_enabled] end # @!endgroup # @return [Client] def client @client end # Loads, or reloads {#data} for the current {BucketLogging}. # Returns `self` making it possible to chain methods. # # bucket_logging.reload.data # # @return [self] def load resp = Aws::Plugins::UserAgent.feature('resource') do @client.get_bucket_logging(bucket: @bucket_name) end @data = resp.data self end alias :reload :load # @return [Types::GetBucketLoggingOutput] # Returns the data for this {BucketLogging}. Calls # {Client#get_bucket_logging} if {#data_loaded?} is `false`. def data load unless @data @data end # @return [Boolean] # Returns `true` if this resource is loaded. Accessing attributes or # {#data} on an unloaded resource will trigger a call to {#load}. def data_loaded? !!@data end # @deprecated Use [Aws::S3::Client] #wait_until instead # # Waiter polls an API operation until a resource enters a desired # state. # # @note The waiting operation is performed on a copy. The original resource # remains unchanged. # # ## Basic Usage # # The waiter polls until it is successful, until it fails by # entering a terminal state, or until a maximum number of attempts # are made. # # # polls in a loop until condition is true # resource.wait_until(options) {|resource| condition} # # ## Example # # instance.wait_until(max_attempts:10, delay:5) do |instance| # instance.state.name == 'running' # end # # ## Configuration # # You can configure the maximum number of polling attempts, and the # delay (in seconds) between each polling attempt. The waiting condition is # set by passing a block to {#wait_until}: # # # poll for ~25 seconds # resource.wait_until(max_attempts:5,delay:5) {|resource|...} # # ## Callbacks # # You can be notified before each polling attempt and before each # delay. If you throw `:success` or `:failure` from these callbacks, # it will terminate the waiter. # # started_at = Time.now # # poll for 1 hour, instead of a number of attempts # proc = Proc.new do |attempts, response| # throw :failure if Time.now - started_at > 3600 # end # # # disable max attempts # instance.wait_until(before_wait:proc, max_attempts:nil) {...} # # ## Handling Errors # # When a waiter is successful, it returns the Resource. When a waiter # fails, it raises an error. # # begin # resource.wait_until(...) # rescue Aws::Waiters::Errors::WaiterFailed # # resource did not enter the desired state in time # end # # @yieldparam [Resource] resource to be used in the waiting condition. # # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter # terminates because the waiter has entered a state that it will not # transition out of, preventing success. # # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the # configured maximum number of attempts have been made, and the # waiter is not yet successful. # # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is # encountered while polling for a resource that is not expected.
# # @raise [NotImplementedError] Raised when the resource does not # # @option options [Integer] :max_attempts (10) Maximum number of # attempts # @option options [Integer] :delay (10) Delay between each # attempt in seconds # @option options [Proc] :before_attempt (nil) Callback # invoked before each attempt # @option options [Proc] :before_wait (nil) Callback # invoked before each wait # @return [Resource] if the waiter was successful def wait_until(options = {}, &block) self_copy = self.dup attempts = 0 options[:max_attempts] = 10 unless options.key?(:max_attempts) options[:delay] ||= 10 options[:poller] = Proc.new do attempts += 1 if block.call(self_copy) [:success, self_copy] else self_copy.reload unless attempts == options[:max_attempts] :retry end end Aws::Plugins::UserAgent.feature('resource') do Aws::Waiters::Waiter.new(options).wait({}) end end # @!group Actions # @example Request syntax with placeholder values # # bucket_logging.put({ # bucket_logging_status: { # required # logging_enabled: { # target_bucket: "TargetBucket", # required # target_grants: [ # { # grantee: { # display_name: "DisplayName", # email_address: "EmailAddress", # id: "ID", # type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group # uri: "URI", # }, # permission: "FULL_CONTROL", # accepts FULL_CONTROL, READ, WRITE # }, # ], # target_prefix: "TargetPrefix", # required # target_object_key_format: { # simple_prefix: { # }, # partitioned_prefix: { # partition_date_source: "EventTime", # accepts EventTime, DeliveryTime # }, # }, # }, # }, # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [required, Types::BucketLoggingStatus] :bucket_logging_status # Container for logging status information. # @option options [String] :content_md5 # The MD5 hash of the `PutBucketLogging` request body. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). 
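# # For example, to enable access logging to a separate log bucket (an illustrative sketch; the bucket name and prefix are placeholders, not part of the generated documentation): # # bucket_logging.put( # bucket_logging_status: { # logging_enabled: { # target_bucket: "amzn-s3-demo-logging-bucket", # placeholder # target_prefix: "access-logs/" # } # } # ) #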
# @return [EmptyStructure] def put(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_bucket_logging(options) end resp.data end # @!group Associations # @return [Bucket] def bucket Bucket.new( name: @bucket_name, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end class Collection < Aws::Resources::Collection; end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/bucket_lifecycle.rb0000644000004100000410000002357114563445240022106 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 class BucketLifecycle extend Aws::Deprecations # @overload def initialize(bucket_name, options = {}) # @param [String] bucket_name # @option options [Client] :client # @overload def initialize(options = {}) # @option options [required, String] :bucket_name # @option options [Client] :client def initialize(*args) options = Hash === args.last ? args.pop.dup : {} @bucket_name = extract_bucket_name(args, options) @data = options.delete(:data) @client = options.delete(:client) || Client.new(options) @waiter_block_warned = false end # @!group Read-Only Attributes # @return [String] def bucket_name @bucket_name end # Container for a lifecycle rule. # @return [Array] def rules data[:rules] end # @!endgroup # @return [Client] def client @client end # Loads, or reloads {#data} for the current {BucketLifecycle}. # Returns `self` making it possible to chain methods. # # bucket_lifecycle.reload.data # # @return [self] def load resp = Aws::Plugins::UserAgent.feature('resource') do @client.get_bucket_lifecycle(bucket: @bucket_name) end @data = resp.data self end alias :reload :load # @return [Types::GetBucketLifecycleOutput] # Returns the data for this {BucketLifecycle}. Calls # {Client#get_bucket_lifecycle} if {#data_loaded?} is `false`. def data load unless @data @data end # @return [Boolean] # Returns `true` if this resource is loaded. Accessing attributes or # {#data} on an unloaded resource will trigger a call to {#load}. def data_loaded? !!@data end # @deprecated Use [Aws::S3::Client] #wait_until instead # # Waiter polls an API operation until a resource enters a desired # state. # # @note The waiting operation is performed on a copy. The original resource # remains unchanged. # # ## Basic Usage # # The waiter polls until it is successful, until it fails by # entering a terminal state, or until a maximum number of attempts # are made. # # # polls in a loop until condition is true # resource.wait_until(options) {|resource| condition} # # ## Example # # instance.wait_until(max_attempts:10, delay:5) do |instance| # instance.state.name == 'running' # end # # ## Configuration # # You can configure the maximum number of polling attempts, and the # delay (in seconds) between each polling attempt.
The waiting condition is # set by passing a block to {#wait_until}: # # # poll for ~25 seconds # resource.wait_until(max_attempts:5,delay:5) {|resource|...} # # ## Callbacks # # You can be notified before each polling attempt and before each # delay. If you throw `:success` or `:failure` from these callbacks, # it will terminate the waiter. # # started_at = Time.now # # poll for 1 hour, instead of a number of attempts # proc = Proc.new do |attempts, response| # throw :failure if Time.now - started_at > 3600 # end # # # disable max attempts # instance.wait_until(before_wait:proc, max_attempts:nil) {...} # # ## Handling Errors # # When a waiter is successful, it returns the Resource. When a waiter # fails, it raises an error. # # begin # resource.wait_until(...) # rescue Aws::Waiters::Errors::WaiterFailed # # resource did not enter the desired state in time # end # # @yieldparam [Resource] resource to be used in the waiting condition. # # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter # terminates because the waiter has entered a state that it will not # transition out of, preventing success. # # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the # configured maximum number of attempts have been made, and the # waiter is not yet successful. # # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is # encountered while polling for a resource that is not expected. # # @raise [NotImplementedError] Raised when the resource does not # # @option options [Integer] :max_attempts (10) Maximum number of # attempts # @option options [Integer] :delay (10) Delay between each # attempt in seconds # @option options [Proc] :before_attempt (nil) Callback # invoked before each attempt # @option options [Proc] :before_wait (nil) Callback # invoked before each wait # @return [Resource] if the waiter was successful def wait_until(options = {}, &block) self_copy = self.dup attempts = 0 options[:max_attempts] = 10 unless options.key?(:max_attempts) options[:delay] ||= 10 options[:poller] = Proc.new do attempts += 1 if block.call(self_copy) [:success, self_copy] else self_copy.reload unless attempts == options[:max_attempts] :retry end end Aws::Plugins::UserAgent.feature('resource') do Aws::Waiters::Waiter.new(options).wait({}) end end # @!group Actions # @example Request syntax with placeholder values # # bucket_lifecycle.delete({ # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied).
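# # For example (an illustrative sketch; the account ID is a placeholder): # # # remove the lifecycle configuration, guarding against operating on # # a bucket owned by another account # bucket_lifecycle.delete(expected_bucket_owner: "111122223333") #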
# @return [EmptyStructure] def delete(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.delete_bucket_lifecycle(options) end resp.data end # @example Request syntax with placeholder values # # bucket_lifecycle.put({ # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # lifecycle_configuration: { # rules: [ # required # { # expiration: { # date: Time.now, # days: 1, # expired_object_delete_marker: false, # }, # id: "ID", # prefix: "Prefix", # required # status: "Enabled", # required, accepts Enabled, Disabled # transition: { # date: Time.now, # days: 1, # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR # }, # noncurrent_version_transition: { # noncurrent_days: 1, # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR # newer_noncurrent_versions: 1, # }, # noncurrent_version_expiration: { # noncurrent_days: 1, # newer_noncurrent_versions: 1, # }, # abort_incomplete_multipart_upload: { # days_after_initiation: 1, # }, # }, # ], # }, # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :content_md5 # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [Types::LifecycleConfiguration] :lifecycle_configuration # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [EmptyStructure] def put(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_bucket_lifecycle(options) end resp.data end # @!group Associations # @return [Bucket] def bucket Bucket.new( name: @bucket_name, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end class Collection < Aws::Resources::Collection; end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/bucket_cors.rb0000644000004100000410000002334614563445240021115 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. 
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 class BucketCors extend Aws::Deprecations # @overload def initialize(bucket_name, options = {}) # @param [String] bucket_name # @option options [Client] :client # @overload def initialize(options = {}) # @option options [required, String] :bucket_name # @option options [Client] :client def initialize(*args) options = Hash === args.last ? args.pop.dup : {} @bucket_name = extract_bucket_name(args, options) @data = options.delete(:data) @client = options.delete(:client) || Client.new(options) @waiter_block_warned = false end # @!group Read-Only Attributes # @return [String] def bucket_name @bucket_name end # A set of origins and methods (cross-origin access that you want to # allow). You can add up to 100 rules to the configuration. # @return [Array] def cors_rules data[:cors_rules] end # @!endgroup # @return [Client] def client @client end # Loads, or reloads {#data} for the current {BucketCors}. # Returns `self` making it possible to chain methods. # # bucket_cors.reload.data # # @return [self] def load resp = Aws::Plugins::UserAgent.feature('resource') do @client.get_bucket_cors(bucket: @bucket_name) end @data = resp.data self end alias :reload :load # @return [Types::GetBucketCorsOutput] # Returns the data for this {BucketCors}. Calls # {Client#get_bucket_cors} if {#data_loaded?} is `false`. def data load unless @data @data end # @return [Boolean] # Returns `true` if this resource is loaded. Accessing attributes or # {#data} on an unloaded resource will trigger a call to {#load}. def data_loaded? !!@data end # @deprecated Use [Aws::S3::Client] #wait_until instead # # Waiter polls an API operation until a resource enters a desired # state. # # @note The waiting operation is performed on a copy. The original resource # remains unchanged. # # ## Basic Usage # # The waiter polls until it is successful, until it fails by # entering a terminal state, or until a maximum number of attempts # are made. # # # polls in a loop until condition is true # resource.wait_until(options) {|resource| condition} # # ## Example # # instance.wait_until(max_attempts:10, delay:5) do |instance| # instance.state.name == 'running' # end # # ## Configuration # # You can configure the maximum number of polling attempts, and the # delay (in seconds) between each polling attempt. The waiting condition is # set by passing a block to {#wait_until}: # # # poll for ~25 seconds # resource.wait_until(max_attempts:5,delay:5) {|resource|...} # # ## Callbacks # # You can be notified before each polling attempt and before each # delay. If you throw `:success` or `:failure` from these callbacks, # it will terminate the waiter. # # started_at = Time.now # # poll for 1 hour, instead of a number of attempts # proc = Proc.new do |attempts, response| # throw :failure if Time.now - started_at > 3600 # end # # # disable max attempts # instance.wait_until(before_wait:proc, max_attempts:nil) {...} # # ## Handling Errors # # When a waiter is successful, it returns the Resource. When a waiter # fails, it raises an error. # # begin # resource.wait_until(...) # rescue Aws::Waiters::Errors::WaiterFailed # # resource did not enter the desired state in time # end # # @yieldparam [Resource] resource to be used in the waiting condition.
# # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter # terminates because the waiter has entered a state that it will not # transition out of, preventing success. # # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the # configured maximum number of attempts have been made, and the # waiter is not yet successful. # # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is # encountered while polling for a resource that is not expected. # # @raise [NotImplementedError] Raised when the resource does not # # @option options [Integer] :max_attempts (10) Maximum number of # attempts # @option options [Integer] :delay (10) Delay between each # attempt in seconds # @option options [Proc] :before_attempt (nil) Callback # invoked before each attempt # @option options [Proc] :before_wait (nil) Callback # invoked before each wait # @return [Resource] if the waiter was successful def wait_until(options = {}, &block) self_copy = self.dup attempts = 0 options[:max_attempts] = 10 unless options.key?(:max_attempts) options[:delay] ||= 10 options[:poller] = Proc.new do attempts += 1 if block.call(self_copy) [:success, self_copy] else self_copy.reload unless attempts == options[:max_attempts] :retry end end Aws::Plugins::UserAgent.feature('resource') do Aws::Waiters::Waiter.new(options).wait({}) end end # @!group Actions # @example Request syntax with placeholder values # # bucket_cors.delete({ # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [EmptyStructure] def delete(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.delete_bucket_cors(options) end resp.data end # @example Request syntax with placeholder values # # bucket_cors.put({ # cors_configuration: { # required # cors_rules: [ # required # { # id: "ID", # allowed_headers: ["AllowedHeader"], # allowed_methods: ["AllowedMethod"], # required # allowed_origins: ["AllowedOrigin"], # required # expose_headers: ["ExposeHeader"], # max_age_seconds: 1, # }, # ], # }, # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [required, Types::CORSConfiguration] :cors_configuration # Describes the cross-origin access configuration for objects in an # Amazon S3 bucket. For more information, see [Enabling Cross-Origin # Resource Sharing][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html # @option options [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the data. This header must be # used as a message integrity check to verify that the request body was # not corrupted in transit. For more information, go to [RFC 1864.][1] # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent.
Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [EmptyStructure] def put(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_bucket_cors(options) end resp.data end # @!group Associations # @return [Bucket] def bucket Bucket.new( name: @bucket_name, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end class Collection < Aws::Resources::Collection; end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/object_copier.rb0000644000004100000410000000572514563445240021422 0ustar www-datawww-data# frozen_string_literal: true require 'thread' module Aws module S3 # @api private class ObjectCopier # @param [S3::Object] object def initialize(object, options = {}) @object = object @options = options.merge(client: @object.client) end def copy_from(source, options = {}) copy_object(source, @object, merge_options(source, options)) end def copy_to(target, options = {}) copy_object(@object, target, merge_options(target, options)) end private def copy_object(source, target, options) target_bucket, target_key = copy_target(target) options[:bucket] = target_bucket options[:key] = target_key options[:copy_source] = copy_source(source) Aws::Plugins::UserAgent.feature('s3-transfer') do if options.delete(:multipart_copy) apply_source_client(source, options) ObjectMultipartCopier.new(@options).copy(options) else @object.client.copy_object(options) end end end def copy_source(source) case source when String then source when Hash src = "#{source[:bucket]}/#{escape(source[:key])}" src += "?versionId=#{source[:version_id]}" if source.key?(:version_id) src when S3::Object, S3::ObjectSummary "#{source.bucket_name}/#{escape(source.key)}" when S3::ObjectVersion "#{source.bucket_name}/#{escape(source.object_key)}?versionId=#{source.id}" else msg = "expected source to be an Aws::S3::Object, Hash, or String" raise ArgumentError, msg end end def copy_target(target) case target when String then target.match(/([^\/]+?)\/(.+)/)[1,2] when Hash then target.values_at(:bucket, :key) when S3::Object then [target.bucket_name, target.key] else msg = "expected target to be an Aws::S3::Object, Hash, or String" raise ArgumentError, msg end end def merge_options(source_or_target, options) if Hash === source_or_target source_or_target.inject(options.dup) do |opts, (key, value)| opts[key] = value unless [:bucket, :key, :version_id].include?(key) opts end else options.dup end end def apply_source_client(source, options) if source.respond_to?(:client) options[:copy_source_client] ||= source.client end if 
options[:copy_source_region] config = @object.client.config config = config.each_pair.inject({}) { |h, (k,v)| h[k] = v; h } config[:region] = options.delete(:copy_source_region) options[:copy_source_client] ||= S3::Client.new(config) end options[:copy_source_client] ||= @object.client end def escape(str) Seahorse::Util.uri_path_escape(str) end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/client.rb0000644000004100000410000325610214563445240020071 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE require 'seahorse/client/plugins/content_length.rb' require 'aws-sdk-core/plugins/credentials_configuration.rb' require 'aws-sdk-core/plugins/logging.rb' require 'aws-sdk-core/plugins/param_converter.rb' require 'aws-sdk-core/plugins/param_validator.rb' require 'aws-sdk-core/plugins/user_agent.rb' require 'aws-sdk-core/plugins/helpful_socket_errors.rb' require 'aws-sdk-core/plugins/retry_errors.rb' require 'aws-sdk-core/plugins/global_configuration.rb' require 'aws-sdk-core/plugins/regional_endpoint.rb' require 'aws-sdk-core/plugins/endpoint_discovery.rb' require 'aws-sdk-core/plugins/endpoint_pattern.rb' require 'aws-sdk-core/plugins/response_paging.rb' require 'aws-sdk-core/plugins/stub_responses.rb' require 'aws-sdk-core/plugins/idempotency_token.rb' require 'aws-sdk-core/plugins/jsonvalue_converter.rb' require 'aws-sdk-core/plugins/client_metrics_plugin.rb' require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb' require 'aws-sdk-core/plugins/transfer_encoding.rb' require 'aws-sdk-core/plugins/http_checksum.rb' require 'aws-sdk-core/plugins/checksum_algorithm.rb' require 'aws-sdk-core/plugins/request_compression.rb' require 'aws-sdk-core/plugins/defaults_mode.rb' require 'aws-sdk-core/plugins/recursion_detection.rb' require 'aws-sdk-core/plugins/sign.rb' require 'aws-sdk-core/plugins/protocols/rest_xml.rb' require 'aws-sdk-s3/plugins/accelerate.rb' require 'aws-sdk-s3/plugins/arn.rb' require 'aws-sdk-s3/plugins/bucket_dns.rb' require 'aws-sdk-s3/plugins/bucket_name_restrictions.rb' require 'aws-sdk-s3/plugins/dualstack.rb' require 'aws-sdk-s3/plugins/expect_100_continue.rb' require 'aws-sdk-s3/plugins/express_session_auth.rb' require 'aws-sdk-s3/plugins/get_bucket_location_fix.rb' require 'aws-sdk-s3/plugins/http_200_errors.rb' require 'aws-sdk-s3/plugins/iad_regional_endpoint.rb' require 'aws-sdk-s3/plugins/location_constraint.rb' require 'aws-sdk-s3/plugins/md5s.rb' require 'aws-sdk-s3/plugins/redirects.rb' require 'aws-sdk-s3/plugins/s3_host_id.rb' require 'aws-sdk-s3/plugins/s3_signer.rb' require 'aws-sdk-s3/plugins/sse_cpk.rb' require 'aws-sdk-s3/plugins/skip_whole_multipart_get_checksums.rb' require 'aws-sdk-s3/plugins/streaming_retry.rb' require 'aws-sdk-s3/plugins/url_encoded_keys.rb' require 'aws-sdk-core/plugins/event_stream_configuration.rb' Aws::Plugins::GlobalConfiguration.add_identifier(:s3) module Aws::S3 # An API client for S3. To construct a client, you need to configure a `:region` and `:credentials`. # # client = Aws::S3::Client.new( # region: region_name, # credentials: credentials, # # ... # ) # # For details on configuring region and credentials see # the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html). # # See {#initialize} for a full list of supported configuration options. 
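# # Because this client includes {Aws::ClientStubs}, responses can also be # stubbed in tests without making real network calls (a minimal sketch; # the bucket name is a placeholder): # # s3 = Aws::S3::Client.new(stub_responses: true) # s3.stub_responses(:list_buckets, buckets: [{ name: 'my-bucket' }]) # s3.list_buckets.buckets.map(&:name) #=> ["my-bucket"] #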
class Client < Seahorse::Client::Base include Aws::ClientStubs @identifier = :s3 set_api(ClientApi::API) add_plugin(Seahorse::Client::Plugins::ContentLength) add_plugin(Aws::Plugins::CredentialsConfiguration) add_plugin(Aws::Plugins::Logging) add_plugin(Aws::Plugins::ParamConverter) add_plugin(Aws::Plugins::ParamValidator) add_plugin(Aws::Plugins::UserAgent) add_plugin(Aws::Plugins::HelpfulSocketErrors) add_plugin(Aws::Plugins::RetryErrors) add_plugin(Aws::Plugins::GlobalConfiguration) add_plugin(Aws::Plugins::RegionalEndpoint) add_plugin(Aws::Plugins::EndpointDiscovery) add_plugin(Aws::Plugins::EndpointPattern) add_plugin(Aws::Plugins::ResponsePaging) add_plugin(Aws::Plugins::StubResponses) add_plugin(Aws::Plugins::IdempotencyToken) add_plugin(Aws::Plugins::JsonvalueConverter) add_plugin(Aws::Plugins::ClientMetricsPlugin) add_plugin(Aws::Plugins::ClientMetricsSendPlugin) add_plugin(Aws::Plugins::TransferEncoding) add_plugin(Aws::Plugins::HttpChecksum) add_plugin(Aws::Plugins::ChecksumAlgorithm) add_plugin(Aws::Plugins::RequestCompression) add_plugin(Aws::Plugins::DefaultsMode) add_plugin(Aws::Plugins::RecursionDetection) add_plugin(Aws::Plugins::Sign) add_plugin(Aws::Plugins::Protocols::RestXml) add_plugin(Aws::S3::Plugins::Accelerate) add_plugin(Aws::S3::Plugins::ARN) add_plugin(Aws::S3::Plugins::BucketDns) add_plugin(Aws::S3::Plugins::BucketNameRestrictions) add_plugin(Aws::S3::Plugins::Dualstack) add_plugin(Aws::S3::Plugins::Expect100Continue) add_plugin(Aws::S3::Plugins::ExpressSessionAuth) add_plugin(Aws::S3::Plugins::GetBucketLocationFix) add_plugin(Aws::S3::Plugins::Http200Errors) add_plugin(Aws::S3::Plugins::IADRegionalEndpoint) add_plugin(Aws::S3::Plugins::LocationConstraint) add_plugin(Aws::S3::Plugins::Md5s) add_plugin(Aws::S3::Plugins::Redirects) add_plugin(Aws::S3::Plugins::S3HostId) add_plugin(Aws::S3::Plugins::S3Signer) add_plugin(Aws::S3::Plugins::SseCpk) add_plugin(Aws::S3::Plugins::SkipWholeMultipartGetChecksums) add_plugin(Aws::S3::Plugins::StreamingRetry) add_plugin(Aws::S3::Plugins::UrlEncodedKeys) add_plugin(Aws::Plugins::EventStreamConfiguration) add_plugin(Aws::S3::Plugins::Endpoints) # @overload initialize(options) # @param [Hash] options # @option options [required, Aws::CredentialProvider] :credentials # Your AWS credentials. This can be an instance of any one of the # following classes: # # * `Aws::Credentials` - Used for configuring static, non-refreshing # credentials. # # * `Aws::SharedCredentials` - Used for loading static credentials from a # shared file, such as `~/.aws/config`. # # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role. # # * `Aws::AssumeRoleWebIdentityCredentials` - Used when you need to # assume a role after providing credentials via the web. # # * `Aws::SSOCredentials` - Used for loading credentials from AWS SSO using an # access token generated from `aws login`. # # * `Aws::ProcessCredentials` - Used for loading credentials from a # process that outputs to stdout. # # * `Aws::InstanceProfileCredentials` - Used for loading credentials # from an EC2 IMDS on an EC2 instance. # # * `Aws::ECSCredentials` - Used for loading credentials from # instances running in ECS. # # * `Aws::CognitoIdentityCredentials` - Used for loading credentials # from the Cognito Identity service. # # When `:credentials` are not configured directly, the following # locations will be searched for credentials: # # * `Aws.config[:credentials]` # * The `:access_key_id`, `:secret_access_key`, and `:session_token` options. 
    #     * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
    #     * `~/.aws/credentials`
    #     * `~/.aws/config`
    #     * EC2/ECS IMDS instance profile - When used by default, the timeouts
    #       are very aggressive. Construct and pass an instance of
    #       `Aws::InstanceProfileCredentials` or `Aws::ECSCredentials` to
    #       enable retries and extended timeouts. Instance profile credential
    #       fetching can be disabled by setting ENV['AWS_EC2_METADATA_DISABLED']
    #       to true.
    #
    #   @option options [required, String] :region
    #     The AWS region to connect to. The configured `:region` is
    #     used to determine the service `:endpoint`. When not passed,
    #     a default `:region` is searched for in the following locations:
    #
    #     * `Aws.config[:region]`
    #     * `ENV['AWS_REGION']`
    #     * `ENV['AMAZON_REGION']`
    #     * `ENV['AWS_DEFAULT_REGION']`
    #     * `~/.aws/credentials`
    #     * `~/.aws/config`
    #
    #   @option options [String] :access_key_id
    #
    #   @option options [Boolean] :active_endpoint_cache (false)
    #     When set to `true`, a thread polling for endpoints will be running in
    #     the background every 60 secs (default). Defaults to `false`.
    #
    #   @option options [Boolean] :adaptive_retry_wait_to_fill (true)
    #     Used only in `adaptive` retry mode. When true, the request will sleep
    #     until there is sufficient client side capacity to retry the request.
    #     When false, the request will raise a `RetryCapacityNotAvailableError` and will
    #     not retry instead of sleeping.
    #
    #   @option options [Boolean] :client_side_monitoring (false)
    #     When `true`, client-side metrics will be collected for all API requests from
    #     this client.
    #
    #   @option options [String] :client_side_monitoring_client_id ("")
    #     Allows you to provide an identifier for this client which will be attached to
    #     all generated client side metrics. Defaults to an empty string.
    #
    #   @option options [String] :client_side_monitoring_host ("127.0.0.1")
    #     Allows you to specify the DNS hostname or IPv4 or IPv6 address that the client
    #     side monitoring agent is running on, where client metrics will be published via UDP.
    #
    #   @option options [Integer] :client_side_monitoring_port (31000)
    #     Required for publishing client metrics. The port that the client side monitoring
    #     agent is running on, where client metrics will be published via UDP.
    #
    #   @option options [Aws::ClientSideMonitoring::Publisher] :client_side_monitoring_publisher (Aws::ClientSideMonitoring::Publisher)
    #     Allows you to provide a custom client-side monitoring publisher class. By default,
    #     will use the Client Side Monitoring Agent Publisher.
    #
    #   @option options [Boolean] :compute_checksums (true)
    #     When `true`, an MD5 checksum will be computed and sent in the Content Md5
    #     header for :put_object and :upload_part. When `false`, MD5 checksums
    #     will not be computed for these operations. Checksums are still computed
    #     for operations requiring them. Checksum errors returned by Amazon S3 are
    #     automatically retried up to `:retry_limit` times.
    #
    #   @option options [Boolean] :convert_params (true)
    #     When `true`, an attempt is made to coerce request parameters into
    #     the required types.
    #
    #   @option options [Boolean] :correct_clock_skew (true)
    #     Used only in `standard` and adaptive retry modes. Specifies whether to apply
    #     a clock skew correction and retry requests with skewed client clocks.
    #
    #   @option options [String] :defaults_mode ("legacy")
    #     See {Aws::DefaultsModeConfiguration} for a list of the
    #     accepted modes and the configuration defaults that are included.
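    #     For example (a minimal sketch; the mode value shown is illustrative,
    #     any of the accepted modes may be used):
    #
    #         client = Aws::S3::Client.new(defaults_mode: 'standard')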
    #
    #   @option options [Boolean] :disable_host_prefix_injection (false)
    #     Set to true to disable SDK automatically adding host prefix
    #     to default service endpoint when available.
    #
    #   @option options [Boolean] :disable_request_compression (false)
    #     When set to 'true' the request body will not be compressed
    #     for supported operations.
    #
    #   @option options [Boolean] :disable_s3_express_session_auth
    #     Parameter to indicate whether S3Express session auth should be disabled.
    #
    #   @option options [String] :endpoint
    #     The client endpoint is normally constructed from the `:region`
    #     option. You should only configure an `:endpoint` when connecting
    #     to test or custom endpoints. This should be a valid HTTP(S) URI.
    #
    #   @option options [Integer] :endpoint_cache_max_entries (1000)
    #     Used for the maximum size limit of the LRU cache storing endpoints data
    #     for endpoint discovery enabled operations. Defaults to 1000.
    #
    #   @option options [Integer] :endpoint_cache_max_threads (10)
    #     Used for the maximum threads in use for polling endpoints to be cached, defaults to 10.
    #
    #   @option options [Integer] :endpoint_cache_poll_interval (60)
    #     When :endpoint_discovery and :active_endpoint_cache are enabled,
    #     use this option to configure the time interval in seconds for making
    #     requests fetching endpoints information. Defaults to 60 sec.
    #
    #   @option options [Boolean] :endpoint_discovery (false)
    #     When set to `true`, endpoint discovery will be enabled for operations when available.
    #
    #   @option options [Proc] :event_stream_handler
    #     When an EventStream or Proc object is provided, it will be used as a callback for each chunk of event stream response received along the way.
    #
    #   @option options [Aws::S3::ExpressCredentialsProvider] :express_credentials_provider
    #     Credential Provider for S3 Express endpoints. Manages credentials
    #     for different buckets.
    #
    #   @option options [Boolean] :follow_redirects (true)
    #     When `true`, this client will follow 307 redirects returned
    #     by Amazon S3.
    #
    #   @option options [Boolean] :force_path_style (false)
    #     When set to `true`, the bucket name is always left in the
    #     request URI and never moved to the host as a sub-domain.
    #
    #   @option options [Boolean] :ignore_configured_endpoint_urls
    #     Setting to true disables use of endpoint URLs provided via environment
    #     variables and the shared configuration file.
    #
    #   @option options [Proc] :input_event_stream_handler
    #     When an EventStream or Proc object is provided, it can be used for sending events for the event stream.
    #
    #   @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
    #     The log formatter.
    #
    #   @option options [Symbol] :log_level (:info)
    #     The log level to send messages to the `:logger` at.
    #
    #   @option options [Logger] :logger
    #     The Logger instance to send log messages to. If this option
    #     is not set, logging will be disabled.
    #
    #   @option options [Integer] :max_attempts (3)
    #     An integer representing the maximum number of attempts that will be made for
    #     a single request, including the initial attempt. For example,
    #     setting this value to 5 will result in a request being retried up to
    #     4 times. Used in `standard` and `adaptive` retry modes.
    #
    #   @option options [Proc] :output_event_stream_handler
    #     When an EventStream or Proc object is provided, it will be used as a callback for each chunk of event stream response received along the way.
    #
    #   @option options [String] :profile ("default")
    #     Used when loading credentials from the shared credentials file
    #     at HOME/.aws/credentials. When not specified, 'default' is used.
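    #     For example (a sketch; the profile name is illustrative):
    #
    #         client = Aws::S3::Client.new(profile: 'staging', region: 'us-west-2')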
    #
    #   @option options [Integer] :request_min_compression_size_bytes (10240)
    #     The minimum size in bytes that triggers compression for request
    #     bodies. The value must be a non-negative integer value between 0
    #     and 10485780 bytes inclusive.
    #
    #   @option options [Boolean] :require_https_for_sse_cpk (true)
    #     When `true`, the endpoint **must** be HTTPS for all operations
    #     where server-side-encryption is used with customer-provided keys.
    #     This should only be disabled for local testing.
    #
    #   @option options [Proc] :retry_backoff
    #     A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
    #     This option is only used in the `legacy` retry mode.
    #
    #   @option options [Float] :retry_base_delay (0.3)
    #     The base delay in seconds used by the default backoff function. This option
    #     is only used in the `legacy` retry mode.
    #
    #   @option options [Symbol] :retry_jitter (:none)
    #     A delay randomiser function used by the default backoff function.
    #     Some predefined functions can be referenced by name - :none, :equal, :full,
    #     otherwise a Proc that takes and returns a number. This option is only used
    #     in the `legacy` retry mode.
    #
    #     @see https://www.awsarchitectureblog.com/2015/03/backoff.html
    #
    #   @option options [Integer] :retry_limit (3)
    #     The maximum number of times to retry failed requests. Only
    #     ~ 500 level server errors and certain ~ 400 level client errors
    #     are retried. Generally, these are throttling errors, data
    #     checksum errors, networking errors, timeout errors, auth errors,
    #     endpoint discovery, and errors from expired credentials.
    #     This option is only used in the `legacy` retry mode.
    #
    #   @option options [Integer] :retry_max_delay (0)
    #     The maximum number of seconds to delay between retries (0 for no limit)
    #     used by the default backoff function. This option is only used in the
    #     `legacy` retry mode.
    #
    #   @option options [String] :retry_mode ("legacy")
    #     Specifies which retry algorithm to use. Values are:
    #
    #     * `legacy` - The pre-existing retry behavior. This is the default value if
    #       no retry mode is provided.
    #
    #     * `standard` - A standardized set of retry rules across the AWS SDKs.
    #       This includes support for retry quotas, which limit the number of
    #       unsuccessful retries a client can make.
    #
    #     * `adaptive` - An experimental retry mode that includes all the
    #       functionality of `standard` mode along with automatic client side
    #       throttling. This is a provisional mode that may change behavior
    #       in the future.
    #
    #   @option options [Boolean] :s3_disable_multiregion_access_points (false)
    #     When set to `false` this option will raise errors when multi-region
    #     access point ARNs are used. Multi-region access points can potentially
    #     result in cross region requests.
    #
    #   @option options [String] :s3_us_east_1_regional_endpoint ("legacy")
    #     Pass in `regional` to enable the `us-east-1` regional endpoint.
    #     Defaults to `legacy` mode which uses the global endpoint.
    #
    #   @option options [Boolean] :s3_use_arn_region (true)
    #     For S3 ARNs passed into the `:bucket` parameter, this option will
    #     use the region in the ARN, allowing for cross-region requests to
    #     be made. Set to `false` to use the client's region instead.
    #
    #   @option options [String] :sdk_ua_app_id
    #     A unique and opaque application ID that is appended to the
    #     User-Agent header as `app/<sdk_ua_app_id>`. It should have a
    #     maximum length of 50.
    #
    #   @option options [String] :secret_access_key
    #
    #   @option options [String] :session_token
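    #     For example, static credentials can be passed directly (a sketch;
    #     the values below are placeholders, not real keys):
    #
    #         client = Aws::S3::Client.new(
    #           access_key_id: 'AKIA...',
    #           secret_access_key: 'example-secret-key',
    #           session_token: 'example-session-token'
    #         )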
    #
    #   @option options [Boolean] :stub_responses (false)
    #     Causes the client to return stubbed responses. By default
    #     fake responses are generated and returned. You can specify
    #     the response data to return or errors to raise by calling
    #     {ClientStubs#stub_responses}. See {ClientStubs} for more information.
    #
    #     ** Please note ** When response stubbing is enabled, no HTTP
    #     requests are made, and retries are disabled.
    #
    #   @option options [Aws::TokenProvider] :token_provider
    #     A Bearer Token Provider. This can be an instance of any one of the
    #     following classes:
    #
    #     * `Aws::StaticTokenProvider` - Used for configuring static, non-refreshing
    #       tokens.
    #
    #     * `Aws::SSOTokenProvider` - Used for loading tokens from AWS SSO using an
    #       access token generated from `aws login`.
    #
    #     When `:token_provider` is not configured directly, the `Aws::TokenProviderChain`
    #     will be used to search for tokens configured for your profile in shared configuration files.
    #
    #   @option options [Boolean] :use_accelerate_endpoint (false)
    #     When set to `true`, accelerated bucket endpoints will be used
    #     for all object operations. You must first enable accelerate for
    #     each bucket. [Go here for more information](http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
    #
    #   @option options [Boolean] :use_dualstack_endpoint
    #     When set to `true`, dualstack enabled endpoints (with `.aws` TLD)
    #     will be used if available.
    #
    #   @option options [Boolean] :use_fips_endpoint
    #     When set to `true`, fips compatible endpoints will be used if available.
    #     When a `fips` region is used, the region is normalized and this config
    #     is set to `true`.
    #
    #   @option options [Boolean] :validate_params (true)
    #     When `true`, request parameters are validated before
    #     sending the request.
    #
    #   @option options [Aws::S3::EndpointProvider] :endpoint_provider
    #     The endpoint provider used to resolve endpoints. Any object that responds to `#resolve_endpoint(parameters)` where `parameters` is a Struct similar to `Aws::S3::EndpointParameters`.
    #
    #   @option options [URI::HTTP,String] :http_proxy A proxy to send
    #     requests through. Formatted like 'http://proxy.com:123'.
    #
    #   @option options [Float] :http_open_timeout (15) The number of
    #     seconds to wait when opening a HTTP session before raising a
    #     `Timeout::Error`.
    #
    #   @option options [Float] :http_read_timeout (60) The default
    #     number of seconds to wait for response data. This value can
    #     safely be set per-request on the session.
    #
    #   @option options [Float] :http_idle_timeout (5) The number of
    #     seconds a connection is allowed to sit idle before it is
    #     considered stale. Stale connections are closed and removed
    #     from the pool before making a request.
    #
    #   @option options [Float] :http_continue_timeout (1) The number of
    #     seconds to wait for a 100-continue response before sending the
    #     request body. This option has no effect unless the request has
    #     the "Expect" header set to "100-continue". Defaults to `nil` which
    #     disables this behaviour. This value can safely be set per
    #     request on the session.
    #
    #   @option options [Float] :ssl_timeout (nil) Sets the SSL timeout
    #     in seconds.
    #
    #   @option options [Boolean] :http_wire_trace (false) When `true`,
    #     HTTP debug output will be sent to the `:logger`.
    #
    #   @option options [Boolean] :ssl_verify_peer (true) When `true`,
    #     SSL peer certificates are verified when establishing a
    #     connection.
    #
    #   @option options [String] :ssl_ca_bundle Full path to the SSL
    #     certificate authority bundle file that should be used when
    #     verifying peer certificates. If you do not pass
    #     `:ssl_ca_bundle` or `:ssl_ca_directory` the system default
    #     will be used if available.
    #
    #   @option options [String] :ssl_ca_directory Full path of the
    #     directory that contains the unbundled SSL certificate
    #     authority files for verifying peer certificates. If you do
    #     not pass `:ssl_ca_bundle` or `:ssl_ca_directory` the
    #     system default will be used if available.
    #
    def initialize(*args)
      super
    end

    # @!group API Operations

    # This operation aborts a multipart upload. After a multipart upload is
    # aborted, no additional parts can be uploaded using that upload ID. The
    # storage consumed by any previously uploaded parts will be freed.
    # However, if any part uploads are currently in progress, those part
    # uploads might or might not succeed. As a result, it might be necessary
    # to abort a given multipart upload multiple times in order to
    # completely free all storage consumed by all parts.
    #
    # To verify that all parts have been removed and prevent getting charged
    # for the part storage, you should call the [ListParts][1] API operation
    # and ensure that the parts list is empty. A sketch of this cleanup
    # pattern follows this description.
    #
    # **Directory buckets** - For directory buckets, you must make requests
    # for this API operation to the Zonal endpoint. These endpoints support
    # virtual-hosted-style requests in the format
    # `https://bucket_name.s3express-az_id.region.amazonaws.com/key-name `.
    # Path-style requests are not supported. For more information, see
    # [Regional and Zonal endpoints][2] in the *Amazon S3 User Guide*.
    #
    # Permissions
    # : * **General purpose bucket permissions** - For information about
    #     permissions required to use the multipart upload, see [Multipart
    #     Upload and Permissions][3] in the *Amazon S3 User Guide*.
    #
    #   * **Directory bucket permissions** - To grant access to this API
    #     operation on a directory bucket, we recommend that you use the [
    #     `CreateSession` ][4] API operation for session-based
    #     authorization. Specifically, you grant the
    #     `s3express:CreateSession` permission to the directory bucket in a
    #     bucket policy or an IAM identity-based policy. Then, you make the
    #     `CreateSession` API call on the bucket to obtain a session token.
    #     With the session token in your request header, you can make API
    #     requests to this operation. After the session token expires, you
    #     make another `CreateSession` API call to generate a new session
    #     token for use. Amazon Web Services CLI or SDKs create session and
    #     refresh the session token automatically to avoid service
    #     interruptions when a session expires. For more information about
    #     authorization, see [ `CreateSession` ][4].
    #
    # HTTP Host header syntax
    #
    # : Directory buckets - The HTTP Host header syntax is `
    #   Bucket_name.s3express-az_id.region.amazonaws.com`.
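    #
    # As a minimal sketch of that cleanup pattern (the bucket name is
    # illustrative; `client` is a constructed {Client}): list the
    # in-progress uploads and abort each one.
    #
    #     uploads = client.list_multipart_uploads(bucket: 'my-bucket').uploads
    #     uploads.each do |upload|
    #       client.abort_multipart_upload(
    #         bucket: 'my-bucket',
    #         key: upload.key,
    #         upload_id: upload.upload_id
    #       )
    #     end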
# # The following operations are related to `AbortMultipartUpload`: # # * [CreateMultipartUpload][5] # # * [UploadPart][6] # # * [CompleteMultipartUpload][7] # # * [ListParts][1] # # * [ListMultipartUploads][8] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html # # @option params [required, String] :bucket # The bucket name to which the upload was taking place. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [required, String] :key # Key of the object for which the multipart upload was initiated. # # @option params [required, String] :upload_id # Upload ID that identifies the multipart upload. # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. 
If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::AbortMultipartUploadOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::AbortMultipartUploadOutput#request_charged #request_charged} => String # # # @example Example: To abort a multipart upload # # # The following example aborts a multipart upload. # # resp = client.abort_multipart_upload({ # bucket: "examplebucket", # key: "bigobject", # upload_id: "xadcOB_7YPBOJuoFiQ9cz4P3Pe6FIZwO4f7wN93uHsNBEw97pl5eNwzExg0LAT2dUN91cOmrEQHDsP3WA60CEg--", # }) # # resp.to_h outputs the following: # { # } # # @example Request syntax with placeholder values # # resp = client.abort_multipart_upload({ # bucket: "BucketName", # required # key: "ObjectKey", # required # upload_id: "MultipartUploadId", # required # request_payer: "requester", # accepts requester # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload AWS API Documentation # # @overload abort_multipart_upload(params = {}) # @param [Hash] params ({}) def abort_multipart_upload(params = {}, options = {}) req = build_request(:abort_multipart_upload, params) req.send_request(options) end # Completes a multipart upload by assembling previously uploaded parts. # # You first initiate the multipart upload and then upload all parts # using the [UploadPart][1] operation or the [UploadPartCopy][2] # operation. After successfully uploading all relevant parts of an # upload, you call this `CompleteMultipartUpload` operation to complete # the upload. Upon receiving this request, Amazon S3 concatenates all # the parts in ascending order by part number to create a new object. In # the CompleteMultipartUpload request, you must provide the parts list # and ensure that the parts list is complete. The # CompleteMultipartUpload API operation concatenates the parts that you # provide in the list. For each part in the list, you must provide the # `PartNumber` value and the `ETag` value that are returned after that # part was uploaded. # # The processing of a CompleteMultipartUpload request could take several # minutes to finalize. After Amazon S3 begins processing the request, it # sends an HTTP response header that specifies a `200 OK` response. # While processing is in progress, Amazon S3 periodically sends white # space characters to keep the connection from timing out. A request # could fail after the initial `200 OK` response has been sent. This # means that a `200 OK` response can contain either a success or an # error. The error response might be embedded in the `200 OK` response. 
# If you call this API operation directly, make sure to design your # application to parse the contents of the response and handle it # appropriately. If you use Amazon Web Services SDKs, SDKs handle this # condition. The SDKs detect the embedded error and apply error handling # per your configuration settings (including automatically retrying the # request as appropriate). If the condition persists, the SDKs throw an # exception (or, for the SDKs that don't use exceptions, they return an # error). # # Note that if `CompleteMultipartUpload` fails, applications should be # prepared to retry the failed requests. For more information, see # [Amazon S3 Error Best Practices][3]. # # You can't use `Content-Type: application/x-www-form-urlencoded` for # the CompleteMultipartUpload requests. Also, if you don't provide a # `Content-Type` header, `CompleteMultipartUpload` can still return a # `200 OK` response. # # For more information about multipart uploads, see [Uploading Objects # Using Multipart Upload][4] in the *Amazon S3 User Guide*. # # **Directory buckets** - For directory buckets, you must make requests # for this API operation to the Zonal endpoint. These endpoints support # virtual-hosted-style requests in the format # `https://bucket_name.s3express-az_id.region.amazonaws.com/key-name `. # Path-style requests are not supported. For more information, see # [Regional and Zonal endpoints][5] in the *Amazon S3 User Guide*. # # # # Permissions # : * **General purpose bucket permissions** - For information about # permissions required to use the multipart upload API, see # [Multipart Upload and Permissions][6] in the *Amazon S3 User # Guide*. # # * **Directory bucket permissions** - To grant access to this API # operation on a directory bucket, we recommend that you use the [ # `CreateSession` ][7] API operation for session-based # authorization. Specifically, you grant the # `s3express:CreateSession` permission to the directory bucket in a # bucket policy or an IAM identity-based policy. Then, you make the # `CreateSession` API call on the bucket to obtain a session token. # With the session token in your request header, you can make API # requests to this operation. After the session token expires, you # make another `CreateSession` API call to generate a new session # token for use. Amazon Web Services CLI or SDKs create session and # refresh the session token automatically to avoid service # interruptions when a session expires. For more information about # authorization, see [ `CreateSession` ][7]. # # Special errors # : * Error Code: `EntityTooSmall` # # * Description: Your proposed upload is smaller than the minimum # allowed object size. Each part must be at least 5 MB in size, # except the last part. # # * HTTP Status Code: 400 Bad Request # # * Error Code: `InvalidPart` # # * Description: One or more of the specified parts could not be # found. The part might not have been uploaded, or the specified # ETag might not have matched the uploaded part's ETag. # # * HTTP Status Code: 400 Bad Request # # * Error Code: `InvalidPartOrder` # # * Description: The list of parts was not in ascending order. The # parts list must be specified in order by part number. # # * HTTP Status Code: 400 Bad Request # # * Error Code: `NoSuchUpload` # # * Description: The specified multipart upload does not exist. The # upload ID might be invalid, or the multipart upload might have # been aborted or completed. 
# # * HTTP Status Code: 404 Not Found # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is ` # Bucket_name.s3express-az_id.region.amazonaws.com`. # # The following operations are related to `CompleteMultipartUpload`: # # * [CreateMultipartUpload][8] # # * [UploadPart][1] # # * [AbortMultipartUpload][9] # # * [ListParts][10] # # * [ListMultipartUploads][11] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html # # @option params [required, String] :bucket # Name of the bucket to which the multipart upload was initiated. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [required, String] :key # Object key for which the multipart upload was initiated. # # @option params [Types::CompletedMultipartUpload] :multipart_upload # The container for the multipart upload request information. # # @option params [required, String] :upload_id # ID for the initiated multipart upload. # # @option params [String] :checksum_crc32 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32 checksum of the object. For # more information, see [Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :checksum_crc32c # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32C checksum of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :checksum_sha1 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 160-bit SHA-1 digest of the object. For # more information, see [Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :checksum_sha256 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 256-bit SHA-256 digest of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @option params [String] :sse_customer_algorithm # The server-side encryption (SSE) algorithm used to encrypt the object. 
# This parameter is required only when the object was created using a # checksum algorithm or if your bucket policy requires the use of SSE-C. # For more information, see [Protecting data using SSE-C keys][1] in the # *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key # # @option params [String] :sse_customer_key # The server-side encryption (SSE) customer managed key. This parameter # is needed only when the object was created using a checksum algorithm. # For more information, see [Protecting data using SSE-C keys][1] in the # *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # # @option params [String] :sse_customer_key_md5 # The MD5 server-side encryption (SSE) customer managed key. This # parameter is needed only when the object was created using a checksum # algorithm. For more information, see [Protecting data using SSE-C # keys][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # # @return [Types::CompleteMultipartUploadOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CompleteMultipartUploadOutput#location #location} => String # * {Types::CompleteMultipartUploadOutput#bucket #bucket} => String # * {Types::CompleteMultipartUploadOutput#key #key} => String # * {Types::CompleteMultipartUploadOutput#expiration #expiration} => String # * {Types::CompleteMultipartUploadOutput#etag #etag} => String # * {Types::CompleteMultipartUploadOutput#checksum_crc32 #checksum_crc32} => String # * {Types::CompleteMultipartUploadOutput#checksum_crc32c #checksum_crc32c} => String # * {Types::CompleteMultipartUploadOutput#checksum_sha1 #checksum_sha1} => String # * {Types::CompleteMultipartUploadOutput#checksum_sha256 #checksum_sha256} => String # * {Types::CompleteMultipartUploadOutput#server_side_encryption #server_side_encryption} => String # * {Types::CompleteMultipartUploadOutput#version_id #version_id} => String # * {Types::CompleteMultipartUploadOutput#ssekms_key_id #ssekms_key_id} => String # * {Types::CompleteMultipartUploadOutput#bucket_key_enabled #bucket_key_enabled} => Boolean # * {Types::CompleteMultipartUploadOutput#request_charged #request_charged} => String # # # @example Example: To complete multipart upload # # # The following example completes a multipart upload. 
    #
    #   resp = client.complete_multipart_upload({
    #     bucket: "examplebucket",
    #     key: "bigobject",
    #     multipart_upload: {
    #       parts: [
    #         {
    #           etag: "\"d8c2eafd90c266e19ab9dcacc479f8af\"",
    #           part_number: 1,
    #         },
    #         {
    #           etag: "\"d8c2eafd90c266e19ab9dcacc479f8af\"",
    #           part_number: 2,
    #         },
    #       ],
    #     },
    #     upload_id: "7YPBOJuoFiQ9cz4P3Pe6FIZwO4f7wN93uHsNBEw97pl5eNwzExg0LAT2dUN91cOmrEQHDsP3WA60CEg--",
    #   })
    #
    #   resp.to_h outputs the following:
    #   {
    #     bucket: "acexamplebucket",
    #     etag: "\"4d9031c7644d8081c2829f4ea23c55f7-2\"",
    #     key: "bigobject",
    #     location: "https://examplebucket.s3.<Region>.amazonaws.com/bigobject",
    #   }
    #
    # @example Request syntax with placeholder values
    #
    #   resp = client.complete_multipart_upload({
    #     bucket: "BucketName", # required
    #     key: "ObjectKey", # required
    #     multipart_upload: {
    #       parts: [
    #         {
    #           etag: "ETag",
    #           checksum_crc32: "ChecksumCRC32",
    #           checksum_crc32c: "ChecksumCRC32C",
    #           checksum_sha1: "ChecksumSHA1",
    #           checksum_sha256: "ChecksumSHA256",
    #           part_number: 1,
    #         },
    #       ],
    #     },
    #     upload_id: "MultipartUploadId", # required
    #     checksum_crc32: "ChecksumCRC32",
    #     checksum_crc32c: "ChecksumCRC32C",
    #     checksum_sha1: "ChecksumSHA1",
    #     checksum_sha256: "ChecksumSHA256",
    #     request_payer: "requester", # accepts requester
    #     expected_bucket_owner: "AccountId",
    #     sse_customer_algorithm: "SSECustomerAlgorithm",
    #     sse_customer_key: "SSECustomerKey",
    #     sse_customer_key_md5: "SSECustomerKeyMD5",
    #   })
    #
    # @example Response structure
    #
    #   resp.location #=> String
    #   resp.bucket #=> String
    #   resp.key #=> String
    #   resp.expiration #=> String
    #   resp.etag #=> String
    #   resp.checksum_crc32 #=> String
    #   resp.checksum_crc32c #=> String
    #   resp.checksum_sha1 #=> String
    #   resp.checksum_sha256 #=> String
    #   resp.server_side_encryption #=> String, one of "AES256", "aws:kms", "aws:kms:dsse"
    #   resp.version_id #=> String
    #   resp.ssekms_key_id #=> String
    #   resp.bucket_key_enabled #=> Boolean
    #   resp.request_charged #=> String, one of "requester"
    #
    # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload AWS API Documentation
    #
    # @overload complete_multipart_upload(params = {})
    # @param [Hash] params ({})
    def complete_multipart_upload(params = {}, options = {})
      req = build_request(:complete_multipart_upload, params)
      req.send_request(options)
    end

    # Creates a copy of an object that is already stored in Amazon S3.
    #
    # You can store individual objects of up to 5 TB in Amazon S3. You
    # create a copy of your object up to 5 GB in size in a single atomic
    # action using this API. However, to copy an object greater than 5 GB,
    # you must use the multipart upload Upload Part - Copy (UploadPartCopy)
    # API. For more information, see [Copy Object Using the REST Multipart
    # Upload API][1].
    #
    # You can copy individual objects between general purpose buckets,
    # between directory buckets, and between general purpose buckets and
    # directory buckets.
    #
    # Directory buckets - For directory buckets, you must make
    # requests for this API operation to the Zonal endpoint. These endpoints
    # support virtual-hosted-style requests in the format
    # `https://bucket_name.s3express-az_id.region.amazonaws.com/key-name `.
    # Path-style requests are not supported. For more information, see
    # [Regional and Zonal endpoints][2] in the *Amazon S3 User Guide*.
    #
    # Both the Region that you want to copy the object from and the Region
    # that you want to copy the object to must be enabled for your account.
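    #
    # For instance, a simple same-Region copy between general purpose
    # buckets looks like this (a sketch; bucket and key names are
    # illustrative):
    #
    #     resp = client.copy_object(
    #       bucket: 'amzn-s3-demo-destination-bucket',
    #       copy_source: 'amzn-s3-demo-source-bucket/reports/january.pdf',
    #       key: 'reports/january.pdf'
    #     )
    #     resp.copy_object_result.etag #=> ETag of the new copy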
    # Amazon S3 transfer acceleration does not support cross-Region copies.
    # If you request a cross-Region copy using a transfer acceleration
    # endpoint, you get a `400 Bad Request` error. For more information, see
    # [Transfer Acceleration][3].
    #
    # Authentication and authorization
    #
    # : All `CopyObject` requests must be authenticated and signed by using
    #   IAM credentials (access key ID and secret access key for the IAM
    #   identities). All headers with the `x-amz-` prefix, including
    #   `x-amz-copy-source`, must be signed. For more information, see [REST
    #   Authentication][4].
    #
    #   **Directory buckets** - You must use the IAM credentials to
    #   authenticate and authorize your access to the `CopyObject` API
    #   operation, instead of using the temporary security credentials
    #   through the `CreateSession` API operation.
    #
    #   Amazon Web Services CLI or SDKs handles authentication and
    #   authorization on your behalf.
    #
    # Permissions
    #
    # : You must have *read* access to the source object and *write* access
    #   to the destination bucket.
    #
    #   * **General purpose bucket permissions** - You must have permissions
    #     in an IAM policy based on the source and destination bucket types
    #     in a `CopyObject` operation.
    #
    #     * If the source object is in a general purpose bucket, you must
    #       have s3:GetObject permission to read the
    #       source object that is being copied.
    #
    #     * If the destination bucket is a general purpose bucket, you must
    #       have s3:PutObject permission to write the
    #       object copy to the destination bucket.
    #
    #   * **Directory bucket permissions** - You must have permissions in a
    #     bucket policy or an IAM identity-based policy based on the source
    #     and destination bucket types in a `CopyObject` operation.
    #
    #     * If the source object that you want to copy is in a directory
    #       bucket, you must have the
    #       s3express:CreateSession permission in the
    #       `Action` element of a policy to read the object. By default, the
    #       session is in the `ReadWrite` mode. If you want to restrict the
    #       access, you can explicitly set the `s3express:SessionMode`
    #       condition key to `ReadOnly` on the copy source bucket.
    #
    #     * If the copy destination is a directory bucket, you must have the
    #       s3express:CreateSession permission in the
    #       `Action` element of a policy to write the object to the
    #       destination. The `s3express:SessionMode` condition key can't be
    #       set to `ReadOnly` on the copy destination bucket.
    #
    #   For example policies, see [Example bucket policies for S3 Express
    #   One Zone][5] and [Amazon Web Services Identity and Access
    #   Management (IAM) identity-based policies for S3 Express One
    #   Zone][6] in the *Amazon S3 User Guide*.
    #
    # Response and special errors
    #
    # : When the request is an HTTP 1.1 request, the response is chunk
    #   encoded. When the request is not an HTTP 1.1 request, the response
    #   would not contain the `Content-Length`. You always need to read the
    #   entire response body to check if the copy succeeds; Amazon S3 sends
    #   white space characters in the response to keep the connection alive
    #   while it copies the data.
    #
    #   * If the copy is successful, you receive a response with information
    #     about the copied object.
    #
    #   * A copy request might return an error when Amazon S3 receives the
    #     copy request or while Amazon S3 is copying the files. A `200 OK`
    #     response can contain either a success or an error.
    #
    #     * If the error occurs before the copy action starts, you receive a
    #       standard Amazon S3 error.
    #
    #     * If the error occurs during the copy operation, the error
    #       response is embedded in the `200 OK` response. For example, in a
    #       cross-region copy, you may encounter throttling and receive a
    #       `200 OK` response. For more information, see [Resolve the Error
    #       200 response when copying objects to Amazon
    #       S3](https://repost.aws/knowledge-center/s3-resolve-200-internalerror).
    #       The `200 OK` status code means the copy was accepted, but it
    #       doesn't mean the copy is complete. Another example is when you
    #       disconnect from Amazon S3 before the copy is complete; Amazon S3
    #       might then cancel the copy, and you may receive a `200 OK`
    #       response. You must stay connected to Amazon S3 until the entire
    #       response is successfully received and processed.
    #
    #       If you call this API operation directly, make sure to design
    #       your application to parse the content of the response and handle
    #       it appropriately. If you use Amazon Web Services SDKs, SDKs
    #       handle this condition. The SDKs detect the embedded error and
    #       apply error handling per your configuration settings (including
    #       automatically retrying the request as appropriate). If the
    #       condition persists, the SDKs throw an exception (or, for the
    #       SDKs that don't use exceptions, they return an error). A sketch
    #       of this handling pattern follows below.
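    #
    # A minimal sketch of that defensive pattern (names are illustrative;
    # this SDK surfaces the embedded error as a normal Ruby exception):
    #
    #     begin
    #       client.copy_object(
    #         bucket: 'amzn-s3-demo-destination-bucket',
    #         copy_source: 'amzn-s3-demo-source-bucket/large-object',
    #         key: 'large-object'
    #       )
    #     rescue Aws::S3::Errors::ServiceError => e
    #       # embedded 200 OK errors surface here after the SDK's retries
    #       puts "copy failed: #{e.code} #{e.message}"
    #     end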
    #
    # Charge
    #
    # : The copy request charge is based on the storage class and Region
    #   that you specify for the destination object. The request can also
    #   result in a data retrieval charge for the source if the source
    #   storage class bills for data retrieval. For pricing information, see
    #   [Amazon S3 pricing][7].
    #
    # HTTP Host header syntax
    #
    # : Directory buckets - The HTTP Host header syntax is `
    #   Bucket_name.s3express-az_id.region.amazonaws.com`.
    #
    # The following operations are related to `CopyObject`:
    #
    # * [PutObject][8]
    #
    # * [GetObject][9]
    #
    # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html
    # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
    # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
    # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
    # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
    # [6]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html
    # [7]: http://aws.amazon.com/s3/pricing/
    # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
    # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
    #
    # @option params [String] :acl
    #   The canned access control list (ACL) to apply to the object.
    #
    #   When you copy an object, the ACL metadata is not preserved and is set
    #   to `private` by default. Only the owner has full access control. To
    #   override the default ACL setting, specify a new ACL when you generate
    #   a copy request. For more information, see [Using ACLs][1].
    #
    #   If the destination bucket that you're copying objects to uses the
    #   bucket owner enforced setting for S3 Object Ownership, ACLs are
    #   disabled and no longer affect permissions. Buckets that use this
    #   setting only accept `PUT` requests that don't specify an ACL or `PUT`
    #   requests that specify bucket owner full control ACLs, such as the
    #   `bucket-owner-full-control` canned ACL or an equivalent form of this
    #   ACL expressed in the XML format. For more information, see
    #   [Controlling ownership of objects and disabling ACLs][2] in the
    #   *Amazon S3 User Guide*.
    #
    #   * If your destination bucket uses the bucket owner enforced setting
    #     for Object Ownership, all objects written to the bucket by any
    #     account will be owned by the bucket owner.
    #
    #   * This functionality is not supported for directory buckets.
# # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # # @option params [required, String] :bucket # The name of the destination bucket. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [String] :cache_control # Specifies the caching behavior along the request/reply chain. # # @option params [String] :checksum_algorithm # Indicates the algorithm that you want Amazon S3 to use to create the # checksum for the object. For more information, see [Checking object # integrity][1] in the *Amazon S3 User Guide*. # # When you copy an object, if the source object has a checksum, that # checksum value will be copied to the new object by default. If the # `CopyObject` request does not include this `x-amz-checksum-algorithm` # header, the checksum algorithm will be copied from the source object # to the destination object (if it's present on the source object). You # can optionally specify a different checksum algorithm to use with the # `x-amz-checksum-algorithm` header. Unrecognized or unsupported values # will respond with the HTTP status code `400 Bad Request`. # # For directory buckets, when you use Amazon Web Services SDKs, `CRC32` # is the default checksum algorithm that's used for performance. 
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
    #
    # @option params [String] :content_disposition
    #   Specifies presentational information for the object. Indicates whether
    #   an object should be displayed in a web browser or downloaded as a
    #   file. It allows specifying the desired filename for the downloaded
    #   file.
    #
    # @option params [String] :content_encoding
    #   Specifies what content encodings have been applied to the object and
    #   thus what decoding mechanisms must be applied to obtain the media-type
    #   referenced by the Content-Type header field.
    #
    #   For directory buckets, only the `aws-chunked` value is supported in
    #   this header field.
    #
    # @option params [String] :content_language
    #   The language the content is in.
    #
    # @option params [String] :content_type
    #   A standard MIME type that describes the format of the object data.
    #
    # @option params [required, String] :copy_source
    #   Specifies the source object for the copy operation. The source object
    #   can be up to 5 GB. If the source object is an object that was uploaded
    #   by using a multipart upload, the object copy will be a single part
    #   object after the source object is copied to the destination bucket.
    #
    #   You specify the value of the copy source in one of two formats,
    #   depending on whether you want to access the source object through an
    #   [access point][1]:
    #
    #   * For objects not accessed through an access point, specify the name
    #     of the source bucket and the key of the source object, separated by
    #     a slash (/). For example, to copy the object `reports/january.pdf`
    #     from the general purpose bucket `awsexamplebucket`, use
    #     `awsexamplebucket/reports/january.pdf`. The value must be
    #     URL-encoded. To copy the object `reports/january.pdf` from the
    #     directory bucket `awsexamplebucket--use1-az5--x-s3`, use
    #     `awsexamplebucket--use1-az5--x-s3/reports/january.pdf`. The value
    #     must be URL-encoded.
    #
    #   * For objects accessed through access points, specify the Amazon
    #     Resource Name (ARN) of the object as accessed through the access
    #     point, in the format
    #     `arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>`.
    #     For example, to copy the object `reports/january.pdf` through access
    #     point `my-access-point` owned by account `123456789012` in Region
    #     `us-west-2`, use the URL encoding of
    #     `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`.
    #     The value must be URL-encoded.
    #
    #     * Amazon S3 supports copy operations using Access points only when
    #       the source and destination buckets are in the same Amazon Web
    #       Services Region.
    #
    #     * Access points are not supported by directory buckets.
    #
    #     Alternatively, for objects accessed through Amazon S3 on Outposts,
    #     specify the ARN of the object as accessed in the format
    #     `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>`.
    #     For example, to copy the object `reports/january.pdf` through
    #     outpost `my-outpost` owned by account `123456789012` in Region
    #     `us-west-2`, use the URL encoding of
    #     `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`.
    #     The value must be URL-encoded.
    #
    #   If your source bucket versioning is enabled, the `x-amz-copy-source`
    #   header by default identifies the current version of an object to copy.
    #   If the current version is a delete marker, Amazon S3 behaves as if the
    #   object was deleted. To copy a different version, use the `versionId`
    #   query parameter.
    #   Specifically, append `?versionId=<version-id>` to the
    #   value (for example,
    #   `awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`).
    #   If you don't specify a version ID, Amazon S3 copies the latest
    #   version of the source object.
    #
    #   If you enable versioning on the destination bucket, Amazon S3
    #   generates a unique version ID for the copied object. This version ID
    #   is different from the version ID of the source object. Amazon S3
    #   returns the version ID of the copied object in the `x-amz-version-id`
    #   response header in the response.
    #
    #   If you do not enable versioning or suspend it on the destination
    #   bucket, the version ID that Amazon S3 generates in the
    #   `x-amz-version-id` response header is always null.
    #
    #   **Directory buckets** - S3 Versioning isn't enabled and supported for
    #   directory buckets.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
    #
    # @option params [String] :copy_source_if_match
    #   Copies the object if its entity tag (ETag) matches the specified tag.
    #
    #   If both the `x-amz-copy-source-if-match` and
    #   `x-amz-copy-source-if-unmodified-since` headers are present in the
    #   request and evaluate as follows, Amazon S3 returns `200 OK` and copies
    #   the data:
    #
    #   * `x-amz-copy-source-if-match` condition evaluates to true
    #
    #   * `x-amz-copy-source-if-unmodified-since` condition evaluates to false
    #
    # @option params [Time,DateTime,Date,Integer,String] :copy_source_if_modified_since
    #   Copies the object if it has been modified since the specified time.
    #
    #   If both the `x-amz-copy-source-if-none-match` and
    #   `x-amz-copy-source-if-modified-since` headers are present in the
    #   request and evaluate as follows, Amazon S3 returns the `412
    #   Precondition Failed` response code:
    #
    #   * `x-amz-copy-source-if-none-match` condition evaluates to false
    #
    #   * `x-amz-copy-source-if-modified-since` condition evaluates to true
    #
    # @option params [String] :copy_source_if_none_match
    #   Copies the object if its entity tag (ETag) is different than the
    #   specified ETag.
    #
    #   If both the `x-amz-copy-source-if-none-match` and
    #   `x-amz-copy-source-if-modified-since` headers are present in the
    #   request and evaluate as follows, Amazon S3 returns the `412
    #   Precondition Failed` response code:
    #
    #   * `x-amz-copy-source-if-none-match` condition evaluates to false
    #
    #   * `x-amz-copy-source-if-modified-since` condition evaluates to true
    #
    # @option params [Time,DateTime,Date,Integer,String] :copy_source_if_unmodified_since
    #   Copies the object if it hasn't been modified since the specified
    #   time.
    #
    #   If both the `x-amz-copy-source-if-match` and
    #   `x-amz-copy-source-if-unmodified-since` headers are present in the
    #   request and evaluate as follows, Amazon S3 returns `200 OK` and copies
    #   the data:
    #
    #   * `x-amz-copy-source-if-match` condition evaluates to true
    #
    #   * `x-amz-copy-source-if-unmodified-since` condition evaluates to false
    #
    # @option params [Time,DateTime,Date,Integer,String] :expires
    #   The date and time at which the object is no longer cacheable.
    #
    # @option params [String] :grant_full_control
    #   Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the
    #   object.
    #
    #   * This functionality is not supported for directory buckets.
    #
    #   * This functionality is not supported for Amazon S3 on Outposts.
    #
    # @option params [String] :grant_read
    #   Allows grantee to read the object data and its metadata.
    #
    #   * This functionality is not supported for directory buckets.
    #
    #   * This functionality is not supported for Amazon S3 on Outposts.
# # # # @option params [String] :grant_read_acp # Allows grantee to read the object ACL. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # @option params [String] :grant_write_acp # Allows grantee to write the ACL for the applicable object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # @option params [required, String] :key # The key of the destination object. # # @option params [Hash] :metadata # A map of metadata to store with the object in S3. # # @option params [String] :metadata_directive # Specifies whether the metadata is copied from the source object or # replaced with metadata that's provided in the request. When copying # an object, you can preserve all metadata (the default) or specify new # metadata. If this header isn’t specified, `COPY` is the default # behavior. # # **General purpose bucket** - For general purpose buckets, when you # grant permissions, you can use the `s3:x-amz-metadata-directive` # condition key to enforce certain metadata behavior when objects are # uploaded. For more information, see [Amazon S3 condition key # examples][1] in the *Amazon S3 User Guide*. # # `x-amz-website-redirect-location` is unique to each object and is not # copied when using the `x-amz-metadata-directive` header. To copy the # value, you must specify `x-amz-website-redirect-location` in the # request header. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html # # @option params [String] :tagging_directive # Specifies whether the object tag-set is copied from the source object # or replaced with the tag-set that's provided in the request. # # The default value is `COPY`. # # **Directory buckets** - For directory buckets in a `CopyObject` # operation, only the empty tag-set is supported. Any requests that # attempt to write non-empty tags into directory buckets will receive a # `501 Not Implemented` status code. When the destination bucket is a # directory bucket, you will receive a `501 Not Implemented` response in # any of the following situations: # # * When you attempt to `COPY` the tag-set from an S3 source object that # has non-empty tags. # # * When you attempt to `REPLACE` the tag-set of a source object and set # a non-empty value to `x-amz-tagging`. # # * When you don't set the `x-amz-tagging-directive` header and the # source object has non-empty tags. This is because the default value # of `x-amz-tagging-directive` is `COPY`. # # Because only the empty tag-set is supported for directory buckets in a # `CopyObject` operation, the following situations are allowed: # # * When you attempt to `COPY` the tag-set from a directory bucket # source object that has no tags to a general purpose bucket. It # copies an empty tag-set to the destination object. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and set the `x-amz-tagging` value of the directory # bucket destination object to empty. # # * When you attempt to `REPLACE` the tag-set of a general purpose # bucket source object that has non-empty tags and set the # `x-amz-tagging` value of the directory bucket destination object to # empty. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and don't set the `x-amz-tagging` value of the # directory bucket destination object. 
This is because the default # value of `x-amz-tagging` is the empty value. # # # # @option params [String] :server_side_encryption # The server-side encryption algorithm used when storing this object in # Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). # Unrecognized or unsupported values won’t write a destination object # and will receive a `400 Bad Request` response. # # Amazon S3 automatically encrypts all new objects that are copied to an # S3 bucket. When copying an object, if you don't specify encryption # information in your copy request, the encryption setting of the target # object is set to the default encryption configuration of the # destination bucket. By default, all buckets have a base level of # encryption configuration that uses server-side encryption with Amazon # S3 managed keys (SSE-S3). If the destination bucket has a default # encryption configuration that uses server-side encryption with Key # Management Service (KMS) keys (SSE-KMS), dual-layer server-side # encryption with Amazon Web Services KMS keys (DSSE-KMS), or # server-side encryption with customer-provided encryption keys (SSE-C), # Amazon S3 uses the corresponding KMS key, or a customer-provided key # to encrypt the target object copy. # # When you perform a `CopyObject` operation, if you want to use a # different type of encryption setting for the target object, you can # specify appropriate encryption-related headers to encrypt the target # object with an Amazon S3 managed key, a KMS key, or a # customer-provided key. If the encryption setting in your request is # different from the default encryption configuration of the destination # bucket, the encryption setting in your request takes precedence. # # With server-side encryption, Amazon S3 encrypts your data as it writes # your data to disks in its data centers and decrypts the data when you # access it. For more information about server-side encryption, see # [Using Server-Side Encryption][1] in the *Amazon S3 User Guide*. # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html # # @option params [String] :storage_class # If the `x-amz-storage-class` header is not used, the copied object # will be stored in the `STANDARD` Storage Class by default. The # `STANDARD` storage class provides high durability and high # availability. Depending on performance needs, you can specify a # different Storage Class. # # * Directory buckets - For directory buckets, only the S3 # Express One Zone storage class is supported to store newly created # objects. Unsupported storage class values won't write a destination # object and will respond with the HTTP status code `400 Bad Request`. # # * Amazon S3 on Outposts - S3 on Outposts only uses the # `OUTPOSTS` Storage Class. # # # # You can use the `CopyObject` action to change the storage class of an # object that is already stored in Amazon S3 by using the # `x-amz-storage-class` header. For more information, see [Storage # Classes][1] in the *Amazon S3 User Guide*. # # Before using an object as a source object for the copy operation, you # must restore a copy of it if it meets any of the following conditions: # # * The storage class of the source object is `GLACIER` or # `DEEP_ARCHIVE`. # # * The storage class of the source object is `INTELLIGENT_TIERING` and # its [S3 Intelligent-Tiering access tier][2] is `Archive Access` or # `Deep Archive Access`.
# # For more information, see [RestoreObject][3] and [Copying Objects][4] # in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html # # @option params [String] :website_redirect_location # If the destination bucket is configured as a website, redirects # requests for this object copy to another object in the same bucket or # to an external URL. Amazon S3 stores the value of this header in the # object metadata. This value is unique to each object and is not copied # when using the `x-amz-metadata-directive` header. Instead, you may opt # to provide this header in combination with the # `x-amz-metadata-directive` header. # # This functionality is not supported for directory buckets. # # # # @option params [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, `AES256`). # # When you perform a `CopyObject` operation, if you want to use a # different type of encryption setting for the target object, you can # specify appropriate encryption-related headers to encrypt the target # object with an Amazon S3 managed key, a KMS key, or a # customer-provided key. If the encryption setting in your request is # different from the default encryption configuration of the destination # bucket, the encryption setting in your request takes precedence. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # # @option params [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded. Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # # @option params [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # # @option params [String] :ssekms_key_id # Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object # encryption. All GET and PUT requests for an object protected by KMS # will fail if they're not made via SSL or using SigV4. For information # about configuring any of the officially supported Amazon Web Services # SDKs and Amazon Web Services CLI, see [Specifying the Signature # Version in Request Authentication][1] in the *Amazon S3 User Guide*. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version # # @option params [String] :ssekms_encryption_context # Specifies the Amazon Web Services KMS Encryption Context to use for # object encryption. The value of this header is a base64-encoded UTF-8 # string holding JSON with the encryption context key-value pairs. 
This # value must be explicitly added to specify encryption context for # `CopyObject` requests. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # # @option params [Boolean] :bucket_key_enabled # Specifies whether Amazon S3 should use an S3 Bucket Key for object # encryption with server-side encryption using Key Management Service # (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can enable # an S3 Bucket Key for the object. # # Setting this header to `true` causes Amazon S3 to use an S3 Bucket Key # for object encryption with SSE-KMS. Specifying this header with a COPY # action doesn’t affect bucket-level settings for S3 Bucket Key. # # For more information, see [Amazon S3 Bucket Keys][1] in the *Amazon S3 # User Guide*. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html # # @option params [String] :copy_source_sse_customer_algorithm # Specifies the algorithm to use when decrypting the source object (for # example, `AES256`). # # If the source object for the copy is stored in Amazon S3 using SSE-C, # you must provide the necessary encryption information in your request # so that Amazon S3 can decrypt the object for copying. # # This functionality is not supported when the source object is in a # directory bucket. # # # # @option params [String] :copy_source_sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use to # decrypt the source object. The encryption key provided in this header # must be the same one that was used when the source object was created. # # If the source object for the copy is stored in Amazon S3 using SSE-C, # you must provide the necessary encryption information in your request # so that Amazon S3 can decrypt the object for copying. # # This functionality is not supported when the source object is in a # directory bucket. # # # # @option params [String] :copy_source_sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # If the source object for the copy is stored in Amazon S3 using SSE-C, # you must provide the necessary encryption information in your request # so that Amazon S3 can decrypt the object for copying. # # This functionality is not supported when the source object is in a # directory bucket. # # # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :tagging # The tag-set for the object copy in the destination bucket. This value # must be used in conjunction with the `x-amz-tagging-directive` if you # choose `REPLACE` for the `x-amz-tagging-directive`. 
If you choose # `COPY` for the `x-amz-tagging-directive`, you don't need to set the # `x-amz-tagging` header, because the tag-set will be copied from the # source object directly. The tag-set must be encoded as URL Query # parameters. # # The default value is the empty value. # # **Directory buckets** - For directory buckets in a `CopyObject` # operation, only the empty tag-set is supported. Any requests that # attempt to write non-empty tags into directory buckets will receive a # `501 Not Implemented` status code. When the destination bucket is a # directory bucket, you will receive a `501 Not Implemented` response in # any of the following situations: # # * When you attempt to `COPY` the tag-set from an S3 source object that # has non-empty tags. # # * When you attempt to `REPLACE` the tag-set of a source object and set # a non-empty value to `x-amz-tagging`. # # * When you don't set the `x-amz-tagging-directive` header and the # source object has non-empty tags. This is because the default value # of `x-amz-tagging-directive` is `COPY`. # # Because only the empty tag-set is supported for directory buckets in a # `CopyObject` operation, the following situations are allowed: # # * When you attempt to `COPY` the tag-set from a directory bucket # source object that has no tags to a general purpose bucket. It # copies an empty tag-set to the destination object. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and set the `x-amz-tagging` value of the directory # bucket destination object to empty. # # * When you attempt to `REPLACE` the tag-set of a general purpose # bucket source object that has non-empty tags and set the # `x-amz-tagging` value of the directory bucket destination object to # empty. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and don't set the `x-amz-tagging` value of the # directory bucket destination object. This is because the default # value of `x-amz-tagging` is the empty value. # # # # @option params [String] :object_lock_mode # The Object Lock mode that you want to apply to the object copy. # # This functionality is not supported for directory buckets. # # # # @option params [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date # The date and time when you want the Object Lock of the object copy to # expire. # # This functionality is not supported for directory buckets. # # # # @option params [String] :object_lock_legal_hold_status # Specifies whether you want to apply a legal hold to the object copy. # # This functionality is not supported for directory buckets. # # # # @option params [String] :expected_bucket_owner # The account ID of the expected destination bucket owner. If the # account ID that you provide does not match the actual owner of the # destination bucket, the request fails with the HTTP status code `403 # Forbidden` (access denied). # # @option params [String] :expected_source_bucket_owner # The account ID of the expected source bucket owner. If the account ID # that you provide does not match the actual owner of the source bucket, # the request fails with the HTTP status code `403 Forbidden` (access # denied). 
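# @example Copying a specific source version (illustrative sketch)
#
#   # A hedged sketch, not an official example: the bucket names, key, and
#   # version ID below are placeholders reused from the parameter
#   # descriptions above. The `versionId` query parameter on `copy_source`
#   # selects which version of the source object to copy.
#   client.copy_object({
#     bucket: "destinationbucket",
#     copy_source: "awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893",
#     key: "reports/january.pdf",
#   })
#
# @example Conditional copy guarded by the source ETag (illustrative sketch)
#
#   # A hedged sketch, not an official example: the bucket names, key, and
#   # ETag value are placeholders. The copy succeeds only while the source
#   # object's ETag still matches; otherwise Amazon S3 returns a
#   # precondition failure.
#   client.copy_object({
#     bucket: "destinationbucket",
#     copy_source: "sourcebucket/reports/january.pdf",
#     key: "reports/january.pdf",
#     copy_source_if_match: "\"6805f2cfc46c0f04559748bb039d69ae\"",
#   })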
# # @return [Types::CopyObjectOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CopyObjectOutput#copy_object_result #copy_object_result} => Types::CopyObjectResult # * {Types::CopyObjectOutput#expiration #expiration} => String # * {Types::CopyObjectOutput#copy_source_version_id #copy_source_version_id} => String # * {Types::CopyObjectOutput#version_id #version_id} => String # * {Types::CopyObjectOutput#server_side_encryption #server_side_encryption} => String # * {Types::CopyObjectOutput#sse_customer_algorithm #sse_customer_algorithm} => String # * {Types::CopyObjectOutput#sse_customer_key_md5 #sse_customer_key_md5} => String # * {Types::CopyObjectOutput#ssekms_key_id #ssekms_key_id} => String # * {Types::CopyObjectOutput#ssekms_encryption_context #ssekms_encryption_context} => String # * {Types::CopyObjectOutput#bucket_key_enabled #bucket_key_enabled} => Boolean # * {Types::CopyObjectOutput#request_charged #request_charged} => String # # # @example Example: To copy an object # # # The following example copies an object from one bucket to another. # # resp = client.copy_object({ # bucket: "destinationbucket", # copy_source: "/sourcebucket/HappyFacejpg", # key: "HappyFaceCopyjpg", # }) # # resp.to_h outputs the following: # { # copy_object_result: { # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", # last_modified: Time.parse("2016-12-15T17:38:53.000Z"), # }, # } # # @example Request syntax with placeholder values # # resp = client.copy_object({ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control # bucket: "BucketName", # required # cache_control: "CacheControl", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # content_disposition: "ContentDisposition", # content_encoding: "ContentEncoding", # content_language: "ContentLanguage", # content_type: "ContentType", # copy_source: "CopySource", # required # copy_source_if_match: "CopySourceIfMatch", # copy_source_if_modified_since: Time.now, # copy_source_if_none_match: "CopySourceIfNoneMatch", # copy_source_if_unmodified_since: Time.now, # expires: Time.now, # grant_full_control: "GrantFullControl", # grant_read: "GrantRead", # grant_read_acp: "GrantReadACP", # grant_write_acp: "GrantWriteACP", # key: "ObjectKey", # required # metadata: { # "MetadataKey" => "MetadataValue", # }, # metadata_directive: "COPY", # accepts COPY, REPLACE # tagging_directive: "COPY", # accepts COPY, REPLACE # server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE # website_redirect_location: "WebsiteRedirectLocation", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # ssekms_key_id: "SSEKMSKeyId", # ssekms_encryption_context: "SSEKMSEncryptionContext", # bucket_key_enabled: false, # copy_source_sse_customer_algorithm: "CopySourceSSECustomerAlgorithm", # copy_source_sse_customer_key: "CopySourceSSECustomerKey", # copy_source_sse_customer_key_md5: "CopySourceSSECustomerKeyMD5", # request_payer: "requester", # accepts requester # tagging: "TaggingHeader", # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE # object_lock_retain_until_date: Time.now, # object_lock_legal_hold_status: "ON", # accepts 
ON, OFF # expected_bucket_owner: "AccountId", # expected_source_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.copy_object_result.etag #=> String # resp.copy_object_result.last_modified #=> Time # resp.copy_object_result.checksum_crc32 #=> String # resp.copy_object_result.checksum_crc32c #=> String # resp.copy_object_result.checksum_sha1 #=> String # resp.copy_object_result.checksum_sha256 #=> String # resp.expiration #=> String # resp.copy_source_version_id #=> String # resp.version_id #=> String # resp.server_side_encryption #=> String, one of "AES256", "aws:kms", "aws:kms:dsse" # resp.sse_customer_algorithm #=> String # resp.sse_customer_key_md5 #=> String # resp.ssekms_key_id #=> String # resp.ssekms_encryption_context #=> String # resp.bucket_key_enabled #=> Boolean # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject AWS API Documentation # # @overload copy_object(params = {}) # @param [Hash] params ({}) def copy_object(params = {}, options = {}) req = build_request(:copy_object, params) req.send_request(options) end # This action creates an Amazon S3 bucket. To create an Amazon S3 on # Outposts bucket, see [ `CreateBucket` ][1]. # # # # Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 # and have a valid Amazon Web Services Access Key ID to authenticate # requests. Anonymous requests are never allowed to create buckets. By # creating the bucket, you become the bucket owner. # # There are two types of buckets: general purpose buckets and directory # buckets. For more information about these bucket types, see [Creating, # configuring, and working with Amazon S3 buckets][2] in the *Amazon S3 # User Guide*. # # * **General purpose buckets** - If you send your `CreateBucket` # request to the `s3.amazonaws.com` global endpoint, the request goes # to the `us-east-1` Region. So the signature calculations in # Signature Version 4 must use `us-east-1` as the Region, even if the # location constraint in the request specifies another Region where # the bucket is to be created. If you create a bucket in a Region # other than US East (N. Virginia), your application must be able to # handle a 307 redirect. For more information, see [Virtual hosting of # buckets][3] in the *Amazon S3 User Guide*. # # * Directory buckets - For directory buckets, you must make # requests for this API operation to the Regional endpoint. These # endpoints support path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. For more # information, see [Regional and Zonal endpoints][4] in the *Amazon S3 # User Guide*. # # # # Permissions # : * **General purpose bucket permissions** - In addition to the # `s3:CreateBucket` permission, the following permissions are # required in a policy when your `CreateBucket` request includes # specific headers: # # * **Access control lists (ACLs)** - In your `CreateBucket` # request, if you specify an access control list (ACL) and set it # to `public-read`, `public-read-write`, `authenticated-read`, or # if you explicitly specify any other custom ACLs, both # `s3:CreateBucket` and `s3:PutBucketAcl` permissions are # required. In your `CreateBucket` request, if you set the ACL to # `private`, or if you don't specify any ACLs, only the # `s3:CreateBucket` permission is required.
# # * **Object Lock** - In your `CreateBucket` request, if you set # `x-amz-bucket-object-lock-enabled` to true, the # `s3:PutBucketObjectLockConfiguration` and # `s3:PutBucketVersioning` permissions are required. # # * **S3 Object Ownership** - If your `CreateBucket` request # includes the `x-amz-object-ownership` header, then the # `s3:PutBucketOwnershipControls` permission is required. # # If your `CreateBucket` request sets `BucketOwnerEnforced` for # Amazon S3 Object Ownership and specifies a bucket ACL that # provides access to an external Amazon Web Services account, your # request fails with a `400` error and returns the # `InvalidBucketAclWithObjectOwnership` error code. For more # information, see [Setting Object Ownership on an existing bucket # ][5] in the *Amazon S3 User Guide*. # # * **S3 Block Public Access** - If your specific use case requires # granting public access to your S3 resources, you can disable # Block Public Access. Specifically, you can create a new bucket # with Block Public Access enabled, then separately call the [ # `DeletePublicAccessBlock` ][6] API. To use this operation, you # must have the `s3:PutBucketPublicAccessBlock` permission. For # more information about S3 Block Public Access, see [Blocking # public access to your Amazon S3 storage ][7] in the *Amazon S3 # User Guide*. # # * **Directory bucket permissions** - You must have the # `s3express:CreateBucket` permission in an IAM identity-based # policy instead of a bucket policy. Cross-account access to this # API operation isn't supported. This operation can only be # performed by the Amazon Web Services account that owns the # resource. For more information about directory bucket policies and # permissions, see [Amazon Web Services Identity and Access # Management (IAM) for S3 Express One Zone][8] in the *Amazon S3 # User Guide*. # # The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 # Block Public Access are not supported for directory buckets. For # directory buckets, all Block Public Access settings are enabled at # the bucket level and S3 Object Ownership is set to Bucket owner # enforced (ACLs disabled). These settings can't be modified. # # For more information about permissions for creating and working # with directory buckets, see [Directory buckets][9] in the *Amazon # S3 User Guide*. For more information about supported S3 features # for directory buckets, see [Features of S3 Express One Zone][10] # in the *Amazon S3 User Guide*. # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is # `s3express-control.region.amazonaws.com`.
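# As a rough sketch of the directory-bucket request shape described above
# (hedged, not an official example; the bucket name and Availability Zone
# ID are placeholders that follow the documented naming format):
#
#     client.create_bucket({
#       bucket: "DOC-EXAMPLE-BUCKET--usw2-az2--x-s3",
#       create_bucket_configuration: {
#         location: { type: "AvailabilityZone", name: "usw2-az2" },
#         bucket: { type: "Directory", data_redundancy: "SingleAvailabilityZone" },
#       },
#     })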
# # The following operations are related to `CreateBucket`: # # * [PutObject][11] # # * [DeleteBucket][12] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-ownership-existing-bucket.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html # [9]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html # [10]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html # [12]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html # # @option params [String] :acl # The canned ACL to apply to the bucket. # # This functionality is not supported for directory buckets. # # # # @option params [required, String] :bucket # The name of the bucket to create. # # **General purpose buckets** - For information about bucket naming # restrictions, see [Bucket naming rules][1] in the *Amazon S3 User # Guide*. # # Directory buckets - When you use this operation with a # directory bucket, you must use path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. Directory bucket # names must be unique in the chosen Availability Zone. Bucket names # must also follow the format ` bucket_base_name--az_id--x-s3` (for # example, ` DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about # bucket naming restrictions, see [Directory bucket naming rules][2] in # the *Amazon S3 User Guide* # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # # @option params [Types::CreateBucketConfiguration] :create_bucket_configuration # The configuration information for the bucket. # # @option params [String] :grant_full_control # Allows grantee the read, write, read ACP, and write ACP permissions on # the bucket. # # This functionality is not supported for directory buckets. # # # # @option params [String] :grant_read # Allows grantee to list the objects in the bucket. # # This functionality is not supported for directory buckets. # # # # @option params [String] :grant_read_acp # Allows grantee to read the bucket ACL. # # This functionality is not supported for directory buckets. # # # # @option params [String] :grant_write # Allows grantee to create new objects in the bucket. # # For the bucket and object owners of existing objects, also allows # deletions and overwrites of those objects. # # This functionality is not supported for directory buckets. # # # # @option params [String] :grant_write_acp # Allows grantee to write the ACL for the applicable bucket. # # This functionality is not supported for directory buckets. # # # # @option params [Boolean] :object_lock_enabled_for_bucket # Specifies whether you want S3 Object Lock to be enabled for the new # bucket. 
# # This functionality is not supported for directory buckets. # # # # @option params [String] :object_ownership # The container element for object ownership for a bucket's ownership # controls. # # `BucketOwnerPreferred` - Objects uploaded to the bucket change # ownership to the bucket owner if the objects are uploaded with the # `bucket-owner-full-control` canned ACL. # # `ObjectWriter` - The uploading account will own the object if the # object is uploaded with the `bucket-owner-full-control` canned ACL. # # `BucketOwnerEnforced` - Access control lists (ACLs) are disabled and # no longer affect permissions. The bucket owner automatically owns and # has full control over every object in the bucket. The bucket only # accepts PUT requests that don't specify an ACL or specify bucket # owner full control ACLs (such as the predefined # `bucket-owner-full-control` canned ACL or a custom ACL in XML format # that grants the same permissions). # # By default, `ObjectOwnership` is set to `BucketOwnerEnforced` and ACLs # are disabled. We recommend keeping ACLs disabled, except in uncommon # use cases where you must control access for each object individually. # For more information about S3 Object Ownership, see [Controlling # ownership of objects and disabling ACLs for your bucket][1] in the # *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. Directory # buckets use the bucket owner enforced setting for S3 Object Ownership. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # # @return [Types::CreateBucketOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CreateBucketOutput#location #location} => String # # # @example Example: To create a bucket # # # The following example creates a bucket. # # resp = client.create_bucket({ # bucket: "examplebucket", # }) # # resp.to_h outputs the following: # { # location: "/examplebucket", # } # # @example Example: To create a bucket in a specific region # # # The following example creates a bucket. The request specifies an AWS region where to create the bucket. 
# # resp = client.create_bucket({ # bucket: "examplebucket", # create_bucket_configuration: { # location_constraint: "eu-west-1", # }, # }) # # resp.to_h outputs the following: # { # location: "http://examplebucket.<Region>.s3.amazonaws.com/", # } # # @example Request syntax with placeholder values # # resp = client.create_bucket({ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read # bucket: "BucketName", # required # create_bucket_configuration: { # location_constraint: "af-south-1", # accepts af-south-1, ap-east-1, ap-northeast-1, ap-northeast-2, ap-northeast-3, ap-south-1, ap-south-2, ap-southeast-1, ap-southeast-2, ap-southeast-3, ca-central-1, cn-north-1, cn-northwest-1, EU, eu-central-1, eu-north-1, eu-south-1, eu-south-2, eu-west-1, eu-west-2, eu-west-3, me-south-1, sa-east-1, us-east-2, us-gov-east-1, us-gov-west-1, us-west-1, us-west-2 # location: { # type: "AvailabilityZone", # accepts AvailabilityZone # name: "LocationNameAsString", # }, # bucket: { # data_redundancy: "SingleAvailabilityZone", # accepts SingleAvailabilityZone # type: "Directory", # accepts Directory # }, # }, # grant_full_control: "GrantFullControl", # grant_read: "GrantRead", # grant_read_acp: "GrantReadACP", # grant_write: "GrantWrite", # grant_write_acp: "GrantWriteACP", # object_lock_enabled_for_bucket: false, # object_ownership: "BucketOwnerPreferred", # accepts BucketOwnerPreferred, ObjectWriter, BucketOwnerEnforced # }) # # @example Response structure # # resp.location #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket AWS API Documentation # # @overload create_bucket(params = {}) # @param [Hash] params ({}) def create_bucket(params = {}, options = {}) req = build_request(:create_bucket, params) req.send_request(options) end # This action initiates a multipart upload and returns an upload ID. # This upload ID is used to associate all of the parts in the specific # multipart upload. You specify this upload ID in each of your # subsequent upload part requests (see [UploadPart][1]). You also # include this upload ID in the final request to either complete or # abort the multipart upload request. For more information about # multipart uploads, see [Multipart Upload Overview][2] in the *Amazon # S3 User Guide*. # # After you initiate a multipart upload and upload one or more parts, to # stop being charged for storing the uploaded parts, you must either # complete or abort the multipart upload. Amazon S3 frees up the space # used to store the parts and stops charging you for storing them only # after you either complete or abort a multipart upload. # # # # If you have configured a lifecycle rule to abort incomplete multipart # uploads, the created multipart upload must be completed within the # number of days specified in the bucket lifecycle configuration. # Otherwise, the incomplete multipart upload becomes eligible for an # abort action and Amazon S3 aborts the multipart upload. For more # information, see [Aborting Incomplete Multipart Uploads Using a Bucket # Lifecycle Configuration][3]. # # * Directory buckets - S3 Lifecycle is not supported by # directory buckets. # # * Directory buckets - For directory buckets, you must make # requests for this API operation to the Zonal endpoint. These # endpoints support virtual-hosted-style requests in the format # `https://bucket_name.s3express-az_id.region.amazonaws.com/key-name # `. Path-style requests are not supported.
For more information, see # [Regional and Zonal endpoints][4] in the *Amazon S3 User Guide*. # # # # Request signing # # : For request signing, multipart upload is just a series of regular # requests. You initiate a multipart upload, send one or more requests # to upload parts, and then complete the multipart upload process. You # sign each request individually. There is nothing special about # signing multipart upload requests. For more information about # signing, see [Authenticating Requests (Amazon Web Services Signature # Version 4)][5] in the *Amazon S3 User Guide*. # # Permissions # : * **General purpose bucket permissions** - For information about the # permissions required to use the multipart upload API, see # [Multipart upload and permissions][6] in the *Amazon S3 User # Guide*. # # To perform a multipart upload with encryption by using an Amazon # Web Services KMS key, the requester must have permission to the # `kms:Decrypt` and `kms:GenerateDataKey*` actions on the key. These # permissions are required because Amazon S3 must decrypt and read # data from the encrypted file parts before it completes the # multipart upload. For more information, see [Multipart upload API # and permissions][7] and [Protecting data using server-side # encryption with Amazon Web Services KMS][8] in the *Amazon S3 User # Guide*. # # * **Directory bucket permissions** - To grant access to this API # operation on a directory bucket, we recommend that you use the [ # `CreateSession` ][9] API operation for session-based # authorization. Specifically, you grant the # `s3express:CreateSession` permission to the directory bucket in a # bucket policy or an IAM identity-based policy. Then, you make the # `CreateSession` API call on the bucket to obtain a session token. # With the session token in your request header, you can make API # requests to this operation. After the session token expires, you # make another `CreateSession` API call to generate a new session # token for use. The Amazon Web Services CLI or SDKs create a session # and refresh the session token automatically to avoid service # interruptions when a session expires. For more information about # authorization, see [ `CreateSession` ][9]. # # Encryption # : * **General purpose buckets** - Server-side encryption is for data # encryption at rest. Amazon S3 encrypts your data as it writes it # to disks in its data centers and decrypts it when you access it. # Amazon S3 automatically encrypts all new objects that are uploaded # to an S3 bucket. When doing a multipart upload, if you don't # specify encryption information in your request, the encryption # setting of the uploaded parts is set to the default encryption # configuration of the destination bucket. By default, all buckets # have a base level of encryption configuration that uses # server-side encryption with Amazon S3 managed keys (SSE-S3). If # the destination bucket has a default encryption configuration that # uses server-side encryption with a Key Management Service (KMS) # key (SSE-KMS), or a customer-provided encryption key (SSE-C), # Amazon S3 uses the corresponding KMS key, or a customer-provided # key to encrypt the uploaded parts. When you perform a # CreateMultipartUpload operation, if you want to use a different # type of encryption setting for the uploaded parts, you can request # that Amazon S3 encrypts the object with a different encryption key # (such as an Amazon S3 managed key, a KMS key, or a # customer-provided key).
When the encryption setting in your # request is different from the default encryption configuration of # the destination bucket, the encryption setting in your request # takes precedence. If you choose to provide your own encryption # key, the request headers you provide in [UploadPart][1] and # [UploadPartCopy][10] requests must match the headers you used in # the `CreateMultipartUpload` request. # # * Use KMS keys (SSE-KMS) that include the Amazon Web Services # managed key (`aws/s3`) and KMS customer managed keys stored in # Key Management Service (KMS) – If you want Amazon Web Services # to manage the keys used to encrypt data, specify the following # headers in the request. # # * `x-amz-server-side-encryption` # # * `x-amz-server-side-encryption-aws-kms-key-id` # # * `x-amz-server-side-encryption-context` # # * If you specify `x-amz-server-side-encryption:aws:kms`, but # don't provide `x-amz-server-side-encryption-aws-kms-key-id`, # Amazon S3 uses the Amazon Web Services managed key (`aws/s3` # key) in KMS to protect the data. # # * To perform a multipart upload with encryption by using an # Amazon Web Services KMS key, the requester must have # permission to the `kms:Decrypt` and `kms:GenerateDataKey*` # actions on the key. These permissions are required because # Amazon S3 must decrypt and read data from the encrypted file # parts before it completes the multipart upload. For more # information, see [Multipart upload API and permissions][7] and # [Protecting data using server-side encryption with Amazon Web # Services KMS][8] in the *Amazon S3 User Guide*. # # * If your Identity and Access Management (IAM) user or role is # in the same Amazon Web Services account as the KMS key, then # you must have these permissions on the key policy. If your IAM # user or role is in a different account from the key, then you # must have the permissions on both the key policy and your IAM # user or role. # # * All `GET` and `PUT` requests for an object protected by KMS # fail if you don't make them by using Secure Sockets Layer # (SSL), Transport Layer Security (TLS), or Signature Version 4. # For information about configuring any of the officially # supported Amazon Web Services SDKs and Amazon Web Services # CLI, see [Specifying the Signature Version in Request # Authentication][11] in the *Amazon S3 User Guide*. # # # # For more information about server-side encryption with KMS keys # (SSE-KMS), see [Protecting Data Using Server-Side Encryption # with KMS keys][8] in the *Amazon S3 User Guide*. # # * Use customer-provided encryption keys (SSE-C) – If you want to # manage your own encryption keys, provide all the following # headers in the request. # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about server-side encryption with # customer-provided encryption keys (SSE-C), see [ Protecting data # using server-side encryption with customer-provided encryption # keys (SSE-C)][12] in the *Amazon S3 User Guide*. # # * **Directory buckets** - For directory buckets, only server-side # encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is # supported. # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is ` # Bucket_name.s3express-az_id.region.amazonaws.com`.
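# As an illustrative sketch of the SSE-KMS headers above, expressed as the
# corresponding request parameters (hedged, not an official example; the
# bucket, key, KMS key ID, and encryption context are placeholders):
#
#     require "base64"
#
#     client.create_multipart_upload({
#       bucket: "examplebucket",
#       key: "largeobject",
#       server_side_encryption: "aws:kms",  # x-amz-server-side-encryption
#       ssekms_key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",  # x-amz-server-side-encryption-aws-kms-key-id
#       # x-amz-server-side-encryption-context: base64-encoded JSON key-value pairs
#       ssekms_encryption_context: Base64.strict_encode64('{"department":"placeholder"}'),
#     })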
# # The following operations are related to `CreateMultipartUpload`: # # * [UploadPart][1] # # * [CompleteMultipartUpload][13] # # * [AbortMultipartUpload][14] # # * [ListParts][15] # # * [ListMultipartUploads][16] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions # [8]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html # [11]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version # [12]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html # [13]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html # [14]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html # [15]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html # [16]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html # # @option params [String] :acl # The canned ACL to apply to the object. Amazon S3 supports a set of # predefined ACLs, known as *canned ACLs*. Each canned ACL has a # predefined set of grantees and permissions. For more information, see # [Canned ACL][1] in the *Amazon S3 User Guide*. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can grant access permissions to # individual Amazon Web Services accounts or to predefined groups # defined by Amazon S3. These permissions are then added to the access # control list (ACL) on the new object. For more information, see [Using # ACLs][2]. One way to grant the permissions using the request headers # is to specify a canned ACL with the `x-amz-acl` request header. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html # # @option params [required, String] :bucket # The name of the bucket where the multipart upload is initiated and # where the object is uploaded. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. 
# # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [String] :cache_control # Specifies caching behavior along the request/reply chain. # # @option params [String] :content_disposition # Specifies presentational information for the object. # # @option params [String] :content_encoding # Specifies what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the media-type # referenced by the Content-Type header field. # # For directory buckets, only the `aws-chunked` value is supported in # this header field. # # # # @option params [String] :content_language # The language that the content is in. # # @option params [String] :content_type # A standard MIME type describing the format of the object data. # # @option params [Time,DateTime,Date,Integer,String] :expires # The date and time at which the object is no longer cacheable. # # @option params [String] :grant_full_control # Specify access permissions explicitly to give the grantee READ, # READ\_ACP, and WRITE\_ACP permissions on the object. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*. # # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an Amazon # Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in the # following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. 
California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, see # [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the Amazon # Web Services accounts identified by account IDs permissions to read # object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666" ` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # # @option params [String] :grant_read # Specify access permissions explicitly to allow grantee to read the # object data and its metadata. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*. # # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an Amazon # Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in the # following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, see # [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the Amazon # Web Services accounts identified by account IDs permissions to read # object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666" ` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # # @option params [String] :grant_read_acp # Specify access permissions explicitly to allow the grantee to read the # object ACL. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*.
# # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an Amazon # Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in the # following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, see # [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the Amazon # Web Services accounts identified by account IDs permissions to read # object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666" ` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # # @option params [String] :grant_write_acp # Specify access permissions explicitly to allow the grantee to write # the ACL for the applicable object. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*. # # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an Amazon # Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in the # following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, see # [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the Amazon # Web Services accounts identified by account IDs permissions to read # object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666" ` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # # @option params [required, String] :key # Object key for which the multipart upload is to be initiated. # # @option params [Hash] :metadata # A map of metadata to store with the object in S3.
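# @example Granting read access with explicit grants (illustrative sketch)
#
#   # A hedged sketch, not an official example: the account IDs are the
#   # placeholder canonical user IDs from the header example above. The
#   # string value is passed through as the `x-amz-grant-read` header.
#   client.create_multipart_upload({
#     bucket: "examplebucket",
#     key: "largeobject",
#     grant_read: 'id="11112222333", id="444455556666"',
#   })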
# # @option params [String] :server_side_encryption # The server-side encryption algorithm used when you store this object # in Amazon S3 (for example, `AES256`, `aws:kms`). # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # # @option params [String] :storage_class # By default, Amazon S3 uses the STANDARD Storage Class to store newly # created objects. The STANDARD storage class provides high durability # and high availability. Depending on performance needs, you can specify # a different Storage Class. For more information, see [Storage # Classes][1] in the *Amazon S3 User Guide*. # # * For directory buckets, only the S3 Express One Zone storage class is # supported to store newly created objects. # # * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # # @option params [String] :website_redirect_location # If the bucket is configured as a website, redirects requests for this # object to another object in the same bucket or to an external URL. # Amazon S3 stores the value of this header in the object metadata. # # This functionality is not supported for directory buckets. # # # # @option params [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, AES256). # # This functionality is not supported for directory buckets. # # # # @option params [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded; Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported for directory buckets. # # # # @option params [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the customer-provided encryption # key according to RFC 1321. Amazon S3 uses this header for a message # integrity check to ensure that the encryption key was transmitted # without error. # # This functionality is not supported for directory buckets. # # # # @option params [String] :ssekms_key_id # Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric # encryption customer managed key to use for object encryption. # # This functionality is not supported for directory buckets. # # # # @option params [String] :ssekms_encryption_context # Specifies the Amazon Web Services KMS Encryption Context to use for # object encryption. The value of this header is a base64-encoded UTF-8 # string holding JSON with the encryption context key-value pairs. # # This functionality is not supported for directory buckets. # # # # @option params [Boolean] :bucket_key_enabled # Specifies whether Amazon S3 should use an S3 Bucket Key for object # encryption with server-side encryption using Key Management Service # (KMS) keys (SSE-KMS). Setting this header to `true` causes Amazon S3 # to use an S3 Bucket Key for object encryption with SSE-KMS. # # Specifying this header with an object action doesn’t affect # bucket-level settings for S3 Bucket Key. # # This functionality is not supported for directory buckets. # # # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. 
If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :tagging # The tag-set for the object. The tag-set must be encoded as URL Query # parameters. # # This functionality is not supported for directory buckets. # # # # @option params [String] :object_lock_mode # Specifies the Object Lock mode that you want to apply to the uploaded # object. # # This functionality is not supported for directory buckets. # # # # @option params [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date # Specifies the date and time when you want the Object Lock to expire. # # This functionality is not supported for directory buckets. # # # # @option params [String] :object_lock_legal_hold_status # Specifies whether you want to apply a legal hold to the uploaded # object. # # This functionality is not supported for directory buckets. # # # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @option params [String] :checksum_algorithm # Indicates the algorithm that you want Amazon S3 to use to create the # checksum for the object. For more information, see [Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @return [Types::CreateMultipartUploadOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CreateMultipartUploadOutput#abort_date #abort_date} => Time # * {Types::CreateMultipartUploadOutput#abort_rule_id #abort_rule_id} => String # * {Types::CreateMultipartUploadOutput#bucket #bucket} => String # * {Types::CreateMultipartUploadOutput#key #key} => String # * {Types::CreateMultipartUploadOutput#upload_id #upload_id} => String # * {Types::CreateMultipartUploadOutput#server_side_encryption #server_side_encryption} => String # * {Types::CreateMultipartUploadOutput#sse_customer_algorithm #sse_customer_algorithm} => String # * {Types::CreateMultipartUploadOutput#sse_customer_key_md5 #sse_customer_key_md5} => String # * {Types::CreateMultipartUploadOutput#ssekms_key_id #ssekms_key_id} => String # * {Types::CreateMultipartUploadOutput#ssekms_encryption_context #ssekms_encryption_context} => String # * {Types::CreateMultipartUploadOutput#bucket_key_enabled #bucket_key_enabled} => Boolean # * {Types::CreateMultipartUploadOutput#request_charged #request_charged} => String # * {Types::CreateMultipartUploadOutput#checksum_algorithm #checksum_algorithm} => String # # # @example Example: To initiate a multipart upload # # # The following example initiates a multipart upload. 
# # resp = client.create_multipart_upload({ # bucket: "examplebucket", # key: "largeobject", # }) # # resp.to_h outputs the following: # { # bucket: "examplebucket", # key: "largeobject", # upload_id: "ibZBv_75gd9r8lH_gqXatLdxMVpAlj6ZQjEs.OwyF3953YdwbcQnMA2BLGn8Lx12fQNICtMw5KyteFeHw.Sjng--", # } # # @example Request syntax with placeholder values # # resp = client.create_multipart_upload({ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control # bucket: "BucketName", # required # cache_control: "CacheControl", # content_disposition: "ContentDisposition", # content_encoding: "ContentEncoding", # content_language: "ContentLanguage", # content_type: "ContentType", # expires: Time.now, # grant_full_control: "GrantFullControl", # grant_read: "GrantRead", # grant_read_acp: "GrantReadACP", # grant_write_acp: "GrantWriteACP", # key: "ObjectKey", # required # metadata: { # "MetadataKey" => "MetadataValue", # }, # server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE # website_redirect_location: "WebsiteRedirectLocation", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # ssekms_key_id: "SSEKMSKeyId", # ssekms_encryption_context: "SSEKMSEncryptionContext", # bucket_key_enabled: false, # request_payer: "requester", # accepts requester # tagging: "TaggingHeader", # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE # object_lock_retain_until_date: Time.now, # object_lock_legal_hold_status: "ON", # accepts ON, OFF # expected_bucket_owner: "AccountId", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # }) # # @example Response structure # # resp.abort_date #=> Time # resp.abort_rule_id #=> String # resp.bucket #=> String # resp.key #=> String # resp.upload_id #=> String # resp.server_side_encryption #=> String, one of "AES256", "aws:kms", "aws:kms:dsse" # resp.sse_customer_algorithm #=> String # resp.sse_customer_key_md5 #=> String # resp.ssekms_key_id #=> String # resp.ssekms_encryption_context #=> String # resp.bucket_key_enabled #=> Boolean # resp.request_charged #=> String, one of "requester" # resp.checksum_algorithm #=> String, one of "CRC32", "CRC32C", "SHA1", "SHA256" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload AWS API Documentation # # @overload create_multipart_upload(params = {}) # @param [Hash] params ({}) def create_multipart_upload(params = {}, options = {}) req = build_request(:create_multipart_upload, params) req.send_request(options) end # Creates a session that establishes temporary security credentials to # support fast authentication and authorization for the Zonal endpoint # APIs on directory buckets. For more information about Zonal endpoint # APIs that include the Availability Zone in the request endpoint, see # [S3 Express One Zone APIs][1] in the *Amazon S3 User Guide*. # # To make Zonal endpoint API requests on a directory bucket, use the # `CreateSession` API operation. Specifically, you grant # `s3express:CreateSession` permission to a bucket in a bucket policy or # an IAM identity-based policy. 
Then, you use IAM credentials to make # the `CreateSession` API request on the bucket, which returns temporary # security credentials that include the access key ID, secret access # key, session token, and expiration. These credentials have associated # permissions to access the Zonal endpoint APIs. After the session is # created, you don’t need to use other policies to grant permissions to # each Zonal endpoint API individually. Instead, in your Zonal endpoint # API requests, you sign your requests by applying the temporary # security credentials of the session to the request headers and # following the SigV4 protocol for authentication. You also apply the # session token to the `x-amz-s3session-token` request header for # authorization. Temporary security credentials are scoped to the bucket # and expire after 5 minutes. After the expiration time, any calls that # you make with those credentials will fail. You must use IAM # credentials again to make a `CreateSession` API request that generates # a new set of temporary credentials for use. Temporary credentials # cannot be extended or refreshed beyond the original specified # interval. # # If you use the Amazon Web Services SDKs, the SDKs handle the session token # refreshes automatically to avoid service interruptions when a session # expires. We recommend that you use the Amazon Web Services SDKs to # initiate and manage requests to the CreateSession API. For more # information, see [Performance guidelines and design patterns][2] in # the *Amazon S3 User Guide*. # # * You must make requests for this API operation to the Zonal endpoint. # These endpoints support virtual-hosted-style requests in the format # `https://bucket_name.s3express-az_id.region.amazonaws.com`. # Path-style requests are not supported. For more information, see # [Regional and Zonal endpoints][3] in the *Amazon S3 User Guide*. # # * CopyObject API operation - Unlike other Zonal # endpoint APIs, the `CopyObject` API operation doesn't use the # temporary security credentials returned from the `CreateSession` API # operation for authentication and authorization. For information # about authentication and authorization of the `CopyObject` API # operation on directory buckets, see [CopyObject][4]. # # * HeadBucket API operation - Unlike other Zonal # endpoint APIs, the `HeadBucket` API operation doesn't use the # temporary security credentials returned from the `CreateSession` API # operation for authentication and authorization. For information # about authentication and authorization of the `HeadBucket` API # operation on directory buckets, see [HeadBucket][5]. # # # # Permissions # # : To obtain temporary security credentials, you must create a bucket # policy or an IAM identity-based policy that grants # `s3express:CreateSession` permission to the bucket. In a policy, you # can use the `s3express:SessionMode` condition key to control who # can create a `ReadWrite` or `ReadOnly` session. For more information # about `ReadWrite` or `ReadOnly` sessions, see [ # `x-amz-create-session-mode` ][6]. For example policies, see [Example # bucket policies for S3 Express One Zone][7] and [Amazon Web Services # Identity and Access Management (IAM) identity-based policies for S3 # Express One Zone][8] in the *Amazon S3 User Guide*. # # To grant cross-account access to Zonal endpoint APIs, the bucket # policy should also grant both accounts the `s3express:CreateSession` # permission.
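#
# As a hedged sketch (the directory bucket name below is hypothetical), you can call the operation directly and read the returned credentials; in typical use the SDK performs this exchange and refresh for you:
#
#     resp = client.create_session(
#       bucket: "amzn-s3-demo--usw2-az1--x-s3", # hypothetical directory bucket
#       session_mode: "ReadOnly",
#     )
#     creds = resp.credentials
#     # creds.access_key_id, creds.secret_access_key, and creds.session_token
#     # back Zonal endpoint requests until creds.expiration (about 5 minutes).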
# # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is ` # Bucket_name.s3express-az_id.region.amazonaws.com`. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters # [7]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html # # @option params [String] :session_mode # Specifies the mode of the session that will be created, either # `ReadWrite` or `ReadOnly`. By default, a `ReadWrite` session is # created. A `ReadWrite` session is capable of executing all the Zonal # endpoint APIs on a directory bucket. A `ReadOnly` session is # constrained to execute the following Zonal endpoint APIs: `GetObject`, # `HeadObject`, `ListObjectsV2`, `GetObjectAttributes`, `ListParts`, and # `ListMultipartUploads`. # # @option params [required, String] :bucket # The name of the bucket that you create a session for. # # @return [Types::CreateSessionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CreateSessionOutput#credentials #credentials} => Types::SessionCredentials # # @example Request syntax with placeholder values # # resp = client.create_session({ # session_mode: "ReadOnly", # accepts ReadOnly, ReadWrite # bucket: "BucketName", # required # }) # # @example Response structure # # resp.credentials.access_key_id #=> String # resp.credentials.secret_access_key #=> String # resp.credentials.session_token #=> String # resp.credentials.expiration #=> Time # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateSession AWS API Documentation # # @overload create_session(params = {}) # @param [Hash] params ({}) def create_session(params = {}, options = {}) req = build_request(:create_session, params) req.send_request(options) end # Deletes the S3 bucket. All objects (including all object versions and # delete markers) in the bucket must be deleted before the bucket itself # can be deleted. # # * **Directory buckets** - If multipart uploads in a directory bucket # are in progress, you can't delete the bucket until all the # in-progress multipart uploads are aborted or completed. # # * Directory buckets - For directory buckets, you must make # requests for this API operation to the Regional endpoint. These # endpoints support path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. For more # information, see [Regional and Zonal endpoints][1] in the *Amazon S3 # User Guide*. # # # # Permissions # : * **General purpose bucket permissions** - You must have the # `s3:DeleteBucket` permission on the specified bucket in a policy. # # * **Directory bucket permissions** - You must have the # `s3express:DeleteBucket` permission in an IAM identity-based # policy instead of a bucket policy. 
Cross-account access to this # API operation isn't supported. This operation can only be # performed by the Amazon Web Services account that owns the # resource. For more information about directory bucket policies and # permissions, see [Amazon Web Services Identity and Access # Management (IAM) for S3 Express One Zone][2] in the *Amazon S3 # User Guide*. # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is # `s3express-control.region.amazonaws.com`. # # The following operations are related to `DeleteBucket`: # # * [CreateBucket][3] # # * [DeleteObject][4] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html # # @option params [required, String] :bucket # Specifies the bucket being deleted. # # Directory buckets - When you use this operation with a # directory bucket, you must use path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. Directory bucket # names must be unique in the chosen Availability Zone. Bucket names # must also follow the format ` bucket_base_name--az_id--x-s3` (for # example, ` DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about # bucket naming restrictions, see [Directory bucket naming rules][1] in # the *Amazon S3 User Guide* # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # For directory buckets, this header is not supported in this API # operation. If you specify this header, the request fails with the HTTP # status code `501 Not Implemented`. # # # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # # @example Example: To delete a bucket # # # The following example deletes the specified bucket. # # resp = client.delete_bucket({ # bucket: "forrandall2", # }) # # @example Request syntax with placeholder values # # resp = client.delete_bucket({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket AWS API Documentation # # @overload delete_bucket(params = {}) # @param [Hash] params ({}) def delete_bucket(params = {}, options = {}) req = build_request(:delete_bucket, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Deletes an analytics configuration for the bucket (specified by the # analytics configuration ID). # # To use this operation, you must have permissions to perform the # `s3:PutAnalyticsConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][1] and [Managing Access # Permissions to Your Amazon S3 Resources][2]. # # For information about the Amazon S3 analytics feature, see [Amazon S3 # Analytics – Storage Class Analysis][3]. 
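#
# As a minimal sketch (the configuration ID "report-1" is hypothetical), you might confirm that a configuration exists before deleting it:
#
#     resp = client.list_bucket_analytics_configurations(bucket: "examplebucket")
#     ids = Array(resp.analytics_configuration_list).map(&:id)
#     if ids.include?("report-1")
#       client.delete_bucket_analytics_configuration(
#         bucket: "examplebucket",
#         id: "report-1",
#       )
#     end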
# # The following operations are related to # `DeleteBucketAnalyticsConfiguration`: # # * [GetBucketAnalyticsConfiguration][4] # # * [ListBucketAnalyticsConfigurations][5] # # * [PutBucketAnalyticsConfiguration][6] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html # # @option params [required, String] :bucket # The name of the bucket from which an analytics configuration is # deleted. # # @option params [required, String] :id # The ID that identifies the analytics configuration. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # @example Request syntax with placeholder values # # resp = client.delete_bucket_analytics_configuration({ # bucket: "BucketName", # required # id: "AnalyticsId", # required # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration AWS API Documentation # # @overload delete_bucket_analytics_configuration(params = {}) # @param [Hash] params ({}) def delete_bucket_analytics_configuration(params = {}, options = {}) req = build_request(:delete_bucket_analytics_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Deletes the `cors` configuration information set for the bucket. # # To use this operation, you must have permission to perform the # `s3:PutBucketCORS` action. The bucket owner has this permission by # default and can grant this permission to others. # # For information about `cors`, see [Enabling Cross-Origin Resource # Sharing][1] in the *Amazon S3 User Guide*. # # **Related Resources** # # * [PutBucketCors][2] # # * [RESTOPTIONSobject][3] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html # # @option params [required, String] :bucket # Specifies the bucket whose `cors` configuration is being deleted. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # # @example Example: To delete cors configuration on a bucket. # # # The following example deletes CORS configuration on a bucket. 
# # resp = client.delete_bucket_cors({ # bucket: "examplebucket", # }) # # @example Request syntax with placeholder values # # resp = client.delete_bucket_cors({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors AWS API Documentation # # @overload delete_bucket_cors(params = {}) # @param [Hash] params ({}) def delete_bucket_cors(params = {}, options = {}) req = build_request(:delete_bucket_cors, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # This implementation of the DELETE action resets the default encryption # for the bucket as server-side encryption with Amazon S3 managed keys # (SSE-S3). For information about the bucket default encryption feature, # see [Amazon S3 Bucket Default Encryption][1] in the *Amazon S3 User # Guide*. # # To use this operation, you must have permissions to perform the # `s3:PutEncryptionConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][2] and [Managing Access # Permissions to your Amazon S3 Resources][3] in the *Amazon S3 User # Guide*. # # The following operations are related to `DeleteBucketEncryption`: # # * [PutBucketEncryption][4] # # * [GetBucketEncryption][5] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html # # @option params [required, String] :bucket # The name of the bucket containing the server-side encryption # configuration to delete. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # @example Request syntax with placeholder values # # resp = client.delete_bucket_encryption({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption AWS API Documentation # # @overload delete_bucket_encryption(params = {}) # @param [Hash] params ({}) def delete_bucket_encryption(params = {}, options = {}) req = build_request(:delete_bucket_encryption, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Deletes the S3 Intelligent-Tiering configuration from the specified # bucket. # # The S3 Intelligent-Tiering storage class is designed to optimize # storage costs by automatically moving data to the most cost-effective # storage access tier, without performance impact or operational # overhead. S3 Intelligent-Tiering delivers automatic cost savings in # three low latency and high throughput access tiers. To get the lowest # storage cost on data that can be accessed in minutes to hours, you can # choose to activate additional archiving capabilities. 
# # The S3 Intelligent-Tiering storage class is the ideal storage class # for data with unknown, changing, or unpredictable access patterns, # independent of object size or retention period. If the size of an # object is less than 128 KB, it is not monitored and not eligible for # auto-tiering. Smaller objects can be stored, but they are always # charged at the Frequent Access tier rates in the S3 # Intelligent-Tiering storage class. # # For more information, see [Storage class for automatically optimizing # frequently and infrequently accessed objects][1]. # # Operations related to `DeleteBucketIntelligentTieringConfiguration` # include: # # * [GetBucketIntelligentTieringConfiguration][2] # # * [PutBucketIntelligentTieringConfiguration][3] # # * [ListBucketIntelligentTieringConfigurations][4] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html # # @option params [required, String] :bucket # The name of the Amazon S3 bucket whose configuration you want to # modify or retrieve. # # @option params [required, String] :id # The ID used to identify the S3 Intelligent-Tiering configuration. # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # @example Request syntax with placeholder values # # resp = client.delete_bucket_intelligent_tiering_configuration({ # bucket: "BucketName", # required # id: "IntelligentTieringId", # required # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration AWS API Documentation # # @overload delete_bucket_intelligent_tiering_configuration(params = {}) # @param [Hash] params ({}) def delete_bucket_intelligent_tiering_configuration(params = {}, options = {}) req = build_request(:delete_bucket_intelligent_tiering_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Deletes an inventory configuration (identified by the inventory ID) # from the bucket. # # To use this operation, you must have permissions to perform the # `s3:PutInventoryConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][1] and [Managing Access # Permissions to Your Amazon S3 Resources][2]. # # For information about the Amazon S3 inventory feature, see [Amazon S3 # Inventory][3]. 
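#
# As a hedged sketch (assuming the caller holds the permissions described above), you could remove every inventory configuration on a bucket, paginating with the continuation token:
#
#     token = nil
#     loop do
#       resp = client.list_bucket_inventory_configurations(
#         bucket: "examplebucket",
#         continuation_token: token,
#       )
#       Array(resp.inventory_configuration_list).each do |config|
#         client.delete_bucket_inventory_configuration(
#           bucket: "examplebucket",
#           id: config.id,
#         )
#       end
#       break unless resp.is_truncated
#       token = resp.next_continuation_token
#     end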
# # Operations related to `DeleteBucketInventoryConfiguration` include: # # * [GetBucketInventoryConfiguration][4] # # * [PutBucketInventoryConfiguration][5] # # * [ListBucketInventoryConfigurations][6] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html # # @option params [required, String] :bucket # The name of the bucket containing the inventory configuration to # delete. # # @option params [required, String] :id # The ID used to identify the inventory configuration. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # @example Request syntax with placeholder values # # resp = client.delete_bucket_inventory_configuration({ # bucket: "BucketName", # required # id: "InventoryId", # required # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration AWS API Documentation # # @overload delete_bucket_inventory_configuration(params = {}) # @param [Hash] params ({}) def delete_bucket_inventory_configuration(params = {}, options = {}) req = build_request(:delete_bucket_inventory_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Deletes the lifecycle configuration from the specified bucket. Amazon # S3 removes all the lifecycle configuration rules in the lifecycle # subresource associated with the bucket. Your objects never expire, and # Amazon S3 no longer automatically deletes any objects on the basis of # rules contained in the deleted lifecycle configuration. # # To use this operation, you must have permission to perform the # `s3:PutLifecycleConfiguration` action. By default, the bucket owner # has this permission and the bucket owner can grant this permission to # others. # # There is usually some time lag before lifecycle configuration deletion # is fully propagated to all the Amazon S3 systems. # # For more information about the object expiration, see [Elements to # Describe Lifecycle Actions][1]. # # Related actions include: # # * [PutBucketLifecycleConfiguration][2] # # * [GetBucketLifecycleConfiguration][3] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html # # @option params [required, String] :bucket # The bucket name of the lifecycle to delete. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. 
If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # # @example Example: To delete lifecycle configuration on a bucket. # # # The following example deletes lifecycle configuration on a bucket. # # resp = client.delete_bucket_lifecycle({ # bucket: "examplebucket", # }) # # @example Request syntax with placeholder values # # resp = client.delete_bucket_lifecycle({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle AWS API Documentation # # @overload delete_bucket_lifecycle(params = {}) # @param [Hash] params ({}) def delete_bucket_lifecycle(params = {}, options = {}) req = build_request(:delete_bucket_lifecycle, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Deletes a metrics configuration for the Amazon CloudWatch request # metrics (specified by the metrics configuration ID) from the bucket. # Note that this doesn't include the daily storage metrics. # # To use this operation, you must have permissions to perform the # `s3:PutMetricsConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][1] and [Managing Access # Permissions to Your Amazon S3 Resources][2]. # # For information about CloudWatch request metrics for Amazon S3, see # [Monitoring Metrics with Amazon CloudWatch][3]. # # The following operations are related to # `DeleteBucketMetricsConfiguration`: # # * [GetBucketMetricsConfiguration][4] # # * [PutBucketMetricsConfiguration][5] # # * [ListBucketMetricsConfigurations][6] # # * [Monitoring Metrics with Amazon CloudWatch][3] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html # # @option params [required, String] :bucket # The name of the bucket containing the metrics configuration to delete. # # @option params [required, String] :id # The ID used to identify the metrics configuration. The ID has a 64 # character limit and can only contain letters, numbers, periods, # dashes, and underscores. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
# # @example Request syntax with placeholder values # # resp = client.delete_bucket_metrics_configuration({ # bucket: "BucketName", # required # id: "MetricsId", # required # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration AWS API Documentation # # @overload delete_bucket_metrics_configuration(params = {}) # @param [Hash] params ({}) def delete_bucket_metrics_configuration(params = {}, options = {}) req = build_request(:delete_bucket_metrics_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Removes `OwnershipControls` for an Amazon S3 bucket. To use this # operation, you must have the `s3:PutBucketOwnershipControls` # permission. For more information about Amazon S3 permissions, see # [Specifying Permissions in a Policy][1]. # # For information about Amazon S3 Object Ownership, see [Using Object # Ownership][2]. # # The following operations are related to # `DeleteBucketOwnershipControls`: # # * GetBucketOwnershipControls # # * PutBucketOwnershipControls # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html # # @option params [required, String] :bucket # The Amazon S3 bucket whose `OwnershipControls` you want to delete. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # @example Request syntax with placeholder values # # resp = client.delete_bucket_ownership_controls({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls AWS API Documentation # # @overload delete_bucket_ownership_controls(params = {}) # @param [Hash] params ({}) def delete_bucket_ownership_controls(params = {}, options = {}) req = build_request(:delete_bucket_ownership_controls, params) req.send_request(options) end # Deletes the policy of a specified bucket. # # Directory buckets - For directory buckets, you must make # requests for this API operation to the Regional endpoint. These # endpoints support path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. For more information, # see [Regional and Zonal endpoints][1] in the *Amazon S3 User Guide*. # # # # Permissions # # : If you are using an identity other than the root user of the Amazon # Web Services account that owns the bucket, the calling identity must # both have the `DeleteBucketPolicy` permissions on the specified # bucket and belong to the bucket owner's account in order to use # this operation. # # If you don't have `DeleteBucketPolicy` permissions, Amazon S3 # returns a `403 Access Denied` error. If you have the correct # permissions, but you're not using an identity that belongs to the # bucket owner's account, Amazon S3 returns a `405 Method Not # Allowed` error. 
# # To ensure that bucket owners don't inadvertently lock themselves # out of their own buckets, the root principal in a bucket owner's # Amazon Web Services account can perform the `GetBucketPolicy`, # `PutBucketPolicy`, and `DeleteBucketPolicy` API actions, even if # their bucket policy explicitly denies the root principal's access. # Bucket owner root principals can only be blocked from performing # these API actions by VPC endpoint policies and Amazon Web Services # Organizations policies. # # * **General purpose bucket permissions** - The # `s3:DeleteBucketPolicy` permission is required in a policy. For # more information about bucket policies for general purpose buckets, # see [Using Bucket Policies and User Policies][2] in the *Amazon S3 # User Guide*. # # * **Directory bucket permissions** - To grant access to this API # operation, you must have the `s3express:DeleteBucketPolicy` # permission in an IAM identity-based policy instead of a bucket # policy. Cross-account access to this API operation isn't # supported. This operation can only be performed by the Amazon Web # Services account that owns the resource. For more information # about directory bucket policies and permissions, see [Amazon Web # Services Identity and Access Management (IAM) for S3 Express One # Zone][3] in the *Amazon S3 User Guide*. # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is # `s3express-control.region.amazonaws.com`. # # The following operations are related to `DeleteBucketPolicy`: # # * [CreateBucket][4] # # * [DeleteObject][5] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html # # @option params [required, String] :bucket # The bucket name. # # Directory buckets - When you use this operation with a # directory bucket, you must use path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. Directory bucket # names must be unique in the chosen Availability Zone. Bucket names # must also follow the format ` bucket_base_name--az_id--x-s3` (for # example, ` DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about # bucket naming restrictions, see [Directory bucket naming rules][1] in # the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # For directory buckets, this header is not supported in this API # operation. If you specify this header, the request fails with the HTTP # status code `501 Not Implemented`. # # # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # # @example Example: To delete bucket policy # # # The following example deletes the bucket policy on the specified bucket.
# # resp = client.delete_bucket_policy({ # bucket: "examplebucket", # }) # # @example Request syntax with placeholder values # # resp = client.delete_bucket_policy({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy AWS API Documentation # # @overload delete_bucket_policy(params = {}) # @param [Hash] params ({}) def delete_bucket_policy(params = {}, options = {}) req = build_request(:delete_bucket_policy, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Deletes the replication configuration from the bucket. # # To use this operation, you must have permissions to perform the # `s3:PutReplicationConfiguration` action. The bucket owner has this # permission by default and can grant it to others. For more # information about permissions, see [Permissions Related to Bucket # Subresource Operations][1] and [Managing Access Permissions to Your # Amazon S3 Resources][2]. # # It can take a while for the deletion of a replication configuration to # fully propagate. # # # # For information about replication configuration, see [Replication][3] # in the *Amazon S3 User Guide*. # # The following operations are related to `DeleteBucketReplication`: # # * [PutBucketReplication][4] # # * [GetBucketReplication][5] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html # # @option params [required, String] :bucket # The bucket name. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # # @example Example: To delete bucket replication configuration # # # The following example deletes the replication configuration set on the bucket. # # resp = client.delete_bucket_replication({ # bucket: "example", # }) # # @example Request syntax with placeholder values # # resp = client.delete_bucket_replication({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication AWS API Documentation # # @overload delete_bucket_replication(params = {}) # @param [Hash] params ({}) def delete_bucket_replication(params = {}, options = {}) req = build_request(:delete_bucket_replication, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Deletes the tags from the bucket. # # To use this operation, you must have permission to perform the # `s3:PutBucketTagging` action. By default, the bucket owner has this # permission and can grant this permission to others.
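#
# As a minimal sketch, you might delete the tag set and then confirm it is gone; Amazon S3 answers a subsequent GetBucketTagging on an untagged bucket with a NoSuchTagSet error:
#
#     client.delete_bucket_tagging(bucket: "examplebucket")
#     begin
#       client.get_bucket_tagging(bucket: "examplebucket")
#     rescue Aws::S3::Errors::NoSuchTagSet
#       # expected once the tag set has been removed
#     end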
# # The following operations are related to `DeleteBucketTagging`: # # * [GetBucketTagging][1] # # * [PutBucketTagging][2] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html # # @option params [required, String] :bucket # The bucket that has the tag set to be removed. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # # @example Example: To delete bucket tags # # # The following example deletes bucket tags. # # resp = client.delete_bucket_tagging({ # bucket: "examplebucket", # }) # # @example Request syntax with placeholder values # # resp = client.delete_bucket_tagging({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging AWS API Documentation # # @overload delete_bucket_tagging(params = {}) # @param [Hash] params ({}) def delete_bucket_tagging(params = {}, options = {}) req = build_request(:delete_bucket_tagging, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # This action removes the website configuration for a bucket. Amazon S3 # returns a `200 OK` response upon successfully deleting a website # configuration on the specified bucket. You will get a `200 OK` # response if the website configuration you are trying to delete does # not exist on the bucket. Amazon S3 returns a `404` response if the # bucket specified in the request does not exist. # # This DELETE action requires the `S3:DeleteBucketWebsite` permission. # By default, only the bucket owner can delete the website configuration # attached to a bucket. However, bucket owners can grant other users # permission to delete the website configuration by writing a bucket # policy granting them the `S3:DeleteBucketWebsite` permission. # # For more information about hosting websites, see [Hosting Websites on # Amazon S3][1]. # # The following operations are related to `DeleteBucketWebsite`: # # * [GetBucketWebsite][2] # # * [PutBucketWebsite][3] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html # # @option params [required, String] :bucket # The bucket name for which you want to remove the website # configuration. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # # @example Example: To delete bucket website configuration # # # The following example deletes bucket website configuration. 
# # resp = client.delete_bucket_website({ # bucket: "examplebucket", # }) # # @example Request syntax with placeholder values # # resp = client.delete_bucket_website({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite AWS API Documentation # # @overload delete_bucket_website(params = {}) # @param [Hash] params ({}) def delete_bucket_website(params = {}, options = {}) req = build_request(:delete_bucket_website, params) req.send_request(options) end # Removes an object from a bucket. The behavior depends on the bucket's # versioning state: # # * If versioning is enabled, the operation removes the null version (if # there is one) of an object and inserts a delete marker, which # becomes the latest version of the object. If there isn't a null # version, Amazon S3 does not remove any objects but will still # respond that the command was successful. # # * If versioning is suspended or not enabled, the operation permanently # deletes the object. # # * **Directory buckets** - S3 Versioning isn't enabled or supported # for directory buckets. For this API operation, only the `null` value # of the version ID is supported by directory buckets. You can only # specify `null` for the `versionId` query parameter in the request. # # * **Directory buckets** - For directory buckets, you must make # requests for this API operation to the Zonal endpoint. These # endpoints support virtual-hosted-style requests in the format # `https://bucket_name.s3express-az_id.region.amazonaws.com/key-name # `. Path-style requests are not supported. For more information, see # [Regional and Zonal endpoints][1] in the *Amazon S3 User Guide*. # # # # To remove a specific version, you must use the `versionId` query # parameter. Using this query parameter permanently deletes the version. # If the object deleted is a delete marker, Amazon S3 sets the response # header `x-amz-delete-marker` to true. # # If the object you want to delete is in a bucket where the bucket # versioning configuration is MFA Delete enabled, you must include the # `x-amz-mfa` request header in the DELETE `versionId` request. Requests # that include `x-amz-mfa` must use HTTPS. For more information about # MFA Delete, see [Using MFA Delete][2] in the *Amazon S3 User Guide*. # To see sample requests that use versioning, see [Sample Request][3]. # # **Directory buckets** - MFA delete is not supported by directory # buckets. # # # # You can delete objects by explicitly calling DELETE Object or by # configuring a lifecycle ([PutBucketLifecycle][4]) to enable Amazon S3 # to remove them for you. If you want to block users or accounts from # removing or deleting objects from your bucket, you must deny them the # `s3:DeleteObject`, `s3:DeleteObjectVersion`, and # `s3:PutLifeCycleConfiguration` actions. # # **Directory buckets** - S3 Lifecycle is not supported by directory # buckets. # # # # Permissions # : * **General purpose bucket permissions** - The following permissions # are required in your policies when your `DeleteObject` request # includes specific headers. # # * s3:DeleteObject - To delete an object from # a bucket, you must always have the `s3:DeleteObject` permission. # # * s3:DeleteObjectVersion - To delete a # specific version of an object from a versioning-enabled bucket, # you must have the `s3:DeleteObjectVersion` permission.
# # * **Directory bucket permissions** - To grant access to this API # operation on a directory bucket, we recommend that you use the [ # `CreateSession` ][5] API operation for session-based # authorization. Specifically, you grant the # `s3express:CreateSession` permission to the directory bucket in a # bucket policy or an IAM identity-based policy. Then, you make the # `CreateSession` API call on the bucket to obtain a session token. # With the session token in your request header, you can make API # requests to this operation. After the session token expires, you # make another `CreateSession` API call to generate a new session # token for use. The Amazon Web Services CLI and SDKs create the session and # refresh the session token automatically to avoid service # interruptions when a session expires. For more information about # authorization, see [ `CreateSession` ][5]. # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is ` # Bucket_name.s3express-az_id.region.amazonaws.com`. # # The following action is related to `DeleteObject`: # # * [PutObject][6] # # ^ # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html # # @option params [required, String] :bucket # The name of the bucket containing the object. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*.
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [required, String] :key # Key name of the object to delete. # # @option params [String] :mfa # The concatenation of the authentication device's serial number, a # space, and the value that is displayed on your authentication device. # Required to permanently delete a versioned object if versioning is # configured with MFA delete enabled. # # This functionality is not supported for directory buckets. # # # # @option params [String] :version_id # Version ID used to reference a specific version of the object. # # For directory buckets in this API operation, only the `null` value of # the version ID is supported. # # # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [Boolean] :bypass_governance_retention # Indicates whether S3 Object Lock should bypass Governance-mode # restrictions to process this operation. To use this header, you must # have the `s3:BypassGovernanceRetention` permission. # # This functionality is not supported for directory buckets. # # # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::DeleteObjectOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::DeleteObjectOutput#delete_marker #delete_marker} => Boolean # * {Types::DeleteObjectOutput#version_id #version_id} => String # * {Types::DeleteObjectOutput#request_charged #request_charged} => String # # # @example Example: To delete an object (from a non-versioned bucket) # # # The following example deletes an object from a non-versioned bucket. # # resp = client.delete_object({ # bucket: "ExampleBucket", # key: "HappyFace.jpg", # }) # # @example Example: To delete an object # # # The following example deletes an object from an S3 bucket. 
# # resp = client.delete_object({ # bucket: "examplebucket", # key: "objectkey.jpg", # }) # # resp.to_h outputs the following: # { # } # # @example Request syntax with placeholder values # # resp = client.delete_object({ # bucket: "BucketName", # required # key: "ObjectKey", # required # mfa: "MFA", # version_id: "ObjectVersionId", # request_payer: "requester", # accepts requester # bypass_governance_retention: false, # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.delete_marker #=> Boolean # resp.version_id #=> String # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject AWS API Documentation # # @overload delete_object(params = {}) # @param [Hash] params ({}) def delete_object(params = {}, options = {}) req = build_request(:delete_object, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Removes the entire tag set from the specified object. For more # information about managing object tags, see [ Object Tagging][1]. # # To use this operation, you must have permission to perform the # `s3:DeleteObjectTagging` action. # # To delete tags of a specific object version, add the `versionId` query # parameter in the request. You will need permission for the # `s3:DeleteObjectVersionTagging` action. # # The following operations are related to `DeleteObjectTagging`: # # * [PutObjectTagging][2] # # * [GetObjectTagging][3] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html # # @option params [required, String] :bucket # The bucket name containing the objects from which to remove the tags. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][1] in the *Amazon S3 User Guide*. # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][2] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [required, String] :key # The key that identifies the object in the bucket from which to remove # all tags. # # @option params [String] :version_id # The versionId of the object that the tag-set will be removed from. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. 
If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::DeleteObjectTaggingOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::DeleteObjectTaggingOutput#version_id #version_id} => String # # # @example Example: To remove tag set from an object version # # # The following example removes tag set associated with the specified object version. The request specifies both the # # object key and object version. # # resp = client.delete_object_tagging({ # bucket: "examplebucket", # key: "HappyFace.jpg", # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI", # }) # # resp.to_h outputs the following: # { # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI", # } # # @example Example: To remove tag set from an object # # # The following example removes tag set associated with the specified object. If the bucket is versioning enabled, the # # operation removes tag set from the latest object version. # # resp = client.delete_object_tagging({ # bucket: "examplebucket", # key: "HappyFace.jpg", # }) # # resp.to_h outputs the following: # { # version_id: "null", # } # # @example Request syntax with placeholder values # # resp = client.delete_object_tagging({ # bucket: "BucketName", # required # key: "ObjectKey", # required # version_id: "ObjectVersionId", # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.version_id #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging AWS API Documentation # # @overload delete_object_tagging(params = {}) # @param [Hash] params ({}) def delete_object_tagging(params = {}, options = {}) req = build_request(:delete_object_tagging, params) req.send_request(options) end # This operation enables you to delete multiple objects from a bucket # using a single HTTP request. If you know the object keys that you want # to delete, then this operation provides a suitable alternative to # sending individual delete requests, reducing per-request overhead. # # The request can contain a list of up to 1000 keys that you want to # delete. In the XML, you provide the object key names, and optionally, # version IDs if you want to delete a specific version of the object # from a versioning-enabled bucket. For each key, Amazon S3 performs a # delete operation and returns the result of that delete, success or # failure, in the response. Note that if the object specified in the # request is not found, Amazon S3 returns the result as deleted. # # * **Directory buckets** - S3 Versioning isn't enabled and supported # for directory buckets. # # * **Directory buckets** - For directory buckets, you must make # requests for this API operation to the Zonal endpoint. These # endpoints support virtual-hosted-style requests in the format # `https://bucket_name.s3express-az_id.region.amazonaws.com/key-name # `. Path-style requests are not supported. For more information, see # [Regional and Zonal endpoints][1] in the *Amazon S3 User Guide*. # # # # The operation supports two modes for the response: verbose and quiet. # By default, the operation uses verbose mode in which the response # includes the result of deletion of each key in your request. In quiet # mode the response includes only keys where the delete operation # encountered an error. 
For a successful deletion in a quiet mode, the # operation does not return any information about the delete in the # response body. # # When performing this action on an MFA Delete enabled bucket that # attempts to delete any versioned objects, you must include an MFA # token. If you do not provide one, the entire request will fail, even # if there are non-versioned objects you are trying to delete. If you # provide an invalid token, whether there are versioned keys in the # request or not, the entire Multi-Object Delete request will fail. For # information about MFA Delete, see [MFA Delete][2] in the *Amazon S3 # User Guide*. # # **Directory buckets** - MFA delete is not supported by directory # buckets. # # # # Permissions # : * **General purpose bucket permissions** - The following permissions # are required in your policies when your `DeleteObjects` request # includes specific headers. # # * `s3:DeleteObject` - To delete an object from # a bucket, you must always specify the `s3:DeleteObject` # permission. # # * `s3:DeleteObjectVersion` - To delete a # specific version of an object from a versioning-enabled bucket, # you must specify the `s3:DeleteObjectVersion` permission. # # * **Directory bucket permissions** - To grant access to this API # operation on a directory bucket, we recommend that you use the [ # `CreateSession` ][3] API operation for session-based # authorization. Specifically, you grant the # `s3express:CreateSession` permission to the directory bucket in a # bucket policy or an IAM identity-based policy. Then, you make the # `CreateSession` API call on the bucket to obtain a session token. # With the session token in your request header, you can make API # requests to this operation. After the session token expires, you # make another `CreateSession` API call to generate a new session # token for use. The Amazon Web Services CLI and SDKs create and # refresh the session token automatically to avoid service # interruptions when a session expires. For more information about # authorization, see [ `CreateSession` ][3]. # # Content-MD5 request header # : * **General purpose bucket** - The Content-MD5 request header is # required for all Multi-Object Delete requests. Amazon S3 uses the # header value to ensure that your request body has not been altered # in transit. # # * **Directory bucket** - The Content-MD5 request header or an # additional checksum request header (including # `x-amz-checksum-crc32`, `x-amz-checksum-crc32c`, # `x-amz-checksum-sha1`, or `x-amz-checksum-sha256`) is required for # all Multi-Object Delete requests. # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is ` # Bucket_name.s3express-az_id.region.amazonaws.com`.
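#
# Editor's note: the sketch below is a minimal illustration of the
# quiet-mode batching described above, not part of the generated API
# documentation. It assumes `client` is an `Aws::S3::Client`, that
# "examplebucket" is a hypothetical bucket name, and that `keys` is an
# Array of object key strings; it deletes the keys in batches of up to
# 1,000 per request and reports any per-key failures from the response:
#
#   keys.each_slice(1000) do |batch|
#     resp = client.delete_objects({
#       bucket: "examplebucket", # hypothetical bucket name
#       delete: {
#         objects: batch.map { |k| { key: k } },
#         quiet: true, # response lists only the keys that failed to delete
#       },
#     })
#     resp.errors.each do |e|
#       warn "could not delete #{e.key}: #{e.code} #{e.message}"
#     end
#   end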
# # The following operations are related to `DeleteObjects`: # # * [CreateMultipartUpload][4] # # * [UploadPart][5] # # * [CompleteMultipartUpload][6] # # * [ListParts][7] # # * [AbortMultipartUpload][8] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html # # @option params [required, String] :bucket # The bucket name containing the objects to delete. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [required, Types::Delete] :delete # Container for the request. # # @option params [String] :mfa # The concatenation of the authentication device's serial number, a # space, and the value that is displayed on your authentication device. # Required to permanently delete a versioned object if versioning is # configured with MFA delete enabled. 
# # When performing the `DeleteObjects` operation on an MFA delete enabled # bucket, which attempts to delete the specified versioned objects, you # must include an MFA token. If you don't provide an MFA token, the # entire request will fail, even if there are non-versioned objects that # you are trying to delete. If you provide an invalid token, whether # there are versioned object keys in the request or not, the entire # Multi-Object Delete request will fail. For information about MFA # Delete, see [MFA Delete][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [Boolean] :bypass_governance_retention # Specifies whether you want to delete this object even if it has a # Governance-type Object Lock in place. To use this header, you must # have the `s3:BypassGovernanceRetention` permission. # # This functionality is not supported for directory buckets. # # # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum-algorithm` or # `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request # with the HTTP status code `400 Bad Request`. # # For the `x-amz-checksum-algorithm` header, replace `algorithm` with # the supported algorithm from the following list: # # * CRC32 # # * CRC32C # # * SHA1 # # * SHA256 # # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # If the individual checksum value you provide through # `x-amz-checksum-algorithm` doesn't match the checksum algorithm you # set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter and uses the checksum algorithm # that matches the provided value in `x-amz-checksum-algorithm`. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter.
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @return [Types::DeleteObjectsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::DeleteObjectsOutput#deleted #deleted} => Array<Types::DeletedObject> # * {Types::DeleteObjectsOutput#request_charged #request_charged} => String # * {Types::DeleteObjectsOutput#errors #errors} => Array<Types::Error> # # # @example Example: To delete multiple object versions from a versioned bucket # # # The following example deletes objects from a bucket. The request specifies object versions. S3 deletes specific object # # versions and returns the key and versions of deleted objects in the response. # # resp = client.delete_objects({ # bucket: "examplebucket", # delete: { # objects: [ # { # key: "HappyFace.jpg", # version_id: "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b", # }, # { # key: "HappyFace.jpg", # version_id: "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd", # }, # ], # quiet: false, # }, # }) # # resp.to_h outputs the following: # { # deleted: [ # { # key: "HappyFace.jpg", # version_id: "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd", # }, # { # key: "HappyFace.jpg", # version_id: "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b", # }, # ], # } # # @example Example: To delete multiple objects from a versioned bucket # # # The following example deletes objects from a bucket. The bucket is versioned, and the request does not specify the # # object version to delete. In this case, all versions remain in the bucket and S3 adds a delete marker. # # resp = client.delete_objects({ # bucket: "examplebucket", # delete: { # objects: [ # { # key: "objectkey1", # }, # { # key: "objectkey2", # }, # ], # quiet: false, # }, # }) # # resp.to_h outputs the following: # { # deleted: [ # { # delete_marker: true, # delete_marker_version_id: "A._w1z6EFiCF5uhtQMDal9JDkID9tQ7F", # key: "objectkey1", # }, # { # delete_marker: true, # delete_marker_version_id: "iOd_ORxhkKe_e8G8_oSGxt2PjsCZKlkt", # key: "objectkey2", # }, # ], # } # # @example Request syntax with placeholder values # # resp = client.delete_objects({ # bucket: "BucketName", # required # delete: { # required # objects: [ # required # { # key: "ObjectKey", # required # version_id: "ObjectVersionId", # }, # ], # quiet: false, # }, # mfa: "MFA", # request_payer: "requester", # accepts requester # bypass_governance_retention: false, # expected_bucket_owner: "AccountId", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # }) # # @example Response structure # # resp.deleted #=> Array # resp.deleted[0].key #=> String # resp.deleted[0].version_id #=> String # resp.deleted[0].delete_marker #=> Boolean # resp.deleted[0].delete_marker_version_id #=> String # resp.request_charged #=> String, one of "requester" # resp.errors #=> Array # resp.errors[0].key #=> String # resp.errors[0].version_id #=> String # resp.errors[0].code #=> String # resp.errors[0].message #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects AWS API Documentation # # @overload delete_objects(params = {}) # @param [Hash] params ({}) def delete_objects(params = {}, options = {}) req = build_request(:delete_objects, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Removes the `PublicAccessBlock` configuration for an Amazon S3 bucket. # To use this operation, you must have the # `s3:PutBucketPublicAccessBlock` permission. 
For more information about # permissions, see [Permissions Related to Bucket Subresource # Operations][1] and [Managing Access Permissions to Your Amazon S3 # Resources][2]. # # The following operations are related to `DeletePublicAccessBlock`: # # * [Using Amazon S3 Block Public Access][3] # # * [GetPublicAccessBlock][4] # # * [PutPublicAccessBlock][5] # # * [GetBucketPolicyStatus][6] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html # # @option params [required, String] :bucket # The Amazon S3 bucket whose `PublicAccessBlock` configuration you want # to delete. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # @example Request syntax with placeholder values # # resp = client.delete_public_access_block({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock AWS API Documentation # # @overload delete_public_access_block(params = {}) # @param [Hash] params ({}) def delete_public_access_block(params = {}, options = {}) req = build_request(:delete_public_access_block, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # This implementation of the GET action uses the `accelerate` # subresource to return the Transfer Acceleration state of a bucket, # which is either `Enabled` or `Suspended`. Amazon S3 Transfer # Acceleration is a bucket-level feature that enables you to perform # faster data transfers to and from Amazon S3. # # To use this operation, you must have permission to perform the # `s3:GetAccelerateConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][1] and [Managing Access # Permissions to your Amazon S3 Resources][2] in the *Amazon S3 User # Guide*. # # You set the Transfer Acceleration state of an existing bucket to # `Enabled` or `Suspended` by using the # [PutBucketAccelerateConfiguration][3] operation. # # A GET `accelerate` request does not return a state value for a bucket # that has no transfer acceleration state. A bucket has no Transfer # Acceleration state if a state has never been set on the bucket. # # For more information about transfer acceleration, see [Transfer # Acceleration][4] in the Amazon S3 User Guide. 
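#
# Editor's note: the sketch below is a brief illustration of the behavior
# described above, not part of the generated API documentation. It assumes
# `client` is an `Aws::S3::Client` and that "examplebucket" is a
# hypothetical bucket you own: a bucket on which no Transfer Acceleration
# state has ever been set returns no `status` value, and the state can
# then be set with PutBucketAccelerateConfiguration:
#
#   resp = client.get_bucket_accelerate_configuration(bucket: "examplebucket")
#   if resp.status.nil? # no acceleration state has ever been set
#     client.put_bucket_accelerate_configuration({
#       bucket: "examplebucket",
#       accelerate_configuration: { status: "Enabled" }, # or "Suspended"
#     })
#   end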
# # The following operations are related to # `GetBucketAccelerateConfiguration`: # # * [PutBucketAccelerateConfiguration][3] # # ^ # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html # # @option params [required, String] :bucket # The name of the bucket for which the accelerate configuration is # retrieved. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @return [Types::GetBucketAccelerateConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketAccelerateConfigurationOutput#status #status} => String # * {Types::GetBucketAccelerateConfigurationOutput#request_charged #request_charged} => String # # @example Request syntax with placeholder values # # resp = client.get_bucket_accelerate_configuration({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # request_payer: "requester", # accepts requester # }) # # @example Response structure # # resp.status #=> String, one of "Enabled", "Suspended" # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration AWS API Documentation # # @overload get_bucket_accelerate_configuration(params = {}) # @param [Hash] params ({}) def get_bucket_accelerate_configuration(params = {}, options = {}) req = build_request(:get_bucket_accelerate_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # This implementation of the `GET` action uses the `acl` subresource to # return the access control list (ACL) of a bucket. To use `GET` to # return the ACL of the bucket, you must have `READ_ACP` access to # the bucket. If `READ_ACP` permission is granted to the anonymous user, # you can return the ACL of the bucket without using an authorization # header. # # When you use this API operation with an access point, provide the # alias of the access point in place of the bucket name. # # When you use this API operation with an Object Lambda access point, # provide the alias of the Object Lambda access point in place of the # bucket name. If the Object Lambda access point alias in a request is # not valid, the error code `InvalidAccessPointAliasError` is returned.
# For more information about `InvalidAccessPointAliasError`, see [List # of Error Codes][1]. # # If your bucket uses the bucket owner enforced setting for S3 Object # Ownership, requests to read ACLs are still supported and return the # `bucket-owner-full-control` ACL with the owner being the account that # created the bucket. For more information, see [ Controlling object # ownership and disabling ACLs][2] in the *Amazon S3 User Guide*. # # # # The following operations are related to `GetBucketAcl`: # # * [ListObjects][3] # # ^ # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html # # @option params [required, String] :bucket # Specifies the S3 bucket whose ACL is being requested. # # When you use this API operation with an access point, provide the # alias of the access point in place of the bucket name. # # When you use this API operation with an Object Lambda access point, # provide the alias of the Object Lambda access point in place of the # bucket name. If the Object Lambda access point alias in a request is # not valid, the error code `InvalidAccessPointAliasError` is returned. # For more information about `InvalidAccessPointAliasError`, see [List # of Error Codes][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetBucketAclOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketAclOutput#owner #owner} => Types::Owner # * {Types::GetBucketAclOutput#grants #grants} => Array<Types::Grant> # # @example Request syntax with placeholder values # # resp = client.get_bucket_acl({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.owner.display_name #=> String # resp.owner.id #=> String # resp.grants #=> Array # resp.grants[0].grantee.display_name #=> String # resp.grants[0].grantee.email_address #=> String # resp.grants[0].grantee.id #=> String # resp.grants[0].grantee.type #=> String, one of "CanonicalUser", "AmazonCustomerByEmail", "Group" # resp.grants[0].grantee.uri #=> String # resp.grants[0].permission #=> String, one of "FULL_CONTROL", "WRITE", "WRITE_ACP", "READ", "READ_ACP" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl AWS API Documentation # # @overload get_bucket_acl(params = {}) # @param [Hash] params ({}) def get_bucket_acl(params = {}, options = {}) req = build_request(:get_bucket_acl, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # This implementation of the GET action returns an analytics # configuration (identified by the analytics configuration ID) from the # bucket. # # To use this operation, you must have permissions to perform the # `s3:GetAnalyticsConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. 
For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][1] and [Managing Access # Permissions to Your Amazon S3 Resources][2] in the *Amazon S3 User # Guide*. # # For information about the Amazon S3 analytics feature, see [Amazon S3 # Analytics – Storage Class Analysis][3] in the *Amazon S3 User Guide*. # # The following operations are related to # `GetBucketAnalyticsConfiguration`: # # * [DeleteBucketAnalyticsConfiguration][4] # # * [ListBucketAnalyticsConfigurations][5] # # * [PutBucketAnalyticsConfiguration][6] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html # # @option params [required, String] :bucket # The name of the bucket from which an analytics configuration is # retrieved. # # @option params [required, String] :id # The ID that identifies the analytics configuration. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetBucketAnalyticsConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketAnalyticsConfigurationOutput#analytics_configuration #analytics_configuration} => Types::AnalyticsConfiguration # # @example Request syntax with placeholder values # # resp = client.get_bucket_analytics_configuration({ # bucket: "BucketName", # required # id: "AnalyticsId", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.analytics_configuration.id #=> String # resp.analytics_configuration.filter.prefix #=> String # resp.analytics_configuration.filter.tag.key #=> String # resp.analytics_configuration.filter.tag.value #=> String # resp.analytics_configuration.filter.and.prefix #=> String # resp.analytics_configuration.filter.and.tags #=> Array # resp.analytics_configuration.filter.and.tags[0].key #=> String # resp.analytics_configuration.filter.and.tags[0].value #=> String # resp.analytics_configuration.storage_class_analysis.data_export.output_schema_version #=> String, one of "V_1" # resp.analytics_configuration.storage_class_analysis.data_export.destination.s3_bucket_destination.format #=> String, one of "CSV" # resp.analytics_configuration.storage_class_analysis.data_export.destination.s3_bucket_destination.bucket_account_id #=> String # resp.analytics_configuration.storage_class_analysis.data_export.destination.s3_bucket_destination.bucket #=> String # resp.analytics_configuration.storage_class_analysis.data_export.destination.s3_bucket_destination.prefix #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration AWS API Documentation # # @overload get_bucket_analytics_configuration(params = {}) # @param [Hash] params ({}) def get_bucket_analytics_configuration(params = {}, options = {}) req =
build_request(:get_bucket_analytics_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Returns the Cross-Origin Resource Sharing (CORS) configuration # information set for the bucket. # # To use this operation, you must have permission to perform the # `s3:GetBucketCORS` action. By default, the bucket owner has this # permission and can grant it to others. # # When you use this API operation with an access point, provide the # alias of the access point in place of the bucket name. # # When you use this API operation with an Object Lambda access point, # provide the alias of the Object Lambda access point in place of the # bucket name. If the Object Lambda access point alias in a request is # not valid, the error code `InvalidAccessPointAliasError` is returned. # For more information about `InvalidAccessPointAliasError`, see [List # of Error Codes][1]. # # For more information about CORS, see [ Enabling Cross-Origin Resource # Sharing][2]. # # The following operations are related to `GetBucketCors`: # # * [PutBucketCors][3] # # * [DeleteBucketCors][4] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html # # @option params [required, String] :bucket # The bucket name for which to get the cors configuration. # # When you use this API operation with an access point, provide the # alias of the access point in place of the bucket name. # # When you use this API operation with an Object Lambda access point, # provide the alias of the Object Lambda access point in place of the # bucket name. If the Object Lambda access point alias in a request is # not valid, the error code `InvalidAccessPointAliasError` is returned. # For more information about `InvalidAccessPointAliasError`, see [List # of Error Codes][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetBucketCorsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketCorsOutput#cors_rules #cors_rules} => Array<Types::CORSRule> # # # @example Example: To get cors configuration set on a bucket # # # The following example returns cross-origin resource sharing (CORS) configuration set on a bucket. 
# # resp = client.get_bucket_cors({ # bucket: "examplebucket", # }) # # resp.to_h outputs the following: # { # cors_rules: [ # { # allowed_headers: [ # "Authorization", # ], # allowed_methods: [ # "GET", # ], # allowed_origins: [ # "*", # ], # max_age_seconds: 3000, # }, # ], # } # # @example Request syntax with placeholder values # # resp = client.get_bucket_cors({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.cors_rules #=> Array # resp.cors_rules[0].id #=> String # resp.cors_rules[0].allowed_headers #=> Array # resp.cors_rules[0].allowed_headers[0] #=> String # resp.cors_rules[0].allowed_methods #=> Array # resp.cors_rules[0].allowed_methods[0] #=> String # resp.cors_rules[0].allowed_origins #=> Array # resp.cors_rules[0].allowed_origins[0] #=> String # resp.cors_rules[0].expose_headers #=> Array # resp.cors_rules[0].expose_headers[0] #=> String # resp.cors_rules[0].max_age_seconds #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors AWS API Documentation # # @overload get_bucket_cors(params = {}) # @param [Hash] params ({}) def get_bucket_cors(params = {}, options = {}) req = build_request(:get_bucket_cors, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Returns the default encryption configuration for an Amazon S3 bucket. # By default, all buckets have a default encryption configuration that # uses server-side encryption with Amazon S3 managed keys (SSE-S3). For # information about the bucket default encryption feature, see [Amazon # S3 Bucket Default Encryption][1] in the *Amazon S3 User Guide*. # # To use this operation, you must have permission to perform the # `s3:GetEncryptionConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][2] and [Managing Access # Permissions to Your Amazon S3 Resources][3]. # # The following operations are related to `GetBucketEncryption`: # # * [PutBucketEncryption][4] # # * [DeleteBucketEncryption][5] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html # # @option params [required, String] :bucket # The name of the bucket from which the server-side encryption # configuration is retrieved. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). 
# # @return [Types::GetBucketEncryptionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketEncryptionOutput#server_side_encryption_configuration #server_side_encryption_configuration} => Types::ServerSideEncryptionConfiguration # # @example Request syntax with placeholder values # # resp = client.get_bucket_encryption({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.server_side_encryption_configuration.rules #=> Array # resp.server_side_encryption_configuration.rules[0].apply_server_side_encryption_by_default.sse_algorithm #=> String, one of "AES256", "aws:kms", "aws:kms:dsse" # resp.server_side_encryption_configuration.rules[0].apply_server_side_encryption_by_default.kms_master_key_id #=> String # resp.server_side_encryption_configuration.rules[0].bucket_key_enabled #=> Boolean # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption AWS API Documentation # # @overload get_bucket_encryption(params = {}) # @param [Hash] params ({}) def get_bucket_encryption(params = {}, options = {}) req = build_request(:get_bucket_encryption, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Gets the S3 Intelligent-Tiering configuration from the specified # bucket. # # The S3 Intelligent-Tiering storage class is designed to optimize # storage costs by automatically moving data to the most cost-effective # storage access tier, without performance impact or operational # overhead. S3 Intelligent-Tiering delivers automatic cost savings in # three low latency and high throughput access tiers. To get the lowest # storage cost on data that can be accessed in minutes to hours, you can # choose to activate additional archiving capabilities. # # The S3 Intelligent-Tiering storage class is the ideal storage class # for data with unknown, changing, or unpredictable access patterns, # independent of object size or retention period. If the size of an # object is less than 128 KB, it is not monitored and not eligible for # auto-tiering. Smaller objects can be stored, but they are always # charged at the Frequent Access tier rates in the S3 # Intelligent-Tiering storage class. # # For more information, see [Storage class for automatically optimizing # frequently and infrequently accessed objects][1]. # # Operations related to `GetBucketIntelligentTieringConfiguration` # include: # # * [DeleteBucketIntelligentTieringConfiguration][2] # # * [PutBucketIntelligentTieringConfiguration][3] # # * [ListBucketIntelligentTieringConfigurations][4] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html # # @option params [required, String] :bucket # The name of the Amazon S3 bucket whose configuration you want to # modify or retrieve. # # @option params [required, String] :id # The ID used to identify the S3 Intelligent-Tiering configuration. 
# # @return [Types::GetBucketIntelligentTieringConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketIntelligentTieringConfigurationOutput#intelligent_tiering_configuration #intelligent_tiering_configuration} => Types::IntelligentTieringConfiguration # # @example Request syntax with placeholder values # # resp = client.get_bucket_intelligent_tiering_configuration({ # bucket: "BucketName", # required # id: "IntelligentTieringId", # required # }) # # @example Response structure # # resp.intelligent_tiering_configuration.id #=> String # resp.intelligent_tiering_configuration.filter.prefix #=> String # resp.intelligent_tiering_configuration.filter.tag.key #=> String # resp.intelligent_tiering_configuration.filter.tag.value #=> String # resp.intelligent_tiering_configuration.filter.and.prefix #=> String # resp.intelligent_tiering_configuration.filter.and.tags #=> Array # resp.intelligent_tiering_configuration.filter.and.tags[0].key #=> String # resp.intelligent_tiering_configuration.filter.and.tags[0].value #=> String # resp.intelligent_tiering_configuration.status #=> String, one of "Enabled", "Disabled" # resp.intelligent_tiering_configuration.tierings #=> Array # resp.intelligent_tiering_configuration.tierings[0].days #=> Integer # resp.intelligent_tiering_configuration.tierings[0].access_tier #=> String, one of "ARCHIVE_ACCESS", "DEEP_ARCHIVE_ACCESS" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration AWS API Documentation # # @overload get_bucket_intelligent_tiering_configuration(params = {}) # @param [Hash] params ({}) def get_bucket_intelligent_tiering_configuration(params = {}, options = {}) req = build_request(:get_bucket_intelligent_tiering_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Returns an inventory configuration (identified by the inventory # configuration ID) from the bucket. # # To use this operation, you must have permissions to perform the # `s3:GetInventoryConfiguration` action. The bucket owner has this # permission by default and can grant this permission to others. For # more information about permissions, see [Permissions Related to Bucket # Subresource Operations][1] and [Managing Access Permissions to Your # Amazon S3 Resources][2]. # # For information about the Amazon S3 inventory feature, see [Amazon S3 # Inventory][3]. # # The following operations are related to # `GetBucketInventoryConfiguration`: # # * [DeleteBucketInventoryConfiguration][4] # # * [ListBucketInventoryConfigurations][5] # # * [PutBucketInventoryConfiguration][6] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html # # @option params [required, String] :bucket # The name of the bucket containing the inventory configuration to # retrieve. # # @option params [required, String] :id # The ID used to identify the inventory configuration. 
# # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetBucketInventoryConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketInventoryConfigurationOutput#inventory_configuration #inventory_configuration} => Types::InventoryConfiguration # # @example Request syntax with placeholder values # # resp = client.get_bucket_inventory_configuration({ # bucket: "BucketName", # required # id: "InventoryId", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.inventory_configuration.destination.s3_bucket_destination.account_id #=> String # resp.inventory_configuration.destination.s3_bucket_destination.bucket #=> String # resp.inventory_configuration.destination.s3_bucket_destination.format #=> String, one of "CSV", "ORC", "Parquet" # resp.inventory_configuration.destination.s3_bucket_destination.prefix #=> String # resp.inventory_configuration.destination.s3_bucket_destination.encryption.ssekms.key_id #=> String # resp.inventory_configuration.is_enabled #=> Boolean # resp.inventory_configuration.filter.prefix #=> String # resp.inventory_configuration.id #=> String # resp.inventory_configuration.included_object_versions #=> String, one of "All", "Current" # resp.inventory_configuration.optional_fields #=> Array # resp.inventory_configuration.optional_fields[0] #=> String, one of "Size", "LastModifiedDate", "StorageClass", "ETag", "IsMultipartUploaded", "ReplicationStatus", "EncryptionStatus", "ObjectLockRetainUntilDate", "ObjectLockMode", "ObjectLockLegalHoldStatus", "IntelligentTieringAccessTier", "BucketKeyStatus", "ChecksumAlgorithm", "ObjectAccessControlList", "ObjectOwner" # resp.inventory_configuration.schedule.frequency #=> String, one of "Daily", "Weekly" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration AWS API Documentation # # @overload get_bucket_inventory_configuration(params = {}) # @param [Hash] params ({}) def get_bucket_inventory_configuration(params = {}, options = {}) req = build_request(:get_bucket_inventory_configuration, params) req.send_request(options) end # For an updated version of this API, see # [GetBucketLifecycleConfiguration][1]. If you configured a bucket # lifecycle using the `filter` element, you should see the updated # version of this topic. This topic is provided for backward # compatibility. # # This operation is not supported by directory buckets. # # # # Returns the lifecycle configuration information set on the bucket. For # information about lifecycle configuration, see [Object Lifecycle # Management][2]. # # To use this operation, you must have permission to perform the # `s3:GetLifecycleConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][3] and [Managing Access # Permissions to Your Amazon S3 Resources][4]. # # `GetBucketLifecycle` has the following special error: # # * Error code: `NoSuchLifecycleConfiguration` # # * Description: The lifecycle configuration does not exist. 
# # * HTTP Status Code: 404 Not Found # # * SOAP Fault Code Prefix: Client # # The following operations are related to `GetBucketLifecycle`: # # * [GetBucketLifecycleConfiguration][1] # # * [PutBucketLifecycle][5] # # * [DeleteBucketLifecycle][6] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html # # @option params [required, String] :bucket # The name of the bucket for which to get the lifecycle information. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetBucketLifecycleOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketLifecycleOutput#rules #rules} => Array<Types::Rule> # # # @example Example: To get lifecycle configuration on a bucket # # # The following example gets the lifecycle configuration set on the specified bucket. # # resp = client.get_bucket_lifecycle({ # bucket: "acl1", # }) # # resp.to_h outputs the following: # { # rules: [ # { # expiration: { # days: 1, # }, # id: "delete logs", # prefix: "123/", # status: "Enabled", # }, # ], # } # # @example Request syntax with placeholder values # # resp = client.get_bucket_lifecycle({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.rules #=> Array # resp.rules[0].expiration.date #=> Time # resp.rules[0].expiration.days #=> Integer # resp.rules[0].expiration.expired_object_delete_marker #=> Boolean # resp.rules[0].id #=> String # resp.rules[0].prefix #=> String # resp.rules[0].status #=> String, one of "Enabled", "Disabled" # resp.rules[0].transition.date #=> Time # resp.rules[0].transition.days #=> Integer # resp.rules[0].transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR" # resp.rules[0].noncurrent_version_transition.noncurrent_days #=> Integer # resp.rules[0].noncurrent_version_transition.storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR" # resp.rules[0].noncurrent_version_transition.newer_noncurrent_versions #=> Integer # resp.rules[0].noncurrent_version_expiration.noncurrent_days #=> Integer # resp.rules[0].noncurrent_version_expiration.newer_noncurrent_versions #=> Integer # resp.rules[0].abort_incomplete_multipart_upload.days_after_initiation #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle AWS API Documentation # # @overload get_bucket_lifecycle(params = {}) # @param [Hash] params ({}) def get_bucket_lifecycle(params = {}, options = {}) req = build_request(:get_bucket_lifecycle, params) req.send_request(options) end # This operation is not supported by directory buckets.
# # # # Bucket lifecycle configuration now supports specifying a lifecycle # rule using an object key name prefix, one or more object tags, or a # combination of both. Accordingly, this section describes the latest # API. The response describes the new filter element that you can use to # specify a filter to select a subset of objects to which the rule # applies. If you are using a previous version of the lifecycle # configuration, it still works. For the earlier action, see # [GetBucketLifecycle][1]. # # # # Returns the lifecycle configuration information set on the bucket. For # information about lifecycle configuration, see [Object Lifecycle # Management][2]. # # To use this operation, you must have permission to perform the # `s3:GetLifecycleConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][3] and [Managing Access # Permissions to Your Amazon S3 Resources][4]. # # `GetBucketLifecycleConfiguration` has the following special error: # # * Error code: `NoSuchLifecycleConfiguration` # # * Description: The lifecycle configuration does not exist. # # * HTTP Status Code: 404 Not Found # # * SOAP Fault Code Prefix: Client # # The following operations are related to # `GetBucketLifecycleConfiguration`: # # * [GetBucketLifecycle][1] # # * [PutBucketLifecycle][5] # # * [DeleteBucketLifecycle][6] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html # # @option params [required, String] :bucket # The name of the bucket for which to get the lifecycle information. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetBucketLifecycleConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketLifecycleConfigurationOutput#rules #rules} => Array<Types::LifecycleRule> # # # @example Example: To get lifecycle configuration on a bucket # # # The following example retrieves the lifecycle configuration set on a bucket.
# # resp = client.get_bucket_lifecycle_configuration({ # bucket: "examplebucket", # }) # # resp.to_h outputs the following: # { # rules: [ # { # id: "Rule for TaxDocs/", # prefix: "TaxDocs", # status: "Enabled", # transitions: [ # { # days: 365, # storage_class: "STANDARD_IA", # }, # ], # }, # ], # } # # @example Request syntax with placeholder values # # resp = client.get_bucket_lifecycle_configuration({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.rules #=> Array # resp.rules[0].expiration.date #=> Time # resp.rules[0].expiration.days #=> Integer # resp.rules[0].expiration.expired_object_delete_marker #=> Boolean # resp.rules[0].id #=> String # resp.rules[0].prefix #=> String # resp.rules[0].filter.prefix #=> String # resp.rules[0].filter.tag.key #=> String # resp.rules[0].filter.tag.value #=> String # resp.rules[0].filter.object_size_greater_than #=> Integer # resp.rules[0].filter.object_size_less_than #=> Integer # resp.rules[0].filter.and.prefix #=> String # resp.rules[0].filter.and.tags #=> Array # resp.rules[0].filter.and.tags[0].key #=> String # resp.rules[0].filter.and.tags[0].value #=> String # resp.rules[0].filter.and.object_size_greater_than #=> Integer # resp.rules[0].filter.and.object_size_less_than #=> Integer # resp.rules[0].status #=> String, one of "Enabled", "Disabled" # resp.rules[0].transitions #=> Array # resp.rules[0].transitions[0].date #=> Time # resp.rules[0].transitions[0].days #=> Integer # resp.rules[0].transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR" # resp.rules[0].noncurrent_version_transitions #=> Array # resp.rules[0].noncurrent_version_transitions[0].noncurrent_days #=> Integer # resp.rules[0].noncurrent_version_transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "GLACIER_IR" # resp.rules[0].noncurrent_version_transitions[0].newer_noncurrent_versions #=> Integer # resp.rules[0].noncurrent_version_expiration.noncurrent_days #=> Integer # resp.rules[0].noncurrent_version_expiration.newer_noncurrent_versions #=> Integer # resp.rules[0].abort_incomplete_multipart_upload.days_after_initiation #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration AWS API Documentation # # @overload get_bucket_lifecycle_configuration(params = {}) # @param [Hash] params ({}) def get_bucket_lifecycle_configuration(params = {}, options = {}) req = build_request(:get_bucket_lifecycle_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Returns the Region the bucket resides in. You set the bucket's Region # using the `LocationConstraint` request parameter in a `CreateBucket` # request. For more information, see [CreateBucket][1]. # # When you use this API operation with an access point, provide the # alias of the access point in place of the bucket name. # # When you use this API operation with an Object Lambda access point, # provide the alias of the Object Lambda access point in place of the # bucket name. If the Object Lambda access point alias in a request is # not valid, the error code `InvalidAccessPointAliasError` is returned. # For more information about `InvalidAccessPointAliasError`, see [List # of Error Codes][2]. # # We recommend that you use [HeadBucket][3] to return the Region that a # bucket resides in. 
For backward compatibility, Amazon S3 continues to # support GetBucketLocation. # # # # The following operations are related to `GetBucketLocation`: # # * [GetObject][4] # # * [CreateBucket][1] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html # # @option params [required, String] :bucket # The name of the bucket for which to get the location. # # When you use this API operation with an access point, provide the # alias of the access point in place of the bucket name. # # When you use this API operation with an Object Lambda access point, # provide the alias of the Object Lambda access point in place of the # bucket name. If the Object Lambda access point alias in a request is # not valid, the error code `InvalidAccessPointAliasError` is returned. # For more information about `InvalidAccessPointAliasError`, see [List # of Error Codes][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetBucketLocationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketLocationOutput#location_constraint #location_constraint} => String # # # @example Example: To get bucket location # # # The following example returns bucket location. # # resp = client.get_bucket_location({ # bucket: "examplebucket", # }) # # resp.to_h outputs the following: # { # location_constraint: "us-west-2", # } # # @example Request syntax with placeholder values # # resp = client.get_bucket_location({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.location_constraint #=> String, one of "af-south-1", "ap-east-1", "ap-northeast-1", "ap-northeast-2", "ap-northeast-3", "ap-south-1", "ap-south-2", "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", "ca-central-1", "cn-north-1", "cn-northwest-1", "EU", "eu-central-1", "eu-north-1", "eu-south-1", "eu-south-2", "eu-west-1", "eu-west-2", "eu-west-3", "me-south-1", "sa-east-1", "us-east-2", "us-gov-east-1", "us-gov-west-1", "us-west-1", "us-west-2" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation AWS API Documentation # # @overload get_bucket_location(params = {}) # @param [Hash] params ({}) def get_bucket_location(params = {}, options = {}) req = build_request(:get_bucket_location, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Returns the logging status of a bucket and the permissions users have # to view and modify that status. # # The following operations are related to `GetBucketLogging`: # # * [CreateBucket][1] # # * [PutBucketLogging][2] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html # # @option params [required, String] :bucket # The bucket name for which to get the logging information. 
# # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetBucketLoggingOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketLoggingOutput#logging_enabled #logging_enabled} => Types::LoggingEnabled # # @example Request syntax with placeholder values # # resp = client.get_bucket_logging({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.logging_enabled.target_bucket #=> String # resp.logging_enabled.target_grants #=> Array # resp.logging_enabled.target_grants[0].grantee.display_name #=> String # resp.logging_enabled.target_grants[0].grantee.email_address #=> String # resp.logging_enabled.target_grants[0].grantee.id #=> String # resp.logging_enabled.target_grants[0].grantee.type #=> String, one of "CanonicalUser", "AmazonCustomerByEmail", "Group" # resp.logging_enabled.target_grants[0].grantee.uri #=> String # resp.logging_enabled.target_grants[0].permission #=> String, one of "FULL_CONTROL", "READ", "WRITE" # resp.logging_enabled.target_prefix #=> String # resp.logging_enabled.target_object_key_format.partitioned_prefix.partition_date_source #=> String, one of "EventTime", "DeliveryTime" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging AWS API Documentation # # @overload get_bucket_logging(params = {}) # @param [Hash] params ({}) def get_bucket_logging(params = {}, options = {}) req = build_request(:get_bucket_logging, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Gets a metrics configuration (specified by the metrics configuration # ID) from the bucket. Note that this doesn't include the daily storage # metrics. # # To use this operation, you must have permissions to perform the # `s3:GetMetricsConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][1] and [Managing Access # Permissions to Your Amazon S3 Resources][2]. # # For information about CloudWatch request metrics for Amazon S3, see # [Monitoring Metrics with Amazon CloudWatch][3]. # # The following operations are related to # `GetBucketMetricsConfiguration`: # # * [PutBucketMetricsConfiguration][4] # # * [DeleteBucketMetricsConfiguration][5] # # * [ListBucketMetricsConfigurations][6] # # * [Monitoring Metrics with Amazon CloudWatch][3] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html # # @option params [required, String] :bucket # The name of the bucket containing the metrics configuration to # retrieve. 
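#
# A minimal retrieval sketch (assumptions: a configured client; the
# configuration ID "EntireBucket" is hypothetical and must match an ID
# that was previously put on the bucket; the `id` parameter is
# documented just below):
#
#   client = Aws::S3::Client.new
#   cfg = client.get_bucket_metrics_configuration(
#     bucket: "examplebucket",
#     id: "EntireBucket"
#   ).metrics_configuration
#   puts cfg.id
#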
# # @option params [required, String] :id # The ID used to identify the metrics configuration. The ID has a 64 # character limit and can only contain letters, numbers, periods, # dashes, and underscores. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetBucketMetricsConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketMetricsConfigurationOutput#metrics_configuration #metrics_configuration} => Types::MetricsConfiguration # # @example Request syntax with placeholder values # # resp = client.get_bucket_metrics_configuration({ # bucket: "BucketName", # required # id: "MetricsId", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.metrics_configuration.id #=> String # resp.metrics_configuration.filter.prefix #=> String # resp.metrics_configuration.filter.tag.key #=> String # resp.metrics_configuration.filter.tag.value #=> String # resp.metrics_configuration.filter.access_point_arn #=> String # resp.metrics_configuration.filter.and.prefix #=> String # resp.metrics_configuration.filter.and.tags #=> Array # resp.metrics_configuration.filter.and.tags[0].key #=> String # resp.metrics_configuration.filter.and.tags[0].value #=> String # resp.metrics_configuration.filter.and.access_point_arn #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration AWS API Documentation # # @overload get_bucket_metrics_configuration(params = {}) # @param [Hash] params ({}) def get_bucket_metrics_configuration(params = {}, options = {}) req = build_request(:get_bucket_metrics_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # No longer used, see [GetBucketNotificationConfiguration][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html # # @option params [required, String] :bucket # The name of the bucket for which to get the notification # configuration. # # When you use this API operation with an access point, provide the # alias of the access point in place of the bucket name. # # When you use this API operation with an Object Lambda access point, # provide the alias of the Object Lambda access point in place of the # bucket name. If the Object Lambda access point alias in a request is # not valid, the error code `InvalidAccessPointAliasError` is returned. # For more information about `InvalidAccessPointAliasError`, see [List # of Error Codes][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). 
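#
# Because this operation is no longer used, new code would typically
# call the replacement operation instead (sketch; assumes a configured
# client and an illustrative bucket name):
#
#   client = Aws::S3::Client.new
#   cfg = client.get_bucket_notification_configuration(bucket: "examplebucket")
#   cfg.topic_configurations.each { |t| puts t.topic_arn }
#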
# # @return [Types::NotificationConfigurationDeprecated] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::NotificationConfigurationDeprecated#topic_configuration #topic_configuration} => Types::TopicConfigurationDeprecated # * {Types::NotificationConfigurationDeprecated#queue_configuration #queue_configuration} => Types::QueueConfigurationDeprecated # * {Types::NotificationConfigurationDeprecated#cloud_function_configuration #cloud_function_configuration} => Types::CloudFunctionConfiguration # # # @example Example: To get notification configuration set on a bucket # # # The following example returns notification configuration set on a bucket. # # resp = client.get_bucket_notification({ # bucket: "examplebucket", # }) # # resp.to_h outputs the following: # { # queue_configuration: { # event: "s3:ObjectCreated:Put", # events: [ # "s3:ObjectCreated:Put", # ], # id: "MDQ2OGQ4NDEtOTBmNi00YTM4LTk0NzYtZDIwN2I3NWQ1NjIx", # queue: "arn:aws:sqs:us-east-1:acct-id:S3ObjectCreatedEventQueue", # }, # topic_configuration: { # event: "s3:ObjectCreated:Copy", # events: [ # "s3:ObjectCreated:Copy", # ], # id: "YTVkMWEzZGUtNTY1NS00ZmE2LWJjYjktMmRlY2QwODFkNTJi", # topic: "arn:aws:sns:us-east-1:acct-id:S3ObjectCreatedEventTopic", # }, # } # # @example Request syntax with placeholder values # # resp = client.get_bucket_notification({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.topic_configuration.id #=> String # resp.topic_configuration.events #=> Array # resp.topic_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" # resp.topic_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*",
"s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" # resp.topic_configuration.topic #=> String # resp.queue_configuration.id #=> String # resp.queue_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" # resp.queue_configuration.events #=> Array # resp.queue_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" # resp.queue_configuration.queue #=> String # resp.cloud_function_configuration.id #=> String # resp.cloud_function_configuration.event #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" # resp.cloud_function_configuration.events #=> Array # resp.cloud_function_configuration.events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", 
"s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" # resp.cloud_function_configuration.cloud_function #=> String # resp.cloud_function_configuration.invocation_role #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification AWS API Documentation # # @overload get_bucket_notification(params = {}) # @param [Hash] params ({}) def get_bucket_notification(params = {}, options = {}) req = build_request(:get_bucket_notification, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Returns the notification configuration of a bucket. # # If notifications are not enabled on the bucket, the action returns an # empty `NotificationConfiguration` element. # # By default, you must be the bucket owner to read the notification # configuration of a bucket. However, the bucket owner can use a bucket # policy to grant permission to other users to read this configuration # with the `s3:GetBucketNotification` permission. # # When you use this API operation with an access point, provide the # alias of the access point in place of the bucket name. # # When you use this API operation with an Object Lambda access point, # provide the alias of the Object Lambda access point in place of the # bucket name. If the Object Lambda access point alias in a request is # not valid, the error code `InvalidAccessPointAliasError` is returned. # For more information about `InvalidAccessPointAliasError`, see [List # of Error Codes][1]. # # For more information about setting and reading the notification # configuration on a bucket, see [Setting Up Notification of Bucket # Events][2]. For more information about bucket policies, see [Using # Bucket Policies][3]. # # The following action is related to `GetBucketNotification`: # # * [PutBucketNotification][4] # # ^ # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html # # @option params [required, String] :bucket # The name of the bucket for which to get the notification # configuration. # # When you use this API operation with an access point, provide the # alias of the access point in place of the bucket name. # # When you use this API operation with an Object Lambda access point, # provide the alias of the Object Lambda access point in place of the # bucket name. If the Object Lambda access point alias in a request is # not valid, the error code `InvalidAccessPointAliasError` is returned. # For more information about `InvalidAccessPointAliasError`, see [List # of Error Codes][1]. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::NotificationConfiguration] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::NotificationConfiguration#topic_configurations #topic_configurations} => Array<Types::TopicConfiguration> # * {Types::NotificationConfiguration#queue_configurations #queue_configurations} => Array<Types::QueueConfiguration> # * {Types::NotificationConfiguration#lambda_function_configurations #lambda_function_configurations} => Array<Types::LambdaFunctionConfiguration> # * {Types::NotificationConfiguration#event_bridge_configuration #event_bridge_configuration} => Types::EventBridgeConfiguration # # @example Request syntax with placeholder values # # resp = client.get_bucket_notification_configuration({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.topic_configurations #=> Array # resp.topic_configurations[0].id #=> String # resp.topic_configurations[0].topic_arn #=> String # resp.topic_configurations[0].events #=> Array # resp.topic_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" # resp.topic_configurations[0].filter.key.filter_rules #=> Array # resp.topic_configurations[0].filter.key.filter_rules[0].name #=> String, one of "prefix", "suffix" # resp.topic_configurations[0].filter.key.filter_rules[0].value #=> String # resp.queue_configurations #=> Array # resp.queue_configurations[0].id #=> String # resp.queue_configurations[0].queue_arn #=> String # resp.queue_configurations[0].events #=> Array # resp.queue_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", 
"s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" # resp.queue_configurations[0].filter.key.filter_rules #=> Array # resp.queue_configurations[0].filter.key.filter_rules[0].name #=> String, one of "prefix", "suffix" # resp.queue_configurations[0].filter.key.filter_rules[0].value #=> String # resp.lambda_function_configurations #=> Array # resp.lambda_function_configurations[0].id #=> String # resp.lambda_function_configurations[0].lambda_function_arn #=> String # resp.lambda_function_configurations[0].events #=> Array # resp.lambda_function_configurations[0].events[0] #=> String, one of "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", "s3:ObjectRestore:*", "s3:ObjectRestore:Post", "s3:ObjectRestore:Completed", "s3:Replication:*", "s3:Replication:OperationFailedReplication", "s3:Replication:OperationNotTracked", "s3:Replication:OperationMissedThreshold", "s3:Replication:OperationReplicatedAfterThreshold", "s3:ObjectRestore:Delete", "s3:LifecycleTransition", "s3:IntelligentTiering", "s3:ObjectAcl:Put", "s3:LifecycleExpiration:*", "s3:LifecycleExpiration:Delete", "s3:LifecycleExpiration:DeleteMarkerCreated", "s3:ObjectTagging:*", "s3:ObjectTagging:Put", "s3:ObjectTagging:Delete" # resp.lambda_function_configurations[0].filter.key.filter_rules #=> Array # resp.lambda_function_configurations[0].filter.key.filter_rules[0].name #=> String, one of "prefix", "suffix" # resp.lambda_function_configurations[0].filter.key.filter_rules[0].value #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration AWS API Documentation # # @overload get_bucket_notification_configuration(params = {}) # @param [Hash] params ({}) def get_bucket_notification_configuration(params = {}, options = {}) req = build_request(:get_bucket_notification_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Retrieves `OwnershipControls` for an Amazon S3 bucket. To use this # operation, you must have the `s3:GetBucketOwnershipControls` # permission. For more information about Amazon S3 permissions, see # [Specifying permissions in a policy][1]. # # For information about Amazon S3 Object Ownership, see [Using Object # Ownership][2]. # # The following operations are related to `GetBucketOwnershipControls`: # # * PutBucketOwnershipControls # # * DeleteBucketOwnershipControls # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # # @option params [required, String] :bucket # The name of the Amazon S3 bucket whose `OwnershipControls` you want to # retrieve. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). 
# # @return [Types::GetBucketOwnershipControlsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketOwnershipControlsOutput#ownership_controls #ownership_controls} => Types::OwnershipControls # # @example Request syntax with placeholder values # # resp = client.get_bucket_ownership_controls({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.ownership_controls.rules #=> Array # resp.ownership_controls.rules[0].object_ownership #=> String, one of "BucketOwnerPreferred", "ObjectWriter", "BucketOwnerEnforced" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls AWS API Documentation # # @overload get_bucket_ownership_controls(params = {}) # @param [Hash] params ({}) def get_bucket_ownership_controls(params = {}, options = {}) req = build_request(:get_bucket_ownership_controls, params) req.send_request(options) end # Returns the policy of a specified bucket. # # Directory buckets - For directory buckets, you must make # requests for this API operation to the Regional endpoint. These # endpoints support path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. For more information, # see [Regional and Zonal endpoints][1] in the *Amazon S3 User Guide*. # # # # Permissions # # : If you are using an identity other than the root user of the Amazon # Web Services account that owns the bucket, the calling identity must # both have the `GetBucketPolicy` permissions on the specified bucket # and belong to the bucket owner's account in order to use this # operation. # # If you don't have `GetBucketPolicy` permissions, Amazon S3 returns # a `403 Access Denied` error. If you have the correct permissions, # but you're not using an identity that belongs to the bucket # owner's account, Amazon S3 returns a `405 Method Not Allowed` # error. # # To ensure that bucket owners don't inadvertently lock themselves # out of their own buckets, the root principal in a bucket owner's # Amazon Web Services account can perform the `GetBucketPolicy`, # `PutBucketPolicy`, and `DeleteBucketPolicy` API actions, even if # their bucket policy explicitly denies the root principal's access. # Bucket owner root principals can only be blocked from performing # these API actions by VPC endpoint policies and Amazon Web Services # Organizations policies. # # * **General purpose bucket permissions** - The `s3:GetBucketPolicy` # permission is required in a policy. For more information about # general purpose buckets bucket policies, see [Using Bucket # Policies and User Policies][2] in the *Amazon S3 User Guide*. # # * **Directory bucket permissions** - To grant access to this API # operation, you must have the `s3express:GetBucketPolicy` # permission in an IAM identity-based policy instead of a bucket # policy. Cross-account access to this API operation isn't # supported. This operation can only be performed by the Amazon Web # Services account that owns the resource. For more information # about directory bucket policies and permissions, see [Amazon Web # Services Identity and Access Management (IAM) for S3 Express One # Zone][3] in the *Amazon S3 User Guide*. # # Example bucket policies # # : **General purpose buckets example bucket policies** - See [Bucket # policy examples][4] in the *Amazon S3 User Guide*. 
# # **Directory bucket example bucket policies** - See [Example bucket # policies for S3 Express One Zone][5] in the *Amazon S3 User Guide*. # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is # `s3express-control.region.amazonaws.com`. # # The following action is related to `GetBucketPolicy`: # # * [GetObject][6] # # ^ # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html # # @option params [required, String] :bucket # The bucket name to get the bucket policy for. # # Directory buckets - When you use this operation with a # directory bucket, you must use path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. Directory bucket # names must be unique in the chosen Availability Zone. Bucket names # must also follow the format ` bucket_base_name--az_id--x-s3` (for # example, ` DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about # bucket naming restrictions, see [Directory bucket naming rules][1] in # the *Amazon S3 User Guide* # # **Access points** - When you use this API operation with an access # point, provide the alias of the access point in place of the bucket # name. # # **Object Lambda access points** - When you use this API operation with # an Object Lambda access point, provide the alias of the Object Lambda # access point in place of the bucket name. If the Object Lambda access # point alias in a request is not valid, the error code # `InvalidAccessPointAliasError` is returned. For more information about # `InvalidAccessPointAliasError`, see [List of Error Codes][2]. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # For directory buckets, this header is not supported in this API # operation. If you specify this header, the request fails with the HTTP # status code `501 Not Implemented`. # # # # @return [Types::GetBucketPolicyOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketPolicyOutput#policy #policy} => IO # # # @example Example: To get bucket policy # # # The following example returns bucket policy associated with a bucket. 
# # resp = client.get_bucket_policy({ # bucket: "examplebucket", # }) # # resp.to_h outputs the following: # { # policy: "{\"Version\":\"2008-10-17\",\"Id\":\"LogPolicy\",\"Statement\":[{\"Sid\":\"Enables the log delivery group to publish logs to your bucket \",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"111122223333\"},\"Action\":[\"s3:GetBucketAcl\",\"s3:GetObjectAcl\",\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::policytest1/*\",\"arn:aws:s3:::policytest1\"]}]}", # } # # @example Request syntax with placeholder values # # resp = client.get_bucket_policy({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.policy #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy AWS API Documentation # # @overload get_bucket_policy(params = {}) # @param [Hash] params ({}) def get_bucket_policy(params = {}, options = {}, &block) req = build_request(:get_bucket_policy, params) req.send_request(options, &block) end # This operation is not supported by directory buckets. # # # # Retrieves the policy status for an Amazon S3 bucket, indicating # whether the bucket is public. In order to use this operation, you must # have the `s3:GetBucketPolicyStatus` permission. For more information # about Amazon S3 permissions, see [Specifying Permissions in a # Policy][1]. # # For more information about when Amazon S3 considers a bucket public, # see [The Meaning of "Public"][2]. # # The following operations are related to `GetBucketPolicyStatus`: # # * [Using Amazon S3 Block Public Access][3] # # * [GetPublicAccessBlock][4] # # * [PutPublicAccessBlock][5] # # * [DeletePublicAccessBlock][6] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html # # @option params [required, String] :bucket # The name of the Amazon S3 bucket whose policy status you want to # retrieve. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetBucketPolicyStatusOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketPolicyStatusOutput#policy_status #policy_status} => Types::PolicyStatus # # @example Request syntax with placeholder values # # resp = client.get_bucket_policy_status({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.policy_status.is_public #=> Boolean # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus AWS API Documentation # # @overload get_bucket_policy_status(params = {}) # @param [Hash] params ({}) def get_bucket_policy_status(params = {}, options = {}) req = build_request(:get_bucket_policy_status, params) req.send_request(options) end # This operation is not supported by directory buckets. 
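#
# A minimal sketch for the operation described below (assumptions: a
# configured client and a bucket that has a replication configuration):
#
#   client = Aws::S3::Client.new
#   rc = client.get_bucket_replication(bucket: "examplebucket")
#              .replication_configuration
#   rc.rules.each { |r| puts "#{r.id}: #{r.status} -> #{r.destination.bucket}" }
#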
# # # # Returns the replication configuration of a bucket. # # It can take a while for the put or delete of a replication # configuration to propagate to all Amazon S3 systems. Therefore, a get # request soon after a put or delete can return a stale result. # # # # For information about replication configuration, see [Replication][1] # in the *Amazon S3 User Guide*. # # This action requires permissions for the # `s3:GetReplicationConfiguration` action. For more information about # permissions, see [Using Bucket Policies and User Policies][2]. # # If you include the `Filter` element in a replication configuration, # you must also include the `DeleteMarkerReplication` and `Priority` # elements. The response also returns those elements. # # For information about `GetBucketReplication` errors, see [List of # replication-related error codes][3]. # # The following operations are related to `GetBucketReplication`: # # * [PutBucketReplication][4] # # * [DeleteBucketReplication][5] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html # # @option params [required, String] :bucket # The bucket name for which to get the replication information. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetBucketReplicationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketReplicationOutput#replication_configuration #replication_configuration} => Types::ReplicationConfiguration # # # @example Example: To get replication configuration set on a bucket # # # The following example returns the replication configuration set on a bucket.
# # resp = client.get_bucket_replication({ # bucket: "examplebucket", # }) # # resp.to_h outputs the following: # { # replication_configuration: { # role: "arn:aws:iam::acct-id:role/example-role", # rules: [ # { # destination: { # bucket: "arn:aws:s3:::destination-bucket", # }, # id: "MWIwNTkwZmItMTE3MS00ZTc3LWJkZDEtNzRmODQwYzc1OTQy", # prefix: "Tax", # status: "Enabled", # }, # ], # }, # } # # @example Request syntax with placeholder values # # resp = client.get_bucket_replication({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.replication_configuration.role #=> String # resp.replication_configuration.rules #=> Array # resp.replication_configuration.rules[0].id #=> String # resp.replication_configuration.rules[0].priority #=> Integer # resp.replication_configuration.rules[0].prefix #=> String # resp.replication_configuration.rules[0].filter.prefix #=> String # resp.replication_configuration.rules[0].filter.tag.key #=> String # resp.replication_configuration.rules[0].filter.tag.value #=> String # resp.replication_configuration.rules[0].filter.and.prefix #=> String # resp.replication_configuration.rules[0].filter.and.tags #=> Array # resp.replication_configuration.rules[0].filter.and.tags[0].key #=> String # resp.replication_configuration.rules[0].filter.and.tags[0].value #=> String # resp.replication_configuration.rules[0].status #=> String, one of "Enabled", "Disabled" # resp.replication_configuration.rules[0].source_selection_criteria.sse_kms_encrypted_objects.status #=> String, one of "Enabled", "Disabled" # resp.replication_configuration.rules[0].source_selection_criteria.replica_modifications.status #=> String, one of "Enabled", "Disabled" # resp.replication_configuration.rules[0].existing_object_replication.status #=> String, one of "Enabled", "Disabled" # resp.replication_configuration.rules[0].destination.bucket #=> String # resp.replication_configuration.rules[0].destination.account #=> String # resp.replication_configuration.rules[0].destination.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW", "EXPRESS_ONEZONE" # resp.replication_configuration.rules[0].destination.access_control_translation.owner #=> String, one of "Destination" # resp.replication_configuration.rules[0].destination.encryption_configuration.replica_kms_key_id #=> String # resp.replication_configuration.rules[0].destination.replication_time.status #=> String, one of "Enabled", "Disabled" # resp.replication_configuration.rules[0].destination.replication_time.time.minutes #=> Integer # resp.replication_configuration.rules[0].destination.metrics.status #=> String, one of "Enabled", "Disabled" # resp.replication_configuration.rules[0].destination.metrics.event_threshold.minutes #=> Integer # resp.replication_configuration.rules[0].delete_marker_replication.status #=> String, one of "Enabled", "Disabled" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication AWS API Documentation # # @overload get_bucket_replication(params = {}) # @param [Hash] params ({}) def get_bucket_replication(params = {}, options = {}) req = build_request(:get_bucket_replication, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Returns the request payment configuration of a bucket. To use this # version of the operation, you must be the bucket owner. 
For more # information, see [Requester Pays Buckets][1]. # # The following operations are related to `GetBucketRequestPayment`: # # * [ListObjects][2] # # ^ # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html # # @option params [required, String] :bucket # The name of the bucket for which to get the request payment # configuration. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetBucketRequestPaymentOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketRequestPaymentOutput#payer #payer} => String # # # @example Example: To get bucket request payment configuration # # # The following example retrieves the request payment configuration of a bucket. # # resp = client.get_bucket_request_payment({ # bucket: "examplebucket", # }) # # resp.to_h outputs the following: # { # payer: "BucketOwner", # } # # @example Request syntax with placeholder values # # resp = client.get_bucket_request_payment({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.payer #=> String, one of "Requester", "BucketOwner" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment AWS API Documentation # # @overload get_bucket_request_payment(params = {}) # @param [Hash] params ({}) def get_bucket_request_payment(params = {}, options = {}) req = build_request(:get_bucket_request_payment, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Returns the tag set associated with the bucket. # # To use this operation, you must have permission to perform the # `s3:GetBucketTagging` action. By default, the bucket owner has this # permission and can grant this permission to others. # # `GetBucketTagging` has the following special error: # # * Error code: `NoSuchTagSet` # # * Description: There is no tag set associated with the bucket. # # ^ # # The following operations are related to `GetBucketTagging`: # # * [PutBucketTagging][1] # # * [DeleteBucketTagging][2] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html # # @option params [required, String] :bucket # The name of the bucket for which to get the tagging information. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied).
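#
# A common convenience is folding the tag set into a Hash (sketch;
# assumes a configured client and a bucket that has tags, since a
# bucket without tags raises the `NoSuchTagSet` error noted above):
#
#   client = Aws::S3::Client.new
#   tags = client.get_bucket_tagging(bucket: "examplebucket")
#                .tag_set.map { |t| [t.key, t.value] }.to_h
#   tags["key1"] #=> "value1"
#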
# # @return [Types::GetBucketTaggingOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketTaggingOutput#tag_set #tag_set} => Array&lt;Types::Tag&gt; # # # @example Example: To get the tag set associated with a bucket # # # The following example returns the tag set associated with a bucket. # # resp = client.get_bucket_tagging({ # bucket: "examplebucket", # }) # # resp.to_h outputs the following: # { # tag_set: [ # { # key: "key1", # value: "value1", # }, # { # key: "key2", # value: "value2", # }, # ], # } # # @example Request syntax with placeholder values # # resp = client.get_bucket_tagging({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.tag_set #=> Array # resp.tag_set[0].key #=> String # resp.tag_set[0].value #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging AWS API Documentation # # @overload get_bucket_tagging(params = {}) # @param [Hash] params ({}) def get_bucket_tagging(params = {}, options = {}) req = build_request(:get_bucket_tagging, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Returns the versioning state of a bucket. # # To retrieve the versioning state of a bucket, you must be the bucket # owner. # # This implementation also returns the MFA Delete status of the # versioning state. If the MFA Delete status is `enabled`, the bucket # owner must use an authentication device to change the versioning state # of the bucket. # # The following operations are related to `GetBucketVersioning`: # # * [GetObject][1] # # * [PutObject][2] # # * [DeleteObject][3] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html # # @option params [required, String] :bucket # The name of the bucket for which to get the versioning information. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetBucketVersioningOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketVersioningOutput#status #status} => String # * {Types::GetBucketVersioningOutput#mfa_delete #mfa_delete} => String # # # @example Example: To get bucket versioning configuration # # # The following example retrieves the bucket versioning configuration.
# # resp = client.get_bucket_versioning({ # bucket: "examplebucket", # }) # # resp.to_h outputs the following: # { # mfa_delete: "Disabled", # status: "Enabled", # } # # @example Request syntax with placeholder values # # resp = client.get_bucket_versioning({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.status #=> String, one of "Enabled", "Suspended" # resp.mfa_delete #=> String, one of "Enabled", "Disabled" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning AWS API Documentation # # @overload get_bucket_versioning(params = {}) # @param [Hash] params ({}) def get_bucket_versioning(params = {}, options = {}) req = build_request(:get_bucket_versioning, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Returns the website configuration for a bucket. To host a website on # Amazon S3, you can configure a bucket as a website by adding a website # configuration. For more information about hosting websites, see # [Hosting Websites on Amazon S3][1]. # # This GET action requires the `S3:GetBucketWebsite` permission. By # default, only the bucket owner can read the bucket website # configuration. However, bucket owners can allow other users to read # the website configuration by writing a bucket policy granting them the # `S3:GetBucketWebsite` permission. # # The following operations are related to `GetBucketWebsite`: # # * [DeleteBucketWebsite][2] # # * [PutBucketWebsite][3] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html # # @option params [required, String] :bucket # The bucket name for which to get the website configuration. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetBucketWebsiteOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetBucketWebsiteOutput#redirect_all_requests_to #redirect_all_requests_to} => Types::RedirectAllRequestsTo # * {Types::GetBucketWebsiteOutput#index_document #index_document} => Types::IndexDocument # * {Types::GetBucketWebsiteOutput#error_document #error_document} => Types::ErrorDocument # * {Types::GetBucketWebsiteOutput#routing_rules #routing_rules} => Array&lt;Types::RoutingRule&gt; # # # @example Example: To get bucket website configuration # # # The following example retrieves the website configuration of a bucket.
# # resp = client.get_bucket_website({ # bucket: "examplebucket", # }) # # resp.to_h outputs the following: # { # error_document: { # key: "error.html", # }, # index_document: { # suffix: "index.html", # }, # } # # @example Request syntax with placeholder values # # resp = client.get_bucket_website({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.redirect_all_requests_to.host_name #=> String # resp.redirect_all_requests_to.protocol #=> String, one of "http", "https" # resp.index_document.suffix #=> String # resp.error_document.key #=> String # resp.routing_rules #=> Array # resp.routing_rules[0].condition.http_error_code_returned_equals #=> String # resp.routing_rules[0].condition.key_prefix_equals #=> String # resp.routing_rules[0].redirect.host_name #=> String # resp.routing_rules[0].redirect.http_redirect_code #=> String # resp.routing_rules[0].redirect.protocol #=> String, one of "http", "https" # resp.routing_rules[0].redirect.replace_key_prefix_with #=> String # resp.routing_rules[0].redirect.replace_key_with #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite AWS API Documentation # # @overload get_bucket_website(params = {}) # @param [Hash] params ({}) def get_bucket_website(params = {}, options = {}) req = build_request(:get_bucket_website, params) req.send_request(options) end # Retrieves an object from Amazon S3. # # In the `GetObject` request, specify the full key name for the object. # # **General purpose buckets** - Both the virtual-hosted-style requests # and the path-style requests are supported. For a virtual hosted-style # request example, if you have the object # `photos/2006/February/sample.jpg`, specify the object key name as # `/photos/2006/February/sample.jpg`. For a path-style request example, # if you have the object `photos/2006/February/sample.jpg` in the bucket # named `examplebucket`, specify the object key name as # `/examplebucket/photos/2006/February/sample.jpg`. For more information # about request types, see [HTTP Host Header Bucket Specification][1] in # the *Amazon S3 User Guide*. # # **Directory buckets** - Only virtual-hosted-style requests are # supported. For a virtual hosted-style request example, if you have the # object `photos/2006/February/sample.jpg` in the bucket named # `examplebucket--use1-az5--x-s3`, specify the object key name as # `/photos/2006/February/sample.jpg`. Also, when you make requests to # this API operation, your requests are sent to the Zonal endpoint. # These endpoints support virtual-hosted-style requests in the format # `https://bucket_name.s3express-az_id.region.amazonaws.com/key-name `. # Path-style requests are not supported. For more information, see # [Regional and Zonal endpoints][2] in the *Amazon S3 User Guide*. # # Permissions # : * **General purpose bucket permissions** - You must have the # required permissions in a policy. To use `GetObject`, you must # have the `READ` access to the object (or version). If you grant # `READ` access to the anonymous user, the `GetObject` operation # returns the object without using an authorization header. For more # information, see [Specifying permissions in a policy][3] in the # *Amazon S3 User Guide*. # # If you include a `versionId` in your request header, you must have # the `s3:GetObjectVersion` permission to access a specific version # of an object. The `s3:GetObject` permission is not required in # this scenario. 
# # If you request the current version of an object without a specific # `versionId` in the request header, only the `s3:GetObject` # permission is required. The `s3:GetObjectVersion` permission is # not required in this scenario. # # If the object that you request doesn’t exist, the error that # Amazon S3 returns depends on whether you also have the # `s3:ListBucket` permission. # # * If you have the `s3:ListBucket` permission on the bucket, Amazon # S3 returns an HTTP status code `404 Not Found` error. # # * If you don’t have the `s3:ListBucket` permission, Amazon S3 # returns an HTTP status code `403 Access Denied` error. # # * **Directory bucket permissions** - To grant access to this API # operation on a directory bucket, we recommend that you use the [ # `CreateSession` ][4] API operation for session-based # authorization. Specifically, you grant the # `s3express:CreateSession` permission to the directory bucket in a # bucket policy or an IAM identity-based policy. Then, you make the # `CreateSession` API call on the bucket to obtain a session token. # With the session token in your request header, you can make API # requests to this operation. After the session token expires, you # make another `CreateSession` API call to generate a new session # token for use. The Amazon Web Services CLI and SDKs create a # session and refresh the session token automatically to avoid # service interruptions when a session expires. For more information # about authorization, see [ `CreateSession` ][4]. # # Storage classes # # : If the object you are retrieving is stored in the S3 Glacier # Flexible Retrieval storage class, the S3 Glacier Deep Archive # storage class, the S3 Intelligent-Tiering Archive Access tier, or # the S3 Intelligent-Tiering Deep Archive Access tier, before you can # retrieve the object you must first restore a copy using # [RestoreObject][5]. Otherwise, this operation returns an # `InvalidObjectState` error. For information about restoring archived # objects, see [Restoring Archived Objects][6] in the *Amazon S3 User # Guide*. # # Directory buckets - For directory buckets, only the S3 # Express One Zone storage class is supported to store newly created # objects. Unsupported storage class values won't write a destination # object and will respond with the HTTP status code `400 Bad Request`. # # Encryption # # : Encryption request headers, like `x-amz-server-side-encryption`, # should not be sent for `GetObject` requests if your object uses # server-side encryption with Amazon S3 managed encryption keys # (SSE-S3), server-side encryption with Key Management Service (KMS) # keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web # Services KMS keys (DSSE-KMS). If you include the header in your # `GetObject` requests for an object that uses these types of keys, # you’ll get an HTTP `400 Bad Request` error. # # Overriding response header values through the request # # : There are times when you want to override certain response header # values of a `GetObject` response. For example, you might override # the `Content-Disposition` response header value through your # `GetObject` request. # # You can override values for a set of response headers. These # modified response header values are included only in a successful # response, that is, when the HTTP status code `200 OK` is returned. # The headers you can override using the following query parameters in # the request are a subset of the headers that Amazon S3 accepts when # you create an object.
# # The response headers that you can override for the `GetObject` # response are `Cache-Control`, `Content-Disposition`, # `Content-Encoding`, `Content-Language`, `Content-Type`, and # `Expires`. # # To override values for a set of response headers in the `GetObject` # response, you can use the following query parameters in the request. # # * `response-cache-control` # # * `response-content-disposition` # # * `response-content-encoding` # # * `response-content-language` # # * `response-content-type` # # * `response-expires` # # When you use these parameters, you must sign the request by using # either an Authorization header or a presigned URL. These parameters # cannot be used with an unsigned (anonymous) request. # # # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is ` # Bucket_name.s3express-az_id.region.amazonaws.com`. # # The following operations are related to `GetObject`: # # * [ListBuckets][7] # # * [GetObjectAcl][8] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html # # @option params [String, IO] :response_target # Where to write response data, file path, or IO object. # # @option params [required, String] :bucket # The bucket name containing the object. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # **Object Lambda access points** - When you use this action with an # Object Lambda access point, you must direct requests to the Object # Lambda access point hostname. The Object Lambda access point hostname # takes the form # *AccessPointName*-*AccountId*.s3-object-lambda.*Region*.amazonaws.com. # # Access points and Object Lambda access points are not supported by # directory buckets. 
#
#   **S3 on Outposts** - When you use this action with Amazon S3 on
#   Outposts, you must direct requests to the S3 on Outposts hostname.
#   The S3 on Outposts hostname takes the form
#   `AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`.
#   When you use this action with S3 on Outposts through the Amazon Web
#   Services SDKs, you provide the Outposts access point ARN in place of
#   the bucket name. For more information about S3 on Outposts ARNs, see
#   [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*.
#
#   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
#   [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
#   [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
#
# @option params [String] :if_match
#   Return the object only if its entity tag (ETag) is the same as the
#   one specified in this header; otherwise, return a `412 Precondition
#   Failed` error.
#
#   If both the `If-Match` and `If-Unmodified-Since` headers are present
#   in the request, and the `If-Match` condition evaluates to `true`
#   while the `If-Unmodified-Since` condition evaluates to `false`, then
#   S3 returns `200 OK` and the requested data.
#
#   For more information about conditional requests, see [RFC 7232][1].
#
#   [1]: https://tools.ietf.org/html/rfc7232
#
# @option params [Time,DateTime,Date,Integer,String] :if_modified_since
#   Return the object only if it has been modified since the specified
#   time; otherwise, return a `304 Not Modified` error.
#
#   If both the `If-None-Match` and `If-Modified-Since` headers are
#   present in the request, and the `If-None-Match` condition evaluates
#   to `false` while the `If-Modified-Since` condition evaluates to
#   `true`, then S3 returns the `304 Not Modified` status code.
#
#   For more information about conditional requests, see [RFC 7232][1].
#
#   [1]: https://tools.ietf.org/html/rfc7232
#
# @option params [String] :if_none_match
#   Return the object only if its entity tag (ETag) is different from
#   the one specified in this header; otherwise, return a `304 Not
#   Modified` error.
#
#   If both the `If-None-Match` and `If-Modified-Since` headers are
#   present in the request, and the `If-None-Match` condition evaluates
#   to `false` while the `If-Modified-Since` condition evaluates to
#   `true`, then S3 returns the `304 Not Modified` HTTP status code.
#
#   For more information about conditional requests, see [RFC 7232][1].
#
#   [1]: https://tools.ietf.org/html/rfc7232
#
# @option params [Time,DateTime,Date,Integer,String] :if_unmodified_since
#   Return the object only if it has not been modified since the
#   specified time; otherwise, return a `412 Precondition Failed` error.
#
#   If both the `If-Match` and `If-Unmodified-Since` headers are present
#   in the request, and the `If-Match` condition evaluates to `true`
#   while the `If-Unmodified-Since` condition evaluates to `false`, then
#   S3 returns `200 OK` and the requested data.
#
#   For more information about conditional requests, see [RFC 7232][1].
#
#   [1]: https://tools.ietf.org/html/rfc7232
#
# @option params [required, String] :key
#   Key of the object to get.
#
# @option params [String] :range
#   Downloads the specified byte range of an object. For more
#   information about the HTTP Range header, see
#   [https://www.rfc-editor.org/rfc/rfc9110.html#name-range][1].
#
#   Amazon S3 doesn't support retrieving multiple ranges of data per
#   `GET` request.
# # # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-range # # @option params [String] :response_cache_control # Sets the `Cache-Control` header of the response. # # @option params [String] :response_content_disposition # Sets the `Content-Disposition` header of the response. # # @option params [String] :response_content_encoding # Sets the `Content-Encoding` header of the response. # # @option params [String] :response_content_language # Sets the `Content-Language` header of the response. # # @option params [String] :response_content_type # Sets the `Content-Type` header of the response. # # @option params [Time,DateTime,Date,Integer,String] :response_expires # Sets the `Expires` header of the response. # # @option params [String] :version_id # Version ID used to reference a specific version of the object. # # By default, the `GetObject` operation returns the current version of # an object. To return a different version, use the `versionId` # subresource. # # * If you include a `versionId` in your request header, you must have # the `s3:GetObjectVersion` permission to access a specific version of # an object. The `s3:GetObject` permission is not required in this # scenario. # # * If you request the current version of an object without a specific # `versionId` in the request header, only the `s3:GetObject` # permission is required. The `s3:GetObjectVersion` permission is not # required in this scenario. # # * **Directory buckets** - S3 Versioning isn't enabled and supported # for directory buckets. For this API operation, only the `null` value # of the version ID is supported by directory buckets. You can only # specify `null` to the `versionId` query parameter in the request. # # # # For more information about versioning, see [PutBucketVersioning][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html # # @option params [String] :sse_customer_algorithm # Specifies the algorithm to use when decrypting the object (for # example, `AES256`). # # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object in # Amazon S3, then when you GET the object, you must use the following # headers: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # # @option params [String] :sse_customer_key # Specifies the customer-provided encryption key that you originally # provided for Amazon S3 to encrypt the data before storing it. This # value is used to decrypt the object when recovering it and must match # the one used when storing the data. The key must be appropriate for # use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. 
# # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object in # Amazon S3, then when you GET the object, you must use the following # headers: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # # @option params [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the customer-provided encryption # key according to RFC 1321. Amazon S3 uses this header for a message # integrity check to ensure that the encryption key was transmitted # without error. # # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object in # Amazon S3, then when you GET the object, you must use the following # headers: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [Integer] :part_number # Part number of the object being read. This is a positive integer # between 1 and 10,000. Effectively performs a 'ranged' GET request # for the part specified. Useful for downloading just a part of an # object. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @option params [String] :checksum_mode # To retrieve the checksum, this mode must be enabled. 
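#
#   For example, a minimal sketch (bucket and key are placeholders; a
#   checksum is returned only if one was stored with the object):
#
#       resp = client.get_object({
#         bucket: "amzn-s3-demo-bucket",
#         key: "object-key",
#         checksum_mode: "ENABLED",
#       })
#       resp.checksum_sha256 #=> Base64-encoded SHA-256, if present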
# # @return [Types::GetObjectOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetObjectOutput#body #body} => IO # * {Types::GetObjectOutput#delete_marker #delete_marker} => Boolean # * {Types::GetObjectOutput#accept_ranges #accept_ranges} => String # * {Types::GetObjectOutput#expiration #expiration} => String # * {Types::GetObjectOutput#restore #restore} => String # * {Types::GetObjectOutput#last_modified #last_modified} => Time # * {Types::GetObjectOutput#content_length #content_length} => Integer # * {Types::GetObjectOutput#etag #etag} => String # * {Types::GetObjectOutput#checksum_crc32 #checksum_crc32} => String # * {Types::GetObjectOutput#checksum_crc32c #checksum_crc32c} => String # * {Types::GetObjectOutput#checksum_sha1 #checksum_sha1} => String # * {Types::GetObjectOutput#checksum_sha256 #checksum_sha256} => String # * {Types::GetObjectOutput#missing_meta #missing_meta} => Integer # * {Types::GetObjectOutput#version_id #version_id} => String # * {Types::GetObjectOutput#cache_control #cache_control} => String # * {Types::GetObjectOutput#content_disposition #content_disposition} => String # * {Types::GetObjectOutput#content_encoding #content_encoding} => String # * {Types::GetObjectOutput#content_language #content_language} => String # * {Types::GetObjectOutput#content_range #content_range} => String # * {Types::GetObjectOutput#content_type #content_type} => String # * {Types::GetObjectOutput#expires #expires} => Time # * {Types::GetObjectOutput#expires_string #expires_string} => String # * {Types::GetObjectOutput#website_redirect_location #website_redirect_location} => String # * {Types::GetObjectOutput#server_side_encryption #server_side_encryption} => String # * {Types::GetObjectOutput#metadata #metadata} => Hash<String,String> # * {Types::GetObjectOutput#sse_customer_algorithm #sse_customer_algorithm} => String # * {Types::GetObjectOutput#sse_customer_key_md5 #sse_customer_key_md5} => String # * {Types::GetObjectOutput#ssekms_key_id #ssekms_key_id} => String # * {Types::GetObjectOutput#bucket_key_enabled #bucket_key_enabled} => Boolean # * {Types::GetObjectOutput#storage_class #storage_class} => String # * {Types::GetObjectOutput#request_charged #request_charged} => String # * {Types::GetObjectOutput#replication_status #replication_status} => String # * {Types::GetObjectOutput#parts_count #parts_count} => Integer # * {Types::GetObjectOutput#tag_count #tag_count} => Integer # * {Types::GetObjectOutput#object_lock_mode #object_lock_mode} => String # * {Types::GetObjectOutput#object_lock_retain_until_date #object_lock_retain_until_date} => Time # * {Types::GetObjectOutput#object_lock_legal_hold_status #object_lock_legal_hold_status} => String # # # @example Example: To retrieve a byte range of an object # # # The following example retrieves an object for an S3 bucket. The request specifies the range header to retrieve a # # specific byte range. # # resp = client.get_object({ # bucket: "examplebucket", # key: "SampleFile.txt", # range: "bytes=0-9", # }) # # resp.to_h outputs the following: # { # accept_ranges: "bytes", # content_length: 10, # content_range: "bytes 0-9/43", # content_type: "text/plain", # etag: "\"0d94420ffd0bc68cd3d152506b97a9cc\"", # last_modified: Time.parse("Thu, 09 Oct 2014 22:57:28 GMT"), # metadata: { # }, # version_id: "null", # } # # @example Example: To retrieve an object # # # The following example retrieves an object for an S3 bucket. 
# # resp = client.get_object({ # bucket: "examplebucket", # key: "HappyFace.jpg", # }) # # resp.to_h outputs the following: # { # accept_ranges: "bytes", # content_length: 3191, # content_type: "image/jpeg", # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", # last_modified: Time.parse("Thu, 15 Dec 2016 01:19:41 GMT"), # metadata: { # }, # tag_count: 2, # version_id: "null", # } # # @example Download an object to disk # # stream object directly to disk # resp = s3.get_object( # response_target: '/path/to/file', # bucket: 'bucket-name', # key: 'object-key') # # # you can still access other response data # resp.metadata #=> { ... } # resp.etag #=> "..." # # @example Download object into memory # # omit :response_target to download to a StringIO in memory # resp = s3.get_object(bucket: 'bucket-name', key: 'object-key') # # # call #read or #string on the response body # resp.body.read # #=> '...' # # @example Streaming data to a block # # WARNING: yielding data to a block disables retries of networking errors # # However truncation of the body will be retried automatically using a range request # File.open('/path/to/file', 'wb') do |file| # s3.get_object(bucket: 'bucket-name', key: 'object-key') do |chunk, headers| # # headers['content-length'] # file.write(chunk) # end # end # # @example Request syntax with placeholder values # # resp = client.get_object({ # bucket: "BucketName", # required # if_match: "IfMatch", # if_modified_since: Time.now, # if_none_match: "IfNoneMatch", # if_unmodified_since: Time.now, # key: "ObjectKey", # required # range: "Range", # response_cache_control: "ResponseCacheControl", # response_content_disposition: "ResponseContentDisposition", # response_content_encoding: "ResponseContentEncoding", # response_content_language: "ResponseContentLanguage", # response_content_type: "ResponseContentType", # response_expires: Time.now, # version_id: "ObjectVersionId", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # request_payer: "requester", # accepts requester # part_number: 1, # expected_bucket_owner: "AccountId", # checksum_mode: "ENABLED", # accepts ENABLED # }) # # @example Response structure # # resp.body #=> IO # resp.delete_marker #=> Boolean # resp.accept_ranges #=> String # resp.expiration #=> String # resp.restore #=> String # resp.last_modified #=> Time # resp.content_length #=> Integer # resp.etag #=> String # resp.checksum_crc32 #=> String # resp.checksum_crc32c #=> String # resp.checksum_sha1 #=> String # resp.checksum_sha256 #=> String # resp.missing_meta #=> Integer # resp.version_id #=> String # resp.cache_control #=> String # resp.content_disposition #=> String # resp.content_encoding #=> String # resp.content_language #=> String # resp.content_range #=> String # resp.content_type #=> String # resp.expires #=> Time # resp.expires_string #=> String # resp.website_redirect_location #=> String # resp.server_side_encryption #=> String, one of "AES256", "aws:kms", "aws:kms:dsse" # resp.metadata #=> Hash # resp.metadata["MetadataKey"] #=> String # resp.sse_customer_algorithm #=> String # resp.sse_customer_key_md5 #=> String # resp.ssekms_key_id #=> String # resp.bucket_key_enabled #=> Boolean # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW", "EXPRESS_ONEZONE" # resp.request_charged #=> String, one of "requester" # resp.replication_status #=> 
String, one of "COMPLETE", "PENDING", "FAILED", "REPLICA", "COMPLETED" # resp.parts_count #=> Integer # resp.tag_count #=> Integer # resp.object_lock_mode #=> String, one of "GOVERNANCE", "COMPLIANCE" # resp.object_lock_retain_until_date #=> Time # resp.object_lock_legal_hold_status #=> String, one of "ON", "OFF" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject AWS API Documentation # # @overload get_object(params = {}) # @param [Hash] params ({}) def get_object(params = {}, options = {}, &block) req = build_request(:get_object, params) req.send_request(options, &block) end # This operation is not supported by directory buckets. # # # # Returns the access control list (ACL) of an object. To use this # operation, you must have `s3:GetObjectAcl` permissions or `READ_ACP` # access to the object. For more information, see [Mapping of ACL # permissions and access policy permissions][1] in the *Amazon S3 User # Guide* # # This functionality is not supported for Amazon S3 on Outposts. # # By default, GET returns ACL information about the current version of # an object. To return ACL information about a different version, use # the versionId subresource. # # If your bucket uses the bucket owner enforced setting for S3 Object # Ownership, requests to read ACLs are still supported and return the # `bucket-owner-full-control` ACL with the owner being the account that # created the bucket. For more information, see [ Controlling object # ownership and disabling ACLs][2] in the *Amazon S3 User Guide*. # # # # The following operations are related to `GetObjectAcl`: # # * [GetObject][3] # # * [GetObjectAttributes][4] # # * [DeleteObject][5] # # * [PutObject][6] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html # # @option params [required, String] :bucket # The bucket name that contains the object for which to get the ACL # information. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # # @option params [required, String] :key # The key of the object for which to get the ACL information. # # @option params [String] :version_id # Version ID used to reference a specific version of the object. # # This functionality is not supported for directory buckets. # # # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. 
If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetObjectAclOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetObjectAclOutput#owner #owner} => Types::Owner # * {Types::GetObjectAclOutput#grants #grants} => Array<Types::Grant> # * {Types::GetObjectAclOutput#request_charged #request_charged} => String # # # @example Example: To retrieve object ACL # # # The following example retrieves access control list (ACL) of an object. # # resp = client.get_object_acl({ # bucket: "examplebucket", # key: "HappyFace.jpg", # }) # # resp.to_h outputs the following: # { # grants: [ # { # grantee: { # display_name: "owner-display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # type: "CanonicalUser", # }, # permission: "WRITE", # }, # { # grantee: { # display_name: "owner-display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # type: "CanonicalUser", # }, # permission: "WRITE_ACP", # }, # { # grantee: { # display_name: "owner-display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # type: "CanonicalUser", # }, # permission: "READ", # }, # { # grantee: { # display_name: "owner-display-name", # id: "852b113eexamplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # type: "CanonicalUser", # }, # permission: "READ_ACP", # }, # ], # owner: { # display_name: "owner-display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # }, # } # # @example Request syntax with placeholder values # # resp = client.get_object_acl({ # bucket: "BucketName", # required # key: "ObjectKey", # required # version_id: "ObjectVersionId", # request_payer: "requester", # accepts requester # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.owner.display_name #=> String # resp.owner.id #=> String # resp.grants #=> Array # resp.grants[0].grantee.display_name #=> String # resp.grants[0].grantee.email_address #=> String # resp.grants[0].grantee.id #=> String # resp.grants[0].grantee.type #=> String, one of "CanonicalUser", "AmazonCustomerByEmail", "Group" # resp.grants[0].grantee.uri #=> String # resp.grants[0].permission #=> String, one of "FULL_CONTROL", "WRITE", "WRITE_ACP", "READ", "READ_ACP" # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl AWS API Documentation # # @overload get_object_acl(params = {}) # @param [Hash] params ({}) def get_object_acl(params = {}, options = {}) req = build_request(:get_object_acl, params) req.send_request(options) end # Retrieves all the metadata from an object without returning the object # itself. 
# This operation is useful if you're interested only in an object's
# metadata.
#
# `GetObjectAttributes` combines the functionality of `HeadObject` and
# `ListParts`. All of the data returned with each of those individual
# calls can be returned with a single call to `GetObjectAttributes`.
#
# **Directory buckets** - For directory buckets, you must make requests
# for this API operation to the Zonal endpoint. These endpoints support
# virtual-hosted-style requests in the format
# `https://bucket_name.s3express-az_id.region.amazonaws.com/key-name`.
# Path-style requests are not supported. For more information, see
# [Regional and Zonal endpoints][1] in the *Amazon S3 User Guide*.
#
# Permissions
# : * **General purpose bucket permissions** - To use
#     `GetObjectAttributes`, you must have READ access to the object.
#     The permissions that you need to use this operation depend on
#     whether the bucket is versioned. If the bucket is versioned, you
#     need both the `s3:GetObjectVersion` and
#     `s3:GetObjectVersionAttributes` permissions for this operation. If
#     the bucket is not versioned, you need the `s3:GetObject` and
#     `s3:GetObjectAttributes` permissions. For more information, see
#     [Specifying Permissions in a Policy][2] in the *Amazon S3 User
#     Guide*. If the object that you request does not exist, the error
#     Amazon S3 returns depends on whether you also have the
#     `s3:ListBucket` permission.
#
#     * If you have the `s3:ListBucket` permission on the bucket, Amazon
#       S3 returns an HTTP status code `404 Not Found` ("no such key")
#       error.
#
#     * If you don't have the `s3:ListBucket` permission, Amazon S3
#       returns an HTTP status code `403 Forbidden` ("access denied")
#       error.
#
#   * **Directory bucket permissions** - To grant access to this API
#     operation on a directory bucket, we recommend that you use the
#     [ `CreateSession` ][3] API operation for session-based
#     authorization. Specifically, you grant the
#     `s3express:CreateSession` permission to the directory bucket in a
#     bucket policy or an IAM identity-based policy. Then, you make the
#     `CreateSession` API call on the bucket to obtain a session token.
#     With the session token in your request header, you can make API
#     requests to this operation. After the session token expires, you
#     make another `CreateSession` API call to generate a new session
#     token for use. The Amazon Web Services CLI and SDKs create a
#     session and refresh the session token automatically to avoid
#     service interruptions when a session expires. For more information
#     about authorization, see [ `CreateSession` ][3].
#
# Encryption
# : Encryption request headers, like `x-amz-server-side-encryption`,
#   should not be sent for `HEAD` requests if your object uses
#   server-side encryption with Key Management Service (KMS) keys
#   (SSE-KMS), dual-layer server-side encryption with Amazon Web
#   Services KMS keys (DSSE-KMS), or server-side encryption with Amazon
#   S3 managed encryption keys (SSE-S3). The
#   `x-amz-server-side-encryption` header is used when you `PUT` an
#   object to S3 and want to specify the encryption method. If you
#   include this header in a `GET` request for an object that uses these
#   types of keys, you'll get an HTTP `400 Bad Request` error. This is
#   because the encryption method can't be changed when you retrieve
#   the object.
# # # # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object # in Amazon S3, then when you retrieve the metadata from the object, # you must use the following headers to provide the encryption key for # the server to be able to retrieve the object's metadata. The # headers are: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][4] in the *Amazon S3 User # Guide*. # # **Directory bucket permissions** - For directory buckets, only # server-side encryption with Amazon S3 managed keys (SSE-S3) # (`AES256`) is supported. # # # # Versioning # # : **Directory buckets** - S3 Versioning isn't enabled and supported # for directory buckets. For this API operation, only the `null` value # of the version ID is supported by directory buckets. You can only # specify `null` to the `versionId` query parameter in the request. # # Conditional request headers # # : Consider the following when using request headers: # # * If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows, then Amazon S3 returns the HTTP # status code `200 OK` and the data requested: # # * `If-Match` condition evaluates to `true`. # # * `If-Unmodified-Since` condition evaluates to `false`. # # For more information about conditional requests, see [RFC # 7232][5]. # # * If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows, then Amazon S3 returns the HTTP # status code `304 Not Modified`: # # * `If-None-Match` condition evaluates to `false`. # # * `If-Modified-Since` condition evaluates to `true`. # # For more information about conditional requests, see [RFC # 7232][5]. # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is ` # Bucket_name.s3express-az_id.region.amazonaws.com`. # # The following actions are related to `GetObjectAttributes`: # # * [GetObject][6] # # * [GetObjectAcl][7] # # * [GetObjectLegalHold][8] # # * [GetObjectLockConfiguration][9] # # * [GetObjectRetention][10] # # * [GetObjectTagging][11] # # * [HeadObject][12] # # * [ListParts][13] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # [5]: https://tools.ietf.org/html/rfc7232 # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html # [12]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html # [13]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html # # @option params [required, String] :bucket # The name of the bucket that contains the object. 
# # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [required, String] :key # The object key. # # @option params [String] :version_id # The version ID used to reference a specific version of the object. # # S3 Versioning isn't enabled and supported for directory buckets. For # this API operation, only the `null` value of the version ID is # supported by directory buckets. You can only specify `null` to the # `versionId` query parameter in the request. # # # # @option params [Integer] :max_parts # Sets the maximum number of parts to return. # # @option params [Integer] :part_number_marker # Specifies the part after which listing should begin. Only parts with # higher part numbers will be listed. # # @option params [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, AES256). # # This functionality is not supported for directory buckets. # # # # @option params [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded; Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported for directory buckets. 
# # # # @option params [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported for directory buckets. # # # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @option params [required, Array] :object_attributes # Specifies the fields at the root level that you want returned in the # response. Fields that you do not specify are not returned. # # @return [Types::GetObjectAttributesOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetObjectAttributesOutput#delete_marker #delete_marker} => Boolean # * {Types::GetObjectAttributesOutput#last_modified #last_modified} => Time # * {Types::GetObjectAttributesOutput#version_id #version_id} => String # * {Types::GetObjectAttributesOutput#request_charged #request_charged} => String # * {Types::GetObjectAttributesOutput#etag #etag} => String # * {Types::GetObjectAttributesOutput#checksum #checksum} => Types::Checksum # * {Types::GetObjectAttributesOutput#object_parts #object_parts} => Types::GetObjectAttributesParts # * {Types::GetObjectAttributesOutput#storage_class #storage_class} => String # * {Types::GetObjectAttributesOutput#object_size #object_size} => Integer # # @example Request syntax with placeholder values # # resp = client.get_object_attributes({ # bucket: "BucketName", # required # key: "ObjectKey", # required # version_id: "ObjectVersionId", # max_parts: 1, # part_number_marker: 1, # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # request_payer: "requester", # accepts requester # expected_bucket_owner: "AccountId", # object_attributes: ["ETag"], # required, accepts ETag, Checksum, ObjectParts, StorageClass, ObjectSize # }) # # @example Response structure # # resp.delete_marker #=> Boolean # resp.last_modified #=> Time # resp.version_id #=> String # resp.request_charged #=> String, one of "requester" # resp.etag #=> String # resp.checksum.checksum_crc32 #=> String # resp.checksum.checksum_crc32c #=> String # resp.checksum.checksum_sha1 #=> String # resp.checksum.checksum_sha256 #=> String # resp.object_parts.total_parts_count #=> Integer # resp.object_parts.part_number_marker #=> Integer # resp.object_parts.next_part_number_marker #=> Integer # resp.object_parts.max_parts #=> Integer # resp.object_parts.is_truncated #=> Boolean # resp.object_parts.parts #=> Array # resp.object_parts.parts[0].part_number #=> Integer # 
resp.object_parts.parts[0].size #=> Integer # resp.object_parts.parts[0].checksum_crc32 #=> String # resp.object_parts.parts[0].checksum_crc32c #=> String # resp.object_parts.parts[0].checksum_sha1 #=> String # resp.object_parts.parts[0].checksum_sha256 #=> String # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW", "EXPRESS_ONEZONE" # resp.object_size #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributes AWS API Documentation # # @overload get_object_attributes(params = {}) # @param [Hash] params ({}) def get_object_attributes(params = {}, options = {}) req = build_request(:get_object_attributes, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Gets an object's current legal hold status. For more information, see # [Locking Objects][1]. # # This functionality is not supported for Amazon S3 on Outposts. # # The following action is related to `GetObjectLegalHold`: # # * [GetObjectAttributes][2] # # ^ # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html # # @option params [required, String] :bucket # The bucket name containing the object whose legal hold status you want # to retrieve. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # # @option params [required, String] :key # The key name for the object whose legal hold status you want to # retrieve. # # @option params [String] :version_id # The version ID of the object whose legal hold status you want to # retrieve. # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). 
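#
# @example Checking legal hold status (a sketch; bucket and key are placeholders)
#
#   # An object version under an "ON" legal hold cannot be deleted or
#   # overwritten until the hold is removed.
#   resp = client.get_object_legal_hold({
#     bucket: "amzn-s3-demo-bucket",
#     key: "contract.pdf",
#   })
#   resp.legal_hold.status #=> "ON" or "OFF"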
# # @return [Types::GetObjectLegalHoldOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetObjectLegalHoldOutput#legal_hold #legal_hold} => Types::ObjectLockLegalHold # # @example Request syntax with placeholder values # # resp = client.get_object_legal_hold({ # bucket: "BucketName", # required # key: "ObjectKey", # required # version_id: "ObjectVersionId", # request_payer: "requester", # accepts requester # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.legal_hold.status #=> String, one of "ON", "OFF" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold AWS API Documentation # # @overload get_object_legal_hold(params = {}) # @param [Hash] params ({}) def get_object_legal_hold(params = {}, options = {}) req = build_request(:get_object_legal_hold, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Gets the Object Lock configuration for a bucket. The rule specified in # the Object Lock configuration will be applied by default to every new # object placed in the specified bucket. For more information, see # [Locking Objects][1]. # # The following action is related to `GetObjectLockConfiguration`: # # * [GetObjectAttributes][2] # # ^ # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html # # @option params [required, String] :bucket # The bucket whose Object Lock configuration you want to retrieve. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). 
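#
# @example Reading a bucket's default retention rule (a sketch; the bucket name is a placeholder)
#
#   resp = client.get_object_lock_configuration({
#     bucket: "amzn-s3-demo-bucket",
#   })
#   # A rule is present only if a default retention period was
#   # configured; retention is expressed in either days or years,
#   # never both.
#   if (rule = resp.object_lock_configuration.rule)
#     rule.default_retention.mode #=> "GOVERNANCE" or "COMPLIANCE"
#     rule.default_retention.days || rule.default_retention.years
#   end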
# # @return [Types::GetObjectLockConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetObjectLockConfigurationOutput#object_lock_configuration #object_lock_configuration} => Types::ObjectLockConfiguration # # @example Request syntax with placeholder values # # resp = client.get_object_lock_configuration({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.object_lock_configuration.object_lock_enabled #=> String, one of "Enabled" # resp.object_lock_configuration.rule.default_retention.mode #=> String, one of "GOVERNANCE", "COMPLIANCE" # resp.object_lock_configuration.rule.default_retention.days #=> Integer # resp.object_lock_configuration.rule.default_retention.years #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration AWS API Documentation # # @overload get_object_lock_configuration(params = {}) # @param [Hash] params ({}) def get_object_lock_configuration(params = {}, options = {}) req = build_request(:get_object_lock_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Retrieves an object's retention settings. For more information, see # [Locking Objects][1]. # # This functionality is not supported for Amazon S3 on Outposts. # # The following action is related to `GetObjectRetention`: # # * [GetObjectAttributes][2] # # ^ # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html # # @option params [required, String] :bucket # The bucket name containing the object whose retention settings you # want to retrieve. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # # @option params [required, String] :key # The key name for the object whose retention settings you want to # retrieve. # # @option params [String] :version_id # The version ID for the object whose retention settings you want to # retrieve. # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. 
If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetObjectRetentionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetObjectRetentionOutput#retention #retention} => Types::ObjectLockRetention # # @example Request syntax with placeholder values # # resp = client.get_object_retention({ # bucket: "BucketName", # required # key: "ObjectKey", # required # version_id: "ObjectVersionId", # request_payer: "requester", # accepts requester # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.retention.mode #=> String, one of "GOVERNANCE", "COMPLIANCE" # resp.retention.retain_until_date #=> Time # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention AWS API Documentation # # @overload get_object_retention(params = {}) # @param [Hash] params ({}) def get_object_retention(params = {}, options = {}) req = build_request(:get_object_retention, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Returns the tag-set of an object. You send the GET request against the # tagging subresource associated with the object. # # To use this operation, you must have permission to perform the # `s3:GetObjectTagging` action. By default, the GET action returns # information about current version of an object. For a versioned # bucket, you can have multiple versions of an object in your bucket. To # retrieve tags of any other version, use the versionId query parameter. # You also need permission for the `s3:GetObjectVersionTagging` action. # # By default, the bucket owner has this permission and can grant this # permission to others. # # For information about the Amazon S3 object tagging feature, see # [Object Tagging][1]. # # The following actions are related to `GetObjectTagging`: # # * [DeleteObjectTagging][2] # # * [GetObjectAttributes][3] # # * [PutObjectTagging][4] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html # # @option params [required, String] :bucket # The bucket name containing the object for which to get the tagging # information. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][1] in the *Amazon S3 User Guide*. # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. 
# When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][2] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [required, String] :key # Object key for which to get the tagging information. # # @option params [String] :version_id # The versionId of the object for which to get the tagging information. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @return [Types::GetObjectTaggingOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetObjectTaggingOutput#version_id #version_id} => String # * {Types::GetObjectTaggingOutput#tag_set #tag_set} => Array<Types::Tag> # # # @example Example: To retrieve tag set of an object # # # The following example retrieves tag set of an object. # # resp = client.get_object_tagging({ # bucket: "examplebucket", # key: "HappyFace.jpg", # }) # # resp.to_h outputs the following: # { # tag_set: [ # { # key: "Key4", # value: "Value4", # }, # { # key: "Key3", # value: "Value3", # }, # ], # version_id: "null", # } # # @example Example: To retrieve tag set of a specific object version # # # The following example retrieves tag set of an object. The request specifies object version. # # resp = client.get_object_tagging({ # bucket: "examplebucket", # key: "exampleobject", # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI", # }) # # resp.to_h outputs the following: # { # tag_set: [ # { # key: "Key1", # value: "Value1", # }, # ], # version_id: "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI", # } # # @example Request syntax with placeholder values # # resp = client.get_object_tagging({ # bucket: "BucketName", # required # key: "ObjectKey", # required # version_id: "ObjectVersionId", # expected_bucket_owner: "AccountId", # request_payer: "requester", # accepts requester # }) # # @example Response structure # # resp.version_id #=> String # resp.tag_set #=> Array # resp.tag_set[0].key #=> String # resp.tag_set[0].value #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging AWS API Documentation # # @overload get_object_tagging(params = {}) # @param [Hash] params ({}) def get_object_tagging(params = {}, options = {}) req = build_request(:get_object_tagging, params) req.send_request(options) end # This operation is not supported by directory buckets. 
# # # # Returns torrent files from a bucket. BitTorrent can save you bandwidth # when you're distributing large files. # # You can get torrent only for objects that are less than 5 GB in size, # and that are not encrypted using server-side encryption with a # customer-provided encryption key. # # # # To use GET, you must have READ access to the object. # # This functionality is not supported for Amazon S3 on Outposts. # # The following action is related to `GetObjectTorrent`: # # * [GetObject][1] # # ^ # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html # # @option params [String, IO] :response_target # Where to write response data, file path, or IO object. # # @option params [required, String] :bucket # The name of the bucket containing the object for which to get the # torrent files. # # @option params [required, String] :key # The object key for which to get the information. # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetObjectTorrentOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetObjectTorrentOutput#body #body} => IO # * {Types::GetObjectTorrentOutput#request_charged #request_charged} => String # # # @example Example: To retrieve torrent files for an object # # # The following example retrieves torrent files of an object. # # resp = client.get_object_torrent({ # bucket: "examplebucket", # key: "HappyFace.jpg", # }) # # resp.to_h outputs the following: # { # } # # @example Request syntax with placeholder values # # resp = client.get_object_torrent({ # bucket: "BucketName", # required # key: "ObjectKey", # required # request_payer: "requester", # accepts requester # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.body #=> IO # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent AWS API Documentation # # @overload get_object_torrent(params = {}) # @param [Hash] params ({}) def get_object_torrent(params = {}, options = {}, &block) req = build_request(:get_object_torrent, params) req.send_request(options, &block) end # This operation is not supported by directory buckets. # # # # Retrieves the `PublicAccessBlock` configuration for an Amazon S3 # bucket. To use this operation, you must have the # `s3:GetBucketPublicAccessBlock` permission. For more information about # Amazon S3 permissions, see [Specifying Permissions in a Policy][1]. 
# # When Amazon S3 evaluates the `PublicAccessBlock` configuration for a # bucket or an object, it checks the `PublicAccessBlock` configuration # for both the bucket (or the bucket that contains the object) and the # bucket owner's account. If the `PublicAccessBlock` settings are # different between the bucket and the account, Amazon S3 uses the most # restrictive combination of the bucket-level and account-level # settings. # # For more information about when Amazon S3 considers a bucket or an # object public, see [The Meaning of "Public"][2]. # # The following operations are related to `GetPublicAccessBlock`: # # * [Using Amazon S3 Block Public Access][3] # # * [PutPublicAccessBlock][4] # # * [GetPublicAccessBlock][5] # # * [DeletePublicAccessBlock][6] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html # # @option params [required, String] :bucket # The name of the Amazon S3 bucket whose `PublicAccessBlock` # configuration you want to retrieve. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::GetPublicAccessBlockOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::GetPublicAccessBlockOutput#public_access_block_configuration #public_access_block_configuration} => Types::PublicAccessBlockConfiguration # # @example Request syntax with placeholder values # # resp = client.get_public_access_block({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.public_access_block_configuration.block_public_acls #=> Boolean # resp.public_access_block_configuration.ignore_public_acls #=> Boolean # resp.public_access_block_configuration.block_public_policy #=> Boolean # resp.public_access_block_configuration.restrict_public_buckets #=> Boolean # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock AWS API Documentation # # @overload get_public_access_block(params = {}) # @param [Hash] params ({}) def get_public_access_block(params = {}, options = {}) req = build_request(:get_public_access_block, params) req.send_request(options) end # You can use this operation to determine if a bucket exists and if you # have permission to access it. The action returns a `200 OK` if the # bucket exists and you have permission to access it. # # If the bucket does not exist or you do not have permission to access # it, the `HEAD` request returns a generic `400 Bad Request`, `403 # Forbidden` or `404 Not Found` code. A message body is not included, so # you cannot determine the exception beyond these error codes. # # Directory buckets - You must make requests for this API # operation to the Zonal endpoint. 
These endpoints support # virtual-hosted-style requests in the format # `https://bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. For more information, see [Regional and # Zonal endpoints][1] in the *Amazon S3 User Guide*. # # # # Authentication and authorization # # : All `HeadBucket` requests must be authenticated and signed by using # IAM credentials (access key ID and secret access key for the IAM # identities). All headers with the `x-amz-` prefix, including # `x-amz-copy-source`, must be signed. For more information, see [REST # Authentication][2]. # # **Directory bucket** - You must use IAM credentials to authenticate # and authorize your access to the `HeadBucket` API operation, instead # of using the temporary security credentials through the # `CreateSession` API operation. # # The Amazon Web Services CLI or SDKs handle authentication and # authorization on your behalf. # # Permissions # # : # # * **General purpose bucket permissions** - To use this operation, # you must have permissions to perform the `s3:ListBucket` action. # The bucket owner has this permission by default and can grant this # permission to others. For more information about permissions, see # [Managing access permissions to your Amazon S3 resources][3] in # the *Amazon S3 User Guide*. # # * **Directory bucket permissions** - You must have the # `s3express:CreateSession` permission in the # `Action` element of a policy. By default, the session is in the # `ReadWrite` mode. If you want to restrict the access, you can # explicitly set the `s3express:SessionMode` condition key to # `ReadOnly` on the bucket. # # For more information about example bucket policies, see [Example # bucket policies for S3 Express One Zone][4] and [Amazon Web # Services Identity and Access Management (IAM) identity-based # policies for S3 Express One Zone][5] in the *Amazon S3 User # Guide*. # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is ` # Bucket_name.s3express-az_id.region.amazonaws.com`. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html # # @option params [required, String] :bucket # The bucket name. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
# When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # **Object Lambda access points** - When you use this API operation with # an Object Lambda access point, provide the alias of the Object Lambda # access point in place of the bucket name. If the Object Lambda access # point alias in a request is not valid, the error code # `InvalidAccessPointAliasError` is returned. For more information about # `InvalidAccessPointAliasError`, see [List of Error Codes][3]. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][4] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::HeadBucketOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::HeadBucketOutput#bucket_location_type #bucket_location_type} => String # * {Types::HeadBucketOutput#bucket_location_name #bucket_location_name} => String # * {Types::HeadBucketOutput#bucket_region #bucket_region} => String # * {Types::HeadBucketOutput#access_point_alias #access_point_alias} => Boolean # # # @example Example: To determine if bucket exists # # # This operation checks to see if a bucket exists. # # resp = client.head_bucket({ # bucket: "acl1", # }) # # @example Request syntax with placeholder values # # resp = client.head_bucket({ # bucket: "BucketName", # required # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.bucket_location_type #=> String, one of "AvailabilityZone" # resp.bucket_location_name #=> String # resp.bucket_region #=> String # resp.access_point_alias #=> Boolean # # # The following waiters are defined for this operation (see {Client#wait_until} for detailed usage): # # * bucket_exists # * bucket_not_exists # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket AWS API Documentation # # @overload head_bucket(params = {}) # @param [Hash] params ({}) def head_bucket(params = {}, options = {}) req = build_request(:head_bucket, params) req.send_request(options) end # The `HEAD` operation retrieves metadata from an object without # returning the object itself. This operation is useful if you're # interested only in an object's metadata. 
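#
# For instance, a minimal sketch (assuming a configured `client`; the
# bucket and key names are placeholders):
#
#     resp = client.head_object(bucket: "examplebucket", key: "HappyFace.jpg")
#     resp.content_length # => object size in bytes, with no body downloaded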
# # A `HEAD` request has the same options as a `GET` operation on an # object. The response is identical to the `GET` response except that # there is no response body. Because of this, if the `HEAD` request # generates an error, it returns a generic code, such as `400 Bad # Request`, `403 Forbidden`, `404 Not Found`, `405 Method Not Allowed`, # `412 Precondition Failed`, or `304 Not Modified`. It's not possible # to retrieve the exact exception of these error codes. # # Request headers are limited to 8 KB in size. For more information, see # [Common Request Headers][1]. # # **Directory buckets** - For directory buckets, you must make requests # for this API operation to the Zonal endpoint. These endpoints support # virtual-hosted-style requests in the format # `https://bucket_name.s3express-az_id.region.amazonaws.com/key-name `. # Path-style requests are not supported. For more information, see # [Regional and Zonal endpoints][2] in the *Amazon S3 User Guide*. # # # # Permissions # # : # # * **General purpose bucket permissions** - To use `HEAD`, you must # have the `s3:GetObject` permission. You need the relevant read # object (or version) permission for this operation. For more # information, see [Actions, resources, and condition keys for # Amazon S3][3] in the *Amazon S3 User Guide*. # # If the object you request doesn't exist, the error that Amazon S3 # returns depends on whether you also have the `s3:ListBucket` # permission. # # * If you have the `s3:ListBucket` permission on the bucket, Amazon # S3 returns an HTTP status code `404 Not Found` error. # # * If you don’t have the `s3:ListBucket` permission, Amazon S3 # returns an HTTP status code `403 Forbidden` error. # # * **Directory bucket permissions** - To grant access to this API # operation on a directory bucket, we recommend that you use the [ # `CreateSession` ][4] API operation for session-based # authorization. Specifically, you grant the # `s3express:CreateSession` permission to the directory bucket in a # bucket policy or an IAM identity-based policy. Then, you make the # `CreateSession` API call on the bucket to obtain a session token. # With the session token in your request header, you can make API # requests to this operation. After the session token expires, you # make another `CreateSession` API call to generate a new session # token for use. Amazon Web Services CLI or SDKs create session and # refresh the session token automatically to avoid service # interruptions when a session expires. For more information about # authorization, see [ `CreateSession` ][4]. # # Encryption # : Encryption request headers, like `x-amz-server-side-encryption`, # should not be sent for `HEAD` requests if your object uses # server-side encryption with Key Management Service (KMS) keys # (SSE-KMS), dual-layer server-side encryption with Amazon Web # Services KMS keys (DSSE-KMS), or server-side encryption with Amazon # S3 managed encryption keys (SSE-S3). The # `x-amz-server-side-encryption` header is used when you `PUT` an # object to S3 and want to specify the encryption method. If you # include this header in a `HEAD` request for an object that uses # these types of keys, you’ll get an HTTP `400 Bad Request` error. # It's because the encryption method can't be changed when you # retrieve the object. 
# # # # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object # in Amazon S3, then when you retrieve the metadata from the object, # you must use the following headers to provide the encryption key for # the server to be able to retrieve the object's metadata. The # headers are: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][5] in the *Amazon S3 User # Guide*. # # **Directory bucket permissions** - For directory buckets, only # server-side encryption with Amazon S3 managed keys (SSE-S3) # (`AES256`) is supported. # # # # Versioning # : * If the current version of the object is a delete marker, Amazon S3 # behaves as if the object was deleted and includes # `x-amz-delete-marker: true` in the response. # # * If the specified version is a delete marker, the response returns # a `405 Method Not Allowed` error and the `Last-Modified: # timestamp` response header. # # * **Directory buckets** - Delete marker is not supported by # directory buckets. # # * **Directory buckets** - S3 Versioning isn't enabled and supported # for directory buckets. For this API operation, only the `null` # value of the version ID is supported by directory buckets. You can # only specify `null` to the `versionId` query parameter in the # request. # # # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is ` # Bucket_name.s3express-az_id.region.amazonaws.com`. # # The following actions are related to `HeadObject`: # # * [GetObject][6] # # * [GetObjectAttributes][7] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html # # @option params [required, String] :bucket # The name of the bucket that contains the object. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. 
# When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [String] :if_match # Return the object only if its entity tag (ETag) is the same as the one # specified; otherwise, return a 412 (precondition failed) error. # # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: # # * `If-Match` condition evaluates to `true`, and; # # * `If-Unmodified-Since` condition evaluates to `false`; # # Then Amazon S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # # @option params [Time,DateTime,Date,Integer,String] :if_modified_since # Return the object only if it has been modified since the specified # time; otherwise, return a 304 (not modified) error. # # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows: # # * `If-None-Match` condition evaluates to `false`, and; # # * `If-Modified-Since` condition evaluates to `true`; # # Then Amazon S3 returns the `304 Not Modified` response code. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # # @option params [String] :if_none_match # Return the object only if its entity tag (ETag) is different from the # one specified; otherwise, return a 304 (not modified) error. # # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows: # # * `If-None-Match` condition evaluates to `false`, and; # # * `If-Modified-Since` condition evaluates to `true`; # # Then Amazon S3 returns the `304 Not Modified` response code. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # # @option params [Time,DateTime,Date,Integer,String] :if_unmodified_since # Return the object only if it has not been modified since the specified # time; otherwise, return a 412 (precondition failed) error. # # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: # # * `If-Match` condition evaluates to `true`, and; # # * `If-Unmodified-Since` condition evaluates to `false`; # # Then Amazon S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. 
# # # # [1]: https://tools.ietf.org/html/rfc7232 # # @option params [required, String] :key # The object key. # # @option params [String] :range # HeadObject returns only the metadata for an object. If the Range is # satisfiable, only the `ContentLength` is affected in the response. If # the Range is not satisfiable, S3 returns a `416 - Requested Range Not # Satisfiable` error. # # @option params [String] :version_id # Version ID used to reference a specific version of the object. # # For directory buckets in this API operation, only the `null` value of # the version ID is supported. # # # # @option params [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, AES256). # # This functionality is not supported for directory buckets. # # # # @option params [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded; Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported for directory buckets. # # # # @option params [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported for directory buckets. # # # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [Integer] :part_number # Part number of the object being read. This is a positive integer # between 1 and 10,000. Effectively performs a 'ranged' HEAD request # for the part specified. Useful for querying the size of the part and # the number of parts in this object. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @option params [String] :checksum_mode # To retrieve the checksum, this parameter must be enabled. # # In addition, if you enable `ChecksumMode` and the object is encrypted # with Amazon Web Services Key Management Service (Amazon Web Services # KMS), you must have permission to use the `kms:Decrypt` action for the # request to succeed.
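#
# @example Example: Reading metadata of an SSE-C encrypted object (illustrative sketch)
#
#   # A minimal sketch, assuming a configured `client` and that
#   # `secret_key` holds the same 256-bit key used when the object was
#   # stored; bucket and key names are placeholders.
#   resp = client.head_object({
#     bucket: "examplebucket",
#     key: "exampleobject",
#     sse_customer_algorithm: "AES256",
#     sse_customer_key: secret_key, # the SDK derives the MD5 header for you
#     checksum_mode: "ENABLED",
#   })
#   resp.content_length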
# # @return [Types::HeadObjectOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::HeadObjectOutput#delete_marker #delete_marker} => Boolean # * {Types::HeadObjectOutput#accept_ranges #accept_ranges} => String # * {Types::HeadObjectOutput#expiration #expiration} => String # * {Types::HeadObjectOutput#restore #restore} => String # * {Types::HeadObjectOutput#archive_status #archive_status} => String # * {Types::HeadObjectOutput#last_modified #last_modified} => Time # * {Types::HeadObjectOutput#content_length #content_length} => Integer # * {Types::HeadObjectOutput#checksum_crc32 #checksum_crc32} => String # * {Types::HeadObjectOutput#checksum_crc32c #checksum_crc32c} => String # * {Types::HeadObjectOutput#checksum_sha1 #checksum_sha1} => String # * {Types::HeadObjectOutput#checksum_sha256 #checksum_sha256} => String # * {Types::HeadObjectOutput#etag #etag} => String # * {Types::HeadObjectOutput#missing_meta #missing_meta} => Integer # * {Types::HeadObjectOutput#version_id #version_id} => String # * {Types::HeadObjectOutput#cache_control #cache_control} => String # * {Types::HeadObjectOutput#content_disposition #content_disposition} => String # * {Types::HeadObjectOutput#content_encoding #content_encoding} => String # * {Types::HeadObjectOutput#content_language #content_language} => String # * {Types::HeadObjectOutput#content_type #content_type} => String # * {Types::HeadObjectOutput#expires #expires} => Time # * {Types::HeadObjectOutput#expires_string #expires_string} => String # * {Types::HeadObjectOutput#website_redirect_location #website_redirect_location} => String # * {Types::HeadObjectOutput#server_side_encryption #server_side_encryption} => String # * {Types::HeadObjectOutput#metadata #metadata} => Hash<String,String> # * {Types::HeadObjectOutput#sse_customer_algorithm #sse_customer_algorithm} => String # * {Types::HeadObjectOutput#sse_customer_key_md5 #sse_customer_key_md5} => String # * {Types::HeadObjectOutput#ssekms_key_id #ssekms_key_id} => String # * {Types::HeadObjectOutput#bucket_key_enabled #bucket_key_enabled} => Boolean # * {Types::HeadObjectOutput#storage_class #storage_class} => String # * {Types::HeadObjectOutput#request_charged #request_charged} => String # * {Types::HeadObjectOutput#replication_status #replication_status} => String # * {Types::HeadObjectOutput#parts_count #parts_count} => Integer # * {Types::HeadObjectOutput#object_lock_mode #object_lock_mode} => String # * {Types::HeadObjectOutput#object_lock_retain_until_date #object_lock_retain_until_date} => Time # * {Types::HeadObjectOutput#object_lock_legal_hold_status #object_lock_legal_hold_status} => String # # # @example Example: To retrieve metadata of an object without returning the object itself # # # The following example retrieves an object metadata. 
# # resp = client.head_object({ # bucket: "examplebucket", # key: "HappyFace.jpg", # }) # # resp.to_h outputs the following: # { # accept_ranges: "bytes", # content_length: 3191, # content_type: "image/jpeg", # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", # last_modified: Time.parse("Thu, 15 Dec 2016 01:19:41 GMT"), # metadata: { # }, # version_id: "null", # } # # @example Request syntax with placeholder values # # resp = client.head_object({ # bucket: "BucketName", # required # if_match: "IfMatch", # if_modified_since: Time.now, # if_none_match: "IfNoneMatch", # if_unmodified_since: Time.now, # key: "ObjectKey", # required # range: "Range", # version_id: "ObjectVersionId", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # request_payer: "requester", # accepts requester # part_number: 1, # expected_bucket_owner: "AccountId", # checksum_mode: "ENABLED", # accepts ENABLED # }) # # @example Response structure # # resp.delete_marker #=> Boolean # resp.accept_ranges #=> String # resp.expiration #=> String # resp.restore #=> String # resp.archive_status #=> String, one of "ARCHIVE_ACCESS", "DEEP_ARCHIVE_ACCESS" # resp.last_modified #=> Time # resp.content_length #=> Integer # resp.checksum_crc32 #=> String # resp.checksum_crc32c #=> String # resp.checksum_sha1 #=> String # resp.checksum_sha256 #=> String # resp.etag #=> String # resp.missing_meta #=> Integer # resp.version_id #=> String # resp.cache_control #=> String # resp.content_disposition #=> String # resp.content_encoding #=> String # resp.content_language #=> String # resp.content_type #=> String # resp.expires #=> Time # resp.expires_string #=> String # resp.website_redirect_location #=> String # resp.server_side_encryption #=> String, one of "AES256", "aws:kms", "aws:kms:dsse" # resp.metadata #=> Hash # resp.metadata["MetadataKey"] #=> String # resp.sse_customer_algorithm #=> String # resp.sse_customer_key_md5 #=> String # resp.ssekms_key_id #=> String # resp.bucket_key_enabled #=> Boolean # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW", "EXPRESS_ONEZONE" # resp.request_charged #=> String, one of "requester" # resp.replication_status #=> String, one of "COMPLETE", "PENDING", "FAILED", "REPLICA", "COMPLETED" # resp.parts_count #=> Integer # resp.object_lock_mode #=> String, one of "GOVERNANCE", "COMPLIANCE" # resp.object_lock_retain_until_date #=> Time # resp.object_lock_legal_hold_status #=> String, one of "ON", "OFF" # # # The following waiters are defined for this operation (see {Client#wait_until} for detailed usage): # # * object_exists # * object_not_exists # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject AWS API Documentation # # @overload head_object(params = {}) # @param [Hash] params ({}) def head_object(params = {}, options = {}) req = build_request(:head_object, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Lists the analytics configurations for the bucket. You can have up to # 1,000 analytics configurations per bucket. # # This action supports list pagination and does not return more than 100 # configurations at a time. You should always check the `IsTruncated` # element in the response. If there are no more configurations to list, # `IsTruncated` is set to false. 
If there are more configurations to # list, `IsTruncated` is set to true, and there will be a value in # `NextContinuationToken`. You use the `NextContinuationToken` value to # continue the pagination of the list by passing the value in # `continuation-token` in the request to `GET` the next page. A # hand-rolled pagination loop is sketched in the example below. # # To use this operation, you must have permissions to perform the # `s3:GetAnalyticsConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][1] and [Managing Access # Permissions to Your Amazon S3 Resources][2]. # # For information about the Amazon S3 analytics feature, see [Amazon S3 # Analytics – Storage Class Analysis][3]. # # The following operations are related to # `ListBucketAnalyticsConfigurations`: # # * [GetBucketAnalyticsConfiguration][4] # # * [DeleteBucketAnalyticsConfiguration][5] # # * [PutBucketAnalyticsConfiguration][6] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html # # @option params [required, String] :bucket # The name of the bucket from which analytics configurations are # retrieved. # # @option params [String] :continuation_token # The `ContinuationToken` that represents a placeholder from where this # request should begin. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied).
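#
# @example Example: Paginating analytics configurations (illustrative sketch)
#
#   # A hand-rolled pagination loop, assuming a configured `client`;
#   # "examplebucket" is a placeholder bucket name.
#   params = { bucket: "examplebucket" }
#   loop do
#     resp = client.list_bucket_analytics_configurations(params)
#     resp.analytics_configuration_list.each { |c| puts c.id }
#     break unless resp.is_truncated
#     params[:continuation_token] = resp.next_continuation_token
#   end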
# # @return [Types::ListBucketAnalyticsConfigurationsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListBucketAnalyticsConfigurationsOutput#is_truncated #is_truncated} => Boolean # * {Types::ListBucketAnalyticsConfigurationsOutput#continuation_token #continuation_token} => String # * {Types::ListBucketAnalyticsConfigurationsOutput#next_continuation_token #next_continuation_token} => String # * {Types::ListBucketAnalyticsConfigurationsOutput#analytics_configuration_list #analytics_configuration_list} => Array<Types::AnalyticsConfiguration> # # @example Request syntax with placeholder values # # resp = client.list_bucket_analytics_configurations({ # bucket: "BucketName", # required # continuation_token: "Token", # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.is_truncated #=> Boolean # resp.continuation_token #=> String # resp.next_continuation_token #=> String # resp.analytics_configuration_list #=> Array # resp.analytics_configuration_list[0].id #=> String # resp.analytics_configuration_list[0].filter.prefix #=> String # resp.analytics_configuration_list[0].filter.tag.key #=> String # resp.analytics_configuration_list[0].filter.tag.value #=> String # resp.analytics_configuration_list[0].filter.and.prefix #=> String # resp.analytics_configuration_list[0].filter.and.tags #=> Array # resp.analytics_configuration_list[0].filter.and.tags[0].key #=> String # resp.analytics_configuration_list[0].filter.and.tags[0].value #=> String # resp.analytics_configuration_list[0].storage_class_analysis.data_export.output_schema_version #=> String, one of "V_1" # resp.analytics_configuration_list[0].storage_class_analysis.data_export.destination.s3_bucket_destination.format #=> String, one of "CSV" # resp.analytics_configuration_list[0].storage_class_analysis.data_export.destination.s3_bucket_destination.bucket_account_id #=> String # resp.analytics_configuration_list[0].storage_class_analysis.data_export.destination.s3_bucket_destination.bucket #=> String # resp.analytics_configuration_list[0].storage_class_analysis.data_export.destination.s3_bucket_destination.prefix #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations AWS API Documentation # # @overload list_bucket_analytics_configurations(params = {}) # @param [Hash] params ({}) def list_bucket_analytics_configurations(params = {}, options = {}) req = build_request(:list_bucket_analytics_configurations, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Lists the S3 Intelligent-Tiering configuration from the specified # bucket. # # The S3 Intelligent-Tiering storage class is designed to optimize # storage costs by automatically moving data to the most cost-effective # storage access tier, without performance impact or operational # overhead. S3 Intelligent-Tiering delivers automatic cost savings in # three low latency and high throughput access tiers. To get the lowest # storage cost on data that can be accessed in minutes to hours, you can # choose to activate additional archiving capabilities. # # The S3 Intelligent-Tiering storage class is the ideal storage class # for data with unknown, changing, or unpredictable access patterns, # independent of object size or retention period. If the size of an # object is less than 128 KB, it is not monitored and not eligible for # auto-tiering. 
Smaller objects can be stored, but they are always # charged at the Frequent Access tier rates in the S3 # Intelligent-Tiering storage class. # # For more information, see [Storage class for automatically optimizing # frequently and infrequently accessed objects][1]. # # Operations related to `ListBucketIntelligentTieringConfigurations` # include: # # * [DeleteBucketIntelligentTieringConfiguration][2] # # * [PutBucketIntelligentTieringConfiguration][3] # # * [GetBucketIntelligentTieringConfiguration][4] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html # # @option params [required, String] :bucket # The name of the Amazon S3 bucket whose configuration you want to # modify or retrieve. # # @option params [String] :continuation_token # The `ContinuationToken` that represents a placeholder from where this # request should begin. # # @return [Types::ListBucketIntelligentTieringConfigurationsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListBucketIntelligentTieringConfigurationsOutput#is_truncated #is_truncated} => Boolean # * {Types::ListBucketIntelligentTieringConfigurationsOutput#continuation_token #continuation_token} => String # * {Types::ListBucketIntelligentTieringConfigurationsOutput#next_continuation_token #next_continuation_token} => String # * {Types::ListBucketIntelligentTieringConfigurationsOutput#intelligent_tiering_configuration_list #intelligent_tiering_configuration_list} => Array<Types::IntelligentTieringConfiguration> # # @example Request syntax with placeholder values # # resp = client.list_bucket_intelligent_tiering_configurations({ # bucket: "BucketName", # required # continuation_token: "Token", # }) # # @example Response structure # # resp.is_truncated #=> Boolean # resp.continuation_token #=> String # resp.next_continuation_token #=> String # resp.intelligent_tiering_configuration_list #=> Array # resp.intelligent_tiering_configuration_list[0].id #=> String # resp.intelligent_tiering_configuration_list[0].filter.prefix #=> String # resp.intelligent_tiering_configuration_list[0].filter.tag.key #=> String # resp.intelligent_tiering_configuration_list[0].filter.tag.value #=> String # resp.intelligent_tiering_configuration_list[0].filter.and.prefix #=> String # resp.intelligent_tiering_configuration_list[0].filter.and.tags #=> Array # resp.intelligent_tiering_configuration_list[0].filter.and.tags[0].key #=> String # resp.intelligent_tiering_configuration_list[0].filter.and.tags[0].value #=> String # resp.intelligent_tiering_configuration_list[0].status #=> String, one of "Enabled", "Disabled" # resp.intelligent_tiering_configuration_list[0].tierings #=> Array # resp.intelligent_tiering_configuration_list[0].tierings[0].days #=> Integer # resp.intelligent_tiering_configuration_list[0].tierings[0].access_tier #=> String, one of "ARCHIVE_ACCESS", "DEEP_ARCHIVE_ACCESS" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations AWS API Documentation # # @overload list_bucket_intelligent_tiering_configurations(params = {}) # @param [Hash] params ({}) def 
list_bucket_intelligent_tiering_configurations(params = {}, options = {}) req = build_request(:list_bucket_intelligent_tiering_configurations, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Returns a list of inventory configurations for the bucket. You can # have up to 1,000 inventory configurations per bucket. # # This action supports list pagination and does not return more than 100 # configurations at a time. Always check the `IsTruncated` element in # the response. If there are no more configurations to list, # `IsTruncated` is set to false. If there are more configurations to # list, `IsTruncated` is set to true, and there is a value in # `NextContinuationToken`. You use the `NextContinuationToken` value to # continue the pagination of the list by passing the value in # `continuation-token` in the request to `GET` the next page. A # hand-rolled pagination loop is sketched in the example below. # # To use this operation, you must have permissions to perform the # `s3:GetInventoryConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][1] and [Managing Access # Permissions to Your Amazon S3 Resources][2]. # # For information about the Amazon S3 inventory feature, see [Amazon S3 # Inventory][3]. # # The following operations are related to # `ListBucketInventoryConfigurations`: # # * [GetBucketInventoryConfiguration][4] # # * [DeleteBucketInventoryConfiguration][5] # # * [PutBucketInventoryConfiguration][6] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html # # @option params [required, String] :bucket # The name of the bucket containing the inventory configurations to # retrieve. # # @option params [String] :continuation_token # The marker used to continue an inventory configuration listing that # has been truncated. Use the `NextContinuationToken` from a previously # truncated list response to continue the listing. The continuation # token is an opaque value that Amazon S3 understands. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied).
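#
# @example Example: Collecting all inventory configurations (illustrative sketch)
#
#   # A hand-rolled pagination loop, assuming a configured `client`;
#   # "examplebucket" is a placeholder bucket name.
#   configs = []
#   params = { bucket: "examplebucket" }
#   loop do
#     resp = client.list_bucket_inventory_configurations(params)
#     configs.concat(resp.inventory_configuration_list)
#     break unless resp.is_truncated
#     params[:continuation_token] = resp.next_continuation_token
#   end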
# # @return [Types::ListBucketInventoryConfigurationsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListBucketInventoryConfigurationsOutput#continuation_token #continuation_token} => String # * {Types::ListBucketInventoryConfigurationsOutput#inventory_configuration_list #inventory_configuration_list} => Array<Types::InventoryConfiguration> # * {Types::ListBucketInventoryConfigurationsOutput#is_truncated #is_truncated} => Boolean # * {Types::ListBucketInventoryConfigurationsOutput#next_continuation_token #next_continuation_token} => String # # @example Request syntax with placeholder values # # resp = client.list_bucket_inventory_configurations({ # bucket: "BucketName", # required # continuation_token: "Token", # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.continuation_token #=> String # resp.inventory_configuration_list #=> Array # resp.inventory_configuration_list[0].destination.s3_bucket_destination.account_id #=> String # resp.inventory_configuration_list[0].destination.s3_bucket_destination.bucket #=> String # resp.inventory_configuration_list[0].destination.s3_bucket_destination.format #=> String, one of "CSV", "ORC", "Parquet" # resp.inventory_configuration_list[0].destination.s3_bucket_destination.prefix #=> String # resp.inventory_configuration_list[0].destination.s3_bucket_destination.encryption.ssekms.key_id #=> String # resp.inventory_configuration_list[0].is_enabled #=> Boolean # resp.inventory_configuration_list[0].filter.prefix #=> String # resp.inventory_configuration_list[0].id #=> String # resp.inventory_configuration_list[0].included_object_versions #=> String, one of "All", "Current" # resp.inventory_configuration_list[0].optional_fields #=> Array # resp.inventory_configuration_list[0].optional_fields[0] #=> String, one of "Size", "LastModifiedDate", "StorageClass", "ETag", "IsMultipartUploaded", "ReplicationStatus", "EncryptionStatus", "ObjectLockRetainUntilDate", "ObjectLockMode", "ObjectLockLegalHoldStatus", "IntelligentTieringAccessTier", "BucketKeyStatus", "ChecksumAlgorithm", "ObjectAccessControlList", "ObjectOwner" # resp.inventory_configuration_list[0].schedule.frequency #=> String, one of "Daily", "Weekly" # resp.is_truncated #=> Boolean # resp.next_continuation_token #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations AWS API Documentation # # @overload list_bucket_inventory_configurations(params = {}) # @param [Hash] params ({}) def list_bucket_inventory_configurations(params = {}, options = {}) req = build_request(:list_bucket_inventory_configurations, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Lists the metrics configurations for the bucket. The metrics # configurations are only for the request metrics of the bucket and do # not provide information on daily storage metrics. You can have up to # 1,000 configurations per bucket. # # This action supports list pagination and does not return more than 100 # configurations at a time. Always check the `IsTruncated` element in # the response. If there are no more configurations to list, # `IsTruncated` is set to false. If there are more configurations to # list, `IsTruncated` is set to true, and there is a value in # `NextContinuationToken`. 
You use the `NextContinuationToken` value to # continue the pagination of the list by passing the value in # `continuation-token` in the request to `GET` the next page. # # To use this operation, you must have permissions to perform the # `s3:GetMetricsConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][1] and [Managing Access # Permissions to Your Amazon S3 Resources][2]. # # For more information about metrics configurations and CloudWatch # request metrics, see [Monitoring Metrics with Amazon CloudWatch][3]. # # The following operations are related to # `ListBucketMetricsConfigurations`: # # * [PutBucketMetricsConfiguration][4] # # * [GetBucketMetricsConfiguration][5] # # * [DeleteBucketMetricsConfiguration][6] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html # # @option params [required, String] :bucket # The name of the bucket containing the metrics configurations to # retrieve. # # @option params [String] :continuation_token # The marker that is used to continue a metrics configuration listing # that has been truncated. Use the `NextContinuationToken` from a # previously truncated list response to continue the listing. The # continuation token is an opaque value that Amazon S3 understands. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). 
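#
# @example Example: Continuing a truncated listing (illustrative sketch)
#
#   # A minimal sketch, assuming a configured `client` and a first
#   # response `resp` whose `is_truncated` was true; the bucket name is
#   # a placeholder.
#   next_page = client.list_bucket_metrics_configurations({
#     bucket: "examplebucket",
#     continuation_token: resp.next_continuation_token,
#   })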
# # @return [Types::ListBucketMetricsConfigurationsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListBucketMetricsConfigurationsOutput#is_truncated #is_truncated} => Boolean # * {Types::ListBucketMetricsConfigurationsOutput#continuation_token #continuation_token} => String # * {Types::ListBucketMetricsConfigurationsOutput#next_continuation_token #next_continuation_token} => String # * {Types::ListBucketMetricsConfigurationsOutput#metrics_configuration_list #metrics_configuration_list} => Array<Types::MetricsConfiguration> # # @example Request syntax with placeholder values # # resp = client.list_bucket_metrics_configurations({ # bucket: "BucketName", # required # continuation_token: "Token", # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.is_truncated #=> Boolean # resp.continuation_token #=> String # resp.next_continuation_token #=> String # resp.metrics_configuration_list #=> Array # resp.metrics_configuration_list[0].id #=> String # resp.metrics_configuration_list[0].filter.prefix #=> String # resp.metrics_configuration_list[0].filter.tag.key #=> String # resp.metrics_configuration_list[0].filter.tag.value #=> String # resp.metrics_configuration_list[0].filter.access_point_arn #=> String # resp.metrics_configuration_list[0].filter.and.prefix #=> String # resp.metrics_configuration_list[0].filter.and.tags #=> Array # resp.metrics_configuration_list[0].filter.and.tags[0].key #=> String # resp.metrics_configuration_list[0].filter.and.tags[0].value #=> String # resp.metrics_configuration_list[0].filter.and.access_point_arn #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations AWS API Documentation # # @overload list_bucket_metrics_configurations(params = {}) # @param [Hash] params ({}) def list_bucket_metrics_configurations(params = {}, options = {}) req = build_request(:list_bucket_metrics_configurations, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Returns a list of all buckets owned by the authenticated sender of the # request. To use this operation, you must have the # `s3:ListAllMyBuckets` permission. # # For information about Amazon S3 buckets, see [Creating, configuring, # and working with Amazon S3 buckets][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html # # @return [Types::ListBucketsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListBucketsOutput#buckets #buckets} => Array<Types::Bucket> # * {Types::ListBucketsOutput#owner #owner} => Types::Owner # # # @example Example: To list all buckets # # # The following example returns all the buckets owned by the sender of this request. 
# # resp = client.list_buckets({ # }) # # resp.to_h outputs the following: # { # buckets: [ # { # creation_date: Time.parse("2012-02-15T21:03:02.000Z"), # name: "examplebucket", # }, # { # creation_date: Time.parse("2011-07-24T19:33:50.000Z"), # name: "examplebucket2", # }, # { # creation_date: Time.parse("2010-12-17T00:56:49.000Z"), # name: "examplebucket3", # }, # ], # owner: { # display_name: "own-display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31", # }, # } # # @example Response structure # # resp.buckets #=> Array # resp.buckets[0].name #=> String # resp.buckets[0].creation_date #=> Time # resp.owner.display_name #=> String # resp.owner.id #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets AWS API Documentation # # @overload list_buckets(params = {}) # @param [Hash] params ({}) def list_buckets(params = {}, options = {}) req = build_request(:list_buckets, params) req.send_request(options) end # Returns a list of all Amazon S3 directory buckets owned by the # authenticated sender of the request. For more information about # directory buckets, see [Directory buckets][1] in the *Amazon S3 User # Guide*. # # Directory buckets - For directory buckets, you must make # requests for this API operation to the Regional endpoint. These # endpoints support path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. For more information, # see [Regional and Zonal endpoints][2] in the *Amazon S3 User Guide*. # # # # Permissions # # : You must have the `s3express:ListAllMyDirectoryBuckets` permission # in an IAM identity-based policy instead of a bucket policy. # Cross-account access to this API operation isn't supported. This # operation can only be performed by the Amazon Web Services account # that owns the resource. For more information about directory bucket # policies and permissions, see [Amazon Web Services Identity and # Access Management (IAM) for S3 Express One Zone][3] in the *Amazon # S3 User Guide*. # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is # `s3express-control.region.amazonaws.com`. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html # # @option params [String] :continuation_token # `ContinuationToken` indicates to Amazon S3 that the list is being # continued on this bucket with a token. `ContinuationToken` is # obfuscated and is not a real key. You can use this `ContinuationToken` # for pagination of the list results. # # @option params [Integer] :max_directory_buckets # The maximum number of buckets to return in the response. When this # number is greater than the count of buckets owned by the Amazon Web # Services account, all of the account's buckets are returned in the # response. # # @return [Types::ListDirectoryBucketsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListDirectoryBucketsOutput#buckets #buckets} => Array<Types::Bucket> # * {Types::ListDirectoryBucketsOutput#continuation_token #continuation_token} => String # # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
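#
# @example Example: Enumerating all pages (illustrative sketch)
#
#   # Because the response is pageable and Enumerable, each page can be
#   # visited in turn; this minimal sketch assumes a configured `client`.
#   client.list_directory_buckets.each do |page|
#     page.buckets.each { |b| puts b.name }
#   end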
# # @example Request syntax with placeholder values # # resp = client.list_directory_buckets({ # continuation_token: "DirectoryBucketToken", # max_directory_buckets: 1, # }) # # @example Response structure # # resp.buckets #=> Array # resp.buckets[0].name #=> String # resp.buckets[0].creation_date #=> Time # resp.continuation_token #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListDirectoryBuckets AWS API Documentation # # @overload list_directory_buckets(params = {}) # @param [Hash] params ({}) def list_directory_buckets(params = {}, options = {}) req = build_request(:list_directory_buckets, params) req.send_request(options) end # This operation lists in-progress multipart uploads in a bucket. An # in-progress multipart upload is a multipart upload that has been # initiated by the `CreateMultipartUpload` request, but has not yet been # completed or aborted. # # **Directory buckets** - If multipart uploads in a directory bucket are # in progress, you can't delete the bucket until all the in-progress # multipart uploads are aborted or completed. # # # # The `ListMultipartUploads` operation returns a maximum of 1,000 # multipart uploads in the response. The limit of 1,000 multipart # uploads is also the default value. You can further limit the number of # uploads in a response by specifying the `max-uploads` request # parameter. If there are more than 1,000 multipart uploads that satisfy # your `ListMultipartUploads` request, the response returns an # `IsTruncated` element with the value of `true`, a `NextKeyMarker` # element, and a `NextUploadIdMarker` element. To list the remaining # multipart uploads, you need to make subsequent `ListMultipartUploads` # requests. In these requests, include two query parameters: # `key-marker` and `upload-id-marker`. Set the value of `key-marker` to # the `NextKeyMarker` value from the previous response. Similarly, set # the value of `upload-id-marker` to the `NextUploadIdMarker` value from # the previous response. # # **Directory buckets** - The `upload-id-marker` element and the # `NextUploadIdMarker` element aren't supported by directory buckets. # To list the additional multipart uploads, you only need to set the # value of `key-marker` to the `NextKeyMarker` value from the previous # response. # # # # For more information about multipart uploads, see [Uploading Objects # Using Multipart Upload][1] in the *Amazon S3 User Guide*. # # **Directory buckets** - For directory buckets, you must make requests # for this API operation to the Zonal endpoint. These endpoints support # virtual-hosted-style requests in the format # `https://bucket_name.s3express-az_id.region.amazonaws.com/key-name `. # Path-style requests are not supported. For more information, see # [Regional and Zonal endpoints][2] in the *Amazon S3 User Guide*. # # # # Permissions # : * **General purpose bucket permissions** - For information about # permissions required to use the multipart upload API, see # [Multipart Upload and Permissions][3] in the *Amazon S3 User # Guide*. # # * **Directory bucket permissions** - To grant access to this API # operation on a directory bucket, we recommend that you use the [ # `CreateSession` ][4] API operation for session-based # authorization. Specifically, you grant the # `s3express:CreateSession` permission to the directory bucket in a # bucket policy or an IAM identity-based policy. Then, you make the # `CreateSession` API call on the bucket to obtain a session token. 
# With the session token in your request header, you can make API # requests to this operation. After the session token expires, you # make another `CreateSession` API call to generate a new session # token for use. Amazon Web Services CLI or SDKs create session and # refresh the session token automatically to avoid service # interruptions when a session expires. For more information about # authorization, see [ `CreateSession` ][4]. # # Sorting of multipart uploads in response # : * **General purpose bucket** - In the `ListMultipartUploads` # response, the multipart uploads are sorted based on two criteria: # # * Key-based sorting - Multipart uploads are initially sorted in # ascending order based on their object keys. # # * Time-based sorting - For uploads that share the same object key, # they are further sorted in ascending order based on the upload # initiation time. Among uploads with the same key, the one that # was initiated first will appear before the ones that were # initiated later. # # * **Directory bucket** - In the `ListMultipartUploads` response, the # multipart uploads aren't sorted lexicographically based on the # object keys. # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is ` # Bucket_name.s3express-az_id.region.amazonaws.com`. # # The following operations are related to `ListMultipartUploads`: # # * [CreateMultipartUpload][5] # # * [UploadPart][6] # # * [CompleteMultipartUpload][7] # # * [ListParts][8] # # * [AbortMultipartUpload][9] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html # # @option params [required, String] :bucket # The name of the bucket to which the multipart upload was initiated. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. 
For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [String] :delimiter # Character you use to group keys. # # All keys that contain the same string between the prefix, if # specified, and the first occurrence of the delimiter after the prefix # are grouped under a single result element, `CommonPrefixes`. If you # don't specify the prefix parameter, then the substring starts at the # beginning of the key. The keys that are grouped under `CommonPrefixes` # result element are not returned elsewhere in the response. # # **Directory buckets** - For directory buckets, `/` is the only # supported delimiter. # # # # @option params [String] :encoding_type # Requests Amazon S3 to encode the object keys in the response and # specifies the encoding method to use. An object key can contain any # Unicode character; however, the XML 1.0 parser cannot parse some # characters, such as characters with an ASCII value from 0 to 10. For # characters that are not supported in XML 1.0, you can add this # parameter to request that Amazon S3 encode the keys in the response. # # @option params [String] :key_marker # Specifies the multipart upload after which listing should begin. # # * **General purpose buckets** - For general purpose buckets, # `key-marker` is an object key. Together with `upload-id-marker`, # this parameter specifies the multipart upload after which listing # should begin. # # If `upload-id-marker` is not specified, only the keys # lexicographically greater than the specified `key-marker` will be # included in the list. # # If `upload-id-marker` is specified, any multipart uploads for a key # equal to the `key-marker` might also be included, provided those # multipart uploads have upload IDs lexicographically greater than the # specified `upload-id-marker`. # # * **Directory buckets** - For directory buckets, `key-marker` is # obfuscated and isn't a real object key. The `upload-id-marker` # parameter isn't supported by directory buckets. To list the # additional multipart uploads, you only need to set the value of # `key-marker` to the `NextKeyMarker` value from the previous # response. # # In the `ListMultipartUploads` response, the multipart uploads # aren't sorted lexicographically based on the object keys. # # # # @option params [Integer] :max_uploads # Sets the maximum number of multipart uploads, from 1 to 1,000, to # return in the response body. 1,000 is the maximum number of uploads # that can be returned in a response. 
# # @option params [String] :prefix # Lists in-progress uploads only for those keys that begin with the # specified prefix. You can use prefixes to separate a bucket into # different groupings of keys. (You can think of using `prefix` to make # groups in the same way that you'd use a folder in a file system.) # # **Directory buckets** - For directory buckets, only prefixes that end # in a delimiter (`/`) are supported. # # # # @option params [String] :upload_id_marker # Together with key-marker, specifies the multipart upload after which # listing should begin. If key-marker is not specified, the # upload-id-marker parameter is ignored. Otherwise, any multipart # uploads for a key equal to the key-marker might be included in the # list only if they have an upload ID lexicographically greater than the # specified `upload-id-marker`. # # This functionality is not supported for directory buckets. # # # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @return [Types::ListMultipartUploadsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListMultipartUploadsOutput#bucket #bucket} => String # * {Types::ListMultipartUploadsOutput#key_marker #key_marker} => String # * {Types::ListMultipartUploadsOutput#upload_id_marker #upload_id_marker} => String # * {Types::ListMultipartUploadsOutput#next_key_marker #next_key_marker} => String # * {Types::ListMultipartUploadsOutput#prefix #prefix} => String # * {Types::ListMultipartUploadsOutput#delimiter #delimiter} => String # * {Types::ListMultipartUploadsOutput#next_upload_id_marker #next_upload_id_marker} => String # * {Types::ListMultipartUploadsOutput#max_uploads #max_uploads} => Integer # * {Types::ListMultipartUploadsOutput#is_truncated #is_truncated} => Boolean # * {Types::ListMultipartUploadsOutput#uploads #uploads} => Array&lt;Types::MultipartUpload&gt; # * {Types::ListMultipartUploadsOutput#common_prefixes #common_prefixes} => Array&lt;Types::CommonPrefix&gt; # * {Types::ListMultipartUploadsOutput#encoding_type #encoding_type} => String # * {Types::ListMultipartUploadsOutput#request_charged #request_charged} => String # # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. # # # @example Example: To list in-progress multipart uploads on a bucket # # # The following example lists in-progress multipart uploads on a specific bucket. 
# # resp = client.list_multipart_uploads({ # bucket: "examplebucket", # }) # # resp.to_h outputs the following: # { # uploads: [ # { # initiated: Time.parse("2014-05-01T05:40:58.000Z"), # initiator: { # display_name: "display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # }, # key: "JavaFile", # owner: { # display_name: "display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # }, # storage_class: "STANDARD", # upload_id: "examplelUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--", # }, # { # initiated: Time.parse("2014-05-01T05:41:27.000Z"), # initiator: { # display_name: "display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # }, # key: "JavaFile", # owner: { # display_name: "display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # }, # storage_class: "STANDARD", # upload_id: "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--", # }, # ], # } # # @example Example: List next set of multipart uploads when previous result is truncated # # # The following example specifies the upload-id-marker and key-marker from previous truncated response to retrieve the next # # set of multipart uploads. # # resp = client.list_multipart_uploads({ # bucket: "examplebucket", # key_marker: "nextkeyfrompreviousresponse", # max_uploads: 2, # upload_id_marker: "valuefrompreviousresponse", # }) # # resp.to_h outputs the following: # { # bucket: "acl1", # is_truncated: true, # key_marker: "", # max_uploads: 2, # next_key_marker: "someobjectkey", # next_upload_id_marker: "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--", # upload_id_marker: "", # uploads: [ # { # initiated: Time.parse("2014-05-01T05:40:58.000Z"), # initiator: { # display_name: "owner-display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # }, # key: "JavaFile", # owner: { # display_name: "mohanataws", # id: "852b113e7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # }, # storage_class: "STANDARD", # upload_id: "gZ30jIqlUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--", # }, # { # initiated: Time.parse("2014-05-01T05:41:27.000Z"), # initiator: { # display_name: "owner-display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # }, # key: "JavaFile", # owner: { # display_name: "owner-display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # }, # storage_class: "STANDARD", # upload_id: "b7tZSqIlo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--", # }, # ], # } # # @example Request syntax with placeholder values # # resp = client.list_multipart_uploads({ # bucket: "BucketName", # required # delimiter: "Delimiter", # encoding_type: "url", # accepts url # key_marker: "KeyMarker", # max_uploads: 1, # prefix: "Prefix", # upload_id_marker: "UploadIdMarker", # expected_bucket_owner: "AccountId", # request_payer: "requester", # accepts requester # }) # # @example Response structure # # resp.bucket #=> String # resp.key_marker #=> String # resp.upload_id_marker #=> String # resp.next_key_marker #=> String # resp.prefix #=> String # resp.delimiter #=> String # resp.next_upload_id_marker #=> String # resp.max_uploads #=> Integer # resp.is_truncated #=> Boolean # resp.uploads #=> Array # resp.uploads[0].upload_id #=> String # resp.uploads[0].key #=> 
String # resp.uploads[0].initiated #=> Time # resp.uploads[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW", "EXPRESS_ONEZONE" # resp.uploads[0].owner.display_name #=> String # resp.uploads[0].owner.id #=> String # resp.uploads[0].initiator.id #=> String # resp.uploads[0].initiator.display_name #=> String # resp.uploads[0].checksum_algorithm #=> String, one of "CRC32", "CRC32C", "SHA1", "SHA256" # resp.common_prefixes #=> Array # resp.common_prefixes[0].prefix #=> String # resp.encoding_type #=> String, one of "url" # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads AWS API Documentation # # @overload list_multipart_uploads(params = {}) # @param [Hash] params ({}) def list_multipart_uploads(params = {}, options = {}) req = build_request(:list_multipart_uploads, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Returns metadata about all versions of the objects in a bucket. You # can also use request parameters as selection criteria to return # metadata about a subset of all the object versions. # # To use this operation, you must have permission to perform the # `s3:ListBucketVersions` action. Be aware of the name difference. # # A `200 OK` response can contain valid or invalid XML. Make sure to # design your application to parse the contents of the response and # handle it appropriately. # # # # To use this operation, you must have READ access to the bucket. # # The following operations are related to `ListObjectVersions`: # # * [ListObjectsV2][1] # # * [GetObject][2] # # * [PutObject][3] # # * [DeleteObject][4] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html # # @option params [required, String] :bucket # The bucket name that contains the objects. # # @option params [String] :delimiter # A delimiter is a character that you specify to group keys. All keys # that contain the same string between the `prefix` and the first # occurrence of the delimiter are grouped under a single result element # in `CommonPrefixes`. These groups are counted as one result against # the `max-keys` limitation. These keys are not returned elsewhere in # the response. # # @option params [String] :encoding_type # Requests Amazon S3 to encode the object keys in the response and # specifies the encoding method to use. An object key can contain any # Unicode character; however, the XML 1.0 parser cannot parse some # characters, such as characters with an ASCII value from 0 to 10. For # characters that are not supported in XML 1.0, you can add this # parameter to request that Amazon S3 encode the keys in the response. # # @option params [String] :key_marker # Specifies the key to start with when listing objects in a bucket. # # @option params [Integer] :max_keys # Sets the maximum number of keys returned in the response. By default, # the action returns up to 1,000 key names. The response might contain # fewer keys but will never contain more. If additional keys satisfy the # search criteria, but were not returned because `max-keys` was # exceeded, the response contains `IsTruncated` with the value `true`. 
To # return the additional keys, see `key-marker` and `version-id-marker`. # # @option params [String] :prefix # Use this parameter to select only those keys that begin with the # specified prefix. You can use prefixes to separate a bucket into # different groupings of keys. (You can think of using `prefix` to make # groups in the same way that you'd use a folder in a file system.) You # can use `prefix` with `delimiter` to roll up numerous objects into a # single result under `CommonPrefixes`. # # @option params [String] :version_id_marker # Specifies the object version you want to start listing from. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [Array] :optional_object_attributes # Specifies the optional fields that you want returned in the response. # Fields that you do not specify are not returned. # # @return [Types::ListObjectVersionsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListObjectVersionsOutput#is_truncated #is_truncated} => Boolean # * {Types::ListObjectVersionsOutput#key_marker #key_marker} => String # * {Types::ListObjectVersionsOutput#version_id_marker #version_id_marker} => String # * {Types::ListObjectVersionsOutput#next_key_marker #next_key_marker} => String # * {Types::ListObjectVersionsOutput#next_version_id_marker #next_version_id_marker} => String # * {Types::ListObjectVersionsOutput#versions #versions} => Array&lt;Types::ObjectVersion&gt; # * {Types::ListObjectVersionsOutput#delete_markers #delete_markers} => Array&lt;Types::DeleteMarkerEntry&gt; # * {Types::ListObjectVersionsOutput#name #name} => String # * {Types::ListObjectVersionsOutput#prefix #prefix} => String # * {Types::ListObjectVersionsOutput#delimiter #delimiter} => String # * {Types::ListObjectVersionsOutput#max_keys #max_keys} => Integer # * {Types::ListObjectVersionsOutput#common_prefixes #common_prefixes} => Array&lt;Types::CommonPrefix&gt; # * {Types::ListObjectVersionsOutput#encoding_type #encoding_type} => String # * {Types::ListObjectVersionsOutput#request_charged #request_charged} => String # # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. # # # @example Example: To list object versions # # # The following example returns versions of an object with a specific key name prefix. The request limits the number of items # # returned to two. If there are more than two object versions, S3 returns `NextKeyMarker` in the response. You can specify # # this value as the `key-marker` in your next request to fetch the next set of object versions. 
# # resp = client.list_object_versions({ # bucket: "examplebucket", # prefix: "HappyFace.jpg", # }) # # resp.to_h outputs the following: # { # versions: [ # { # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", # is_latest: true, # key: "HappyFace.jpg", # last_modified: Time.parse("2016-12-15T01:19:41.000Z"), # owner: { # display_name: "owner-display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # }, # size: 3191, # storage_class: "STANDARD", # version_id: "null", # }, # { # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", # is_latest: false, # key: "HappyFace.jpg", # last_modified: Time.parse("2016-12-13T00:58:26.000Z"), # owner: { # display_name: "owner-display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # }, # size: 3191, # storage_class: "STANDARD", # version_id: "PHtexPGjH2y.zBgT8LmB7wwLI2mpbz.k", # }, # ], # } # # @example Request syntax with placeholder values # # resp = client.list_object_versions({ # bucket: "BucketName", # required # delimiter: "Delimiter", # encoding_type: "url", # accepts url # key_marker: "KeyMarker", # max_keys: 1, # prefix: "Prefix", # version_id_marker: "VersionIdMarker", # expected_bucket_owner: "AccountId", # request_payer: "requester", # accepts requester # optional_object_attributes: ["RestoreStatus"], # accepts RestoreStatus # }) # # @example Response structure # # resp.is_truncated #=> Boolean # resp.key_marker #=> String # resp.version_id_marker #=> String # resp.next_key_marker #=> String # resp.next_version_id_marker #=> String # resp.versions #=> Array # resp.versions[0].etag #=> String # resp.versions[0].checksum_algorithm #=> Array # resp.versions[0].checksum_algorithm[0] #=> String, one of "CRC32", "CRC32C", "SHA1", "SHA256" # resp.versions[0].size #=> Integer # resp.versions[0].storage_class #=> String, one of "STANDARD" # resp.versions[0].key #=> String # resp.versions[0].version_id #=> String # resp.versions[0].is_latest #=> Boolean # resp.versions[0].last_modified #=> Time # resp.versions[0].owner.display_name #=> String # resp.versions[0].owner.id #=> String # resp.versions[0].restore_status.is_restore_in_progress #=> Boolean # resp.versions[0].restore_status.restore_expiry_date #=> Time # resp.delete_markers #=> Array # resp.delete_markers[0].owner.display_name #=> String # resp.delete_markers[0].owner.id #=> String # resp.delete_markers[0].key #=> String # resp.delete_markers[0].version_id #=> String # resp.delete_markers[0].is_latest #=> Boolean # resp.delete_markers[0].last_modified #=> Time # resp.name #=> String # resp.prefix #=> String # resp.delimiter #=> String # resp.max_keys #=> Integer # resp.common_prefixes #=> Array # resp.common_prefixes[0].prefix #=> String # resp.encoding_type #=> String, one of "url" # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions AWS API Documentation # # @overload list_object_versions(params = {}) # @param [Hash] params ({}) def list_object_versions(params = {}, options = {}) req = build_request(:list_object_versions, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Returns some or all (up to 1,000) of the objects in a bucket. You can # use the request parameters as selection criteria to return a subset of # the objects in a bucket. A 200 OK response can contain valid or # invalid XML. Be sure to design your application to parse the contents # of the response and handle it appropriately. 
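#
# As an illustrative aside (a minimal sketch, not generated reference
# output, assuming an already-configured `client` and a placeholder
# bucket name): because the returned response is pageable and
# Enumerable, you can enumerate keys across pages and let the SDK send
# the follow-up `marker` requests for you.
#
#     client.list_objects(bucket: "DOC-EXAMPLE-BUCKET").each do |page|
#       page.contents.each { |object| puts object.key }
#     end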
# # This action has been revised. We recommend that you use the newer # version, [ListObjectsV2][1], when developing applications. For # backward compatibility, Amazon S3 continues to support `ListObjects`. # # The following operations are related to `ListObjects`: # # * [ListObjectsV2][1] # # * [GetObject][2] # # * [PutObject][3] # # * [CreateBucket][4] # # * [ListBuckets][5] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html # # @option params [required, String] :bucket # The name of the bucket containing the objects. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [String] :delimiter # A delimiter is a character that you use to group keys. # # @option params [String] :encoding_type # Requests Amazon S3 to encode the object keys in the response and # specifies the encoding method to use. An object key can contain any # Unicode character; however, the XML 1.0 parser cannot parse some # characters, such as characters with an ASCII value from 0 to 10. For # characters that are not supported in XML 1.0, you can add this # parameter to request that Amazon S3 encode the keys in the response. 
# # @option params [String] :marker # Marker is where you want Amazon S3 to start listing from. Amazon S3 # starts listing after this specified key. Marker can be any key in the # bucket. # # @option params [Integer] :max_keys # Sets the maximum number of keys returned in the response. By default, # the action returns up to 1,000 key names. The response might contain # fewer keys but will never contain more. # # @option params [String] :prefix # Limits the response to keys that begin with the specified prefix. # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for # the list objects request. Bucket owners need not specify this # parameter in their requests. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @option params [Array] :optional_object_attributes # Specifies the optional fields that you want returned in the response. # Fields that you do not specify are not returned. # # @return [Types::ListObjectsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListObjectsOutput#is_truncated #is_truncated} => Boolean # * {Types::ListObjectsOutput#marker #marker} => String # * {Types::ListObjectsOutput#next_marker #next_marker} => String # * {Types::ListObjectsOutput#contents #contents} => Array&lt;Types::Object&gt; # * {Types::ListObjectsOutput#name #name} => String # * {Types::ListObjectsOutput#prefix #prefix} => String # * {Types::ListObjectsOutput#delimiter #delimiter} => String # * {Types::ListObjectsOutput#max_keys #max_keys} => Integer # * {Types::ListObjectsOutput#common_prefixes #common_prefixes} => Array&lt;Types::CommonPrefix&gt; # * {Types::ListObjectsOutput#encoding_type #encoding_type} => String # * {Types::ListObjectsOutput#request_charged #request_charged} => String # # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. # # # @example Example: To list objects in a bucket # # # The following example lists two objects in a bucket. 
# # resp = client.list_objects({ # bucket: "examplebucket", # max_keys: 2, # }) # # resp.to_h outputs the following: # { # contents: [ # { # etag: "\"70ee1738b6b21e2c8a43f3a5ab0eee71\"", # key: "example1.jpg", # last_modified: Time.parse("2014-11-21T19:40:05.000Z"), # owner: { # display_name: "myname", # id: "12345example25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # }, # size: 11, # storage_class: "STANDARD", # }, # { # etag: "\"9c8af9a76df052144598c115ef33e511\"", # key: "example2.jpg", # last_modified: Time.parse("2013-11-15T01:10:49.000Z"), # owner: { # display_name: "myname", # id: "12345example25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # }, # size: 713193, # storage_class: "STANDARD", # }, # ], # next_marker: "eyJNYXJrZXIiOiBudWxsLCAiYm90b190cnVuY2F0ZV9hbW91bnQiOiAyfQ==", # } # # @example Request syntax with placeholder values # # resp = client.list_objects({ # bucket: "BucketName", # required # delimiter: "Delimiter", # encoding_type: "url", # accepts url # marker: "Marker", # max_keys: 1, # prefix: "Prefix", # request_payer: "requester", # accepts requester # expected_bucket_owner: "AccountId", # optional_object_attributes: ["RestoreStatus"], # accepts RestoreStatus # }) # # @example Response structure # # resp.is_truncated #=> Boolean # resp.marker #=> String # resp.next_marker #=> String # resp.contents #=> Array # resp.contents[0].key #=> String # resp.contents[0].last_modified #=> Time # resp.contents[0].etag #=> String # resp.contents[0].checksum_algorithm #=> Array # resp.contents[0].checksum_algorithm[0] #=> String, one of "CRC32", "CRC32C", "SHA1", "SHA256" # resp.contents[0].size #=> Integer # resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW", "EXPRESS_ONEZONE" # resp.contents[0].owner.display_name #=> String # resp.contents[0].owner.id #=> String # resp.contents[0].restore_status.is_restore_in_progress #=> Boolean # resp.contents[0].restore_status.restore_expiry_date #=> Time # resp.name #=> String # resp.prefix #=> String # resp.delimiter #=> String # resp.max_keys #=> Integer # resp.common_prefixes #=> Array # resp.common_prefixes[0].prefix #=> String # resp.encoding_type #=> String, one of "url" # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects AWS API Documentation # # @overload list_objects(params = {}) # @param [Hash] params ({}) def list_objects(params = {}, options = {}) req = build_request(:list_objects, params) req.send_request(options) end # Returns some or all (up to 1,000) of the objects in a bucket with each # request. You can use the request parameters as selection criteria to # return a subset of the objects in a bucket. A `200 OK` response can # contain valid or invalid XML. Make sure to design your application to # parse the contents of the response and handle it appropriately. For # more information about listing objects, see [Listing object keys # programmatically][1] in the *Amazon S3 User Guide*. To get a list of # your buckets, see [ListBuckets][2]. # # **Directory buckets** - For directory buckets, you must make requests # for this API operation to the Zonal endpoint. These endpoints support # virtual-hosted-style requests in the format # `https://bucket_name.s3express-az_id.region.amazonaws.com/key-name `. # Path-style requests are not supported. 
For more information, see # [Regional and Zonal endpoints][3] in the *Amazon S3 User Guide*. # # # # Permissions # : * **General purpose bucket permissions** - To use this operation, # you must have READ access to the bucket. You must have permission # to perform the `s3:ListBucket` action. The bucket owner has this # permission by default and can grant this permission to others. For # more information about permissions, see [Permissions Related to # Bucket Subresource Operations][4] and [Managing Access Permissions # to Your Amazon S3 Resources][5] in the *Amazon S3 User Guide*. # # * **Directory bucket permissions** - To grant access to this API # operation on a directory bucket, we recommend that you use the [ # `CreateSession` ][6] API operation for session-based # authorization. Specifically, you grant the # `s3express:CreateSession` permission to the directory bucket in a # bucket policy or an IAM identity-based policy. Then, you make the # `CreateSession` API call on the bucket to obtain a session token. # With the session token in your request header, you can make API # requests to this operation. After the session token expires, you # make another `CreateSession` API call to generate a new session # token for use. Amazon Web Services CLI or SDKs create session and # refresh the session token automatically to avoid service # interruptions when a session expires. For more information about # authorization, see [ `CreateSession` ][6]. # # Sorting order of returned objects # : * **General purpose bucket** - For general purpose buckets, # `ListObjectsV2` returns objects in lexicographical order based on # their key names. # # * **Directory bucket** - For directory buckets, `ListObjectsV2` does # not return objects in lexicographical order. # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is ` # Bucket_name.s3express-az_id.region.amazonaws.com`. # # This section describes the latest revision of this action. We # recommend that you use this revised API operation for application # development. For backward compatibility, Amazon S3 continues to # support the prior version of this API operation, [ListObjects][7]. # # The following operations are related to `ListObjectsV2`: # # * [GetObject][8] # # * [PutObject][9] # # * [CreateBucket][10] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html # # @option params [required, String] :bucket # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. 
Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [String] :delimiter # A delimiter is a character that you use to group keys. # # * **Directory buckets** - For directory buckets, `/` is the only # supported delimiter. # # * Directory buckets - When you query `ListObjectsV2` with a # delimiter during in-progress multipart uploads, the `CommonPrefixes` # response parameter contains the prefixes that are associated with # the in-progress multipart uploads. For more information about # multipart uploads, see [Multipart Upload Overview][1] in the *Amazon # S3 User Guide*. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html # # @option params [String] :encoding_type # Encoding type used by Amazon S3 to encode object keys in the response. # # @option params [Integer] :max_keys # Sets the maximum number of keys returned in the response. By default, # the action returns up to 1,000 key names. The response might contain # fewer keys but will never contain more. # # @option params [String] :prefix # Limits the response to keys that begin with the specified prefix. # # **Directory buckets** - For directory buckets, only prefixes that end # in a delimiter (`/`) are supported. # # # # @option params [String] :continuation_token # `ContinuationToken` indicates to Amazon S3 that the list is being # continued on this bucket with a token. `ContinuationToken` is # obfuscated and is not a real key. You can use this `ContinuationToken` # for pagination of the list results. # # @option params [Boolean] :fetch_owner # The owner field is not present in `ListObjectsV2` by default. If you # want to return the owner field with each key in the result, then set # the `FetchOwner` field to `true`. 
# # **Directory buckets** - For directory buckets, the bucket owner is # returned as the object owner for all objects. # # # # @option params [String] :start_after # StartAfter is where you want Amazon S3 to start listing from. Amazon # S3 starts listing after this specified key. StartAfter can be any key # in the bucket. # # This functionality is not supported for directory buckets. # # # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for # the list objects request in V2 style. Bucket owners need not specify # this parameter in their requests. # # This functionality is not supported for directory buckets. # # # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @option params [Array] :optional_object_attributes # Specifies the optional fields that you want returned in the response. # Fields that you do not specify are not returned. # # This functionality is not supported for directory buckets. # # # # @return [Types::ListObjectsV2Output] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListObjectsV2Output#is_truncated #is_truncated} => Boolean # * {Types::ListObjectsV2Output#contents #contents} => Array&lt;Types::Object&gt; # * {Types::ListObjectsV2Output#name #name} => String # * {Types::ListObjectsV2Output#prefix #prefix} => String # * {Types::ListObjectsV2Output#delimiter #delimiter} => String # * {Types::ListObjectsV2Output#max_keys #max_keys} => Integer # * {Types::ListObjectsV2Output#common_prefixes #common_prefixes} => Array&lt;Types::CommonPrefix&gt; # * {Types::ListObjectsV2Output#encoding_type #encoding_type} => String # * {Types::ListObjectsV2Output#key_count #key_count} => Integer # * {Types::ListObjectsV2Output#continuation_token #continuation_token} => String # * {Types::ListObjectsV2Output#next_continuation_token #next_continuation_token} => String # * {Types::ListObjectsV2Output#start_after #start_after} => String # * {Types::ListObjectsV2Output#request_charged #request_charged} => String # # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. # # # @example Example: To get object list # # # The following example retrieves an object list. The request specifies `max-keys` to limit the response to include only 2 object # # keys. 
# # resp = client.list_objects_v2({ # bucket: "DOC-EXAMPLE-BUCKET", # max_keys: 2, # }) # # resp.to_h outputs the following: # { # contents: [ # { # etag: "\"70ee1738b6b21e2c8a43f3a5ab0eee71\"", # key: "happyface.jpg", # last_modified: Time.parse("2014-11-21T19:40:05.000Z"), # size: 11, # storage_class: "STANDARD", # }, # { # etag: "\"becf17f89c30367a9a44495d62ed521a-1\"", # key: "test.jpg", # last_modified: Time.parse("2014-05-02T04:51:50.000Z"), # size: 4192256, # storage_class: "STANDARD", # }, # ], # is_truncated: true, # key_count: 2, # max_keys: 2, # name: "DOC-EXAMPLE-BUCKET", # next_continuation_token: "1w41l63U0xa8q7smH50vCxyTQqdxo69O3EmK28Bi5PcROI4wI/EyIJg==", # prefix: "", # } # # @example Request syntax with placeholder values # # resp = client.list_objects_v2({ # bucket: "BucketName", # required # delimiter: "Delimiter", # encoding_type: "url", # accepts url # max_keys: 1, # prefix: "Prefix", # continuation_token: "Token", # fetch_owner: false, # start_after: "StartAfter", # request_payer: "requester", # accepts requester # expected_bucket_owner: "AccountId", # optional_object_attributes: ["RestoreStatus"], # accepts RestoreStatus # }) # # @example Response structure # # resp.is_truncated #=> Boolean # resp.contents #=> Array # resp.contents[0].key #=> String # resp.contents[0].last_modified #=> Time # resp.contents[0].etag #=> String # resp.contents[0].checksum_algorithm #=> Array # resp.contents[0].checksum_algorithm[0] #=> String, one of "CRC32", "CRC32C", "SHA1", "SHA256" # resp.contents[0].size #=> Integer # resp.contents[0].storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW", "EXPRESS_ONEZONE" # resp.contents[0].owner.display_name #=> String # resp.contents[0].owner.id #=> String # resp.contents[0].restore_status.is_restore_in_progress #=> Boolean # resp.contents[0].restore_status.restore_expiry_date #=> Time # resp.name #=> String # resp.prefix #=> String # resp.delimiter #=> String # resp.max_keys #=> Integer # resp.common_prefixes #=> Array # resp.common_prefixes[0].prefix #=> String # resp.encoding_type #=> String, one of "url" # resp.key_count #=> Integer # resp.continuation_token #=> String # resp.next_continuation_token #=> String # resp.start_after #=> String # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 AWS API Documentation # # @overload list_objects_v2(params = {}) # @param [Hash] params ({}) def list_objects_v2(params = {}, options = {}) req = build_request(:list_objects_v2, params) req.send_request(options) end # Lists the parts that have been uploaded for a specific multipart # upload. # # To use this operation, you must provide the `upload ID` in the # request. You obtain this uploadID by sending the initiate multipart # upload request through [CreateMultipartUpload][1]. # # The `ListParts` request returns a maximum of 1,000 uploaded parts. The # limit of 1,000 parts is also the default value. You can restrict the # number of parts in a response by specifying the `max-parts` request # parameter. If your multipart upload consists of more than 1,000 parts, # the response returns an `IsTruncated` field with the value of `true`, # and a `NextPartNumberMarker` element. 
To list remaining uploaded # parts, in subsequent `ListParts` requests, include the # `part-number-marker` query string parameter and set its value to the # `NextPartNumberMarker` field value from the previous response. # # For more information on multipart uploads, see [Uploading Objects # Using Multipart Upload][2] in the *Amazon S3 User Guide*. # # **Directory buckets** - For directory buckets, you must make requests # for this API operation to the Zonal endpoint. These endpoints support # virtual-hosted-style requests in the format # `https://bucket_name.s3express-az_id.region.amazonaws.com/key-name `. # Path-style requests are not supported. For more information, see # [Regional and Zonal endpoints][3] in the *Amazon S3 User Guide*. # # # # Permissions # : * **General purpose bucket permissions** - For information about # permissions required to use the multipart upload API, see # [Multipart Upload and Permissions][4] in the *Amazon S3 User # Guide*. # # If the upload was created using server-side encryption with Key # Management Service (KMS) keys (SSE-KMS) or dual-layer server-side # encryption with Amazon Web Services KMS keys (DSSE-KMS), you must # have permission to the `kms:Decrypt` action for the `ListParts` # request to succeed. # # * **Directory bucket permissions** - To grant access to this API # operation on a directory bucket, we recommend that you use the [ # `CreateSession` ][5] API operation for session-based # authorization. Specifically, you grant the # `s3express:CreateSession` permission to the directory bucket in a # bucket policy or an IAM identity-based policy. Then, you make the # `CreateSession` API call on the bucket to obtain a session token. # With the session token in your request header, you can make API # requests to this operation. After the session token expires, you # make another `CreateSession` API call to generate a new session # token for use. Amazon Web Services CLI or SDKs create session and # refresh the session token automatically to avoid service # interruptions when a session expires. For more information about # authorization, see [ `CreateSession` ][5]. # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is ` # Bucket_name.s3express-az_id.region.amazonaws.com`. # # The following operations are related to `ListParts`: # # * [CreateMultipartUpload][1] # # * [UploadPart][6] # # * [CompleteMultipartUpload][7] # # * [AbortMultipartUpload][8] # # * [GetObjectAttributes][9] # # * [ListMultipartUploads][10] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html # # @option params [required, String] :bucket # The name of the bucket to which the parts are being uploaded. 
# # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [required, String] :key # Object key for which the multipart upload was initiated. # # @option params [Integer] :max_parts # Sets the maximum number of parts to return. # # @option params [Integer] :part_number_marker # Specifies the part after which listing should begin. Only parts with # higher part numbers will be listed. # # @option params [required, String] :upload_id # Upload ID identifying the multipart upload whose parts are being # listed. # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). 
# # @option params [String] :sse_customer_algorithm # The server-side encryption (SSE) algorithm used to encrypt the object. # This parameter is needed only when the object was created using a # checksum algorithm. For more information, see [Protecting data using # SSE-C keys][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # # @option params [String] :sse_customer_key # The server-side encryption (SSE) customer managed key. This parameter # is needed only when the object was created using a checksum algorithm. # For more information, see [Protecting data using SSE-C keys][1] in the # *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # # @option params [String] :sse_customer_key_md5 # The MD5 server-side encryption (SSE) customer managed key. This # parameter is needed only when the object was created using a checksum # algorithm. For more information, see [Protecting data using SSE-C # keys][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # # @return [Types::ListPartsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ListPartsOutput#abort_date #abort_date} => Time # * {Types::ListPartsOutput#abort_rule_id #abort_rule_id} => String # * {Types::ListPartsOutput#bucket #bucket} => String # * {Types::ListPartsOutput#key #key} => String # * {Types::ListPartsOutput#upload_id #upload_id} => String # * {Types::ListPartsOutput#part_number_marker #part_number_marker} => Integer # * {Types::ListPartsOutput#next_part_number_marker #next_part_number_marker} => Integer # * {Types::ListPartsOutput#max_parts #max_parts} => Integer # * {Types::ListPartsOutput#is_truncated #is_truncated} => Boolean # * {Types::ListPartsOutput#parts #parts} => Array<Types::Part> # * {Types::ListPartsOutput#initiator #initiator} => Types::Initiator # * {Types::ListPartsOutput#owner #owner} => Types::Owner # * {Types::ListPartsOutput#storage_class #storage_class} => String # * {Types::ListPartsOutput#request_charged #request_charged} => String # * {Types::ListPartsOutput#checksum_algorithm #checksum_algorithm} => String # # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. # # # @example Example: To list parts of a multipart upload. # # # The following example lists parts uploaded for a specific multipart upload. 
# # resp = client.list_parts({ # bucket: "examplebucket", # key: "bigobject", # upload_id: "example7YPBOJuoFiQ9cz4P3Pe6FIZwO4f7wN93uHsNBEw97pl5eNwzExg0LAT2dUN91cOmrEQHDsP3WA60CEg--", # }) # # resp.to_h outputs the following: # { # initiator: { # display_name: "owner-display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # }, # owner: { # display_name: "owner-display-name", # id: "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc", # }, # parts: [ # { # etag: "\"d8c2eafd90c266e19ab9dcacc479f8af\"", # last_modified: Time.parse("2016-12-16T00:11:42.000Z"), # part_number: 1, # size: 26246026, # }, # { # etag: "\"d8c2eafd90c266e19ab9dcacc479f8af\"", # last_modified: Time.parse("2016-12-16T00:15:01.000Z"), # part_number: 2, # size: 26246026, # }, # ], # storage_class: "STANDARD", # } # # @example Request syntax with placeholder values # # resp = client.list_parts({ # bucket: "BucketName", # required # key: "ObjectKey", # required # max_parts: 1, # part_number_marker: 1, # upload_id: "MultipartUploadId", # required # request_payer: "requester", # accepts requester # expected_bucket_owner: "AccountId", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # }) # # @example Response structure # # resp.abort_date #=> Time # resp.abort_rule_id #=> String # resp.bucket #=> String # resp.key #=> String # resp.upload_id #=> String # resp.part_number_marker #=> Integer # resp.next_part_number_marker #=> Integer # resp.max_parts #=> Integer # resp.is_truncated #=> Boolean # resp.parts #=> Array # resp.parts[0].part_number #=> Integer # resp.parts[0].last_modified #=> Time # resp.parts[0].etag #=> String # resp.parts[0].size #=> Integer # resp.parts[0].checksum_crc32 #=> String # resp.parts[0].checksum_crc32c #=> String # resp.parts[0].checksum_sha1 #=> String # resp.parts[0].checksum_sha256 #=> String # resp.initiator.id #=> String # resp.initiator.display_name #=> String # resp.owner.display_name #=> String # resp.owner.id #=> String # resp.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW", "EXPRESS_ONEZONE" # resp.request_charged #=> String, one of "requester" # resp.checksum_algorithm #=> String, one of "CRC32", "CRC32C", "SHA1", "SHA256" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts AWS API Documentation # # @overload list_parts(params = {}) # @param [Hash] params ({}) def list_parts(params = {}, options = {}) req = build_request(:list_parts, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Sets the accelerate configuration of an existing bucket. Amazon S3 # Transfer Acceleration is a bucket-level feature that enables you to # perform faster data transfers to Amazon S3. # # To use this operation, you must have permission to perform the # `s3:PutAccelerateConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][1] and [Managing Access # Permissions to Your Amazon S3 Resources][2]. # # The Transfer Acceleration state of a bucket can be set to one of the # following two values: # # * Enabled – Enables accelerated data transfers to the bucket. 
# # * Suspended – Disables accelerated data transfers to the bucket. # # The [GetBucketAccelerateConfiguration][3] action returns the transfer # acceleration state of a bucket. # # After setting the Transfer Acceleration state of a bucket to Enabled, # it might take up to thirty minutes before the data transfer rates to # the bucket increase. # # The name of the bucket used for Transfer Acceleration must be # DNS-compliant and must not contain periods ("."). # # For more information about transfer acceleration, see [Transfer # Acceleration][4]. # # The following operations are related to # `PutBucketAccelerateConfiguration`: # # * [GetBucketAccelerateConfiguration][3] # # * [CreateBucket][5] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html # # @option params [required, String] :bucket # The name of the bucket for which the accelerate configuration is set. # # @option params [required, Types::AccelerateConfiguration] :accelerate_configuration # Container for setting the transfer acceleration state. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # @example Request syntax with placeholder values # # resp = client.put_bucket_accelerate_configuration({ # bucket: "BucketName", # required # accelerate_configuration: { # required # status: "Enabled", # accepts Enabled, Suspended # }, # expected_bucket_owner: "AccountId", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration AWS API Documentation # # @overload put_bucket_accelerate_configuration(params = {}) # @param [Hash] params ({}) def put_bucket_accelerate_configuration(params = {}, options = {}) req = build_request(:put_bucket_accelerate_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Sets the permissions on an existing bucket using access control lists # (ACL). For more information, see [Using ACLs][1]. To set the ACL of a # bucket, you must have the `WRITE_ACP` permission. 
# # You can use one of the following two ways to set a bucket's # permissions: # # * Specify the ACL in the request body # # * Specify permissions using request headers # # You cannot specify access permission using both the body and the # request headers. # # # # Depending on your application needs, you may choose to set the ACL on # a bucket using either the request body or the headers. For example, if # you have an existing application that updates a bucket ACL using the # request body, then you can continue to use that approach. # # If your bucket uses the bucket owner enforced setting for S3 Object # Ownership, ACLs are disabled and no longer affect permissions. You # must use policies to grant access to your bucket and the objects in # it. Requests to set ACLs or update ACLs fail and return the # `AccessControlListNotSupported` error code. Requests to read ACLs are # still supported. For more information, see [Controlling object # ownership][2] in the *Amazon S3 User Guide*. # # Permissions # # : You can set access permissions by using one of the following # methods: # # * Specify a canned ACL with the `x-amz-acl` request header. Amazon # S3 supports a set of predefined ACLs, known as *canned ACLs*. Each # canned ACL has a predefined set of grantees and permissions. # Specify the canned ACL name as the value of `x-amz-acl`. If you # use this header, you cannot use other access control-specific # headers in your request. For more information, see [Canned # ACL][3]. # # * Specify access permissions explicitly with the `x-amz-grant-read`, # `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and # `x-amz-grant-full-control` headers. When using these headers, you # specify explicit access permissions and grantees (Amazon Web # Services accounts or Amazon S3 groups) who will receive the # permission. If you use these ACL-specific headers, you cannot use # the `x-amz-acl` header to set a canned ACL. These parameters map # to the set of permissions that Amazon S3 supports in an ACL. For # more information, see [Access Control List (ACL) Overview][4]. # # You specify each grantee as a type=value pair, where the type is # one of the following: # # * `id` – if the value specified is the canonical user ID of an # Amazon Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of # an Amazon Web Services account # # Using email addresses to specify a grantee is only supported in # the following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, # see [Regions and Endpoints][5] in the Amazon Web Services # General Reference. # # # # For example, the following `x-amz-grant-write` header grants # create, overwrite, and delete objects permission to LogDelivery # group predefined by Amazon S3 and two Amazon Web Services accounts # identified by their email addresses. # # `x-amz-grant-write: # uri="http://acs.amazonaws.com/groups/s3/LogDelivery", # id="111122223333", id="555566667777" ` # # You can use either a canned ACL or specify access permissions # explicitly. You cannot do both. 
#
# Grantee Values
#
# : You can specify the person (grantee) to whom you're assigning
#   access rights (using request elements) in the following ways:
#
#   * By the person's ID:
#
#     `<Grantee xmlns="http://www.w3.org/2001/XMLSchema-instance"
#     xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>`
#
#     DisplayName is optional and ignored in the request.
#
#   * By URI:
#
#     `<Grantee xmlns="http://www.w3.org/2001/XMLSchema-instance"
#     xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>`
#
#   * By Email address:
#
#     `<Grantee xmlns="http://www.w3.org/2001/XMLSchema-instance"
#     xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>`
#
#     The grantee is resolved to the CanonicalUser and, in a response to
#     a GET Object acl request, appears as the CanonicalUser.
#
#     Using email addresses to specify a grantee is only supported in
#     the following Amazon Web Services Regions:
#
#     * US East (N. Virginia)
#
#     * US West (N. California)
#
#     * US West (Oregon)
#
#     * Asia Pacific (Singapore)
#
#     * Asia Pacific (Sydney)
#
#     * Asia Pacific (Tokyo)
#
#     * Europe (Ireland)
#
#     * South America (São Paulo)
#
#     For a list of all the Amazon S3 supported Regions and endpoints,
#     see [Regions and Endpoints][5] in the Amazon Web Services General
#     Reference.
#
# The following operations are related to `PutBucketAcl`:
#
# * [CreateBucket][6]
#
# * [DeleteBucket][7]
#
# * [GetObjectAcl][8]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
# [5]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
# [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
# [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
#
# @option params [String] :acl
#   The canned ACL to apply to the bucket.
#
# @option params [Types::AccessControlPolicy] :access_control_policy
#   Contains the elements that set the ACL permissions for an object per
#   grantee.
#
# @option params [required, String] :bucket
#   The bucket to which to apply the ACL.
#
# @option params [String] :content_md5
#   The base64-encoded 128-bit MD5 digest of the data. This header must be
#   used as a message integrity check to verify that the request body was
#   not corrupted in transit. For more information, go to [RFC 1864.][1]
#
#   For requests made using the Amazon Web Services Command Line Interface
#   (CLI) or Amazon Web Services SDKs, this field is calculated
#   automatically.
#
#   [1]: http://www.ietf.org/rfc/rfc1864.txt
#
# @option params [String] :checksum_algorithm
#   Indicates the algorithm used to create the checksum for the object
#   when you use the SDK. This header will not provide any additional
#   functionality if you don't use the SDK. When you send this header,
#   there must be a corresponding `x-amz-checksum` or `x-amz-trailer`
#   header sent. Otherwise, Amazon S3 fails the request with the HTTP
#   status code `400 Bad Request`. For more information, see [Checking
#   object integrity][1] in the *Amazon S3 User Guide*.
#
#   If you provide an individual checksum, Amazon S3 ignores any provided
#   `ChecksumAlgorithm` parameter.
#
#   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
#
# @option params [String] :grant_full_control
#   Allows grantee the read, write, read ACP, and write ACP permissions on
#   the bucket.
#
# @option params [String] :grant_read
#   Allows grantee to list the objects in the bucket.
#
# @option params [String] :grant_read_acp
#   Allows grantee to read the bucket ACL.
#
# @option params [String] :grant_write
#   Allows grantee to create new objects in the bucket.
#
#   For the bucket and object owners of existing objects, also allows
#   deletions and overwrites of those objects.
#
# @option params [String] :grant_write_acp
#   Allows grantee to write the ACL for the applicable bucket.
#
# @option params [String] :expected_bucket_owner
#   The account ID of the expected bucket owner. If the account ID that
#   you provide does not match the actual owner of the bucket, the request
#   fails with the HTTP status code `403 Forbidden` (access denied).
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
#
# @example Example: Put bucket acl
#
#   # The following example replaces existing ACL on a bucket. The ACL grants the bucket owner (specified using the owner ID)
#   # full control and grants write permission to the LogDelivery group. Because this is a replace operation, you must specify
#   # all the grants in your request. To incrementally add or remove ACL grants, you might use the console.
#
#   resp = client.put_bucket_acl({
#     bucket: "examplebucket",
#     grant_full_control: "id=examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484",
#     grant_write: "uri=http://acs.amazonaws.com/groups/s3/LogDelivery",
#   })
#
# @example Request syntax with placeholder values
#
#   resp = client.put_bucket_acl({
#     acl: "private", # accepts private, public-read, public-read-write, authenticated-read
#     access_control_policy: {
#       grants: [
#         {
#           grantee: {
#             display_name: "DisplayName",
#             email_address: "EmailAddress",
#             id: "ID",
#             type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group
#             uri: "URI",
#           },
#           permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP
#         },
#       ],
#       owner: {
#         display_name: "DisplayName",
#         id: "ID",
#       },
#     },
#     bucket: "BucketName", # required
#     content_md5: "ContentMD5",
#     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
#     grant_full_control: "GrantFullControl",
#     grant_read: "GrantRead",
#     grant_read_acp: "GrantReadACP",
#     grant_write: "GrantWrite",
#     grant_write_acp: "GrantWriteACP",
#     expected_bucket_owner: "AccountId",
#   })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl AWS API Documentation
#
# @overload put_bucket_acl(params = {})
# @param [Hash] params ({})
def put_bucket_acl(params = {}, options = {})
  req = build_request(:put_bucket_acl, params)
  req.send_request(options)
end

# This operation is not supported by directory buckets.
#
#
#
# Sets an analytics configuration for the bucket (specified by the
# analytics configuration ID). You can have up to 1,000 analytics
# configurations per bucket.
#
# You can choose to have storage class analysis export analysis reports
# sent to a comma-separated values (CSV) flat file. See the `DataExport`
# request element. Reports are updated daily and are based on the object
# filters that you configure. When selecting data export, you specify a
# destination bucket and an optional destination prefix where the file
# is written. You can export the data to a destination bucket in a
# different account. However, the destination bucket must be in the same
# Region as the bucket that you are making the PUT analytics
# configuration to. For more information, see [Amazon S3 Analytics –
# Storage Class Analysis][1].
# # You must create a bucket policy on the destination bucket where the # exported file is written to grant permissions to Amazon S3 to write # objects to the bucket. For an example policy, see [Granting # Permissions for Amazon S3 Inventory and Storage Class Analysis][2]. # # To use this operation, you must have permissions to perform the # `s3:PutAnalyticsConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][3] and [Managing Access # Permissions to Your Amazon S3 Resources][4]. # # `PutBucketAnalyticsConfiguration` has the following special errors: # # * * *HTTP Error: HTTP 400 Bad Request* # # * *Code: InvalidArgument* # # * *Cause: Invalid argument.* # # * * *HTTP Error: HTTP 400 Bad Request* # # * *Code: TooManyConfigurations* # # * *Cause: You are attempting to create a new configuration but have # already reached the 1,000-configuration limit.* # # * * *HTTP Error: HTTP 403 Forbidden* # # * *Code: AccessDenied* # # * *Cause: You are not the owner of the specified bucket, or you do # not have the s3:PutAnalyticsConfiguration bucket permission to set # the configuration on the bucket.* # # The following operations are related to # `PutBucketAnalyticsConfiguration`: # # * [GetBucketAnalyticsConfiguration][5] # # * [DeleteBucketAnalyticsConfiguration][6] # # * [ListBucketAnalyticsConfigurations][7] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html # # @option params [required, String] :bucket # The name of the bucket to which an analytics configuration is stored. # # @option params [required, String] :id # The ID that identifies the analytics configuration. # # @option params [required, Types::AnalyticsConfiguration] :analytics_configuration # The configuration and any analyses for the analytics filter. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
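#
# The sketch below is an illustrative, minimal request, not an example
# from the official documentation: the bucket names, the `report-1` ID,
# and the `analytics/` prefix are placeholder values, and the
# destination bucket is given as an ARN.
#
# @example Example: Minimal storage class analysis export (illustrative sketch)
#
#   # Export a daily CSV storage class analysis report for the whole
#   # bucket to a destination bucket (placeholder names throughout).
#   resp = client.put_bucket_analytics_configuration({
#     bucket: "amzn-s3-demo-bucket",
#     id: "report-1",
#     analytics_configuration: {
#       id: "report-1",
#       storage_class_analysis: {
#         data_export: {
#           output_schema_version: "V_1",
#           destination: {
#             s3_bucket_destination: {
#               format: "CSV",
#               bucket: "arn:aws:s3:::amzn-s3-demo-destination-bucket",
#               prefix: "analytics/",
#             },
#           },
#         },
#       },
#     },
#   })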
# # @example Request syntax with placeholder values # # resp = client.put_bucket_analytics_configuration({ # bucket: "BucketName", # required # id: "AnalyticsId", # required # analytics_configuration: { # required # id: "AnalyticsId", # required # filter: { # prefix: "Prefix", # tag: { # key: "ObjectKey", # required # value: "Value", # required # }, # and: { # prefix: "Prefix", # tags: [ # { # key: "ObjectKey", # required # value: "Value", # required # }, # ], # }, # }, # storage_class_analysis: { # required # data_export: { # output_schema_version: "V_1", # required, accepts V_1 # destination: { # required # s3_bucket_destination: { # required # format: "CSV", # required, accepts CSV # bucket_account_id: "AccountId", # bucket: "BucketName", # required # prefix: "Prefix", # }, # }, # }, # }, # }, # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration AWS API Documentation # # @overload put_bucket_analytics_configuration(params = {}) # @param [Hash] params ({}) def put_bucket_analytics_configuration(params = {}, options = {}) req = build_request(:put_bucket_analytics_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Sets the `cors` configuration for your bucket. If the configuration # exists, Amazon S3 replaces it. # # To use this operation, you must be allowed to perform the # `s3:PutBucketCORS` action. By default, the bucket owner has this # permission and can grant it to others. # # You set this configuration on a bucket so that the bucket can service # cross-origin requests. For example, you might want to enable a request # whose origin is `http://www.example.com` to access your Amazon S3 # bucket at `my.example.bucket.com` by using the browser's # `XMLHttpRequest` capability. # # To enable cross-origin resource sharing (CORS) on a bucket, you add # the `cors` subresource to the bucket. The `cors` subresource is an XML # document in which you configure rules that identify origins and the # HTTP methods that can be executed on your bucket. The document is # limited to 64 KB in size. # # When Amazon S3 receives a cross-origin request (or a pre-flight # OPTIONS request) against a bucket, it evaluates the `cors` # configuration on the bucket and uses the first `CORSRule` rule that # matches the incoming browser request to enable a cross-origin request. # For a rule to match, the following conditions must be met: # # * The request's `Origin` header must match `AllowedOrigin` elements. # # * The request method (for example, GET, PUT, HEAD, and so on) or the # `Access-Control-Request-Method` header in case of a pre-flight # `OPTIONS` request must be one of the `AllowedMethod` elements. # # * Every header specified in the `Access-Control-Request-Headers` # request header of a pre-flight request must match an `AllowedHeader` # element. # # For more information about CORS, go to [Enabling Cross-Origin Resource # Sharing][1] in the *Amazon S3 User Guide*. 
#
# The following operations are related to `PutBucketCors`:
#
# * [GetBucketCors][2]
#
# * [DeleteBucketCors][3]
#
# * [RESTOPTIONSobject][4]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html
#
# @option params [required, String] :bucket
#   Specifies the bucket impacted by the `cors` configuration.
#
# @option params [required, Types::CORSConfiguration] :cors_configuration
#   Describes the cross-origin access configuration for objects in an
#   Amazon S3 bucket. For more information, see [Enabling Cross-Origin
#   Resource Sharing][1] in the *Amazon S3 User Guide*.
#
#   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
#
# @option params [String] :content_md5
#   The base64-encoded 128-bit MD5 digest of the data. This header must be
#   used as a message integrity check to verify that the request body was
#   not corrupted in transit. For more information, go to [RFC 1864.][1]
#
#   For requests made using the Amazon Web Services Command Line Interface
#   (CLI) or Amazon Web Services SDKs, this field is calculated
#   automatically.
#
#   [1]: http://www.ietf.org/rfc/rfc1864.txt
#
# @option params [String] :checksum_algorithm
#   Indicates the algorithm used to create the checksum for the object
#   when you use the SDK. This header will not provide any additional
#   functionality if you don't use the SDK. When you send this header,
#   there must be a corresponding `x-amz-checksum` or `x-amz-trailer`
#   header sent. Otherwise, Amazon S3 fails the request with the HTTP
#   status code `400 Bad Request`. For more information, see [Checking
#   object integrity][1] in the *Amazon S3 User Guide*.
#
#   If you provide an individual checksum, Amazon S3 ignores any provided
#   `ChecksumAlgorithm` parameter.
#
#   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
#
# @option params [String] :expected_bucket_owner
#   The account ID of the expected bucket owner. If the account ID that
#   you provide does not match the actual owner of the bucket, the request
#   fails with the HTTP status code `403 Forbidden` (access denied).
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
#
# @example Example: To set cors configuration on a bucket.
#
#   # The following example enables PUT, POST, and DELETE requests from www.example.com, and enables GET requests from any
#   # domain.
# # resp = client.put_bucket_cors({ # bucket: "", # cors_configuration: { # cors_rules: [ # { # allowed_headers: [ # "*", # ], # allowed_methods: [ # "PUT", # "POST", # "DELETE", # ], # allowed_origins: [ # "http://www.example.com", # ], # expose_headers: [ # "x-amz-server-side-encryption", # ], # max_age_seconds: 3000, # }, # { # allowed_headers: [ # "Authorization", # ], # allowed_methods: [ # "GET", # ], # allowed_origins: [ # "*", # ], # max_age_seconds: 3000, # }, # ], # }, # content_md5: "", # }) # # @example Request syntax with placeholder values # # resp = client.put_bucket_cors({ # bucket: "BucketName", # required # cors_configuration: { # required # cors_rules: [ # required # { # id: "ID", # allowed_headers: ["AllowedHeader"], # allowed_methods: ["AllowedMethod"], # required # allowed_origins: ["AllowedOrigin"], # required # expose_headers: ["ExposeHeader"], # max_age_seconds: 1, # }, # ], # }, # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors AWS API Documentation # # @overload put_bucket_cors(params = {}) # @param [Hash] params ({}) def put_bucket_cors(params = {}, options = {}) req = build_request(:put_bucket_cors, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # This action uses the `encryption` subresource to configure default # encryption and Amazon S3 Bucket Keys for an existing bucket. # # By default, all buckets have a default encryption configuration that # uses server-side encryption with Amazon S3 managed keys (SSE-S3). You # can optionally configure default encryption for a bucket by using # server-side encryption with Key Management Service (KMS) keys # (SSE-KMS) or dual-layer server-side encryption with Amazon Web # Services KMS keys (DSSE-KMS). If you specify default encryption by # using SSE-KMS, you can also configure [Amazon S3 Bucket Keys][1]. If # you use PutBucketEncryption to set your [default bucket encryption][2] # to SSE-KMS, you should verify that your KMS key ID is correct. Amazon # S3 does not validate the KMS key ID provided in PutBucketEncryption # requests. # # This action requires Amazon Web Services Signature Version 4. For more # information, see [ Authenticating Requests (Amazon Web Services # Signature Version 4)][3]. # # To use this operation, you must have permission to perform the # `s3:PutEncryptionConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][4] and [Managing Access # Permissions to Your Amazon S3 Resources][5] in the *Amazon S3 User # Guide*. 
# # The following operations are related to `PutBucketEncryption`: # # * [GetBucketEncryption][6] # # * [DeleteBucketEncryption][7] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html # # @option params [required, String] :bucket # Specifies default encryption for a bucket using server-side encryption # with different key options. By default, all buckets have a default # encryption configuration that uses server-side encryption with Amazon # S3 managed keys (SSE-S3). You can optionally configure default # encryption for a bucket by using server-side encryption with an Amazon # Web Services KMS key (SSE-KMS) or a customer-provided key (SSE-C). For # information about the bucket default encryption feature, see [Amazon # S3 Bucket Default Encryption][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html # # @option params [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the server-side encryption # configuration. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [required, Types::ServerSideEncryptionConfiguration] :server_side_encryption_configuration # Specifies the default server-side-encryption configuration. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
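#
# The sketch below is an illustrative default-encryption request, not
# an example from the official documentation: the bucket name and the
# KMS key ID are placeholder values.
#
# @example Example: Default SSE-KMS encryption with an S3 Bucket Key (illustrative sketch)
#
#   # Make SSE-KMS the bucket default and enable the Bucket Key to
#   # reduce KMS request traffic (placeholder bucket and key ID).
#   resp = client.put_bucket_encryption({
#     bucket: "amzn-s3-demo-bucket",
#     server_side_encryption_configuration: {
#       rules: [
#         {
#           apply_server_side_encryption_by_default: {
#             sse_algorithm: "aws:kms",
#             kms_master_key_id: "1234abcd-12ab-34cd-56ef-1234567890ab",
#           },
#           bucket_key_enabled: true,
#         },
#       ],
#     },
#   })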
#
# @example Request syntax with placeholder values
#
#   resp = client.put_bucket_encryption({
#     bucket: "BucketName", # required
#     content_md5: "ContentMD5",
#     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
#     server_side_encryption_configuration: { # required
#       rules: [ # required
#         {
#           apply_server_side_encryption_by_default: {
#             sse_algorithm: "AES256", # required, accepts AES256, aws:kms, aws:kms:dsse
#             kms_master_key_id: "SSEKMSKeyId",
#           },
#           bucket_key_enabled: false,
#         },
#       ],
#     },
#     expected_bucket_owner: "AccountId",
#   })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption AWS API Documentation
#
# @overload put_bucket_encryption(params = {})
# @param [Hash] params ({})
def put_bucket_encryption(params = {}, options = {})
  req = build_request(:put_bucket_encryption, params)
  req.send_request(options)
end

# This operation is not supported by directory buckets.
#
#
#
# Puts an S3 Intelligent-Tiering configuration to the specified bucket.
# You can have up to 1,000 S3 Intelligent-Tiering configurations per
# bucket.
#
# The S3 Intelligent-Tiering storage class is designed to optimize
# storage costs by automatically moving data to the most cost-effective
# storage access tier, without performance impact or operational
# overhead. S3 Intelligent-Tiering delivers automatic cost savings in
# three low latency and high throughput access tiers. To get the lowest
# storage cost on data that can be accessed in minutes to hours, you can
# choose to activate additional archiving capabilities.
#
# The S3 Intelligent-Tiering storage class is the ideal storage class
# for data with unknown, changing, or unpredictable access patterns,
# independent of object size or retention period. If the size of an
# object is less than 128 KB, it is not monitored and not eligible for
# auto-tiering. Smaller objects can be stored, but they are always
# charged at the Frequent Access tier rates in the S3
# Intelligent-Tiering storage class.
#
# For more information, see [Storage class for automatically optimizing
# frequently and infrequently accessed objects][1].
#
# Operations related to `PutBucketIntelligentTieringConfiguration`
# include:
#
# * [DeleteBucketIntelligentTieringConfiguration][2]
#
# * [GetBucketIntelligentTieringConfiguration][3]
#
# * [ListBucketIntelligentTieringConfigurations][4]
#
# You only need S3 Intelligent-Tiering enabled on a bucket if you want
# to automatically move objects stored in the S3 Intelligent-Tiering
# storage class to the Archive Access or Deep Archive Access tier.
#
#
#
# `PutBucketIntelligentTieringConfiguration` has the following special
# errors:
#
# HTTP 400 Bad Request Error
#
# : *Code:* InvalidArgument
#
#   *Cause:* Invalid Argument
#
# HTTP 400 Bad Request Error
#
# : *Code:* TooManyConfigurations
#
#   *Cause:* You are attempting to create a new configuration but have
#   already reached the 1,000-configuration limit.
#
# HTTP 403 Forbidden Error
#
# : *Cause:* You are not the owner of the specified bucket, or you do
#   not have the `s3:PutIntelligentTieringConfiguration` bucket
#   permission to set the configuration on the bucket.
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html # # @option params [required, String] :bucket # The name of the Amazon S3 bucket whose configuration you want to # modify or retrieve. # # @option params [required, String] :id # The ID used to identify the S3 Intelligent-Tiering configuration. # # @option params [required, Types::IntelligentTieringConfiguration] :intelligent_tiering_configuration # Container for S3 Intelligent-Tiering configuration. # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # @example Request syntax with placeholder values # # resp = client.put_bucket_intelligent_tiering_configuration({ # bucket: "BucketName", # required # id: "IntelligentTieringId", # required # intelligent_tiering_configuration: { # required # id: "IntelligentTieringId", # required # filter: { # prefix: "Prefix", # tag: { # key: "ObjectKey", # required # value: "Value", # required # }, # and: { # prefix: "Prefix", # tags: [ # { # key: "ObjectKey", # required # value: "Value", # required # }, # ], # }, # }, # status: "Enabled", # required, accepts Enabled, Disabled # tierings: [ # required # { # days: 1, # required # access_tier: "ARCHIVE_ACCESS", # required, accepts ARCHIVE_ACCESS, DEEP_ARCHIVE_ACCESS # }, # ], # }, # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration AWS API Documentation # # @overload put_bucket_intelligent_tiering_configuration(params = {}) # @param [Hash] params ({}) def put_bucket_intelligent_tiering_configuration(params = {}, options = {}) req = build_request(:put_bucket_intelligent_tiering_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # This implementation of the `PUT` action adds an inventory # configuration (identified by the inventory ID) to the bucket. You can # have up to 1,000 inventory configurations per bucket. # # Amazon S3 inventory generates inventories of the objects in the bucket # on a daily or weekly basis, and the results are published to a flat # file. The bucket that is inventoried is called the *source* bucket, # and the bucket where the inventory flat file is stored is called the # *destination* bucket. The *destination* bucket must be in the same # Amazon Web Services Region as the *source* bucket. # # When you configure an inventory for a *source* bucket, you specify the # *destination* bucket where you want the inventory to be stored, and # whether to generate the inventory daily or weekly. You can also # configure what object metadata to include and whether to inventory all # object versions or only current versions. For more information, see # [Amazon S3 Inventory][1] in the Amazon S3 User Guide. # # You must create a bucket policy on the *destination* bucket to grant # permissions to Amazon S3 to write objects to the bucket in the defined # location. For an example policy, see [ Granting Permissions for Amazon # S3 Inventory and Storage Class Analysis][2]. # # Permissions # # : To use this operation, you must have permission to perform the # `s3:PutInventoryConfiguration` action. 
The bucket owner has this # permission by default and can grant this permission to others. # # The `s3:PutInventoryConfiguration` permission allows a user to # create an [S3 Inventory][3] report that includes all object metadata # fields available and to specify the destination bucket to store the # inventory. A user with read access to objects in the destination # bucket can also access all object metadata fields that are available # in the inventory report. # # To restrict access to an inventory report, see [Restricting access # to an Amazon S3 Inventory report][4] in the *Amazon S3 User Guide*. # For more information about the metadata fields available in S3 # Inventory, see [Amazon S3 Inventory lists][5] in the *Amazon S3 User # Guide*. For more information about permissions, see [Permissions # related to bucket subresource operations][6] and [Identity and # access management in Amazon S3][7] in the *Amazon S3 User Guide*. # # `PutBucketInventoryConfiguration` has the following special errors: # # HTTP 400 Bad Request Error # # : *Code:* InvalidArgument # # *Cause:* Invalid Argument # # HTTP 400 Bad Request Error # # : *Code:* TooManyConfigurations # # *Cause:* You are attempting to create a new configuration but have # already reached the 1,000-configuration limit. # # HTTP 403 Forbidden Error # # : *Cause:* You are not the owner of the specified bucket, or you do # not have the `s3:PutInventoryConfiguration` bucket permission to set # the configuration on the bucket. # # The following operations are related to # `PutBucketInventoryConfiguration`: # # * [GetBucketInventoryConfiguration][8] # # * [DeleteBucketInventoryConfiguration][9] # # * [ListBucketInventoryConfigurations][10] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10 # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents # [6]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [7]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html # # @option params [required, String] :bucket # The name of the bucket where the inventory configuration will be # stored. # # @option params [required, String] :id # The ID used to identify the inventory configuration. # # @option params [required, Types::InventoryConfiguration] :inventory_configuration # Specifies the inventory configuration. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
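#
# The sketch below is an illustrative inventory request, not an example
# from the official documentation: the bucket names and the
# `daily-inventory` ID are placeholder values, and the destination
# bucket is given as an ARN.
#
# @example Example: Daily CSV inventory of current object versions (illustrative sketch)
#
#   # Publish a daily CSV inventory of current versions from a source
#   # bucket to a destination bucket (placeholder names throughout).
#   resp = client.put_bucket_inventory_configuration({
#     bucket: "amzn-s3-demo-source-bucket",
#     id: "daily-inventory",
#     inventory_configuration: {
#       id: "daily-inventory",
#       is_enabled: true,
#       included_object_versions: "Current",
#       destination: {
#         s3_bucket_destination: {
#           bucket: "arn:aws:s3:::amzn-s3-demo-destination-bucket",
#           format: "CSV",
#         },
#       },
#       schedule: {
#         frequency: "Daily",
#       },
#     },
#   })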
# # @example Request syntax with placeholder values # # resp = client.put_bucket_inventory_configuration({ # bucket: "BucketName", # required # id: "InventoryId", # required # inventory_configuration: { # required # destination: { # required # s3_bucket_destination: { # required # account_id: "AccountId", # bucket: "BucketName", # required # format: "CSV", # required, accepts CSV, ORC, Parquet # prefix: "Prefix", # encryption: { # sses3: { # }, # ssekms: { # key_id: "SSEKMSKeyId", # required # }, # }, # }, # }, # is_enabled: false, # required # filter: { # prefix: "Prefix", # required # }, # id: "InventoryId", # required # included_object_versions: "All", # required, accepts All, Current # optional_fields: ["Size"], # accepts Size, LastModifiedDate, StorageClass, ETag, IsMultipartUploaded, ReplicationStatus, EncryptionStatus, ObjectLockRetainUntilDate, ObjectLockMode, ObjectLockLegalHoldStatus, IntelligentTieringAccessTier, BucketKeyStatus, ChecksumAlgorithm, ObjectAccessControlList, ObjectOwner # schedule: { # required # frequency: "Daily", # required, accepts Daily, Weekly # }, # }, # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration AWS API Documentation # # @overload put_bucket_inventory_configuration(params = {}) # @param [Hash] params ({}) def put_bucket_inventory_configuration(params = {}, options = {}) req = build_request(:put_bucket_inventory_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # For an updated version of this API, see # [PutBucketLifecycleConfiguration][1]. This version has been # deprecated. Existing lifecycle configurations will work. For new # lifecycle configurations, use the updated API. # # Creates a new lifecycle configuration for the bucket or replaces an # existing lifecycle configuration. For information about lifecycle # configuration, see [Object Lifecycle Management][2] in the *Amazon S3 # User Guide*. # # By default, all Amazon S3 resources, including buckets, objects, and # related subresources (for example, lifecycle configuration and website # configuration) are private. Only the resource owner, the Amazon Web # Services account that created the resource, can access it. The # resource owner can optionally grant access permissions to others by # writing an access policy. For this operation, users must get the # `s3:PutLifecycleConfiguration` permission. # # You can also explicitly deny permissions. Explicit denial also # supersedes any other permissions. If you want to prevent users or # accounts from removing or deleting objects from your bucket, you must # deny them permissions for the following actions: # # * `s3:DeleteObject` # # * `s3:DeleteObjectVersion` # # * `s3:PutLifecycleConfiguration` # # For more information about permissions, see [Managing Access # Permissions to your Amazon S3 Resources][3] in the *Amazon S3 User # Guide*. # # For more examples of transitioning objects to storage classes such as # STANDARD\_IA or ONEZONE\_IA, see [Examples of Lifecycle # Configuration][4]. # # The following operations are related to `PutBucketLifecycle`: # # * [GetBucketLifecycle][5](Deprecated) # # * [GetBucketLifecycleConfiguration][6] # # * [RestoreObject][7] # # * By default, a resource owner—in this case, a bucket owner, which is # the Amazon Web Services account that created the bucket—can perform # any of the operations. A resource owner can also grant others # permission to perform the operation. 
For more information, see the # following topics in the Amazon S3 User Guide: # # * [Specifying Permissions in a Policy][8] # # * [Managing Access Permissions to your Amazon S3 Resources][3] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html # # @option params [required, String] :bucket # # @option params [String] :content_md5 # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [Types::LifecycleConfiguration] :lifecycle_configuration # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
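#
# The sketch below is an illustrative request for this deprecated API,
# not an example from the official documentation: the bucket name, rule
# ID, and prefix are placeholder values. For new configurations, prefer
# {#put_bucket_lifecycle_configuration}.
#
# @example Example: Expire objects under a prefix (illustrative sketch)
#
#   # Expire objects under the logs/ prefix 30 days after creation.
#   resp = client.put_bucket_lifecycle({
#     bucket: "amzn-s3-demo-bucket",
#     lifecycle_configuration: {
#       rules: [
#         {
#           id: "expire-logs",
#           prefix: "logs/",
#           status: "Enabled",
#           expiration: {
#             days: 30,
#           },
#         },
#       ],
#     },
#   })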
# # @example Request syntax with placeholder values # # resp = client.put_bucket_lifecycle({ # bucket: "BucketName", # required # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # lifecycle_configuration: { # rules: [ # required # { # expiration: { # date: Time.now, # days: 1, # expired_object_delete_marker: false, # }, # id: "ID", # prefix: "Prefix", # required # status: "Enabled", # required, accepts Enabled, Disabled # transition: { # date: Time.now, # days: 1, # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR # }, # noncurrent_version_transition: { # noncurrent_days: 1, # storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR # newer_noncurrent_versions: 1, # }, # noncurrent_version_expiration: { # noncurrent_days: 1, # newer_noncurrent_versions: 1, # }, # abort_incomplete_multipart_upload: { # days_after_initiation: 1, # }, # }, # ], # }, # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle AWS API Documentation # # @overload put_bucket_lifecycle(params = {}) # @param [Hash] params ({}) def put_bucket_lifecycle(params = {}, options = {}) req = build_request(:put_bucket_lifecycle, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Creates a new lifecycle configuration for the bucket or replaces an # existing lifecycle configuration. Keep in mind that this will # overwrite an existing lifecycle configuration, so if you want to # retain any configuration details, they must be included in the new # lifecycle configuration. For information about lifecycle # configuration, see [Managing your storage lifecycle][1]. # # Bucket lifecycle configuration now supports specifying a lifecycle # rule using an object key name prefix, one or more object tags, or a # combination of both. Accordingly, this section describes the latest # API. The previous version of the API supported filtering based only on # an object key name prefix, which is supported for backward # compatibility. For the related API description, see # [PutBucketLifecycle][2]. # # # # Rules # # : You specify the lifecycle configuration in your request body. The # lifecycle configuration is specified as XML consisting of one or # more rules. An Amazon S3 Lifecycle configuration can have up to # 1,000 rules. This limit is not adjustable. Each rule consists of the # following: # # * A filter identifying a subset of objects to which the rule # applies. The filter can be based on a key name prefix, object # tags, or a combination of both. # # * A status indicating whether the rule is in effect. # # * One or more lifecycle transition and expiration actions that you # want Amazon S3 to perform on the objects identified by the filter. # If the state of your bucket is versioning-enabled or # versioning-suspended, you can have many versions of the same # object (one current version and zero or more noncurrent versions). # Amazon S3 provides predefined actions that you can specify for # current and noncurrent object versions. # # For more information, see [Object Lifecycle Management][3] and # [Lifecycle Configuration Elements][4]. # # Permissions # # : By default, all Amazon S3 resources are private, including buckets, # objects, and related subresources (for example, lifecycle # configuration and website configuration). 
Only the resource owner # (that is, the Amazon Web Services account that created it) can # access the resource. The resource owner can optionally grant access # permissions to others by writing an access policy. For this # operation, a user must get the `s3:PutLifecycleConfiguration` # permission. # # You can also explicitly deny permissions. An explicit deny also # supersedes any other permissions. If you want to block users or # accounts from removing or deleting objects from your bucket, you # must deny them permissions for the following actions: # # * `s3:DeleteObject` # # * `s3:DeleteObjectVersion` # # * `s3:PutLifecycleConfiguration` # # For more information about permissions, see [Managing Access # Permissions to Your Amazon S3 Resources][5]. # # The following operations are related to # `PutBucketLifecycleConfiguration`: # # * [Examples of Lifecycle Configuration][6] # # * [GetBucketLifecycleConfiguration][7] # # * [DeleteBucketLifecycle][8] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html # # @option params [required, String] :bucket # The name of the bucket for which to set the configuration. # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [Types::BucketLifecycleConfiguration] :lifecycle_configuration # Container for lifecycle rules. You can add as many as 1,000 rules. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # # @example Example: Put bucket lifecycle # # # The following example replaces existing lifecycle configuration, if any, on the specified bucket. 
#
#   resp = client.put_bucket_lifecycle_configuration({
#     bucket: "examplebucket",
#     lifecycle_configuration: {
#       rules: [
#         {
#           expiration: {
#             days: 3650,
#           },
#           filter: {
#             prefix: "documents/",
#           },
#           id: "TestOnly",
#           status: "Enabled",
#           transitions: [
#             {
#               days: 365,
#               storage_class: "GLACIER",
#             },
#           ],
#         },
#       ],
#     },
#   })
#
# @example Request syntax with placeholder values
#
#   resp = client.put_bucket_lifecycle_configuration({
#     bucket: "BucketName", # required
#     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
#     lifecycle_configuration: {
#       rules: [ # required
#         {
#           expiration: {
#             date: Time.now,
#             days: 1,
#             expired_object_delete_marker: false,
#           },
#           id: "ID",
#           prefix: "Prefix",
#           filter: {
#             prefix: "Prefix",
#             tag: {
#               key: "ObjectKey", # required
#               value: "Value", # required
#             },
#             object_size_greater_than: 1,
#             object_size_less_than: 1,
#             and: {
#               prefix: "Prefix",
#               tags: [
#                 {
#                   key: "ObjectKey", # required
#                   value: "Value", # required
#                 },
#               ],
#               object_size_greater_than: 1,
#               object_size_less_than: 1,
#             },
#           },
#           status: "Enabled", # required, accepts Enabled, Disabled
#           transitions: [
#             {
#               date: Time.now,
#               days: 1,
#               storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
#             },
#           ],
#           noncurrent_version_transitions: [
#             {
#               noncurrent_days: 1,
#               storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE, GLACIER_IR
#               newer_noncurrent_versions: 1,
#             },
#           ],
#           noncurrent_version_expiration: {
#             noncurrent_days: 1,
#             newer_noncurrent_versions: 1,
#           },
#           abort_incomplete_multipart_upload: {
#             days_after_initiation: 1,
#           },
#         },
#       ],
#     },
#     expected_bucket_owner: "AccountId",
#   })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration AWS API Documentation
#
# @overload put_bucket_lifecycle_configuration(params = {})
# @param [Hash] params ({})
def put_bucket_lifecycle_configuration(params = {}, options = {})
  req = build_request(:put_bucket_lifecycle_configuration, params)
  req.send_request(options)
end

# This operation is not supported by directory buckets.
#
#
#
# Sets the logging parameters for a bucket and specifies permissions for
# who can view and modify the logging parameters. All logs are saved to
# buckets in the same Amazon Web Services Region as the source bucket.
# To set the logging status of a bucket, you must be the bucket owner.
#
# The bucket owner is automatically granted FULL\_CONTROL to all logs.
# You use the `Grantee` request element to grant access to other people.
# The `Permissions` request element specifies the kind of access the
# grantee has to the logs.
#
# If the target bucket for log delivery uses the bucket owner enforced
# setting for S3 Object Ownership, you can't use the `Grantee` request
# element to grant access to others. Permissions can only be granted
# using policies. For more information, see [Permissions for server
# access log delivery][1] in the *Amazon S3 User Guide*.
#
# Grantee Values
#
# : You can specify the person (grantee) to whom you're assigning
#   access rights (by using request elements) in the following ways:
#
#   * By the person's ID:
#
#     `<Grantee xmlns="http://www.w3.org/2001/XMLSchema-instance"
#     xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>`
#
#     `DisplayName` is optional and ignored in the request.
#
#   * By Email address:
#
#     `<Grantee xmlns="http://www.w3.org/2001/XMLSchema-instance"
#     xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>`
#
#     The grantee is resolved to the `CanonicalUser` and, in a response
#     to a `GETObjectAcl` request, appears as the CanonicalUser.
#
#   * By URI:
#
#     `<Grantee xmlns="http://www.w3.org/2001/XMLSchema-instance"
#     xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>`
#
# To enable logging, you use `LoggingEnabled` and its children request
# elements. To disable logging, you use an empty `BucketLoggingStatus`
# request element:
#
# `<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />`
#
# For more information about server access logging, see [Server Access
# Logging][2] in the *Amazon S3 User Guide*.
#
# For more information about creating a bucket, see [CreateBucket][3].
# For more information about returning the logging status of a bucket,
# see [GetBucketLogging][4].
#
# The following operations are related to `PutBucketLogging`:
#
# * [PutObject][5]
#
# * [DeleteBucket][6]
#
# * [CreateBucket][3]
#
# * [GetBucketLogging][4]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
#
# @option params [required, String] :bucket
#   The name of the bucket for which to set the logging parameters.
#
# @option params [required, Types::BucketLoggingStatus] :bucket_logging_status
#   Container for logging status information.
#
# @option params [String] :content_md5
#   The MD5 hash of the `PutBucketLogging` request body.
#
#   For requests made using the Amazon Web Services Command Line Interface
#   (CLI) or Amazon Web Services SDKs, this field is calculated
#   automatically.
#
# @option params [String] :checksum_algorithm
#   Indicates the algorithm used to create the checksum for the object
#   when you use the SDK. This header will not provide any additional
#   functionality if you don't use the SDK. When you send this header,
#   there must be a corresponding `x-amz-checksum` or `x-amz-trailer`
#   header sent. Otherwise, Amazon S3 fails the request with the HTTP
#   status code `400 Bad Request`. For more information, see [Checking
#   object integrity][1] in the *Amazon S3 User Guide*.
#
#   If you provide an individual checksum, Amazon S3 ignores any provided
#   `ChecksumAlgorithm` parameter.
#
#   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
#
# @option params [String] :expected_bucket_owner
#   The account ID of the expected bucket owner. If the account ID that
#   you provide does not match the actual owner of the bucket, the request
#   fails with the HTTP status code `403 Forbidden` (access denied).
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
#
# @example Example: Set logging configuration for a bucket
#
#   # The following example sets logging policy on a bucket. For the Log Delivery group to deliver logs to the destination
#   # bucket, it needs permission for the READ_ACP action which the policy grants.
# # resp = client.put_bucket_logging({ # bucket: "sourcebucket", # bucket_logging_status: { # logging_enabled: { # target_bucket: "targetbucket", # target_grants: [ # { # grantee: { # type: "Group", # uri: "http://acs.amazonaws.com/groups/global/AllUsers", # }, # permission: "READ", # }, # ], # target_prefix: "MyBucketLogs/", # }, # }, # }) # # @example Request syntax with placeholder values # # resp = client.put_bucket_logging({ # bucket: "BucketName", # required # bucket_logging_status: { # required # logging_enabled: { # target_bucket: "TargetBucket", # required # target_grants: [ # { # grantee: { # display_name: "DisplayName", # email_address: "EmailAddress", # id: "ID", # type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group # uri: "URI", # }, # permission: "FULL_CONTROL", # accepts FULL_CONTROL, READ, WRITE # }, # ], # target_prefix: "TargetPrefix", # required # target_object_key_format: { # simple_prefix: { # }, # partitioned_prefix: { # partition_date_source: "EventTime", # accepts EventTime, DeliveryTime # }, # }, # }, # }, # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging AWS API Documentation # # @overload put_bucket_logging(params = {}) # @param [Hash] params ({}) def put_bucket_logging(params = {}, options = {}) req = build_request(:put_bucket_logging, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Sets a metrics configuration (specified by the metrics configuration # ID) for the bucket. You can have up to 1,000 metrics configurations # per bucket. If you're updating an existing metrics configuration, # note that this is a full replacement of the existing metrics # configuration. If you don't include the elements you want to keep, # they are erased. # # To use this operation, you must have permissions to perform the # `s3:PutMetricsConfiguration` action. The bucket owner has this # permission by default. The bucket owner can grant this permission to # others. For more information about permissions, see [Permissions # Related to Bucket Subresource Operations][1] and [Managing Access # Permissions to Your Amazon S3 Resources][2]. # # For information about CloudWatch request metrics for Amazon S3, see # [Monitoring Metrics with Amazon CloudWatch][3]. # # The following operations are related to # `PutBucketMetricsConfiguration`: # # * [DeleteBucketMetricsConfiguration][4] # # * [GetBucketMetricsConfiguration][5] # # * [ListBucketMetricsConfigurations][6] # # `PutBucketMetricsConfiguration` has the following special error: # # * Error code: `TooManyConfigurations` # # * Description: You are attempting to create a new configuration but # have already reached the 1,000-configuration limit. 
# * HTTP Status Code: HTTP 400 Bad Request
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html
#
# @option params [required, String] :bucket
#   The name of the bucket for which the metrics configuration is set.
#
# @option params [required, String] :id
#   The ID used to identify the metrics configuration. The ID has a 64
#   character limit and can only contain letters, numbers, periods,
#   dashes, and underscores.
#
# @option params [required, Types::MetricsConfiguration] :metrics_configuration
#   Specifies the metrics configuration.
#
# @option params [String] :expected_bucket_owner
#   The account ID of the expected bucket owner. If the account ID that
#   you provide does not match the actual owner of the bucket, the request
#   fails with the HTTP status code `403 Forbidden` (access denied).
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
#   resp = client.put_bucket_metrics_configuration({
#     bucket: "BucketName", # required
#     id: "MetricsId", # required
#     metrics_configuration: { # required
#       id: "MetricsId", # required
#       filter: {
#         prefix: "Prefix",
#         tag: {
#           key: "ObjectKey", # required
#           value: "Value", # required
#         },
#         access_point_arn: "AccessPointArn",
#         and: {
#           prefix: "Prefix",
#           tags: [
#             {
#               key: "ObjectKey", # required
#               value: "Value", # required
#             },
#           ],
#           access_point_arn: "AccessPointArn",
#         },
#       },
#     },
#     expected_bucket_owner: "AccountId",
#   })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration AWS API Documentation
#
# @overload put_bucket_metrics_configuration(params = {})
# @param [Hash] params ({})
def put_bucket_metrics_configuration(params = {}, options = {})
  req = build_request(:put_bucket_metrics_configuration, params)
  req.send_request(options)
end

# This operation is not supported by directory buckets.
#
# No longer used, see the [PutBucketNotificationConfiguration][1]
# operation.
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html
#
# @option params [required, String] :bucket
#   The name of the bucket.
#
# @option params [String] :content_md5
#   The MD5 hash of the `PutBucketNotification` request body.
#
#   For requests made using the Amazon Web Services Command Line Interface
#   (CLI) or Amazon Web Services SDKs, this field is calculated
#   automatically.
#
# @option params [String] :checksum_algorithm
#   Indicates the algorithm used to create the checksum for the object
#   when you use the SDK. This header will not provide any additional
#   functionality if you don't use the SDK. When you send this header,
#   there must be a corresponding `x-amz-checksum` or `x-amz-trailer`
#   header sent. Otherwise, Amazon S3 fails the request with the HTTP
#   status code `400 Bad Request`. For more information, see [Checking
#   object integrity][1] in the *Amazon S3 User Guide*.
# # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [required, Types::NotificationConfigurationDeprecated] :notification_configuration # The container for the configuration. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # @example Request syntax with placeholder values # # resp = client.put_bucket_notification({ # bucket: "BucketName", # required # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # notification_configuration: { # required # topic_configuration: { # id: "NotificationId", # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete # topic: "TopicArn", # }, # queue_configuration: { # id: "NotificationId", # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete # events: 
["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete # queue: "QueueArn", # }, # cloud_function_configuration: { # id: "NotificationId", # event: "s3:ReducedRedundancyLostObject", # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete # events: ["s3:ReducedRedundancyLostObject"], # accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete # cloud_function: "CloudFunction", # invocation_role: "CloudFunctionInvocationRole", # }, # }, # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification AWS API Documentation # # @overload put_bucket_notification(params = {}) # @param [Hash] params ({}) def put_bucket_notification(params = {}, options = {}) req = build_request(:put_bucket_notification, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Enables notifications of specified events for a bucket. For more # information about event notifications, see [Configuring Event # Notifications][1]. # # Using this API, you can replace an existing notification # configuration. The configuration is an XML file that defines the event # types that you want Amazon S3 to publish and the destination where you # want Amazon S3 to publish an event notification when it detects an # event of the specified type. 
#
# By default, your bucket has no event notifications configured. That
# is, the notification configuration will be an empty
# `NotificationConfiguration`.
#
# `<NotificationConfiguration>`
#
# `</NotificationConfiguration>`
#
# This action replaces the existing notification configuration with the
# configuration you include in the request body.
#
# After Amazon S3 receives this request, it first verifies that any
# Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue
# Service (Amazon SQS) destination exists, and that the bucket owner has
# permission to publish to it by sending a test notification. In the
# case of Lambda destinations, Amazon S3 verifies that the Lambda
# function permissions grant Amazon S3 permission to invoke the function
# from the Amazon S3 bucket. For more information, see [Configuring
# Notifications for Amazon S3 Events][1].
#
# You can disable notifications by adding the empty
# `NotificationConfiguration` element.
#
# For more information about the number of event notification
# configurations that you can create per bucket, see [Amazon S3 service
# quotas][2] in the *Amazon Web Services General Reference*.
#
# By default, only the bucket owner can configure notifications on a
# bucket. However, bucket owners can use a bucket policy to grant
# permission to other users to set this configuration with the required
# `s3:PutBucketNotification` permission.
#
# The PUT notification is an atomic operation. For example, suppose your
# notification configuration includes SNS topic, SQS queue, and Lambda
# function configurations. When you send a PUT request with this
# configuration, Amazon S3 sends test messages to your SNS topic. If the
# message fails, the entire PUT action will fail, and Amazon S3 will not
# add the configuration to your bucket.
#
# If the configuration in the request body includes only one
# `TopicConfiguration` specifying only the
# `s3:ReducedRedundancyLostObject` event type, the response will also
# include the `x-amz-sns-test-message-id` header containing the message
# ID of the test notification sent to the topic.
#
# The following action is related to
# `PutBucketNotificationConfiguration`:
#
# * [GetBucketNotificationConfiguration][3]
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
# [2]: https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html
#
# @option params [required, String] :bucket
#   The name of the bucket.
#
# @option params [required, Types::NotificationConfiguration] :notification_configuration
#   A container for specifying the notification configuration of the
#   bucket. If this element is empty, notifications are turned off for the
#   bucket.
#
# @option params [String] :expected_bucket_owner
#   The account ID of the expected bucket owner. If the account ID that
#   you provide does not match the actual owner of the bucket, the request
#   fails with the HTTP status code `403 Forbidden` (access denied).
#
# @option params [Boolean] :skip_destination_validation
#   Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations.
#   True or false value.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Example: Set notification configuration for a bucket
#
#   # The following example sets notification configuration on a bucket to publish the object created events to an SNS topic.
# # resp = client.put_bucket_notification_configuration({ # bucket: "examplebucket", # notification_configuration: { # topic_configurations: [ # { # events: [ # "s3:ObjectCreated:*", # ], # topic_arn: "arn:aws:sns:us-west-2:123456789012:s3-notification-topic", # }, # ], # }, # }) # # @example Request syntax with placeholder values # # resp = client.put_bucket_notification_configuration({ # bucket: "BucketName", # required # notification_configuration: { # required # topic_configurations: [ # { # id: "NotificationId", # topic_arn: "TopicArn", # required # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete # filter: { # key: { # filter_rules: [ # { # name: "prefix", # accepts prefix, suffix # value: "FilterRuleValue", # }, # ], # }, # }, # }, # ], # queue_configurations: [ # { # id: "NotificationId", # queue_arn: "QueueArn", # required # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete # filter: { # key: { # filter_rules: [ # { # name: "prefix", # accepts prefix, suffix # value: "FilterRuleValue", # }, # ], # }, # }, # }, # ], # lambda_function_configurations: [ # { # id: "NotificationId", # lambda_function_arn: "LambdaFunctionArn", # required # events: ["s3:ReducedRedundancyLostObject"], # required, accepts s3:ReducedRedundancyLostObject, s3:ObjectCreated:*, s3:ObjectCreated:Put, s3:ObjectCreated:Post, s3:ObjectCreated:Copy, s3:ObjectCreated:CompleteMultipartUpload, s3:ObjectRemoved:*, s3:ObjectRemoved:Delete, s3:ObjectRemoved:DeleteMarkerCreated, s3:ObjectRestore:*, s3:ObjectRestore:Post, s3:ObjectRestore:Completed, s3:Replication:*, s3:Replication:OperationFailedReplication, s3:Replication:OperationNotTracked, s3:Replication:OperationMissedThreshold, s3:Replication:OperationReplicatedAfterThreshold, s3:ObjectRestore:Delete, s3:LifecycleTransition, s3:IntelligentTiering, s3:ObjectAcl:Put, s3:LifecycleExpiration:*, s3:LifecycleExpiration:Delete, s3:LifecycleExpiration:DeleteMarkerCreated, s3:ObjectTagging:*, s3:ObjectTagging:Put, s3:ObjectTagging:Delete # filter: { # 
key: { # filter_rules: [ # { # name: "prefix", # accepts prefix, suffix # value: "FilterRuleValue", # }, # ], # }, # }, # }, # ], # event_bridge_configuration: { # }, # }, # expected_bucket_owner: "AccountId", # skip_destination_validation: false, # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration AWS API Documentation # # @overload put_bucket_notification_configuration(params = {}) # @param [Hash] params ({}) def put_bucket_notification_configuration(params = {}, options = {}) req = build_request(:put_bucket_notification_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Creates or modifies `OwnershipControls` for an Amazon S3 bucket. To # use this operation, you must have the `s3:PutBucketOwnershipControls` # permission. For more information about Amazon S3 permissions, see # [Specifying permissions in a policy][1]. # # For information about Amazon S3 Object Ownership, see [Using object # ownership][2]. # # The following operations are related to `PutBucketOwnershipControls`: # # * GetBucketOwnershipControls # # * DeleteBucketOwnershipControls # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html # # @option params [required, String] :bucket # The name of the Amazon S3 bucket whose `OwnershipControls` you want to # set. # # @option params [String] :content_md5 # The MD5 hash of the `OwnershipControls` request body. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @option params [required, Types::OwnershipControls] :ownership_controls # The `OwnershipControls` (BucketOwnerEnforced, BucketOwnerPreferred, or # ObjectWriter) that you want to apply to this Amazon S3 bucket. # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # @example Request syntax with placeholder values # # resp = client.put_bucket_ownership_controls({ # bucket: "BucketName", # required # content_md5: "ContentMD5", # expected_bucket_owner: "AccountId", # ownership_controls: { # required # rules: [ # required # { # object_ownership: "BucketOwnerPreferred", # required, accepts BucketOwnerPreferred, ObjectWriter, BucketOwnerEnforced # }, # ], # }, # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls AWS API Documentation # # @overload put_bucket_ownership_controls(params = {}) # @param [Hash] params ({}) def put_bucket_ownership_controls(params = {}, options = {}) req = build_request(:put_bucket_ownership_controls, params) req.send_request(options) end # Applies an Amazon S3 bucket policy to an Amazon S3 bucket. # # Directory buckets - For directory buckets, you must make # requests for this API operation to the Regional endpoint. These # endpoints support path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. For more information, # see [Regional and Zonal endpoints][1] in the *Amazon S3 User Guide*. 
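#
# As an editor's aside (not part of the generated API reference):
# rather than hand-escaping the policy JSON, you can build it with
# Ruby's standard JSON library. The bucket name, account ID, and
# statement ID below are hypothetical placeholders.
#
#   require "json"
#
#   policy = {
#     "Version"   => "2012-10-17",
#     "Statement" => [{
#       "Sid"       => "AllowRootPut", # hypothetical statement ID
#       "Effect"    => "Allow",
#       "Principal" => { "AWS" => "arn:aws:iam::123456789012:root" },
#       "Action"    => ["s3:PutObject"],
#       "Resource"  => ["arn:aws:s3:::examplebucket/*"],
#     }],
#   }
#
#   client.put_bucket_policy(bucket: "examplebucket", policy: JSON.generate(policy))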
#
# Permissions
#
# : If you are using an identity other than the root user of the Amazon
#   Web Services account that owns the bucket, the calling identity must
#   both have the `PutBucketPolicy` permissions on the specified bucket
#   and belong to the bucket owner's account in order to use this
#   operation.
#
#   If you don't have `PutBucketPolicy` permissions, Amazon S3 returns
#   a `403 Access Denied` error. If you have the correct permissions,
#   but you're not using an identity that belongs to the bucket
#   owner's account, Amazon S3 returns a `405 Method Not Allowed`
#   error.
#
#   To ensure that bucket owners don't inadvertently lock themselves
#   out of their own buckets, the root principal in a bucket owner's
#   Amazon Web Services account can perform the `GetBucketPolicy`,
#   `PutBucketPolicy`, and `DeleteBucketPolicy` API actions, even if
#   their bucket policy explicitly denies the root principal's access.
#   Bucket owner root principals can only be blocked from performing
#   these API actions by VPC endpoint policies and Amazon Web Services
#   Organizations policies.
#
#   * **General purpose bucket permissions** - The `s3:PutBucketPolicy`
#     permission is required in a policy. For more information about
#     bucket policies for general purpose buckets, see [Using Bucket
#     Policies and User Policies][2] in the *Amazon S3 User Guide*.
#
#   * **Directory bucket permissions** - To grant access to this API
#     operation, you must have the `s3express:PutBucketPolicy`
#     permission in an IAM identity-based policy instead of a bucket
#     policy. Cross-account access to this API operation isn't
#     supported. This operation can only be performed by the Amazon Web
#     Services account that owns the resource. For more information
#     about directory bucket policies and permissions, see [Amazon Web
#     Services Identity and Access Management (IAM) for S3 Express One
#     Zone][3] in the *Amazon S3 User Guide*.
#
# Example bucket policies
#
# : **General purpose buckets example bucket policies** - See [Bucket
#   policy examples][4] in the *Amazon S3 User Guide*.
#
#   **Directory bucket example bucket policies** - See [Example bucket
#   policies for S3 Express One Zone][5] in the *Amazon S3 User Guide*.
#
# HTTP Host header syntax
#
# : Directory buckets - The HTTP Host header syntax is
#   `s3express-control.region.amazonaws.com`.
#
# The following operations are related to `PutBucketPolicy`:
#
# * [CreateBucket][6]
#
# * [DeleteBucket][7]
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
# [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
#
# @option params [required, String] :bucket
#   The name of the bucket.
#
#   Directory buckets - When you use this operation with a
#   directory bucket, you must use path-style requests in the format
#   `https://s3express-control.region_code.amazonaws.com/bucket-name `.
#   Virtual-hosted-style requests aren't supported. Directory bucket
#   names must be unique in the chosen Availability Zone.
Bucket names # must also follow the format ` bucket_base_name--az_id--x-s3` (for # example, ` DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about # bucket naming restrictions, see [Directory bucket naming rules][1] in # the *Amazon S3 User Guide* # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # # @option params [String] :content_md5 # The MD5 hash of the request body. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # This functionality is not supported for directory buckets. # # # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum-algorithm ` or # `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request # with the HTTP status code `400 Bad Request`. # # For the `x-amz-checksum-algorithm ` header, replace ` algorithm ` with # the supported algorithm from the following list: # # * CRC32 # # * CRC32C # # * SHA1 # # * SHA256 # # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # If the individual checksum value you provide through # `x-amz-checksum-algorithm ` doesn't match the checksum algorithm you # set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter and uses the checksum algorithm # that matches the provided value in `x-amz-checksum-algorithm `. # # For directory buckets, when you use Amazon Web Services SDKs, `CRC32` # is the default checksum algorithm that's used for performance. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [Boolean] :confirm_remove_self_bucket_access # Set this parameter to true to confirm that you want to remove your # permissions to change this bucket policy in the future. # # This functionality is not supported for directory buckets. # # # # @option params [required, String] :policy # The bucket policy as a JSON document. # # For directory buckets, the only IAM action supported in the bucket # policy is `s3express:CreateSession`. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # For directory buckets, this header is not supported in this API # operation. If you specify this header, the request fails with the HTTP # status code `501 Not Implemented`. # # # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # # @example Example: Set bucket policy # # # The following example sets a permission policy on a bucket. 
# # resp = client.put_bucket_policy({ # bucket: "examplebucket", # policy: "{\"Version\": \"2012-10-17\", \"Statement\": [{ \"Sid\": \"id-1\",\"Effect\": \"Allow\",\"Principal\": {\"AWS\": \"arn:aws:iam::123456789012:root\"}, \"Action\": [ \"s3:PutObject\",\"s3:PutObjectAcl\"], \"Resource\": [\"arn:aws:s3:::acl3/*\" ] } ]}", # }) # # @example Request syntax with placeholder values # # resp = client.put_bucket_policy({ # bucket: "BucketName", # required # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # confirm_remove_self_bucket_access: false, # policy: "Policy", # required # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy AWS API Documentation # # @overload put_bucket_policy(params = {}) # @param [Hash] params ({}) def put_bucket_policy(params = {}, options = {}) req = build_request(:put_bucket_policy, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Creates a replication configuration or replaces an existing one. For # more information, see [Replication][1] in the *Amazon S3 User Guide*. # # Specify the replication configuration in the request body. In the # replication configuration, you provide the name of the destination # bucket or buckets where you want Amazon S3 to replicate objects, the # IAM role that Amazon S3 can assume to replicate objects on your # behalf, and other relevant information. You can invoke this request # for a specific Amazon Web Services Region by using the [ # `aws:RequestedRegion` ][2] condition key. # # A replication configuration must include at least one rule, and can # contain a maximum of 1,000. Each rule identifies a subset of objects # to replicate by filtering the objects in the source bucket. To choose # additional subsets of objects to replicate, add a rule for each # subset. # # To specify a subset of the objects in the source bucket to apply a # replication rule to, add the Filter element as a child of the Rule # element. You can filter objects based on an object key prefix, one or # more object tags, or both. When you add the Filter element in the # configuration, you must also add the following elements: # `DeleteMarkerReplication`, `Status`, and `Priority`. # # If you are using an earlier version of the replication configuration, # Amazon S3 handles replication of delete markers differently. For more # information, see [Backward Compatibility][3]. # # # # For information about enabling versioning on a bucket, see [Using # Versioning][4]. # # Handling Replication of Encrypted Objects # # : By default, Amazon S3 doesn't replicate objects that are stored at # rest using server-side encryption with KMS keys. To replicate Amazon # Web Services KMS-encrypted objects, add the following: # `SourceSelectionCriteria`, `SseKmsEncryptedObjects`, `Status`, # `EncryptionConfiguration`, and `ReplicaKmsKeyID`. For information # about replication configuration, see [Replicating Objects Created # with SSE Using KMS keys][5]. # # For information on `PutBucketReplication` errors, see [List of # replication-related error codes][6] # # Permissions # # : To create a `PutBucketReplication` request, you must have # `s3:PutReplicationConfiguration` permissions for the bucket. # # By default, a resource owner, in this case the Amazon Web Services # account that created the bucket, can perform this operation. The # resource owner can also grant others permissions to perform the # operation. 
For more information about permissions, see [Specifying # Permissions in a Policy][7] and [Managing Access Permissions to Your # Amazon S3 Resources][8]. # # To perform this operation, the user or role performing the action # must have the [iam:PassRole][9] permission. # # # # The following operations are related to `PutBucketReplication`: # # * [GetBucketReplication][10] # # * [DeleteBucketReplication][11] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [9]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html # # @option params [required, String] :bucket # The name of the bucket # # @option params [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the data. You must use this # header as a message integrity check to verify that the request body # was not corrupted in transit. For more information, see [RFC 1864][1]. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [required, Types::ReplicationConfiguration] :replication_configuration # A container for replication rules. You can add up to 1,000 rules. The # maximum size of a replication configuration is 2 MB. # # @option params [String] :token # A token to allow Object Lock to be enabled for an existing bucket. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # # @example Example: Set replication configuration on a bucket # # # The following example sets replication configuration on a bucket. 
# # resp = client.put_bucket_replication({ # bucket: "examplebucket", # replication_configuration: { # role: "arn:aws:iam::123456789012:role/examplerole", # rules: [ # { # destination: { # bucket: "arn:aws:s3:::destinationbucket", # storage_class: "STANDARD", # }, # prefix: "", # status: "Enabled", # }, # ], # }, # }) # # @example Request syntax with placeholder values # # resp = client.put_bucket_replication({ # bucket: "BucketName", # required # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # replication_configuration: { # required # role: "Role", # required # rules: [ # required # { # id: "ID", # priority: 1, # prefix: "Prefix", # filter: { # prefix: "Prefix", # tag: { # key: "ObjectKey", # required # value: "Value", # required # }, # and: { # prefix: "Prefix", # tags: [ # { # key: "ObjectKey", # required # value: "Value", # required # }, # ], # }, # }, # status: "Enabled", # required, accepts Enabled, Disabled # source_selection_criteria: { # sse_kms_encrypted_objects: { # status: "Enabled", # required, accepts Enabled, Disabled # }, # replica_modifications: { # status: "Enabled", # required, accepts Enabled, Disabled # }, # }, # existing_object_replication: { # status: "Enabled", # required, accepts Enabled, Disabled # }, # destination: { # required # bucket: "BucketName", # required # account: "AccountId", # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE # access_control_translation: { # owner: "Destination", # required, accepts Destination # }, # encryption_configuration: { # replica_kms_key_id: "ReplicaKmsKeyID", # }, # replication_time: { # status: "Enabled", # required, accepts Enabled, Disabled # time: { # required # minutes: 1, # }, # }, # metrics: { # status: "Enabled", # required, accepts Enabled, Disabled # event_threshold: { # minutes: 1, # }, # }, # }, # delete_marker_replication: { # status: "Enabled", # accepts Enabled, Disabled # }, # }, # ], # }, # token: "ObjectLockToken", # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication AWS API Documentation # # @overload put_bucket_replication(params = {}) # @param [Hash] params ({}) def put_bucket_replication(params = {}, options = {}) req = build_request(:put_bucket_replication, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Sets the request payment configuration for a bucket. By default, the # bucket owner pays for downloads from the bucket. This configuration # parameter enables the bucket owner (only) to specify that the person # requesting the download will be charged for the download. For more # information, see [Requester Pays Buckets][1]. # # The following operations are related to `PutBucketRequestPayment`: # # * [CreateBucket][2] # # * [GetBucketRequestPayment][3] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html # # @option params [required, String] :bucket # The bucket name. # # @option params [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the data. You must use this # header as a message integrity check to verify that the request body # was not corrupted in transit. 
For more information, see [RFC 1864][1]. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [required, Types::RequestPaymentConfiguration] :request_payment_configuration # Container for Payer. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # # @example Example: Set request payment configuration on a bucket. # # # The following example sets request payment configuration on a bucket so that person requesting the download is charged. # # resp = client.put_bucket_request_payment({ # bucket: "examplebucket", # request_payment_configuration: { # payer: "Requester", # }, # }) # # @example Request syntax with placeholder values # # resp = client.put_bucket_request_payment({ # bucket: "BucketName", # required # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # request_payment_configuration: { # required # payer: "Requester", # required, accepts Requester, BucketOwner # }, # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment AWS API Documentation # # @overload put_bucket_request_payment(params = {}) # @param [Hash] params ({}) def put_bucket_request_payment(params = {}, options = {}) req = build_request(:put_bucket_request_payment, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Sets the tags for a bucket. # # Use tags to organize your Amazon Web Services bill to reflect your own # cost structure. To do this, sign up to get your Amazon Web Services # account bill with tag key values included. Then, to see the cost of # combined resources, organize your billing information according to # resources with the same tag key values. For example, you can tag # several resources with a specific application name, and then organize # your billing information to see the total cost of that application # across several services. For more information, see [Cost Allocation # and Tagging][1] and [Using Cost Allocation in Amazon S3 Bucket # Tags][2]. # # When this operation sets the tags for a bucket, it will overwrite any # current tags the bucket already has. You cannot use this operation to # add tags to an existing list of tags. # # # # To use this operation, you must have permissions to perform the # `s3:PutBucketTagging` action. 
The bucket owner has this permission by default and can grant this
# permission to others. For more information about permissions, see
# [Permissions Related to Bucket Subresource Operations][3] and
# [Managing Access Permissions to Your Amazon S3 Resources][4].
#
# `PutBucketTagging` has the following special errors. For more Amazon
# S3 errors, see [Error Responses][5].
#
# * `InvalidTag` - The tag provided was not a valid tag. This error can
#   occur if the tag did not pass input validation. For more
#   information, see [Using Cost Allocation in Amazon S3 Bucket
#   Tags][2].
#
# * `MalformedXML` - The XML provided does not match the schema.
#
# * `OperationAborted` - A conflicting conditional action is currently
#   in progress against this resource. Please try again.
#
# * `InternalError` - The service was unable to apply the provided tag
#   to the bucket.
#
# The following operations are related to `PutBucketTagging`:
#
# * [GetBucketTagging][6]
#
# * [DeleteBucketTagging][7]
#
# [1]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html
# [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html
#
# @option params [required, String] :bucket
#   The bucket name.
#
# @option params [String] :content_md5
#   The base64-encoded 128-bit MD5 digest of the data. You must use this
#   header as a message integrity check to verify that the request body
#   was not corrupted in transit. For more information, see [RFC 1864][1].
#
#   For requests made using the Amazon Web Services Command Line Interface
#   (CLI) or Amazon Web Services SDKs, this field is calculated
#   automatically.
#
#   [1]: http://www.ietf.org/rfc/rfc1864.txt
#
# @option params [String] :checksum_algorithm
#   Indicates the algorithm used to create the checksum for the object
#   when you use the SDK. This header will not provide any additional
#   functionality if you don't use the SDK. When you send this header,
#   there must be a corresponding `x-amz-checksum` or `x-amz-trailer`
#   header sent. Otherwise, Amazon S3 fails the request with the HTTP
#   status code `400 Bad Request`. For more information, see [Checking
#   object integrity][1] in the *Amazon S3 User Guide*.
#
#   If you provide an individual checksum, Amazon S3 ignores any provided
#   `ChecksumAlgorithm` parameter.
#
#   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
#
# @option params [required, Types::Tagging] :tagging
#   Container for the `TagSet` and `Tag` elements.
#
# @option params [String] :expected_bucket_owner
#   The account ID of the expected bucket owner. If the account ID that
#   you provide does not match the actual owner of the bucket, the request
#   fails with the HTTP status code `403 Forbidden` (access denied).
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Example: Set tags on a bucket
#
#   # The following example sets tags on a bucket. Any existing tags are replaced.
#   resp = client.put_bucket_tagging({
#     bucket: "examplebucket",
#     tagging: {
#       tag_set: [
#         {
#           key: "Key1",
#           value: "Value1",
#         },
#         {
#           key: "Key2",
#           value: "Value2",
#         },
#       ],
#     },
#   })
#
# @example Request syntax with placeholder values
#
#   resp = client.put_bucket_tagging({
#     bucket: "BucketName", # required
#     content_md5: "ContentMD5",
#     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
#     tagging: { # required
#       tag_set: [ # required
#         {
#           key: "ObjectKey", # required
#           value: "Value", # required
#         },
#       ],
#     },
#     expected_bucket_owner: "AccountId",
#   })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging AWS API Documentation
#
# @overload put_bucket_tagging(params = {})
# @param [Hash] params ({})
def put_bucket_tagging(params = {}, options = {})
  req = build_request(:put_bucket_tagging, params)
  req.send_request(options)
end

# This operation is not supported by directory buckets.
#
# Sets the versioning state of an existing bucket.
#
# You can set the versioning state with one of the following values:
#
# **Enabled**—Enables versioning for the objects in the bucket. All
# objects added to the bucket receive a unique version ID.
#
# **Suspended**—Disables versioning for the objects in the bucket. All
# objects added to the bucket receive the version ID null.
#
# If the versioning state has never been set on a bucket, it has no
# versioning state; a [GetBucketVersioning][1] request does not return a
# versioning state value.
#
# In order to enable MFA Delete, you must be the bucket owner. If you
# are the bucket owner and want to enable MFA Delete in the bucket
# versioning configuration, you must include the `x-amz-mfa` request
# header and the `Status` and `MfaDelete` request elements in a
# request to set the versioning state of the bucket.
#
# If you have an object expiration lifecycle configuration in your
# non-versioned bucket and you want to maintain the same permanent
# delete behavior when you enable versioning, you must add a noncurrent
# expiration policy. The noncurrent expiration lifecycle configuration
# will manage the deletes of the noncurrent object versions in the
# version-enabled bucket. (A version-enabled bucket maintains one
# current and zero or more noncurrent object versions.) For more
# information, see [Lifecycle and Versioning][2].
#
# The following operations are related to `PutBucketVersioning`:
#
# * [CreateBucket][3]
#
# * [DeleteBucket][4]
#
# * [GetBucketVersioning][1]
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
#
# @option params [required, String] :bucket
#   The bucket name.
#
# @option params [String] :content_md5
#   The base64-encoded 128-bit MD5 digest of the data. You must use
#   this header as a message integrity check to verify that the request
#   body was not corrupted in transit. For more information, see [RFC
#   1864][1].
#
#   For requests made using the Amazon Web Services Command Line Interface
#   (CLI) or Amazon Web Services SDKs, this field is calculated
#   automatically.
#
#   [1]: http://www.ietf.org/rfc/rfc1864.txt
#
# @option params [String] :checksum_algorithm
#   Indicates the algorithm used to create the checksum for the object
#   when you use the SDK.
This header will not provide any additional
#   functionality if you don't use the SDK. When you send this header,
#   there must be a corresponding `x-amz-checksum` or `x-amz-trailer`
#   header sent. Otherwise, Amazon S3 fails the request with the HTTP
#   status code `400 Bad Request`. For more information, see [Checking
#   object integrity][1] in the *Amazon S3 User Guide*.
#
#   If you provide an individual checksum, Amazon S3 ignores any provided
#   `ChecksumAlgorithm` parameter.
#
#   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
#
# @option params [String] :mfa
#   The concatenation of the authentication device's serial number, a
#   space, and the value that is displayed on your authentication device.
#
# @option params [required, Types::VersioningConfiguration] :versioning_configuration
#   Container for setting the versioning state.
#
# @option params [String] :expected_bucket_owner
#   The account ID of the expected bucket owner. If the account ID that
#   you provide does not match the actual owner of the bucket, the request
#   fails with the HTTP status code `403 Forbidden` (access denied).
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Example: Set versioning configuration on a bucket
#
#   # The following example sets versioning configuration on a bucket. The configuration enables versioning on the bucket.
#
#   resp = client.put_bucket_versioning({
#     bucket: "examplebucket",
#     versioning_configuration: {
#       mfa_delete: "Disabled",
#       status: "Enabled",
#     },
#   })
#
# @example Request syntax with placeholder values
#
#   resp = client.put_bucket_versioning({
#     bucket: "BucketName", # required
#     content_md5: "ContentMD5",
#     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
#     mfa: "MFA",
#     versioning_configuration: { # required
#       mfa_delete: "Enabled", # accepts Enabled, Disabled
#       status: "Enabled", # accepts Enabled, Suspended
#     },
#     expected_bucket_owner: "AccountId",
#   })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning AWS API Documentation
#
# @overload put_bucket_versioning(params = {})
# @param [Hash] params ({})
def put_bucket_versioning(params = {}, options = {})
  req = build_request(:put_bucket_versioning, params)
  req.send_request(options)
end

# This operation is not supported by directory buckets.
#
# Sets the configuration of the website that is specified in the
# `website` subresource. To configure a bucket as a website, you can add
# this subresource on the bucket with website configuration information
# such as the file name of the index document and any redirect rules.
# For more information, see [Hosting Websites on Amazon S3][1].
#
# This PUT action requires the `S3:PutBucketWebsite` permission. By
# default, only the bucket owner can configure the website attached to a
# bucket; however, bucket owners can allow other users to set the
# website configuration by writing a bucket policy that grants them the
# `S3:PutBucketWebsite` permission.
#
# To redirect all website requests sent to the bucket's website
# endpoint, you add a website configuration with the following elements.
# Because all requests are sent to another website, you don't need to
# provide an index document name for the bucket.
# # * `WebsiteConfiguration` # # * `RedirectAllRequestsTo` # # * `HostName` # # * `Protocol` # # If you want granular control over redirects, you can use the following # elements to add routing rules that describe conditions for redirecting # requests and information about the redirect destination. In this case, # the website configuration must provide an index document for the # bucket, because some requests might not be redirected. # # * `WebsiteConfiguration` # # * `IndexDocument` # # * `Suffix` # # * `ErrorDocument` # # * `Key` # # * `RoutingRules` # # * `RoutingRule` # # * `Condition` # # * `HttpErrorCodeReturnedEquals` # # * `KeyPrefixEquals` # # * `Redirect` # # * `Protocol` # # * `HostName` # # * `ReplaceKeyPrefixWith` # # * `ReplaceKeyWith` # # * `HttpRedirectCode` # # Amazon S3 has a limitation of 50 routing rules per website # configuration. If you require more than 50 routing rules, you can use # object redirect. For more information, see [Configuring an Object # Redirect][2] in the *Amazon S3 User Guide*. # # The maximum request length is limited to 128 KB. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html # # @option params [required, String] :bucket # The bucket name. # # @option params [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the data. You must use this # header as a message integrity check to verify that the request body # was not corrupted in transit. For more information, see [RFC 1864][1]. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [required, Types::WebsiteConfiguration] :website_configuration # Container for the request. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # # @example Example: Set website configuration on a bucket # # # The following example adds website configuration to a bucket. 
# # resp = client.put_bucket_website({ # bucket: "examplebucket", # content_md5: "", # website_configuration: { # error_document: { # key: "error.html", # }, # index_document: { # suffix: "index.html", # }, # }, # }) # # @example Request syntax with placeholder values # # resp = client.put_bucket_website({ # bucket: "BucketName", # required # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # website_configuration: { # required # error_document: { # key: "ObjectKey", # required # }, # index_document: { # suffix: "Suffix", # required # }, # redirect_all_requests_to: { # host_name: "HostName", # required # protocol: "http", # accepts http, https # }, # routing_rules: [ # { # condition: { # http_error_code_returned_equals: "HttpErrorCodeReturnedEquals", # key_prefix_equals: "KeyPrefixEquals", # }, # redirect: { # required # host_name: "HostName", # http_redirect_code: "HttpRedirectCode", # protocol: "http", # accepts http, https # replace_key_prefix_with: "ReplaceKeyPrefixWith", # replace_key_with: "ReplaceKeyWith", # }, # }, # ], # }, # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite AWS API Documentation # # @overload put_bucket_website(params = {}) # @param [Hash] params ({}) def put_bucket_website(params = {}, options = {}) req = build_request(:put_bucket_website, params) req.send_request(options) end # Adds an object to a bucket. # # * Amazon S3 never adds partial objects; if you receive a success # response, Amazon S3 added the entire object to the bucket. You # cannot use `PutObject` to only update a single piece of metadata for # an existing object. You must put the entire object with updated # metadata if you want to update some values. # # * If your bucket uses the bucket owner enforced setting for Object # Ownership, ACLs are disabled and no longer affect permissions. All # objects written to the bucket by any account will be owned by the # bucket owner. # # * **Directory buckets** - For directory buckets, you must make # requests for this API operation to the Zonal endpoint. These # endpoints support virtual-hosted-style requests in the format # `https://bucket_name.s3express-az_id.region.amazonaws.com/key-name # `. Path-style requests are not supported. For more information, see # [Regional and Zonal endpoints][1] in the *Amazon S3 User Guide*. # # # # Amazon S3 is a distributed system. If it receives multiple write # requests for the same object simultaneously, it overwrites all but the # last object written. However, Amazon S3 provides features that can # modify this behavior: # # * **S3 Object Lock** - To prevent objects from being deleted or # overwritten, you can use [Amazon S3 Object Lock][2] in the *Amazon # S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # * **S3 Versioning** - When you enable versioning for a bucket, if # Amazon S3 receives multiple write requests for the same object # simultaneously, it stores all versions of the objects. For each # write request that is made to the same object, Amazon S3 # automatically generates a unique version ID of that object being # stored in Amazon S3. You can retrieve, replace, or delete any # version of the object. For more information about versioning, see # [Adding Objects to Versioning-Enabled Buckets][3] in the *Amazon S3 # User Guide*. For information about returning the versioning state of # a bucket, see [GetBucketVersioning][4]. 
# # This functionality is not supported for directory buckets. # # # # Permissions # : * **General purpose bucket permissions** - The following permissions # are required in your policies when your `PutObject` request # includes specific headers. # # * `s3:PutObject` - To successfully complete # the `PutObject` request, you must always have the `s3:PutObject` # permission on a bucket to add an object to it. # # * `s3:PutObjectAcl` - To successfully change # the object's ACL with your `PutObject` request, you must have the # `s3:PutObjectAcl` permission. # # * `s3:PutObjectTagging` - To successfully set # the tag-set with your `PutObject` request, you must have the # `s3:PutObjectTagging` permission. # # * **Directory bucket permissions** - To grant access to this API # operation on a directory bucket, we recommend that you use the [ # `CreateSession` ][5] API operation for session-based # authorization. Specifically, you grant the # `s3express:CreateSession` permission to the directory bucket in a # bucket policy or an IAM identity-based policy. Then, you make the # `CreateSession` API call on the bucket to obtain a session token. # With the session token in your request header, you can make API # requests to this operation. After the session token expires, you # make another `CreateSession` API call to generate a new session # token for use. The Amazon Web Services CLI and SDKs create a session and # refresh the session token automatically to avoid service # interruptions when a session expires. For more information about # authorization, see [ `CreateSession` ][5]. # # Data integrity with Content-MD5 # : * **General purpose bucket** - To ensure that data is not corrupted # traversing the network, use the `Content-MD5` header. When you use # this header, Amazon S3 checks the object against the provided MD5 # value and, if they do not match, Amazon S3 returns an error. # Alternatively, when the object's ETag is its MD5 digest, you can # calculate the MD5 while putting the object to Amazon S3 and # compare the returned ETag to the calculated MD5 value. # # * **Directory bucket** - This functionality is not supported for # directory buckets. # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is # `Bucket_name.s3express-az_id.region.amazonaws.com`. # # For more information about related Amazon S3 APIs, see the following: # # * [CopyObject][6] # # * [DeleteObject][7] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html # # @option params [String] :acl # The canned ACL to apply to the object. For more information, see # [Canned ACL][1] in the *Amazon S3 User Guide*. # # When adding a new object, you can use headers to grant ACL-based # permissions to individual Amazon Web Services accounts or to # predefined groups defined by Amazon S3. These permissions are then # added to the ACL on the object. By default, all objects are private. # Only the owner has full access control.
For more information, see # [Access Control List (ACL) Overview][2] and [Managing ACLs Using the # REST API][3] in the *Amazon S3 User Guide*. # # If the bucket that you're uploading objects to uses the bucket owner # enforced setting for S3 Object Ownership, ACLs are disabled and no # longer affect permissions. Buckets that use this setting only accept # PUT requests that don't specify an ACL or PUT requests that specify # bucket owner full control ACLs, such as the # `bucket-owner-full-control` canned ACL or an equivalent form of this # ACL expressed in the XML format. PUT requests that contain other ACLs # (for example, custom grants to certain Amazon Web Services accounts) # fail and return a `400` error with the error code # `AccessControlListNotSupported`. For more information, see [ # Controlling ownership of objects and disabling ACLs][4] in the *Amazon # S3 User Guide*. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # # @option params [String, StringIO, File] :body # Object data. # # @option params [required, String] :bucket # The bucket name to which the PUT action was initiated. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [String] :cache_control # Can be used to specify caching behavior along the request/reply chain. # For more information, see # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9][1]. # # # # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 # # @option params [String] :content_disposition # Specifies presentational information for the object. For more # information, see # [https://www.rfc-editor.org/rfc/rfc6266#section-4][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc6266#section-4 # # @option params [String] :content_encoding # Specifies what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the media-type # referenced by the Content-Type header field. For more information, see # [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding # # @option params [String] :content_language # The language the content is in. # # @option params [Integer] :content_length # Size of the body in bytes. This parameter is useful when the size of # the body cannot be determined automatically. For more information, see # [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length # # @option params [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the message (without the # headers) according to RFC 1864. This header can be used as a message # integrity check to verify that the data is the same data that was # originally sent. Although it is optional, we recommend using the # Content-MD5 mechanism as an end-to-end integrity check. For more # information about REST request authentication, see [REST # Authentication][1]. # # The `Content-MD5` header is required for any request to upload an # object with a retention period configured using Amazon S3 Object Lock. # For more information about Amazon S3 Object Lock, see [Amazon S3 # Object Lock Overview][2] in the *Amazon S3 User Guide*. # # # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html # # @option params [String] :content_type # A standard MIME type describing the format of the contents. For more # information, see # [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum-algorithm ` or # `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request # with the HTTP status code `400 Bad Request`. 
# # For the `x-amz-checksum-algorithm` header, replace `algorithm` with # the supported algorithm from the following list: # # * CRC32 # # * CRC32C # # * SHA1 # # * SHA256 # # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # If the individual checksum value you provide through # `x-amz-checksum-algorithm` doesn't match the checksum algorithm you # set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter and uses the checksum algorithm # that matches the provided value in `x-amz-checksum-algorithm`. # # For directory buckets, when you use Amazon Web Services SDKs, `CRC32` # is the default checksum algorithm that's used for performance. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :checksum_crc32 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32 checksum of the object. For # more information, see [Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :checksum_crc32c # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32C checksum of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :checksum_sha1 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 160-bit SHA-1 digest of the object. For # more information, see [Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :checksum_sha256 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 256-bit SHA-256 digest of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [Time,DateTime,Date,Integer,String] :expires # The date and time at which the object is no longer cacheable. For more # information, see # [https://www.rfc-editor.org/rfc/rfc7234#section-5.3][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3 # # @option params [String] :grant_full_control # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the # object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # @option params [String] :grant_read # Allows grantee to read the object data and its metadata. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # @option params [String] :grant_read_acp # Allows grantee to read the object ACL.
# # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # @option params [String] :grant_write_acp # Allows grantee to write the ACL for the applicable object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # @option params [required, String] :key # Object key for which the PUT action was initiated. # # @option params [Hash] :metadata # A map of metadata to store with the object in S3. # # @option params [String] :server_side_encryption # The server-side encryption algorithm that was used when you store this # object in Amazon S3 (for example, `AES256`, `aws:kms`, # `aws:kms:dsse`). # # General purpose buckets - You have four mutually exclusive # options to protect data using server-side encryption in Amazon S3, # depending on how you choose to manage the encryption keys. # Specifically, the encryption key options are Amazon S3 managed keys # (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and # customer-provided keys (SSE-C). Amazon S3 encrypts data with # server-side encryption by using Amazon S3 managed keys (SSE-S3) by # default. You can optionally tell Amazon S3 to encrypt data at rest by # using server-side encryption with other key options. For more # information, see [Using Server-Side Encryption][1] in the *Amazon S3 # User Guide*. # # Directory buckets - For directory buckets, only the # server-side encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) # value is supported. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html # # @option params [String] :storage_class # By default, Amazon S3 uses the STANDARD Storage Class to store newly # created objects. The STANDARD storage class provides high durability # and high availability. Depending on performance needs, you can specify # a different Storage Class. For more information, see [Storage # Classes][1] in the *Amazon S3 User Guide*. # # * For directory buckets, only the S3 Express One Zone storage class is # supported to store newly created objects. # # * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # # @option params [String] :website_redirect_location # If the bucket is configured as a website, redirects requests for this # object to another object in the same bucket or to an external URL. # Amazon S3 stores the value of this header in the object metadata. For # information about object metadata, see [Object Key and Metadata][1] in # the *Amazon S3 User Guide*. # # In the following example, the request header sets the redirect to an # object (anotherPage.html) in the same bucket: # # `x-amz-website-redirect-location: /anotherPage.html` # # In the following example, the request header sets the object redirect # to another website: # # `x-amz-website-redirect-location: http://www.example.com/` # # For more information about website hosting in Amazon S3, see [Hosting # Websites on Amazon S3][2] and [How to Configure Website Page # Redirects][3] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. 
# # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html # # @option params [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, `AES256`). # # This functionality is not supported for directory buckets. # # # # @option params [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded; Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported for directory buckets. # # # # @option params [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported for directory buckets. # # # # @option params [String] :ssekms_key_id # If `x-amz-server-side-encryption` has a valid value of `aws:kms` or # `aws:kms:dsse`, this header specifies the ID (Key ID, Key ARN, or Key # Alias) of the Key Management Service (KMS) symmetric encryption # customer managed key that was used for the object. If you specify # `x-amz-server-side-encryption:aws:kms` or # `x-amz-server-side-encryption:aws:kms:dsse`, but do not provide # `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the # Amazon Web Services managed key (`aws/s3`) to protect the data. If the # KMS key does not exist in the same account that's issuing the # command, you must use the full ARN and not just the ID. # # This functionality is not supported for directory buckets. # # # # @option params [String] :ssekms_encryption_context # Specifies the Amazon Web Services KMS Encryption Context to use for # object encryption. The value of this header is a base64-encoded UTF-8 # string holding JSON with the encryption context key-value pairs. This # value is stored as object metadata and automatically gets passed on to # Amazon Web Services KMS for future `GetObject` or `CopyObject` # operations on this object. This value must be explicitly added during # `CopyObject` operations. # # This functionality is not supported for directory buckets. # # # # @option params [Boolean] :bucket_key_enabled # Specifies whether Amazon S3 should use an S3 Bucket Key for object # encryption with server-side encryption using Key Management Service # (KMS) keys (SSE-KMS). Setting this header to `true` causes Amazon S3 # to use an S3 Bucket Key for object encryption with SSE-KMS. # # Specifying this header with a PUT action doesn’t affect bucket-level # settings for S3 Bucket Key. # # This functionality is not supported for directory buckets. # # # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*.
# # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :tagging # The tag-set for the object. The tag-set must be encoded as URL Query # parameters. (For example, "Key1=Value1") # # This functionality is not supported for directory buckets. # # # # @option params [String] :object_lock_mode # The Object Lock mode that you want to apply to this object. # # This functionality is not supported for directory buckets. # # # # @option params [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date # The date and time when you want this object's Object Lock to expire. # Must be formatted as a timestamp parameter. # # This functionality is not supported for directory buckets. # # # # @option params [String] :object_lock_legal_hold_status # Specifies whether a legal hold will be applied to this object. For # more information about S3 Object Lock, see [Object Lock][1] in the # *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::PutObjectOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::PutObjectOutput#expiration #expiration} => String # * {Types::PutObjectOutput#etag #etag} => String # * {Types::PutObjectOutput#checksum_crc32 #checksum_crc32} => String # * {Types::PutObjectOutput#checksum_crc32c #checksum_crc32c} => String # * {Types::PutObjectOutput#checksum_sha1 #checksum_sha1} => String # * {Types::PutObjectOutput#checksum_sha256 #checksum_sha256} => String # * {Types::PutObjectOutput#server_side_encryption #server_side_encryption} => String # * {Types::PutObjectOutput#version_id #version_id} => String # * {Types::PutObjectOutput#sse_customer_algorithm #sse_customer_algorithm} => String # * {Types::PutObjectOutput#sse_customer_key_md5 #sse_customer_key_md5} => String # * {Types::PutObjectOutput#ssekms_key_id #ssekms_key_id} => String # * {Types::PutObjectOutput#ssekms_encryption_context #ssekms_encryption_context} => String # * {Types::PutObjectOutput#bucket_key_enabled #bucket_key_enabled} => Boolean # * {Types::PutObjectOutput#request_charged #request_charged} => String # # # @example Example: To upload an object and specify server-side encryption and object tags # # # The following example uploads an object. The request specifies the optional server-side encryption option. The request # # also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response. # # resp = client.put_object({ # body: "filetoupload", # bucket: "examplebucket", # key: "exampleobject", # server_side_encryption: "AES256", # tagging: "key1=value1&key2=value2", # }) # # resp.to_h outputs the following: # { # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", # server_side_encryption: "AES256", # version_id: "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt", # } # # @example Example: To create an object. # # # The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response. 
# # resp = client.put_object({ # body: "filetoupload", # bucket: "examplebucket", # key: "objectkey", # }) # # resp.to_h outputs the following: # { # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", # version_id: "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ", # } # # @example Example: To upload an object (specify optional headers) # # # The following example uploads an object. The request specifies optional request headers to direct S3 to use a specific # # storage class and server-side encryption. # # resp = client.put_object({ # body: "HappyFace.jpg", # bucket: "examplebucket", # key: "HappyFace.jpg", # server_side_encryption: "AES256", # storage_class: "STANDARD_IA", # }) # # resp.to_h outputs the following: # { # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", # server_side_encryption: "AES256", # version_id: "CG612hodqujkf8FaaNfp8U..FIhLROcp", # } # # @example Example: To upload an object and specify optional tags # # # The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore # # S3 returns version ID of the newly created object. # # resp = client.put_object({ # body: "c:\\HappyFace.jpg", # bucket: "examplebucket", # key: "HappyFace.jpg", # tagging: "key1=value1&key2=value2", # }) # # resp.to_h outputs the following: # { # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", # version_id: "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a", # } # # @example Example: To upload object and specify user-defined metadata # # # The following example creates an object. The request also specifies optional metadata. If the bucket is versioning # # enabled, S3 returns version ID in response. # # resp = client.put_object({ # body: "filetoupload", # bucket: "examplebucket", # key: "exampleobject", # metadata: { # "metadata1" => "value1", # "metadata2" => "value2", # }, # }) # # resp.to_h outputs the following: # { # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", # version_id: "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0", # } # # @example Example: To upload an object and specify canned ACL. # # # The following example uploads an object. The request specifies an optional canned ACL (access control list) to grant READ # # access to authenticated users. If the bucket is versioning enabled, S3 returns version ID in response. # # resp = client.put_object({ # acl: "authenticated-read", # body: "filetoupload", # bucket: "examplebucket", # key: "exampleobject", # }) # # resp.to_h outputs the following: # { # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", # version_id: "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr", # } # # @example Example: To upload an object # # # The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file # # syntax. S3 returns VersionId of the newly created object.
# # resp = client.put_object({ # body: "HappyFace.jpg", # bucket: "examplebucket", # key: "HappyFace.jpg", # }) # # resp.to_h outputs the following: # { # etag: "\"6805f2cfc46c0f04559748bb039d69ae\"", # version_id: "tpf3zF08nBplQK1XLOefGskR7mGDwcDk", # } # # @example Streaming a file from disk # # upload file from disk in a single request, may not exceed 5GB # File.open('/source/file/path', 'rb') do |file| # s3.put_object(bucket: 'bucket-name', key: 'object-key', body: file) # end # # @example Request syntax with placeholder values # # resp = client.put_object({ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control # body: source_file, # bucket: "BucketName", # required # cache_control: "CacheControl", # content_disposition: "ContentDisposition", # content_encoding: "ContentEncoding", # content_language: "ContentLanguage", # content_length: 1, # content_md5: "ContentMD5", # content_type: "ContentType", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # checksum_crc32: "ChecksumCRC32", # checksum_crc32c: "ChecksumCRC32C", # checksum_sha1: "ChecksumSHA1", # checksum_sha256: "ChecksumSHA256", # expires: Time.now, # grant_full_control: "GrantFullControl", # grant_read: "GrantRead", # grant_read_acp: "GrantReadACP", # grant_write_acp: "GrantWriteACP", # key: "ObjectKey", # required # metadata: { # "MetadataKey" => "MetadataValue", # }, # server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE # website_redirect_location: "WebsiteRedirectLocation", # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # ssekms_key_id: "SSEKMSKeyId", # ssekms_encryption_context: "SSEKMSEncryptionContext", # bucket_key_enabled: false, # request_payer: "requester", # accepts requester # tagging: "TaggingHeader", # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE # object_lock_retain_until_date: Time.now, # object_lock_legal_hold_status: "ON", # accepts ON, OFF # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.expiration #=> String # resp.etag #=> String # resp.checksum_crc32 #=> String # resp.checksum_crc32c #=> String # resp.checksum_sha1 #=> String # resp.checksum_sha256 #=> String # resp.server_side_encryption #=> String, one of "AES256", "aws:kms", "aws:kms:dsse" # resp.version_id #=> String # resp.sse_customer_algorithm #=> String # resp.sse_customer_key_md5 #=> String # resp.ssekms_key_id #=> String # resp.ssekms_encryption_context #=> String # resp.bucket_key_enabled #=> Boolean # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject AWS API Documentation # # @overload put_object(params = {}) # @param [Hash] params ({}) def put_object(params = {}, options = {}) req = build_request(:put_object, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Uses the `acl` subresource to set the access control list (ACL) # permissions for a new or existing object in an S3 bucket. You must # have the `WRITE_ACP` permission to set the ACL of an object. For more # information, see [What permissions can I grant?][1] in the *Amazon S3 # User Guide*. 
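#
# As a quick orientation before the details that follow, a canned ACL
# can be applied with a single call. This is a minimal illustrative
# sketch only: the bucket and key names are placeholders, and the
# parameter shapes mirror the request syntax shown later in this
# documentation.
#
#     client.put_object_acl({
#       acl: "public-read",       # canned ACL to apply to the object
#       bucket: "example-bucket", # placeholder bucket name
#       key: "photo.jpg",         # placeholder object key
#     })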
# # This functionality is not supported for Amazon S3 on Outposts. # # Depending on your application needs, you can choose to set the ACL on # an object using either the request body or the headers. For example, # if you have an existing application that updates a bucket ACL using # the request body, you can continue to use that approach. For more # information, see [Access Control List (ACL) Overview][2] in the # *Amazon S3 User Guide*. # # If your bucket uses the bucket owner enforced setting for S3 Object # Ownership, ACLs are disabled and no longer affect permissions. You # must use policies to grant access to your bucket and the objects in # it. Requests to set ACLs or update ACLs fail and return the # `AccessControlListNotSupported` error code. Requests to read ACLs are # still supported. For more information, see [Controlling object # ownership][3] in the *Amazon S3 User Guide*. # # Permissions # # : You can set access permissions using one of the following methods: # # * Specify a canned ACL with the `x-amz-acl` request header. Amazon # S3 supports a set of predefined ACLs, known as canned ACLs. Each # canned ACL has a predefined set of grantees and permissions. # Specify the canned ACL name as the value of `x-amz-acl`. If you # use this header, you cannot use other access control-specific # headers in your request. For more information, see [Canned # ACL][4]. # # * Specify access permissions explicitly with the `x-amz-grant-read`, # `x-amz-grant-read-acp`, `x-amz-grant-write-acp`, and # `x-amz-grant-full-control` headers. When using these headers, you # specify explicit access permissions and grantees (Amazon Web # Services accounts or Amazon S3 groups) who will receive the # permission. If you use these ACL-specific headers, you cannot use # the `x-amz-acl` header to set a canned ACL. These parameters map to # the set of permissions that Amazon S3 supports in an ACL. For more # information, see [Access Control List (ACL) Overview][2]. # # You specify each grantee as a type=value pair, where the type is # one of the following: # # * `id` – if the value specified is the canonical user ID of an # Amazon Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of # an Amazon Web Services account # # Using email addresses to specify a grantee is only supported in # the following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, # see [Regions and Endpoints][5] in the Amazon Web Services # General Reference. # # # # For example, the following `x-amz-grant-read` header grants list # objects permission to the two Amazon Web Services accounts # identified by their email addresses. # # `x-amz-grant-read: emailAddress="xyz@amazon.com", # emailAddress="abc@amazon.com"` # # You can use either a canned ACL or specify access permissions # explicitly. You cannot do both. # # Grantee Values # # : You can specify the person (grantee) to whom you're assigning # access rights (using request elements) in the following ways: # # * By the person's ID: # # `<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" # xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName></Grantee>` # # DisplayName is optional and ignored in the request.
# # * By URI: # # `<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" # xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>` # # * By Email address: # # `<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" # xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>` # # The grantee is resolved to the CanonicalUser and, in a response to # a GET Object acl request, appears as the CanonicalUser. # # Using email addresses to specify a grantee is only supported in # the following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, # see [Regions and Endpoints][5] in the Amazon Web Services General # Reference. # # # # Versioning # # : The ACL of an object is set at the object version level. By default, # PUT sets the ACL of the current version of an object. To set the ACL # of a different version, use the `versionId` subresource. # # The following operations are related to `PutObjectAcl`: # # * [CopyObject][6] # # * [GetObject][7] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL # [5]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html # # @option params [String] :acl # The canned ACL to apply to the object. For more information, see # [Canned ACL][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL # # @option params [Types::AccessControlPolicy] :access_control_policy # Contains the elements that set the ACL permissions for an object per # grantee. # # @option params [required, String] :bucket # The bucket name that contains the object to which you want to attach # the ACL. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][1] in the *Amazon S3 User Guide*. # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][2] in the *Amazon S3 User Guide*.
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the data. This header must be # used as a message integrity check to verify that the request body was # not corrupted in transit. For more information, see [RFC 1864][1]. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :grant_full_control # Allows grantee the read, write, read ACP, and write ACP permissions on # the bucket. # # This functionality is not supported for Amazon S3 on Outposts. # # @option params [String] :grant_read # Allows grantee to list the objects in the bucket. # # This functionality is not supported for Amazon S3 on Outposts. # # @option params [String] :grant_read_acp # Allows grantee to read the bucket ACL. # # This functionality is not supported for Amazon S3 on Outposts. # # @option params [String] :grant_write # Allows grantee to create new objects in the bucket. # # For the bucket and object owners of existing objects, also allows # deletions and overwrites of those objects. # # @option params [String] :grant_write_acp # Allows grantee to write the ACL for the applicable bucket. # # This functionality is not supported for Amazon S3 on Outposts. # # @option params [required, String] :key # Key for which the PUT action was initiated. # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :version_id # Version ID used to reference a specific version of the object. # # This functionality is not supported for directory buckets. # # # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied).
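#
# @example Illustrative sketch: setting the ACL on a specific object version
#
#   # A minimal sketch with placeholder bucket, key, and version ID
#   # values. As described above, object ACLs are set at the version
#   # level, so passing version_id targets that specific version
#   # instead of the current one.
#   resp = client.put_object_acl({
#     acl: "private",
#     bucket: "example-bucket",
#     key: "HappyFace.jpg",
#     version_id: "ExampleVersionId",
#   })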
# # @return [Types::PutObjectAclOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::PutObjectAclOutput#request_charged #request_charged} => String # # # @example Example: To grant permissions using object ACL # # # The following example adds grants to an object ACL. The first permission grants user1 and user2 FULL_CONTROL and the # # AllUsers group READ permission. # # resp = client.put_object_acl({ # access_control_policy: { # }, # bucket: "examplebucket", # grant_full_control: "emailaddress=user1@example.com,emailaddress=user2@example.com", # grant_read: "uri=http://acs.amazonaws.com/groups/global/AllUsers", # key: "HappyFace.jpg", # }) # # resp.to_h outputs the following: # { # } # # @example Request syntax with placeholder values # # resp = client.put_object_acl({ # acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control # access_control_policy: { # grants: [ # { # grantee: { # display_name: "DisplayName", # email_address: "EmailAddress", # id: "ID", # type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group # uri: "URI", # }, # permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP # }, # ], # owner: { # display_name: "DisplayName", # id: "ID", # }, # }, # bucket: "BucketName", # required # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # grant_full_control: "GrantFullControl", # grant_read: "GrantRead", # grant_read_acp: "GrantReadACP", # grant_write: "GrantWrite", # grant_write_acp: "GrantWriteACP", # key: "ObjectKey", # required # request_payer: "requester", # accepts requester # version_id: "ObjectVersionId", # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl AWS API Documentation # # @overload put_object_acl(params = {}) # @param [Hash] params ({}) def put_object_acl(params = {}, options = {}) req = build_request(:put_object_acl, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Applies a legal hold configuration to the specified object. For more # information, see [Locking Objects][1]. # # This functionality is not supported for Amazon S3 on Outposts. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html # # @option params [required, String] :bucket # The bucket name containing the object that you want to place a legal # hold on. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # # @option params [required, String] :key # The key name for the object that you want to place a legal hold on. 
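#
# @example Illustrative sketch: placing a legal hold (placeholder values)
#
#   # A minimal sketch with placeholder bucket and key names. Setting
#   # status to "ON" applies the hold; sending a second request with
#   # status "OFF" removes it.
#   resp = client.put_object_legal_hold({
#     bucket: "example-bucket",
#     key: "important-record.pdf",
#     legal_hold: { status: "ON" },
#   })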
# # @option params [Types::ObjectLockLegalHold] :legal_hold # Container element for the legal hold configuration you want to apply # to the specified object. # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :version_id # The version ID of the object that you want to place a legal hold on. # # @option params [String] :content_md5 # The MD5 hash for the request body. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::PutObjectLegalHoldOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::PutObjectLegalHoldOutput#request_charged #request_charged} => String # # @example Request syntax with placeholder values # # resp = client.put_object_legal_hold({ # bucket: "BucketName", # required # key: "ObjectKey", # required # legal_hold: { # status: "ON", # accepts ON, OFF # }, # request_payer: "requester", # accepts requester # version_id: "ObjectVersionId", # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold AWS API Documentation # # @overload put_object_legal_hold(params = {}) # @param [Hash] params ({}) def put_object_legal_hold(params = {}, options = {}) req = build_request(:put_object_legal_hold, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Places an Object Lock configuration on the specified bucket. The rule # specified in the Object Lock configuration will be applied by default # to every new object placed in the specified bucket. For more # information, see [Locking Objects][1]. 
# # * The `DefaultRetention` settings require both a mode and a period. # # * The `DefaultRetention` period can be either `Days` or `Years` but # you must select one. You cannot specify `Days` and `Years` at the # same time. # # * You can enable Object Lock for new or existing buckets. For more # information, see [Configuring Object Lock][2]. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html # # @option params [required, String] :bucket # The bucket whose Object Lock configuration you want to create or # replace. # # @option params [Types::ObjectLockConfiguration] :object_lock_configuration # The Object Lock configuration that you want to apply to the specified # bucket. # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :token # A token to allow Object Lock to be enabled for an existing bucket. # # @option params [String] :content_md5 # The MD5 hash for the request body. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). 
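#
# @example Illustrative sketch: a default GOVERNANCE retention rule
#
#   # A minimal sketch with a placeholder bucket name and retention
#   # period. As noted above, default_retention requires a mode plus
#   # either days or years, but never both.
#   resp = client.put_object_lock_configuration({
#     bucket: "example-bucket",
#     object_lock_configuration: {
#       object_lock_enabled: "Enabled",
#       rule: {
#         default_retention: { mode: "GOVERNANCE", days: 30 },
#       },
#     },
#   })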
# # @return [Types::PutObjectLockConfigurationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::PutObjectLockConfigurationOutput#request_charged #request_charged} => String # # @example Request syntax with placeholder values # # resp = client.put_object_lock_configuration({ # bucket: "BucketName", # required # object_lock_configuration: { # object_lock_enabled: "Enabled", # accepts Enabled # rule: { # default_retention: { # mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE # days: 1, # years: 1, # }, # }, # }, # request_payer: "requester", # accepts requester # token: "ObjectLockToken", # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration AWS API Documentation # # @overload put_object_lock_configuration(params = {}) # @param [Hash] params ({}) def put_object_lock_configuration(params = {}, options = {}) req = build_request(:put_object_lock_configuration, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Places an Object Retention configuration on an object. For more # information, see [Locking Objects][1]. Users or accounts require the # `s3:PutObjectRetention` permission in order to place an Object # Retention configuration on objects. Bypassing a Governance Retention # configuration requires the `s3:BypassGovernanceRetention` permission. # # This functionality is not supported for Amazon S3 on Outposts. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html # # @option params [required, String] :bucket # The bucket name that contains the object you want to apply this Object # Retention configuration to. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # # @option params [required, String] :key # The key name for the object that you want to apply this Object # Retention configuration to. # # @option params [Types::ObjectLockRetention] :retention # The container element for the Object Retention configuration. # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. 
# # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :version_id # The version ID for the object that you want to apply this Object # Retention configuration to. # # @option params [Boolean] :bypass_governance_retention # Indicates whether this action should bypass Governance-mode # restrictions. # # @option params [String] :content_md5 # The MD5 hash for the request body. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::PutObjectRetentionOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::PutObjectRetentionOutput#request_charged #request_charged} => String # # @example Request syntax with placeholder values # # resp = client.put_object_retention({ # bucket: "BucketName", # required # key: "ObjectKey", # required # retention: { # mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE # retain_until_date: Time.now, # }, # request_payer: "requester", # accepts requester # version_id: "ObjectVersionId", # bypass_governance_retention: false, # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention AWS API Documentation # # @overload put_object_retention(params = {}) # @param [Hash] params ({}) def put_object_retention(params = {}, options = {}) req = build_request(:put_object_retention, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Sets the supplied tag-set to an object that already exists in a # bucket. A tag is a key-value pair. For more information, see [Object # Tagging][1]. # # You can associate tags with an object by sending a PUT request against # the tagging subresource that is associated with the object. You can # retrieve tags by sending a GET request. For more information, see # [GetObjectTagging][2]. # # For tagging-related restrictions related to characters and encodings, # see [Tag Restrictions][3]. Note that Amazon S3 limits the maximum # number of tags to 10 tags per object. # # To use this operation, you must have permission to perform the # `s3:PutObjectTagging` action. 
By default, the bucket owner has this # permission and can grant this permission to others. # # To put tags of any other version, use the `versionId` query parameter. # You also need permission for the `s3:PutObjectVersionTagging` action. # # `PutObjectTagging` has the following special errors. For more Amazon # S3 errors, see [Error Responses][4]. # # * `InvalidTag` - The tag provided was not a valid tag. This error can # occur if the tag did not pass input validation. For more # information, see [Object Tagging][1]. # # * `MalformedXML` - The XML provided does not match the schema. # # * `OperationAborted` - A conflicting conditional action is currently # in progress against this resource. Please try again. # # * `InternalError` - The service was unable to apply the provided tag # to the object. # # The following operations are related to `PutObjectTagging`: # # * [GetObjectTagging][2] # # * [DeleteObjectTagging][5] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html # [3]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html # # @option params [required, String] :bucket # The bucket name containing the object. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][1] in the *Amazon S3 User Guide*. # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][2] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [required, String] :key # Name of the object key. # # @option params [String] :version_id # The versionId of the object that the tag-set will be added to. # # @option params [String] :content_md5 # The MD5 hash for the request body. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent.
Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [required, Types::Tagging] :tagging # Container for the `TagSet` and `Tag` elements # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @return [Types::PutObjectTaggingOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::PutObjectTaggingOutput#version_id #version_id} => String # # # @example Example: To add tags to an existing object # # # The following example adds tags to an existing object. # # resp = client.put_object_tagging({ # bucket: "examplebucket", # key: "HappyFace.jpg", # tagging: { # tag_set: [ # { # key: "Key3", # value: "Value3", # }, # { # key: "Key4", # value: "Value4", # }, # ], # }, # }) # # resp.to_h outputs the following: # { # version_id: "null", # } # # @example Request syntax with placeholder values # # resp = client.put_object_tagging({ # bucket: "BucketName", # required # key: "ObjectKey", # required # version_id: "ObjectVersionId", # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # tagging: { # required # tag_set: [ # required # { # key: "ObjectKey", # required # value: "Value", # required # }, # ], # }, # expected_bucket_owner: "AccountId", # request_payer: "requester", # accepts requester # }) # # @example Response structure # # resp.version_id #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging AWS API Documentation # # @overload put_object_tagging(params = {}) # @param [Hash] params ({}) def put_object_tagging(params = {}, options = {}) req = build_request(:put_object_tagging, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Creates or modifies the `PublicAccessBlock` configuration for an # Amazon S3 bucket. To use this operation, you must have the # `s3:PutBucketPublicAccessBlock` permission. For more information about # Amazon S3 permissions, see [Specifying Permissions in a Policy][1]. # # When Amazon S3 evaluates the `PublicAccessBlock` configuration for a # bucket or an object, it checks the `PublicAccessBlock` configuration # for both the bucket (or the bucket that contains the object) and the # bucket owner's account. 
If the `PublicAccessBlock` configurations are # different between the bucket and the account, Amazon S3 uses the most # restrictive combination of the bucket-level and account-level # settings. # # For more information about when Amazon S3 considers a bucket or an # object public, see [The Meaning of "Public"][2]. # # The following operations are related to `PutPublicAccessBlock`: # # * [GetPublicAccessBlock][3] # # * [DeletePublicAccessBlock][4] # # * [GetBucketPolicyStatus][5] # # * [Using Amazon S3 Block Public Access][6] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html # # @option params [required, String] :bucket # The name of the Amazon S3 bucket whose `PublicAccessBlock` # configuration you want to set. # # @option params [String] :content_md5 # The MD5 hash of the `PutPublicAccessBlock` request body. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [required, Types::PublicAccessBlockConfiguration] :public_access_block_configuration # The `PublicAccessBlock` configuration that you want to apply to this # Amazon S3 bucket. You can enable the configuration options in any # combination. For more information about when Amazon S3 considers a # bucket or object public, see [The Meaning of "Public"][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. 
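# @example Example: Blocking all public access for a bucket (illustrative sketch)
#
#   # A minimal sketch, not an official example: "examplebucket" is a
#   # placeholder name. All four settings are enabled here, the most
#   # restrictive combination; recall that Amazon S3 applies the most
#   # restrictive mix of these bucket-level settings and the
#   # account-level configuration.
#   client.put_public_access_block({
#     bucket: "examplebucket",
#     public_access_block_configuration: {
#       block_public_acls: true,
#       ignore_public_acls: true,
#       block_public_policy: true,
#       restrict_public_buckets: true,
#     },
#   })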
# # @example Request syntax with placeholder values # # resp = client.put_public_access_block({ # bucket: "BucketName", # required # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # public_access_block_configuration: { # required # block_public_acls: false, # ignore_public_acls: false, # block_public_policy: false, # restrict_public_buckets: false, # }, # expected_bucket_owner: "AccountId", # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock AWS API Documentation # # @overload put_public_access_block(params = {}) # @param [Hash] params ({}) def put_public_access_block(params = {}, options = {}) req = build_request(:put_public_access_block, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Restores an archived copy of an object back into Amazon S3. # # This functionality is not supported for Amazon S3 on Outposts. # # This action performs the following types of requests: # # * `select` - Perform a select query on an archived object # # * `restore an archive` - Restore an archived object # # For more information about the `S3` structure in the request body, see # the following: # # * [PutObject][1] # # * [Managing Access with ACLs][2] in the *Amazon S3 User Guide* # # * [Protecting Data Using Server-Side Encryption][3] in the *Amazon S3 # User Guide* # # Define the SQL expression for the `SELECT` type of restoration for # your query in the request body's `SelectParameters` structure. You # can use expressions like the following examples. # # * The following expression returns all records from the specified # object. # # `SELECT * FROM Object` # # * Assuming that you are not using any headers for data stored in the # object, you can specify columns with positional headers. # # `SELECT s._1, s._2 FROM Object s WHERE s._3 > 100` # # * If you have headers and you set the `fileHeaderInfo` in the `CSV` # structure in the request body to `USE`, you can specify headers in # the query. (If you set the `fileHeaderInfo` field to `IGNORE`, the # first row is skipped for the query.) You cannot mix ordinal # positions with header column names. # # `SELECT s.Id, s.FirstName, s.SSN FROM S3Object s` # # When making a select request, you can also do the following: # # * To expedite your queries, specify the `Expedited` tier. For more # information about tiers, see "Restoring Archives," later in this # topic. # # * Specify details about the data serialization format of both the # input object that is being queried and the serialization of the # CSV-encoded query results. # # The following are additional important facts about the select feature: # # * The output results are new Amazon S3 objects. Unlike archive # retrievals, they are stored until explicitly deleted, either manually or # through a lifecycle configuration. # # * You can issue more than one select request on the same Amazon S3 # object. Amazon S3 doesn't deduplicate requests, so avoid issuing # duplicate requests. # # * Amazon S3 accepts a select request even if the object has already # been restored. A select request doesn't return a `409` error response. # # Permissions # # : To use this operation, you must have permissions to perform the # `s3:RestoreObject` action. The bucket owner has this permission by # default and can grant this permission to others.
For more # information about permissions, see [Permissions Related to Bucket # Subresource Operations][4] and [Managing Access Permissions to Your # Amazon S3 Resources][5] in the *Amazon S3 User Guide*. # # Restoring objects # # : Objects that you archive to the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 # Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive # tiers, are not accessible in real time. For objects in the S3 # Glacier Flexible Retrieval or S3 Glacier Deep # Archive storage classes, you must first initiate a restore request, # and then wait until a temporary copy of the object is available. If # you want a permanent copy of the object, create a copy of it in the # Amazon S3 Standard storage class in your S3 bucket. To access an # archived object, you must restore the object for the duration # (number of days) that you specify. For objects in the Archive Access # or Deep Archive Access tiers of S3 Intelligent-Tiering, you must # first initiate a restore request, and then wait until the object is # moved into the Frequent Access tier. # # To restore a specific object version, you can provide a version ID. # If you don't provide a version ID, Amazon S3 restores the current # version. # # When restoring an archived object, you can specify one of the # following data access tier options in the `Tier` element of the # request body (a minimal request example follows this list): # # * `Expedited` - Expedited retrievals allow you to quickly access # your data stored in the S3 Glacier Flexible # Retrieval storage class or S3 Intelligent-Tiering Archive tier # when occasional urgent requests for restoring archives are # required. For all but the largest archived objects (250 MB+), data # accessed using Expedited retrievals is typically made available # within 1–5 minutes. Provisioned capacity ensures that retrieval # capacity for Expedited retrievals is available when you need it. # Expedited retrievals and provisioned capacity are not available # for objects stored in the S3 Glacier Deep Archive storage class or # S3 Intelligent-Tiering Deep Archive tier. # # * `Standard` - Standard retrievals allow you to access any of your # archived objects within several hours. This is the default option # for retrieval requests that do not specify the retrieval option. # Standard retrievals typically finish within 3–5 hours for objects # stored in the S3 Glacier Flexible Retrieval # storage class or S3 Intelligent-Tiering Archive tier. They # typically finish within 12 hours for objects stored in the S3 # Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep # Archive tier. Standard retrievals are free for objects stored in # S3 Intelligent-Tiering. # # * `Bulk` - Bulk retrievals are free for objects stored in the S3 Glacier # Flexible Retrieval and S3 Intelligent-Tiering storage classes, # enabling you to retrieve large amounts, even petabytes, of data at # no cost. Bulk retrievals typically finish within 5–12 hours for # objects stored in the S3 Glacier Flexible # Retrieval storage class or S3 Intelligent-Tiering Archive tier. # Bulk retrievals are also the lowest-cost retrieval option when # restoring objects from S3 Glacier Deep Archive. They typically # finish within 48 hours for objects stored in the S3 Glacier Deep # Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
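#   A minimal example of such a request (an illustrative sketch; the
#   bucket and key names are placeholders, and `Bulk` stands in for
#   whichever tier you need):
#
#       client.restore_object({
#         bucket: "examplebucket",
#         key: "archivedobjectkey",
#         restore_request: {
#           days: 7, # how long the temporary copy stays available
#           glacier_job_parameters: {
#             tier: "Bulk", # one of Standard, Bulk, or Expedited
#           },
#         },
#       })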
# # For more information about archive retrieval options and provisioned # capacity for `Expedited` data access, see [Restoring Archived # Objects][6] in the *Amazon S3 User Guide*. # # You can use Amazon S3 restore speed upgrade to change the restore # speed to a faster speed while it is in progress. For more # information, see [ Upgrading the speed of an in-progress restore][7] # in the *Amazon S3 User Guide*. # # To get the status of object restoration, you can send a `HEAD` # request. Operations return the `x-amz-restore` header, which # provides information about the restoration status, in the response. # You can use Amazon S3 event notifications to notify you when a # restore is initiated or completed. For more information, see # [Configuring Amazon S3 Event Notifications][8] in the *Amazon S3 # User Guide*. # # After restoring an archived object, you can update the restoration # period by reissuing the request with a new period. Amazon S3 updates # the restoration period relative to the current time and charges only # for the request; there are no data transfer charges. You cannot # update the restoration period when Amazon S3 is actively processing # your current restore request for the object. # # If your bucket has a lifecycle configuration with a rule that # includes an expiration action, the object expiration overrides the # life span that you specify in a restore request. For example, if you # restore an object copy for 10 days, but the object is scheduled to # expire in 3 days, Amazon S3 deletes the object in 3 days. For more # information about lifecycle configuration, see # [PutBucketLifecycleConfiguration][9] and [Object Lifecycle # Management][10] in the *Amazon S3 User Guide*. # # Responses # # : A successful action returns either the `200 OK` or `202 Accepted` # status code. # # * If the object is not previously restored, then Amazon S3 returns # `202 Accepted` in the response. # # * If the object is previously restored, Amazon S3 returns `200 OK` # in the response. # ^ # # * Special errors: # # * *Code: RestoreAlreadyInProgress* # # * *Cause: Object restore is already in progress. (This error does # not apply to SELECT type requests.)* # # * *HTTP Status Code: 409 Conflict* # # * *SOAP Fault Code Prefix: Client* # # * * *Code: GlacierExpeditedRetrievalNotAvailable* # # * *Cause: Expedited retrievals are currently not available. Try # again later. (Returned if there is insufficient capacity to # process the Expedited request.
This error applies only to # Expedited retrievals and not to S3 Standard or Bulk # retrievals.)* # # * *HTTP Status Code: 503* # # * *SOAP Fault Code Prefix: N/A* # # The following operations are related to `RestoreObject`: # # * [PutBucketLifecycleConfiguration][9] # # * [GetBucketNotificationConfiguration][11] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html # [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html # [10]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html # # @option params [required, String] :bucket # The bucket name containing the object to restore. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][1] in the *Amazon S3 User Guide*. # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][2] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [required, String] :key # Object key for which the action was initiated. # # @option params [String] :version_id # VersionId used to reference a specific version of the object. # # @option params [Types::RestoreRequest] :restore_request # Container for restore job parameters. # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. 
For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::RestoreObjectOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::RestoreObjectOutput#request_charged #request_charged} => String # * {Types::RestoreObjectOutput#restore_output_path #restore_output_path} => String # # # @example Example: To restore an archived object # # # The following example restores an archived copy of an object back into an Amazon S3 bucket for one day. # # resp = client.restore_object({ # bucket: "examplebucket", # key: "archivedobjectkey", # restore_request: { # days: 1, # glacier_job_parameters: { # tier: "Expedited", # }, # }, # }) # # resp.to_h outputs the following: # { # } # # @example Request syntax with placeholder values # # resp = client.restore_object({ # bucket: "BucketName", # required # key: "ObjectKey", # required # version_id: "ObjectVersionId", # restore_request: { # days: 1, # glacier_job_parameters: { # tier: "Standard", # required, accepts Standard, Bulk, Expedited # }, # type: "SELECT", # accepts SELECT # tier: "Standard", # accepts Standard, Bulk, Expedited # description: "Description", # select_parameters: { # input_serialization: { # required # csv: { # file_header_info: "USE", # accepts USE, IGNORE, NONE # comments: "Comments", # quote_escape_character: "QuoteEscapeCharacter", # record_delimiter: "RecordDelimiter", # field_delimiter: "FieldDelimiter", # quote_character: "QuoteCharacter", # allow_quoted_record_delimiter: false, # }, # compression_type: "NONE", # accepts NONE, GZIP, BZIP2 # json: { # type: "DOCUMENT", # accepts DOCUMENT, LINES # }, # parquet: { # }, # }, # expression_type: "SQL", # required, accepts SQL # expression: "Expression", # required # output_serialization: { # required # csv: { # quote_fields: "ALWAYS", # accepts ALWAYS, ASNEEDED # quote_escape_character: "QuoteEscapeCharacter", # record_delimiter: "RecordDelimiter", # field_delimiter: "FieldDelimiter", # quote_character: "QuoteCharacter", # }, # json: { # record_delimiter: "RecordDelimiter", # }, # }, # }, # output_location: { # s3: { # bucket_name: "BucketName", # required # prefix: "LocationPrefix", # required # encryption: { # encryption_type: "AES256", # required, accepts AES256, aws:kms, aws:kms:dsse #
kms_key_id: "SSEKMSKeyId", # kms_context: "KMSContext", # }, # canned_acl: "private", # accepts private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control # access_control_list: [ # { # grantee: { # display_name: "DisplayName", # email_address: "EmailAddress", # id: "ID", # type: "CanonicalUser", # required, accepts CanonicalUser, AmazonCustomerByEmail, Group # uri: "URI", # }, # permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP # }, # ], # tagging: { # tag_set: [ # required # { # key: "ObjectKey", # required # value: "Value", # required # }, # ], # }, # user_metadata: [ # { # name: "MetadataKey", # value: "MetadataValue", # }, # ], # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE # }, # }, # }, # request_payer: "requester", # accepts requester # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.request_charged #=> String, one of "requester" # resp.restore_output_path #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject AWS API Documentation # # @overload restore_object(params = {}) # @param [Hash] params ({}) def restore_object(params = {}, options = {}) req = build_request(:restore_object, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # This action filters the contents of an Amazon S3 object based on a # simple structured query language (SQL) statement. In the request, # along with the SQL expression, you must also specify a data # serialization format (JSON, CSV, or Apache Parquet) of the object. # Amazon S3 uses this format to parse object data into records, and # returns only records that match the specified SQL expression. You must # also specify the data serialization format for the response. # # This functionality is not supported for Amazon S3 on Outposts. # # For more information about Amazon S3 Select, see [Selecting Content # from Objects][1] and [SELECT Command][2] in the *Amazon S3 User # Guide*. # # # # Permissions # # : You must have the `s3:GetObject` permission for this # operation. Amazon S3 Select does not support anonymous access. For # more information about permissions, see [Specifying Permissions in a # Policy][3] in the *Amazon S3 User Guide*. # # Object Data Formats # # : You can use Amazon S3 Select to query objects that have the # following format properties: # # * *CSV, JSON, and Parquet* - Objects must be in CSV, JSON, or # Parquet format. # # * *UTF-8* - UTF-8 is the only encoding type Amazon S3 Select # supports. # # * *GZIP or BZIP2* - CSV and JSON files can be compressed using GZIP # or BZIP2. GZIP and BZIP2 are the only compression formats that # Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select # supports columnar compression for Parquet using GZIP or Snappy. # Amazon S3 Select does not support whole-object compression for # Parquet objects. # # * *Server-side encryption* - Amazon S3 Select supports querying # objects that are protected with server-side encryption. # # For objects that are encrypted with customer-provided encryption # keys (SSE-C), you must use HTTPS, and you must use the headers # that are documented in the [GetObject][4]. 
For more information # about SSE-C, see [Server-Side Encryption (Using Customer-Provided # Encryption Keys)][5] in the *Amazon S3 User Guide*. # # For objects that are encrypted with Amazon S3 managed keys # (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side # encryption is handled transparently, so you don't need to specify # anything. For more information about server-side encryption, # including SSE-S3 and SSE-KMS, see [Protecting Data Using # Server-Side Encryption][6] in the *Amazon S3 User Guide*. # # Working with the Response Body # # : Given that the response size is unknown, Amazon S3 Select streams the # response as a series of messages and includes a `Transfer-Encoding` # header with `chunked` as its value in the response. For more # information, see [Appendix: SelectObjectContent Response][7]. # # GetObject Support # # : The `SelectObjectContent` action does not support the following # `GetObject` functionality. For more information, see [GetObject][4]. # # * `Range`: Although you can specify a scan range for an Amazon S3 # Select request (see [SelectObjectContentRequest - ScanRange][8] in # the request parameters), you cannot specify the range of bytes of # an object to return. # # * The `GLACIER`, `DEEP_ARCHIVE`, and `REDUCED_REDUNDANCY` storage # classes, or the `ARCHIVE_ACCESS` and `DEEP_ARCHIVE_ACCESS` access # tiers of the `INTELLIGENT_TIERING` storage class: You cannot query # objects in the `GLACIER`, `DEEP_ARCHIVE`, or `REDUCED_REDUNDANCY` # storage classes, nor objects in the `ARCHIVE_ACCESS` or # `DEEP_ARCHIVE_ACCESS` access tiers of the `INTELLIGENT_TIERING` # storage class. For more information about storage classes, see # [Using Amazon S3 storage classes][9] in the *Amazon S3 User # Guide*. # # Special Errors # # : For a list of special errors for this operation, see [List of SELECT # Object Content Error Codes][10]. # # The following operations are related to `SelectObjectContent`: # # * [GetObject][4] # # * [GetBucketLifecycleConfiguration][11] # # * [PutBucketLifecycleConfiguration][12] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange # [9]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html # [12]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html # # @option params [required, String] :bucket # The S3 bucket. # # @option params [required, String] :key # The object key. # # @option params [String] :sse_customer_algorithm # The server-side encryption (SSE) algorithm used to encrypt the object. # This parameter is needed only when the object was created using a # checksum algorithm.
For more information, see [Protecting data using # SSE-C keys][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # # @option params [String] :sse_customer_key # The server-side encryption (SSE) customer managed key. This parameter # is needed only when the object was created using a checksum algorithm. # For more information, see [Protecting data using SSE-C keys][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # # @option params [String] :sse_customer_key_md5 # The MD5 digest of the server-side encryption (SSE) customer managed key. This # parameter is needed only when the object was created using a checksum # algorithm. For more information, see [Protecting data using SSE-C # keys][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # # @option params [required, String] :expression # The expression that is used to query the object. # # @option params [required, String] :expression_type # The type of the provided expression (for example, SQL). # # @option params [Types::RequestProgress] :request_progress # Specifies if periodic request progress information should be enabled. # # @option params [required, Types::InputSerialization] :input_serialization # Describes the format of the data in the object that is being queried. # # @option params [required, Types::OutputSerialization] :output_serialization # Describes the format of the data that you want Amazon S3 to return in # response. # # @option params [Types::ScanRange] :scan_range # Specifies the byte range of the object to get the records from. A # record is processed when its first byte is contained by the range. # This parameter is optional, but when specified, it must not be empty. # See RFC 2616, Section 14.35.1 about how to specify the start and end # of the range. # # `ScanRange` may be used in the following ways: # # * `<scanrange><start>50</start><end>100</end></scanrange>` - process # only the records starting between the bytes 50 and 100 (inclusive, # counting from zero) # # * `<scanrange><start>50</start></scanrange>` - process only the # records starting after the byte 50 # # * `<scanrange><end>50</end></scanrange>` - process only the records # within the last 50 bytes of the file. # # @option params [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # # @return [Types::SelectObjectContentOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::SelectObjectContentOutput#payload #payload} => Types::SelectObjectContentEventStream # # @example EventStream Operation Example # # You can process each event immediately as it arrives, or wait until the # full response is complete and iterate through the event stream enumerator. # # To interact with events immediately, register #select_object_content # with callbacks. Callbacks can be registered for specific events or for all events; # a callback for errors in the event stream can also be registered. # # Callbacks can be passed in with the `:event_stream_handler` option or within a block # attached to the #select_object_content call directly. A hybrid pattern of both # is also supported. # # The `:event_stream_handler` option takes either a Proc object or an # Aws::S3::EventStreams::SelectObjectContentEventStream object.
# # Usage pattern a): callbacks with a block attached to #select_object_content # Example for registering callbacks for all event types and the error event # # client.select_object_content( # params input# ) do |stream| # stream.on_error_event do |event| # # catch unmodeled error event in the stream # raise event # # => Aws::Errors::EventError # # event.event_type => :error # # event.error_code => String # # event.error_message => String # end # # stream.on_event do |event| # # process all events as they arrive # puts event.event_type # ... # end # # end # # Usage pattern b): pass in `:event_stream_handler` for #select_object_content # # 1) create an Aws::S3::EventStreams::SelectObjectContentEventStream object # Example for registering callbacks with specific events # # handler = Aws::S3::EventStreams::SelectObjectContentEventStream.new # handler.on_records_event do |event| # event # => Aws::S3::Types::Records # end # handler.on_stats_event do |event| # event # => Aws::S3::Types::Stats # end # handler.on_progress_event do |event| # event # => Aws::S3::Types::Progress # end # handler.on_cont_event do |event| # event # => Aws::S3::Types::Cont # end # handler.on_end_event do |event| # event # => Aws::S3::Types::End # end # # client.select_object_content( # params input #, event_stream_handler: handler) # # 2) use a Ruby Proc object # Example for registering callbacks with specific events # # handler = Proc.new do |stream| # stream.on_records_event do |event| # event # => Aws::S3::Types::Records # end # stream.on_stats_event do |event| # event # => Aws::S3::Types::Stats # end # stream.on_progress_event do |event| # event # => Aws::S3::Types::Progress # end # stream.on_cont_event do |event| # event # => Aws::S3::Types::Cont # end # stream.on_end_event do |event| # event # => Aws::S3::Types::End # end # end # # client.select_object_content( # params input #, event_stream_handler: handler) # # Usage pattern c): hybrid pattern of a) and b) # # handler = Aws::S3::EventStreams::SelectObjectContentEventStream.new # handler.on_records_event do |event| # event # => Aws::S3::Types::Records # end # handler.on_stats_event do |event| # event # => Aws::S3::Types::Stats # end # handler.on_progress_event do |event| # event # => Aws::S3::Types::Progress # end # handler.on_cont_event do |event| # event # => Aws::S3::Types::Cont # end # handler.on_end_event do |event| # event # => Aws::S3::Types::End # end # # client.select_object_content( # params input #, event_stream_handler: handler) do |stream| # stream.on_error_event do |event| # # catch unmodeled error event in the stream # raise event # # => Aws::Errors::EventError # # event.event_type => :error # # event.error_code => String # # event.error_message => String # end # end # # Besides the usage patterns above for processing events as they arrive, you can also # iterate through events after the response is complete.
# # Events are available at resp.payload # => Enumerator # For a parameter input example, refer to the following request syntax # # @example Request syntax with placeholder values # # resp = client.select_object_content({ # bucket: "BucketName", # required # key: "ObjectKey", # required # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # expression: "Expression", # required # expression_type: "SQL", # required, accepts SQL # request_progress: { # enabled: false, # }, # input_serialization: { # required # csv: { # file_header_info: "USE", # accepts USE, IGNORE, NONE # comments: "Comments", # quote_escape_character: "QuoteEscapeCharacter", # record_delimiter: "RecordDelimiter", # field_delimiter: "FieldDelimiter", # quote_character: "QuoteCharacter", # allow_quoted_record_delimiter: false, # }, # compression_type: "NONE", # accepts NONE, GZIP, BZIP2 # json: { # type: "DOCUMENT", # accepts DOCUMENT, LINES # }, # parquet: { # }, # }, # output_serialization: { # required # csv: { # quote_fields: "ALWAYS", # accepts ALWAYS, ASNEEDED # quote_escape_character: "QuoteEscapeCharacter", # record_delimiter: "RecordDelimiter", # field_delimiter: "FieldDelimiter", # quote_character: "QuoteCharacter", # }, # json: { # record_delimiter: "RecordDelimiter", # }, # }, # scan_range: { # start: 1, # end: 1, # }, # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # All events are available at resp.payload: # resp.payload #=> Enumerator # resp.payload.event_types #=> [:records, :stats, :progress, :cont, :end] # # For :records event available at #on_records_event callback and response eventstream enumerator: # event.payload #=> IO # # For :stats event available at #on_stats_event callback and response eventstream enumerator: # event.details.bytes_scanned #=> Integer # event.details.bytes_processed #=> Integer # event.details.bytes_returned #=> Integer # # For :progress event available at #on_progress_event callback and response eventstream enumerator: # event.details.bytes_scanned #=> Integer # event.details.bytes_processed #=> Integer # event.details.bytes_returned #=> Integer # # For :cont event available at #on_cont_event callback and response eventstream enumerator: # #=> EmptyStruct # For :end event available at #on_end_event callback and response eventstream enumerator: # #=> EmptyStruct # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent AWS API Documentation # # @overload select_object_content(params = {}) # @param [Hash] params ({}) def select_object_content(params = {}, options = {}, &block) params = params.dup event_stream_handler = case handler = params.delete(:event_stream_handler) when EventStreams::SelectObjectContentEventStream then handler when Proc then EventStreams::SelectObjectContentEventStream.new.tap(&handler) when nil then EventStreams::SelectObjectContentEventStream.new else msg = "expected :event_stream_handler to be a block or "\ "instance of Aws::S3::EventStreams::SelectObjectContentEventStream"\ ", got `#{handler.inspect}` instead" raise ArgumentError, msg end yield(event_stream_handler) if block_given? req = build_request(:select_object_content, params) req.context[:event_stream_handler] = event_stream_handler req.handlers.add(Aws::Binary::DecodeHandler, priority: 95) req.send_request(options, &block) end # Uploads a part in a multipart upload. # # In this operation, you provide new data as a part of an object in your # request.
However, you have an option to specify your existing Amazon # S3 object as a data source for the part you are uploading. To upload a # part from an existing object, you use the [UploadPartCopy][1] # operation. # # # # You must initiate a multipart upload (see [CreateMultipartUpload][2]) # before you can upload any part. In response to your initiate request, # Amazon S3 returns an upload ID, a unique identifier that you must # include in your upload part request. # # Part numbers can be any number from 1 to 10,000, inclusive. A part # number uniquely identifies a part and also defines its position within # the object being created. If you upload a new part using the same part # number that was used with a previous part, the previously uploaded # part is overwritten. # # For information about maximum and minimum part sizes and other # multipart upload specifications, see [Multipart upload limits][3] in # the *Amazon S3 User Guide*. # # After you initiate a multipart upload and upload one or more parts, you # must either complete or abort the multipart upload to stop # being charged for storage of the uploaded parts. Only after you # either complete or abort the multipart upload does Amazon S3 free up the # parts storage and stop charging you for it. # # # # For more information on multipart uploads, see [Multipart Upload # Overview][4] in the *Amazon S3 User Guide*. # # **Directory buckets** - For directory buckets, you must make requests # for this API operation to the Zonal endpoint. These endpoints support # virtual-hosted-style requests in the format # `https://bucket_name.s3express-az_id.region.amazonaws.com/key-name `. # Path-style requests are not supported. For more information, see # [Regional and Zonal endpoints][5] in the *Amazon S3 User Guide*. # # # # Permissions # : * **General purpose bucket permissions** - For information on the # permissions required to use the multipart upload API, see # [Multipart Upload and Permissions][6] in the *Amazon S3 User # Guide*. # # * **Directory bucket permissions** - To grant access to this API # operation on a directory bucket, we recommend that you use the [ # `CreateSession` ][7] API operation for session-based # authorization. Specifically, you grant the # `s3express:CreateSession` permission to the directory bucket in a # bucket policy or an IAM identity-based policy. Then, you make the # `CreateSession` API call on the bucket to obtain a session token. # With the session token in your request header, you can make API # requests to this operation. After the session token expires, you # make another `CreateSession` API call to generate a new session # token for use. The Amazon Web Services CLI and SDKs create and # refresh the session token automatically to avoid service # interruptions when a session expires. For more information about # authorization, see [ `CreateSession` ][7]. # # Data integrity # # : **General purpose bucket** - To ensure that data is not corrupted # traversing the network, specify the `Content-MD5` header in the # upload part request. Amazon S3 checks the part data against the # provided MD5 value. If they do not match, Amazon S3 returns an # error. If the upload request is signed with Signature Version 4, # then Amazon S3 uses the `x-amz-content-sha256` header # as a checksum instead of `Content-MD5`. For more information, see # [Authenticating Requests: Using the Authorization Header (Amazon Web # Services Signature Version 4)][8].
# # **Directory buckets** - MD5 is not supported by directory buckets. # You can use checksum algorithms to check object integrity. # # # # Encryption # : * **General purpose bucket** - Server-side encryption is for data # encryption at rest. Amazon S3 encrypts your data as it writes it # to disks in its data centers and decrypts it when you access it. # You have mutually exclusive options to protect data using # server-side encryption in Amazon S3, depending on how you choose # to manage the encryption keys. Specifically, the encryption key # options are Amazon S3 managed keys (SSE-S3), Amazon Web Services # KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 # encrypts data with server-side encryption using Amazon S3 managed # keys (SSE-S3) by default. You can optionally tell Amazon S3 to # encrypt data at rest using server-side encryption with other key # options. The option you use depends on whether you want to use KMS # keys (SSE-KMS) or provide your own encryption key (SSE-C). # # Server-side encryption is supported by the S3 Multipart Upload # operations. Unless you are using a customer-provided encryption # key (SSE-C), you don't need to specify the encryption parameters # in each UploadPart request. Instead, you only need to specify the # server-side encryption parameters in the initial Initiate # Multipart request. For more information, see # [CreateMultipartUpload][2]. # # If you request server-side encryption using a customer-provided # encryption key (SSE-C) in your initiate multipart upload request, # you must provide identical encryption information in each part # upload using the following request headers. # # * x-amz-server-side-encryption-customer-algorithm # # * x-amz-server-side-encryption-customer-key # # * x-amz-server-side-encryption-customer-key-MD5 # # * **Directory bucket** - For directory buckets, only server-side # encryption with Amazon S3 managed keys (SSE-S3) (`AES256`) is # supported. # # For more information, see [Using Server-Side Encryption][9] in the # *Amazon S3 User Guide*. # # Special errors # : * Error Code: `NoSuchUpload` # # * Description: The specified multipart upload does not exist. The # upload ID might be invalid, or the multipart upload might have # been aborted or completed. # # * HTTP Status Code: 404 Not Found # # * SOAP Fault Code Prefix: Client # # HTTP Host header syntax # # : Directory buckets - The HTTP Host header syntax is ` # Bucket_name.s3express-az_id.region.amazonaws.com`. 
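# As a concrete illustration of the SSE-C requirement described above,
# here is a minimal sketch (not an official example) of one part upload
# that repeats the customer-provided key material from the initiate
# request; the bucket, key, and upload ID values are placeholders:
#
#     require "base64"
#     require "digest"
#
#     customer_key = "32-byte-secret-encryption-key-00" # placeholder 256-bit key
#     client.upload_part({
#       body: "part data",
#       bucket: "examplebucket",
#       key: "examplelargeobject",
#       part_number: 1,
#       upload_id: "exampleUploadId",
#       sse_customer_algorithm: "AES256",
#       sse_customer_key: customer_key,
#       sse_customer_key_md5: Base64.strict_encode64(Digest::MD5.digest(customer_key)),
#     })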
# # The following operations are related to `UploadPart`: # # * [CreateMultipartUpload][2] # # * [CompleteMultipartUpload][10] # # * [AbortMultipartUpload][11] # # * [ListParts][12] # # * [ListMultipartUploads][13] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html # [9]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html # [12]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html # [13]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html # # @option params [String, StringIO, File] :body # Object data. # # @option params [required, String] :bucket # The name of the bucket to which the multipart upload was initiated. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # # @option params [Integer] :content_length # Size of the body in bytes. This parameter is useful when the size of # the body cannot be determined automatically. # # @option params [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the part data. This parameter # is auto-populated when using the command from the CLI. This parameter # is required if object lock parameters are specified. # # This functionality is not supported for directory buckets. # # # # @option params [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # This checksum algorithm must be the same for all parts and it must match # the checksum value supplied in the `CreateMultipartUpload` request. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :checksum_crc32 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32 checksum of the object. For # more information, see [Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :checksum_crc32c # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32C checksum of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :checksum_sha1 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 160-bit SHA-1 digest of the object. For # more information, see [Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :checksum_sha256 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 256-bit SHA-256 digest of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [required, String] :key # Object key for which the multipart upload was initiated.
    #
    # @option params [required, Integer] :part_number
    #   Part number of part being uploaded. This is a positive integer between
    #   1 and 10,000.
    #
    # @option params [required, String] :upload_id
    #   Upload ID identifying the multipart upload whose part is being
    #   uploaded.
    #
    # @option params [String] :sse_customer_algorithm
    #   Specifies the algorithm to use when encrypting the object (for
    #   example, AES256).
    #
    #   This functionality is not supported for directory buckets.
    #
    # @option params [String] :sse_customer_key
    #   Specifies the customer-provided encryption key for Amazon S3 to use in
    #   encrypting data. This value is used to store the object and then it is
    #   discarded; Amazon S3 does not store the encryption key. The key must
    #   be appropriate for use with the algorithm specified in the
    #   `x-amz-server-side-encryption-customer-algorithm` header. This must be
    #   the same encryption key specified in the initiate multipart upload
    #   request.
    #
    #   This functionality is not supported for directory buckets.
    #
    # @option params [String] :sse_customer_key_md5
    #   Specifies the 128-bit MD5 digest of the encryption key according to
    #   RFC 1321. Amazon S3 uses this header for a message integrity check to
    #   ensure that the encryption key was transmitted without error.
    #
    #   This functionality is not supported for directory buckets.
    #
    # @option params [String] :request_payer
    #   Confirms that the requester knows that they will be charged for the
    #   request. Bucket owners need not specify this parameter in their
    #   requests. If either the source or destination S3 bucket has Requester
    #   Pays enabled, the requester will pay for corresponding charges to copy
    #   the object. For information about downloading objects from Requester
    #   Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
    #   in the *Amazon S3 User Guide*.
    #
    #   This functionality is not supported for directory buckets.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
    #
    # @option params [String] :expected_bucket_owner
    #   The account ID of the expected bucket owner. If the account ID that
    #   you provide does not match the actual owner of the bucket, the request
    #   fails with the HTTP status code `403 Forbidden` (access denied).
    #
    # @return [Types::UploadPartOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
    #
    #   * {Types::UploadPartOutput#server_side_encryption #server_side_encryption} => String
    #   * {Types::UploadPartOutput#etag #etag} => String
    #   * {Types::UploadPartOutput#checksum_crc32 #checksum_crc32} => String
    #   * {Types::UploadPartOutput#checksum_crc32c #checksum_crc32c} => String
    #   * {Types::UploadPartOutput#checksum_sha1 #checksum_sha1} => String
    #   * {Types::UploadPartOutput#checksum_sha256 #checksum_sha256} => String
    #   * {Types::UploadPartOutput#sse_customer_algorithm #sse_customer_algorithm} => String
    #   * {Types::UploadPartOutput#sse_customer_key_md5 #sse_customer_key_md5} => String
    #   * {Types::UploadPartOutput#ssekms_key_id #ssekms_key_id} => String
    #   * {Types::UploadPartOutput#bucket_key_enabled #bucket_key_enabled} => Boolean
    #   * {Types::UploadPartOutput#request_charged #request_charged} => String
    #
    #
    # @example Example: To upload a part
    #
    #   # The following example uploads part 1 of a multipart upload. The example specifies a file name for the part data. The
    #   # upload ID is the same as the one returned by the initiate multipart upload request.
# # resp = client.upload_part({ # body: "fileToUpload", # bucket: "examplebucket", # key: "examplelargeobject", # part_number: 1, # upload_id: "xadcOB_7YPBOJuoFiQ9cz4P3Pe6FIZwO4f7wN93uHsNBEw97pl5eNwzExg0LAT2dUN91cOmrEQHDsP3WA60CEg--", # }) # # resp.to_h outputs the following: # { # etag: "\"d8c2eafd90c266e19ab9dcacc479f8af\"", # } # # @example Request syntax with placeholder values # # resp = client.upload_part({ # body: source_file, # bucket: "BucketName", # required # content_length: 1, # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # checksum_crc32: "ChecksumCRC32", # checksum_crc32c: "ChecksumCRC32C", # checksum_sha1: "ChecksumSHA1", # checksum_sha256: "ChecksumSHA256", # key: "ObjectKey", # required # part_number: 1, # required # upload_id: "MultipartUploadId", # required # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # request_payer: "requester", # accepts requester # expected_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.server_side_encryption #=> String, one of "AES256", "aws:kms", "aws:kms:dsse" # resp.etag #=> String # resp.checksum_crc32 #=> String # resp.checksum_crc32c #=> String # resp.checksum_sha1 #=> String # resp.checksum_sha256 #=> String # resp.sse_customer_algorithm #=> String # resp.sse_customer_key_md5 #=> String # resp.ssekms_key_id #=> String # resp.bucket_key_enabled #=> Boolean # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart AWS API Documentation # # @overload upload_part(params = {}) # @param [Hash] params ({}) def upload_part(params = {}, options = {}) req = build_request(:upload_part, params) req.send_request(options) end # Uploads a part by copying data from an existing object as data source. # To specify the data source, you add the request header # `x-amz-copy-source` in your request. To specify a byte range, you add # the request header `x-amz-copy-source-range` in your request. # # For information about maximum and minimum part sizes and other # multipart upload specifications, see [Multipart upload limits][1] in # the *Amazon S3 User Guide*. # # Instead of copying data from an existing object as part data, you # might use the [UploadPart][2] action to upload new data as a part of # an object in your request. # # # # You must initiate a multipart upload before you can upload any part. # In response to your initiate request, Amazon S3 returns the upload ID, # a unique identifier that you must include in your upload part request. # # For conceptual information about multipart uploads, see [Uploading # Objects Using Multipart Upload][3] in the *Amazon S3 User Guide*. For # information about copying objects using a single atomic action vs. a # multipart upload, see [Operations on Objects][4] in the *Amazon S3 # User Guide*. # # **Directory buckets** - For directory buckets, you must make requests # for this API operation to the Zonal endpoint. These endpoints support # virtual-hosted-style requests in the format # `https://bucket_name.s3express-az_id.region.amazonaws.com/key-name `. # Path-style requests are not supported. For more information, see # [Regional and Zonal endpoints][5] in the *Amazon S3 User Guide*. # # # # Authentication and authorization # # : All `UploadPartCopy` requests must be authenticated and signed by # using IAM credentials (access key ID and secret access key for the # IAM identities). 
All headers with the `x-amz-` prefix, including `x-amz-copy-source`, must be signed. For more information, see [REST Authentication][6].
    #
    #   **Directory buckets** - You must use IAM credentials to authenticate
    #   and authorize your access to the `UploadPartCopy` API operation,
    #   instead of using the temporary security credentials through the
    #   `CreateSession` API operation.
    #
    #   The Amazon Web Services CLI and SDKs handle authentication and
    #   authorization on your behalf.
    #
    # Permissions
    #
    # : You must have `READ` access to the source object and `WRITE` access
    #   to the destination bucket.
    #
    #   * **General purpose bucket permissions** - You must have the
    #     permissions in a policy based on the bucket types of your source
    #     bucket and destination bucket in an `UploadPartCopy` operation.
    #
    #     * If the source object is in a general purpose bucket, you must
    #       have the `s3:GetObject` permission to read
    #       the source object that is being copied.
    #
    #     * If the destination bucket is a general purpose bucket, you must
    #       have the `s3:PutObject` permission to write
    #       the object copy to the destination bucket.
    #
    #     For information about permissions required to use the multipart
    #     upload API, see [Multipart Upload and Permissions][7] in the
    #     *Amazon S3 User Guide*.
    #
    #   * **Directory bucket permissions** - You must have permissions in a
    #     bucket policy or an IAM identity-based policy based on the source
    #     and destination bucket types in an `UploadPartCopy` operation.
    #
    #     * If the source object that you want to copy is in a directory
    #       bucket, you must have the
    #       `s3express:CreateSession` permission in the
    #       `Action` element of a policy to read the object. By default,
    #       the session is in the `ReadWrite` mode. If you want to restrict
    #       the access, you can explicitly set the `s3express:SessionMode`
    #       condition key to `ReadOnly` on the copy source bucket.
    #
    #     * If the copy destination is a directory bucket, you must have the
    #       `s3express:CreateSession` permission in the
    #       `Action` element of a policy to write the object to the
    #       destination. The `s3express:SessionMode` condition key cannot be
    #       set to `ReadOnly` on the copy destination.
    #
    #     For example policies, see [Example bucket policies for S3 Express
    #     One Zone][8] and [Amazon Web Services Identity and Access
    #     Management (IAM) identity-based policies for S3 Express One
    #     Zone][9] in the *Amazon S3 User Guide*.
    #
    # Encryption
    # : * General purpose buckets - For information about using
    #     server-side encryption with customer-provided encryption keys with
    #     the `UploadPartCopy` operation, see [CopyObject][10] and
    #     [UploadPart][2].
    #
    #   * Directory buckets - For directory buckets, only
    #     server-side encryption with Amazon S3 managed keys (SSE-S3)
    #     (`AES256`) is supported.
    #
    # Special errors
    # : * Error Code: `NoSuchUpload`
    #
    #     * Description: The specified multipart upload does not exist. The
    #       upload ID might be invalid, or the multipart upload might have
    #       been aborted or completed.
    #
    #     * HTTP Status Code: 404 Not Found
    #
    #   * Error Code: `InvalidRequest`
    #
    #     * Description: The specified copy source is not supported as a
    #       byte-range copy source.
    #
    #     * HTTP Status Code: 400 Bad Request
    #
    # HTTP Host header syntax
    #
    # : Directory buckets - The HTTP Host header syntax is
    #   `Bucket_name.s3express-az_id.region.amazonaws.com`.
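    #
    # For example, a complete part-copy flow looks like the following (a
    # minimal sketch; the bucket names, keys, and single-part layout are
    # placeholder assumptions):
    #
    #     # initiate the upload, copy one part from an existing object,
    #     # then complete the upload using the returned ETag
    #     create = client.create_multipart_upload(bucket: 'dest-bucket', key: 'dest-key')
    #     part = client.upload_part_copy(
    #       bucket: 'dest-bucket',
    #       key: 'dest-key',
    #       upload_id: create.upload_id,
    #       part_number: 1,
    #       copy_source: '/src-bucket/src-key'
    #     )
    #     client.complete_multipart_upload(
    #       bucket: 'dest-bucket',
    #       key: 'dest-key',
    #       upload_id: create.upload_id,
    #       multipart_upload: {
    #         parts: [{ etag: part.copy_part_result.etag, part_number: 1 }]
    #       }
    #     )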
# # The following operations are related to `UploadPartCopy`: # # * [CreateMultipartUpload][11] # # * [UploadPart][2] # # * [CompleteMultipartUpload][12] # # * [AbortMultipartUpload][13] # # * [ListParts][14] # # * [ListMultipartUploads][15] # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html # [6]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html # [7]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html # [8]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html # [9]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html # [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html # [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html # [12]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html # [13]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html # [14]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html # [15]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html # # @option params [required, String] :bucket # The bucket name. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style requests # are not supported. Directory bucket names must be unique in the chosen # Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, you # must provide the alias of the access point in place of the bucket name # or specify the access point ARN. When using the access point ARN, you # must direct requests to the access point hostname. The access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the bucket # name. For more information about access point ARNs, see [Using access # points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. The # S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. 
    #
    #
    #
    # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
    # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
    # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
    #
    # @option params [required, String] :copy_source
    #   Specifies the source object for the copy operation. You specify the
    #   value in one of two formats, depending on whether you want to access
    #   the source object through an [access point][1]:
    #
    #   * For objects not accessed through an access point, specify the name
    #     of the source bucket and key of the source object, separated by a
    #     slash (/). For example, to copy the object `reports/january.pdf`
    #     from the bucket `awsexamplebucket`, use
    #     `awsexamplebucket/reports/january.pdf`. The value must be
    #     URL-encoded.
    #
    #   * For objects accessed through access points, specify the Amazon
    #     Resource Name (ARN) of the object as accessed through the access
    #     point, in the format
    #     `arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>`.
    #     For example, to copy the object `reports/january.pdf` through access
    #     point `my-access-point` owned by account `123456789012` in Region
    #     `us-west-2`, use the URL encoding of
    #     `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`.
    #     The value must be URL-encoded.
    #
    #     * Amazon S3 supports copy operations using access points only when
    #       the source and destination buckets are in the same Amazon Web
    #       Services Region.
    #
    #     * Access points are not supported by directory buckets.
    #
    #     Alternatively, for objects accessed through Amazon S3 on Outposts,
    #     specify the ARN of the object as accessed in the format
    #     `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>`.
    #     For example, to copy the object `reports/january.pdf` through
    #     outpost `my-outpost` owned by account `123456789012` in Region
    #     `us-west-2`, use the URL encoding of
    #     `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`.
    #     The value must be URL-encoded.
    #
    #   If your bucket has versioning enabled, you could have multiple
    #   versions of the same object. By default, `x-amz-copy-source`
    #   identifies the current version of the source object to copy. To copy a
    #   specific version of the source object, append
    #   `?versionId=<version-id>` to the `x-amz-copy-source` request header
    #   (for example, `x-amz-copy-source:
    #   /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`).
    #
    #   If the current version is a delete marker and you don't specify a
    #   versionId in the `x-amz-copy-source` request header, Amazon S3 returns
    #   a `404 Not Found` error, because the object does not exist. If you
    #   specify versionId in the `x-amz-copy-source` and the versionId is a
    #   delete marker, Amazon S3 returns an HTTP `400 Bad Request` error,
    #   because you are not allowed to specify a delete marker as a version
    #   for the `x-amz-copy-source`.
    #
    #   **Directory buckets** - S3 Versioning isn't enabled or supported for
    #   directory buckets.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
    #
    # @option params [String] :copy_source_if_match
    #   Copies the object if its entity tag (ETag) matches the specified tag.
# # If both of the `x-amz-copy-source-if-match` and # `x-amz-copy-source-if-unmodified-since` headers are present in the # request as follows: # # `x-amz-copy-source-if-match` condition evaluates to `true`, and; # # `x-amz-copy-source-if-unmodified-since` condition evaluates to # `false`; # # Amazon S3 returns `200 OK` and copies the data. # # @option params [Time,DateTime,Date,Integer,String] :copy_source_if_modified_since # Copies the object if it has been modified since the specified time. # # If both of the `x-amz-copy-source-if-none-match` and # `x-amz-copy-source-if-modified-since` headers are present in the # request as follows: # # `x-amz-copy-source-if-none-match` condition evaluates to `false`, and; # # `x-amz-copy-source-if-modified-since` condition evaluates to `true`; # # Amazon S3 returns `412 Precondition Failed` response code. # # @option params [String] :copy_source_if_none_match # Copies the object if its entity tag (ETag) is different than the # specified ETag. # # If both of the `x-amz-copy-source-if-none-match` and # `x-amz-copy-source-if-modified-since` headers are present in the # request as follows: # # `x-amz-copy-source-if-none-match` condition evaluates to `false`, and; # # `x-amz-copy-source-if-modified-since` condition evaluates to `true`; # # Amazon S3 returns `412 Precondition Failed` response code. # # @option params [Time,DateTime,Date,Integer,String] :copy_source_if_unmodified_since # Copies the object if it hasn't been modified since the specified # time. # # If both of the `x-amz-copy-source-if-match` and # `x-amz-copy-source-if-unmodified-since` headers are present in the # request as follows: # # `x-amz-copy-source-if-match` condition evaluates to `true`, and; # # `x-amz-copy-source-if-unmodified-since` condition evaluates to # `false`; # # Amazon S3 returns `200 OK` and copies the data. # # @option params [String] :copy_source_range # The range of bytes to copy from the source object. The range value # must use the form bytes=first-last, where the first and last are the # zero-based byte offsets to copy. For example, bytes=0-9 indicates that # you want to copy the first 10 bytes of the source. You can copy a # range only if the source object is greater than 5 MB. # # @option params [required, String] :key # Object key for which the multipart upload was initiated. # # @option params [required, Integer] :part_number # Part number of part being copied. This is a positive integer between 1 # and 10,000. # # @option params [required, String] :upload_id # Upload ID identifying the multipart upload whose part is being copied. # # @option params [String] :sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, AES256). # # This functionality is not supported when the destination bucket is a # directory bucket. # # # # @option params [String] :sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use in # encrypting data. This value is used to store the object and then it is # discarded; Amazon S3 does not store the encryption key. The key must # be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. This must be # the same encryption key specified in the initiate multipart upload # request. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # # @option params [String] :sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. 
Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # # @option params [String] :copy_source_sse_customer_algorithm # Specifies the algorithm to use when decrypting the source object (for # example, `AES256`). # # This functionality is not supported when the source object is in a # directory bucket. # # # # @option params [String] :copy_source_sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use to # decrypt the source object. The encryption key provided in this header # must be one that was used when the source object was created. # # This functionality is not supported when the source object is in a # directory bucket. # # # # @option params [String] :copy_source_sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check to # ensure that the encryption key was transmitted without error. # # This functionality is not supported when the source object is in a # directory bucket. # # # # @option params [String] :request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has Requester # Pays enabled, the requester will pay for corresponding charges to copy # the object. For information about downloading objects from Requester # Pays buckets, see [Downloading Objects in Requester Pays Buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # # @option params [String] :expected_bucket_owner # The account ID of the expected destination bucket owner. If the # account ID that you provide does not match the actual owner of the # destination bucket, the request fails with the HTTP status code `403 # Forbidden` (access denied). # # @option params [String] :expected_source_bucket_owner # The account ID of the expected source bucket owner. If the account ID # that you provide does not match the actual owner of the source bucket, # the request fails with the HTTP status code `403 Forbidden` (access # denied). # # @return [Types::UploadPartCopyOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::UploadPartCopyOutput#copy_source_version_id #copy_source_version_id} => String # * {Types::UploadPartCopyOutput#copy_part_result #copy_part_result} => Types::CopyPartResult # * {Types::UploadPartCopyOutput#server_side_encryption #server_side_encryption} => String # * {Types::UploadPartCopyOutput#sse_customer_algorithm #sse_customer_algorithm} => String # * {Types::UploadPartCopyOutput#sse_customer_key_md5 #sse_customer_key_md5} => String # * {Types::UploadPartCopyOutput#ssekms_key_id #ssekms_key_id} => String # * {Types::UploadPartCopyOutput#bucket_key_enabled #bucket_key_enabled} => Boolean # * {Types::UploadPartCopyOutput#request_charged #request_charged} => String # # # @example Example: To upload a part by copying data from an existing object as data source # # # The following example uploads a part of a multipart upload by copying data from an existing object as data source. 
# # resp = client.upload_part_copy({ # bucket: "examplebucket", # copy_source: "/bucketname/sourceobjectkey", # key: "examplelargeobject", # part_number: 1, # upload_id: "exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--", # }) # # resp.to_h outputs the following: # { # copy_part_result: { # etag: "\"b0c6f0e7e054ab8fa2536a2677f8734d\"", # last_modified: Time.parse("2016-12-29T21:24:43.000Z"), # }, # } # # @example Example: To upload a part by copying byte range from an existing object as data source # # # The following example uploads a part of a multipart upload by copying a specified byte range from an existing object as # # data source. # # resp = client.upload_part_copy({ # bucket: "examplebucket", # copy_source: "/bucketname/sourceobjectkey", # copy_source_range: "bytes=1-100000", # key: "examplelargeobject", # part_number: 2, # upload_id: "exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--", # }) # # resp.to_h outputs the following: # { # copy_part_result: { # etag: "\"65d16d19e65a7508a51f043180edcc36\"", # last_modified: Time.parse("2016-12-29T21:44:28.000Z"), # }, # } # # @example Request syntax with placeholder values # # resp = client.upload_part_copy({ # bucket: "BucketName", # required # copy_source: "CopySource", # required # copy_source_if_match: "CopySourceIfMatch", # copy_source_if_modified_since: Time.now, # copy_source_if_none_match: "CopySourceIfNoneMatch", # copy_source_if_unmodified_since: Time.now, # copy_source_range: "CopySourceRange", # key: "ObjectKey", # required # part_number: 1, # required # upload_id: "MultipartUploadId", # required # sse_customer_algorithm: "SSECustomerAlgorithm", # sse_customer_key: "SSECustomerKey", # sse_customer_key_md5: "SSECustomerKeyMD5", # copy_source_sse_customer_algorithm: "CopySourceSSECustomerAlgorithm", # copy_source_sse_customer_key: "CopySourceSSECustomerKey", # copy_source_sse_customer_key_md5: "CopySourceSSECustomerKeyMD5", # request_payer: "requester", # accepts requester # expected_bucket_owner: "AccountId", # expected_source_bucket_owner: "AccountId", # }) # # @example Response structure # # resp.copy_source_version_id #=> String # resp.copy_part_result.etag #=> String # resp.copy_part_result.last_modified #=> Time # resp.copy_part_result.checksum_crc32 #=> String # resp.copy_part_result.checksum_crc32c #=> String # resp.copy_part_result.checksum_sha1 #=> String # resp.copy_part_result.checksum_sha256 #=> String # resp.server_side_encryption #=> String, one of "AES256", "aws:kms", "aws:kms:dsse" # resp.sse_customer_algorithm #=> String # resp.sse_customer_key_md5 #=> String # resp.ssekms_key_id #=> String # resp.bucket_key_enabled #=> Boolean # resp.request_charged #=> String, one of "requester" # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy AWS API Documentation # # @overload upload_part_copy(params = {}) # @param [Hash] params ({}) def upload_part_copy(params = {}, options = {}) req = build_request(:upload_part_copy, params) req.send_request(options) end # This operation is not supported by directory buckets. # # # # Passes transformed objects to a `GetObject` operation when using # Object Lambda access points. For information about Object Lambda # access points, see [Transforming objects with Object Lambda access # points][1] in the *Amazon S3 User Guide*. 
    #
    # This operation supports metadata that can be returned by
    # [GetObject][2], in addition to `RequestRoute`, `RequestToken`,
    # `StatusCode`, `ErrorCode`, and `ErrorMessage`. The `GetObject`
    # response metadata is supported so that the `WriteGetObjectResponse`
    # caller, typically a Lambda function, can provide the same metadata
    # when it internally invokes `GetObject`. When `WriteGetObjectResponse`
    # is called by a customer-owned Lambda function, the metadata returned
    # to the end user `GetObject` call might differ from what Amazon S3
    # would normally return.
    #
    # You can include any number of metadata headers. When including a
    # metadata header, it should be prefaced with `x-amz-meta`. For example,
    # `x-amz-meta-my-custom-header: MyCustomValue`. The primary use case for
    # this is to forward `GetObject` metadata.
    #
    # Amazon Web Services provides some prebuilt Lambda functions that you
    # can use with S3 Object Lambda to detect and redact personally
    # identifiable information (PII) and decompress S3 objects. These Lambda
    # functions are available in the Amazon Web Services Serverless
    # Application Repository, and can be selected through the Amazon Web
    # Services Management Console when you create your Object Lambda access
    # point.
    #
    # Example 1: PII Access Control - This Lambda function uses Amazon
    # Comprehend, a natural language processing (NLP) service that uses
    # machine learning to find insights and relationships in text. It
    # automatically detects personally identifiable information (PII) such as
    # names, addresses, dates, credit card numbers, and social security
    # numbers from documents in your Amazon S3 bucket.
    #
    # Example 2: PII Redaction - This Lambda function uses Amazon
    # Comprehend, a natural language processing (NLP) service that uses
    # machine learning to find insights and relationships in text. It
    # automatically redacts personally identifiable information (PII) such as
    # names, addresses, dates, credit card numbers, and social security
    # numbers from documents in your Amazon S3 bucket.
    #
    # Example 3: Decompression - The Lambda function
    # S3ObjectLambdaDecompression is equipped to decompress objects stored
    # in S3 in one of six compressed file formats: bzip2, gzip,
    # snappy, zlib, zstandard, and ZIP.
    #
    # For information on how to view and use these functions, see [Using
    # Amazon Web Services built Lambda functions][3] in the *Amazon S3 User
    # Guide*.
    #
    # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html
    # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
    # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html
    #
    # @option params [required, String] :request_route
    #   Route prefix to the HTTP URL generated.
    #
    # @option params [required, String] :request_token
    #   A single-use encrypted token that maps `WriteGetObjectResponse` to the
    #   end user `GetObject` request.
    #
    # @option params [String, IO] :body
    #   The object data.
    #
    # @option params [Integer] :status_code
    #   The integer status code for an HTTP response of a corresponding
    #   `GetObject` request. The following is a list of status codes.
    #
    #   * `200 - OK`
    #
    #   * `206 - Partial Content`
    #
    #   * `304 - Not Modified`
    #
    #   * `400 - Bad Request`
    #
    #   * `401 - Unauthorized`
    #
    #   * `403 - Forbidden`
    #
    #   * `404 - Not Found`
    #
    #   * `405 - Method Not Allowed`
    #
    #   * `409 - Conflict`
    #
    #   * `411 - Length Required`
    #
    #   * `412 - Precondition Failed`
    #
    #   * `416 - Range Not Satisfiable`
    #
    #   * `500 - Internal Server Error`
    #
    #   * `503 - Service Unavailable`
    #
    # @option params [String] :error_code
    #   A string that uniquely identifies an error condition. Returned in the
    #   <Code> tag of the error XML response for a corresponding
    #   `GetObject` call. Cannot be used with a successful `StatusCode` header
    #   or when the transformed object is provided in the body. All error
    #   codes from S3 are sentence-cased. The regular expression (regex) value
    #   is `"^[A-Z][a-zA-Z]+$"`.
    #
    # @option params [String] :error_message
    #   Contains a generic description of the error condition. Returned in the
    #   <Message> tag of the error XML response for a corresponding
    #   `GetObject` call. Cannot be used with a successful `StatusCode` header
    #   or when the transformed object is provided in the body.
    #
    # @option params [String] :accept_ranges
    #   Indicates that a range of bytes was specified.
    #
    # @option params [String] :cache_control
    #   Specifies caching behavior along the request/reply chain.
    #
    # @option params [String] :content_disposition
    #   Specifies presentational information for the object.
    #
    # @option params [String] :content_encoding
    #   Specifies what content encodings have been applied to the object and
    #   thus what decoding mechanisms must be applied to obtain the media-type
    #   referenced by the Content-Type header field.
    #
    # @option params [String] :content_language
    #   The language the content is in.
    #
    # @option params [Integer] :content_length
    #   The size of the content body in bytes.
    #
    # @option params [String] :content_range
    #   The portion of the object returned in the response.
    #
    # @option params [String] :content_type
    #   A standard MIME type describing the format of the object data.
    #
    # @option params [String] :checksum_crc32
    #   This header can be used as a data integrity check to verify that the
    #   data received is the same data that was originally sent. This
    #   specifies the base64-encoded, 32-bit CRC32 checksum of the object
    #   returned by the Object Lambda function. This may not match the
    #   checksum for the object stored in Amazon S3. Amazon S3 will perform
    #   validation of the checksum values only when the original `GetObject`
    #   request required checksum validation. For more information about
    #   checksums, see [Checking object integrity][1] in the *Amazon S3 User
    #   Guide*.
    #
    #   Only one checksum header can be specified at a time. If you supply
    #   multiple checksum headers, this request will fail.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
    #
    # @option params [String] :checksum_crc32c
    #   This header can be used as a data integrity check to verify that the
    #   data received is the same data that was originally sent. This
    #   specifies the base64-encoded, 32-bit CRC32C checksum of the object
    #   returned by the Object Lambda function. This may not match the
    #   checksum for the object stored in Amazon S3. Amazon S3 will perform
    #   validation of the checksum values only when the original `GetObject`
    #   request required checksum validation. For more information about
    #   checksums, see [Checking object integrity][1] in the *Amazon S3 User
    #   Guide*.
    #
    #   Only one checksum header can be specified at a time.
If you supply # multiple checksum headers, this request will fail. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :checksum_sha1 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This # specifies the base64-encoded, 160-bit SHA-1 digest of the object # returned by the Object Lambda function. This may not match the # checksum for the object stored in Amazon S3. Amazon S3 will perform # validation of the checksum values only when the original `GetObject` # request required checksum validation. For more information about # checksums, see [Checking object integrity][1] in the *Amazon S3 User # Guide*. # # Only one checksum header can be specified at a time. If you supply # multiple checksum headers, this request will fail. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [String] :checksum_sha256 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This # specifies the base64-encoded, 256-bit SHA-256 digest of the object # returned by the Object Lambda function. This may not match the # checksum for the object stored in Amazon S3. Amazon S3 will perform # validation of the checksum values only when the original `GetObject` # request required checksum validation. For more information about # checksums, see [Checking object integrity][1] in the *Amazon S3 User # Guide*. # # Only one checksum header can be specified at a time. If you supply # multiple checksum headers, this request will fail. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # # @option params [Boolean] :delete_marker # Specifies whether an object stored in Amazon S3 is (`true`) or is not # (`false`) a delete marker. # # @option params [String] :etag # An opaque identifier assigned by a web server to a specific version of # a resource found at a URL. # # @option params [Time,DateTime,Date,Integer,String] :expires # The date and time at which the object is no longer cacheable. # # @option params [String] :expiration # If the object expiration is configured (see PUT Bucket lifecycle), the # response includes this header. It includes the `expiry-date` and # `rule-id` key-value pairs that provide the object expiration # information. The value of the `rule-id` is URL-encoded. # # @option params [Time,DateTime,Date,Integer,String] :last_modified # The date and time that the object was last modified. # # @option params [Integer] :missing_meta # Set to the number of metadata entries not returned in `x-amz-meta` # headers. This can happen if you create metadata using an API like SOAP # that supports more flexible metadata than the REST API. For example, # using SOAP, you can create metadata whose values are not legal HTTP # headers. # # @option params [Hash] :metadata # A map of metadata to store with the object in S3. # # @option params [String] :object_lock_mode # Indicates whether an object stored in Amazon S3 has Object Lock # enabled. For more information about S3 Object Lock, see [Object # Lock][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html # # @option params [String] :object_lock_legal_hold_status # Indicates whether an object stored in Amazon S3 has an active legal # hold. 
    #
    # @option params [Time,DateTime,Date,Integer,String] :object_lock_retain_until_date
    #   The date and time when Object Lock is configured to expire.
    #
    # @option params [Integer] :parts_count
    #   The count of parts this object has.
    #
    # @option params [String] :replication_status
    #   Indicates whether the request involves a bucket that is either a
    #   source or a destination in a replication rule. For more information
    #   about S3 Replication, see [Replication][1].
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html
    #
    # @option params [String] :request_charged
    #   If present, indicates that the requester was successfully charged for
    #   the request.
    #
    #   This functionality is not supported for directory buckets.
    #
    # @option params [String] :restore
    #   Provides information about the object restoration operation and the
    #   expiration time of the restored object copy.
    #
    # @option params [String] :server_side_encryption
    #   The server-side encryption algorithm used when storing the requested
    #   object in Amazon S3 (for example, AES256, `aws:kms`).
    #
    # @option params [String] :sse_customer_algorithm
    #   Encryption algorithm used if server-side encryption with a
    #   customer-provided encryption key was specified for the object stored
    #   in Amazon S3.
    #
    # @option params [String] :ssekms_key_id
    #   If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the
    #   Amazon Web Services Key Management Service (Amazon Web Services KMS)
    #   symmetric encryption customer managed key that was used for the
    #   object stored in Amazon S3.
    #
    # @option params [String] :sse_customer_key_md5
    #   128-bit MD5 digest of the customer-provided encryption key used in
    #   Amazon S3 to encrypt data stored in S3. For more information, see
    #   [Protecting data using server-side encryption with customer-provided
    #   encryption keys (SSE-C)][1].
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html
    #
    # @option params [String] :storage_class
    #   Provides storage class information of the object. Amazon S3 returns
    #   this header for all objects except for S3 Standard storage class
    #   objects.
    #
    #   For more information, see [Storage Classes][1].
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
    #
    # @option params [Integer] :tag_count
    #   The number of tags, if any, on the object.
    #
    # @option params [String] :version_id
    #   An ID used to reference a specific version of the object.
    #
    # @option params [Boolean] :bucket_key_enabled
    #   Indicates whether the object stored in Amazon S3 uses an S3 bucket key
    #   for server-side encryption with Amazon Web Services KMS (SSE-KMS).
    #
    # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
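    #
    # @example Returning a transformed object from an Object Lambda handler
    #
    #   # A minimal sketch. The event field names (`getObjectContext`,
    #   # `outputRoute`, `outputToken`) are assumptions based on the shape of
    #   # S3 Object Lambda events; adapt them to your handler as needed.
    #   def handler(event:, context:)
    #     ctx = event['getObjectContext']
    #     client = Aws::S3::Client.new
    #     client.write_get_object_response(
    #       request_route: ctx['outputRoute'],
    #       request_token: ctx['outputToken'],
    #       status_code: 200,
    #       body: 'transformed object data'
    #     )
    #   end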
# # @example Request syntax with placeholder values # # resp = client.write_get_object_response({ # request_route: "RequestRoute", # required # request_token: "RequestToken", # required # body: source_file, # status_code: 1, # error_code: "ErrorCode", # error_message: "ErrorMessage", # accept_ranges: "AcceptRanges", # cache_control: "CacheControl", # content_disposition: "ContentDisposition", # content_encoding: "ContentEncoding", # content_language: "ContentLanguage", # content_length: 1, # content_range: "ContentRange", # content_type: "ContentType", # checksum_crc32: "ChecksumCRC32", # checksum_crc32c: "ChecksumCRC32C", # checksum_sha1: "ChecksumSHA1", # checksum_sha256: "ChecksumSHA256", # delete_marker: false, # etag: "ETag", # expires: Time.now, # expiration: "Expiration", # last_modified: Time.now, # missing_meta: 1, # metadata: { # "MetadataKey" => "MetadataValue", # }, # object_lock_mode: "GOVERNANCE", # accepts GOVERNANCE, COMPLIANCE # object_lock_legal_hold_status: "ON", # accepts ON, OFF # object_lock_retain_until_date: Time.now, # parts_count: 1, # replication_status: "COMPLETE", # accepts COMPLETE, PENDING, FAILED, REPLICA, COMPLETED # request_charged: "requester", # accepts requester # restore: "Restore", # server_side_encryption: "AES256", # accepts AES256, aws:kms, aws:kms:dsse # sse_customer_algorithm: "SSECustomerAlgorithm", # ssekms_key_id: "SSEKMSKeyId", # sse_customer_key_md5: "SSECustomerKeyMD5", # storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, DEEP_ARCHIVE, OUTPOSTS, GLACIER_IR, SNOW, EXPRESS_ONEZONE # tag_count: 1, # version_id: "ObjectVersionId", # bucket_key_enabled: false, # }) # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse AWS API Documentation # # @overload write_get_object_response(params = {}) # @param [Hash] params ({}) def write_get_object_response(params = {}, options = {}) req = build_request(:write_get_object_response, params) req.send_request(options) end # @!endgroup # @param params ({}) # @api private def build_request(operation_name, params = {}) handlers = @handlers.for(operation_name) context = Seahorse::Client::RequestContext.new( operation_name: operation_name, operation: config.api.operation(operation_name), client: self, params: params, config: config) context[:gem_name] = 'aws-sdk-s3' context[:gem_version] = '1.143.0' Seahorse::Client::Request.new(handlers, context) end # Polls an API operation until a resource enters a desired state. # # ## Basic Usage # # A waiter will call an API operation until: # # * It is successful # * It enters a terminal state # * It makes the maximum number of attempts # # In between attempts, the waiter will sleep. # # # polls in a loop, sleeping between attempts # client.wait_until(waiter_name, params) # # ## Configuration # # You can configure the maximum number of polling attempts, and the # delay (in seconds) between each polling attempt. You can pass # configuration as the final arguments hash. # # # poll for ~25 seconds # client.wait_until(waiter_name, params, { # max_attempts: 5, # delay: 5, # }) # # ## Callbacks # # You can be notified before each polling attempt and before each # delay. If you throw `:success` or `:failure` from these callbacks, # it will terminate the waiter. 
    #
    #     started_at = Time.now
    #     client.wait_until(waiter_name, params, {
    #
    #       # disable max attempts
    #       max_attempts: nil,
    #
    #       # poll for 1 hour, instead of a number of attempts
    #       before_wait: -> (attempts, response) do
    #         throw :failure if Time.now - started_at > 3600
    #       end
    #     })
    #
    # ## Handling Errors
    #
    # When a waiter is unsuccessful, it will raise an error.
    # All of the failure errors extend from
    # {Aws::Waiters::Errors::WaiterFailed}.
    #
    #     begin
    #       client.wait_until(...)
    #     rescue Aws::Waiters::Errors::WaiterFailed
    #       # resource did not enter the desired state in time
    #     end
    #
    # ## Valid Waiters
    #
    # The following table lists the valid waiter names, the operations they call,
    # and the default `:delay` and `:max_attempts` values.
    #
    # | waiter_name       | params               | :delay   | :max_attempts |
    # | ----------------- | -------------------- | -------- | ------------- |
    # | bucket_exists     | {Client#head_bucket} | 5        | 20            |
    # | bucket_not_exists | {Client#head_bucket} | 5        | 20            |
    # | object_exists     | {Client#head_object} | 5        | 20            |
    # | object_not_exists | {Client#head_object} | 5        | 20            |
    #
    # @raise [Errors::FailureStateError] Raised when the waiter terminates
    #   because the waiter has entered a state that it will not transition
    #   out of, preventing success.
    #
    # @raise [Errors::TooManyAttemptsError] Raised when the configured
    #   maximum number of attempts have been made, and the waiter is not
    #   yet successful.
    #
    # @raise [Errors::UnexpectedError] Raised when an error is encountered
    #   while polling for a resource that is not expected.
    #
    # @raise [Errors::NoSuchWaiterError] Raised when you request to wait
    #   for an unknown state.
    #
    # @return [Boolean] Returns `true` if the waiter was successful.
    # @param [Symbol] waiter_name
    # @param [Hash] params ({})
    # @param [Hash] options ({})
    # @option options [Integer] :max_attempts
    # @option options [Integer] :delay
    # @option options [Proc] :before_attempt
    # @option options [Proc] :before_wait
    def wait_until(waiter_name, params = {}, options = {})
      w = waiter(waiter_name, options)
      yield(w.waiter) if block_given? # deprecated
      w.wait(params)
    end

    # @api private
    # @deprecated
    def waiter_names
      waiters.keys
    end

    private

    # @param [Symbol] waiter_name
    # @param [Hash] options ({})
    def waiter(waiter_name, options = {})
      waiter_class = waiters[waiter_name]
      if waiter_class
        waiter_class.new(options.merge(client: self))
      else
        raise Aws::Waiters::Errors::NoSuchWaiterError.new(waiter_name, waiters.keys)
      end
    end

    def waiters
      {
        bucket_exists: Waiters::BucketExists,
        bucket_not_exists: Waiters::BucketNotExists,
        object_exists: Waiters::ObjectExists,
        object_not_exists: Waiters::ObjectNotExists
      }
    end

    class << self

      # @api private
      attr_reader :identifier

      # @api private
      def errors_module
        Errors
      end

    end
  end
end
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/customizations/0000755000004100000410000000000014563445240021350 5ustar www-datawww-dataaws-sdk-s3-1.143.0/lib/aws-sdk-s3/customizations/object.rb0000644000004100000410000006046614563445240023147 0ustar www-datawww-data# frozen_string_literal: true

module Aws
  module S3
    class Object
      alias size content_length

      # Make the method redefinable
      alias_method :copy_from, :copy_from

      # Copies another object to this object. Use `multipart_copy: true`
      # for large objects. This is required for objects that exceed 5GB.
      #
      # @param [S3::Object, S3::ObjectVersion, S3::ObjectSummary, String, Hash]
      #   source Where to copy object data from.
      #   `source` must be one of the following:
      #
      #   * {Aws::S3::Object}
      #   * {Aws::S3::ObjectSummary}
      #   * {Aws::S3::ObjectVersion}
      #   * Hash - with `:bucket` and `:key` and optional `:version_id`
      #   * String - formatted like `"source-bucket-name/uri-escaped-key"`
      #     or `"source-bucket-name/uri-escaped-key?versionId=version-id"`
      #
      # @option options [Boolean] :multipart_copy (false) When `true`,
      #   the object will be copied using the multipart APIs. This is
      #   necessary for objects larger than 5GB and can provide
      #   performance improvements on large objects. Amazon S3 does
      #   not accept multipart copies for objects smaller than 5MB.
      #   Object metadata such as Content-Type will be copied; however,
      #   checksums are not copied.
      #
      # @option options [Integer] :content_length Only used when
      #   `:multipart_copy` is `true`. Passing this option avoids a HEAD
      #   request to query the source object size but prevents object metadata
      #   from being copied. Raises an `ArgumentError` if
      #   this option is provided when `:multipart_copy` is `false` or not set.
      #
      # @option options [S3::Client] :copy_source_client Only used when
      #   `:multipart_copy` is `true` and the source object is in a
      #   different region. You do not need to specify this option
      #   if you have provided `:content_length`.
      #
      # @option options [String] :copy_source_region Only used when
      #   `:multipart_copy` is `true` and the source object is in a
      #   different region. You do not need to specify this option
      #   if you have provided a `:copy_source_client` or a `:content_length`.
      #
      # @option options [Boolean] :use_source_parts (false) Only used when
      #   `:multipart_copy` is `true`. Use part sizes defined on the source
      #   object if any exist. If copying or moving an object that
      #   is already multipart, this does not re-part the object; instead,
      #   it reuses the part definitions from the original. That means the etag
      #   and any checksums will not change. This is especially useful if the
      #   source object has parts with varied sizes.
      #
      # @example Basic object copy
      #
      #   bucket = Aws::S3::Bucket.new('target-bucket')
      #   object = bucket.object('target-key')
      #
      #   # source as String
      #   object.copy_from('source-bucket/source-key')
      #
      #   # source as Hash
      #   object.copy_from(bucket:'source-bucket', key:'source-key')
      #
      #   # source as Aws::S3::Object
      #   object.copy_from(bucket.object('source-key'))
      #
      # @example Managed copy of large objects
      #
      #   # uses multipart upload APIs to copy object
      #   object.copy_from('src-bucket/src-key', multipart_copy: true)
      #
      # @see #copy_to
      #
      def copy_from(source, options = {})
        Aws::Plugins::UserAgent.feature('resource') do
          if Hash === source && source[:copy_source]
            # for backwards compatibility
            @client.copy_object(source.merge(bucket: bucket_name, key: key))
          else
            ObjectCopier.new(self, options).copy_from(source, options)
          end
        end
      end

      # Copies this object to another object. Use `multipart_copy: true`
      # for large objects. This is required for objects that exceed 5GB.
      #
      # @note If you need to copy to a bucket in a different region, use
      #   {#copy_from}.
      #
      # @param [S3::Object, String, Hash] target Where to copy the object
      #   data to.
      #   `target` must be one of the following:
      #
      #   * {Aws::S3::Object}
      #   * Hash - with `:bucket` and `:key`
      #   * String - formatted like `"target-bucket-name/target-key"`
      #
      # @example Basic object copy
      #
      #   bucket = Aws::S3::Bucket.new('source-bucket')
      #   object = bucket.object('source-key')
      #
      #   # target as String
      #   object.copy_to('target-bucket/target-key')
      #
      #   # target as Hash
      #   object.copy_to(bucket: 'target-bucket', key: 'target-key')
      #
      #   # target as Aws::S3::Object
      #   object.copy_to(bucket.object('target-key'))
      #
      # @example Managed copy of large objects
      #
      #   # uses multipart upload APIs to copy object
      #   object.copy_to('target-bucket/target-key', multipart_copy: true)
      #
      def copy_to(target, options = {})
        Aws::Plugins::UserAgent.feature('resource') do
          ObjectCopier.new(self, options).copy_to(target, options)
        end
      end

      # Copies and deletes the current object. The object will only be deleted
      # if the copy operation succeeds.
      #
      # @param (see Object#copy_to)
      # @option (see Object#copy_to)
      # @return [void]
      # @see Object#copy_to
      # @see Object#delete
      def move_to(target, options = {})
        copy_to(target, options)
        delete
      end

      # Creates a {PresignedPost} that makes it easy to upload a file from
      # a web browser direct to Amazon S3 using an HTML post form with
      # a file field.
      #
      # See the {PresignedPost} documentation for more information.
      #
      # @option (see PresignedPost#initialize)
      # @return [PresignedPost]
      # @see PresignedPost
      def presigned_post(options = {})
        PresignedPost.new(
          client.config.credentials,
          client.config.region,
          bucket_name,
          { key: key, url: bucket.url }.merge(options)
        )
      end

      # Generates a pre-signed URL for this object.
      #
      # @example Pre-signed GET URL, valid for one hour
      #
      #   obj.presigned_url(:get, expires_in: 3600)
      #   #=> "https://bucket-name.s3.amazonaws.com/object-key?..."
      #
      # @example Pre-signed PUT with a canned ACL
      #
      #   # the object uploaded using this URL will be publicly accessible
      #   obj.presigned_url(:put, acl: 'public-read')
      #   #=> "https://bucket-name.s3.amazonaws.com/object-key?..."
      #
      # @example Pre-signed UploadPart PUT
      #
      #   # generates a URL for uploading part 1 of a previously initiated
      #   # multipart upload
      #   obj.presigned_url(:upload_part, part_number: 1, upload_id: 'uploadIdToken')
      #   #=> "https://bucket-name.s3.amazonaws.com/object-key?..."
      #
      # @param [Symbol] method
      #   The S3 operation to generate a presigned URL for. Valid values
      #   are `:get`, `:put`, `:head`, `:delete`, `:create_multipart_upload`,
      #   `:list_multipart_uploads`, `:complete_multipart_upload`,
      #   `:abort_multipart_upload`, `:list_parts`, and `:upload_part`.
      #
      # @param [Hash] params
      #   Additional request parameters to use when generating the pre-signed
      #   URL. See the related documentation in {Client} for accepted
      #   params.
      #
      #   | Method                       | Client Method                      |
      #   |------------------------------|------------------------------------|
      #   | `:get`                       | {Client#get_object}                |
      #   | `:put`                       | {Client#put_object}                |
      #   | `:head`                      | {Client#head_object}               |
      #   | `:delete`                    | {Client#delete_object}             |
      #   | `:create_multipart_upload`   | {Client#create_multipart_upload}   |
      #   | `:list_multipart_uploads`    | {Client#list_multipart_uploads}    |
      #   | `:complete_multipart_upload` | {Client#complete_multipart_upload} |
      #   | `:abort_multipart_upload`    | {Client#abort_multipart_upload}    |
      #   | `:list_parts`                | {Client#list_parts}                |
      #   | `:upload_part`               | {Client#upload_part}               |
      #
      # @option params [Boolean] :virtual_host (false) When `true`, the
      #   presigned URL will use the bucket name as a virtual host.
# # bucket = Aws::S3::Bucket.new('my.bucket.com') # bucket.object('key').presigned_url(virtual_host: true) # #=> "http://my.bucket.com/key?..." # # @option params [Integer] :expires_in (900) Number of seconds before # the pre-signed URL expires. This may not exceed one week (604800 # seconds). Note that the pre-signed URL is also only valid as long as # credentials used to sign it are. For example, when using IAM roles, # temporary tokens generated for signing also have a default expiration # which will affect the effective expiration of the pre-signed URL. # # @raise [ArgumentError] Raised if `:expires_in` exceeds one week # (604800 seconds). # # @return [String] # def presigned_url(method, params = {}) presigner = Presigner.new(client: client) if %w(delete head get put).include?(method.to_s) method = "#{method}_object".to_sym end presigner.presigned_url( method.downcase, params.merge(bucket: bucket_name, key: key) ) end # Allows you to create presigned URL requests for S3 operations. This # method returns a tuple containing the URL and the signed X-amz-* headers # to be used with the presigned url. # # @example Pre-signed GET URL, valid for one hour # # obj.presigned_request(:get, expires_in: 3600) # #=> ["https://bucket-name.s3.amazonaws.com/object-key?...", {}] # # @example Pre-signed PUT with a canned ACL # # # the object uploaded using this URL will be publicly accessible # obj.presigned_request(:put, acl: 'public-read') # #=> ["https://bucket-name.s3.amazonaws.com/object-key?...", # {"x-amz-acl"=>"public-read"}] # # @param [Symbol] method # The S3 operation to generate a presigned request for. Valid values # are `:get`, `:put`, `:head`, `:delete`, `:create_multipart_upload`, # `:list_multipart_uploads`, `:complete_multipart_upload`, # `:abort_multipart_upload`, `:list_parts`, and `:upload_part`. # # @param [Hash] params # Additional request parameters to use when generating the pre-signed # request. See the related documentation in {Client} for accepted # params. # # | Method | Client Method | # |------------------------------|------------------------------------| # | `:get` | {Client#get_object} | # | `:put` | {Client#put_object} | # | `:head` | {Client#head_object} | # | `:delete` | {Client#delete_object} | # | `:create_multipart_upload` | {Client#create_multipart_upload} | # | `:list_multipart_uploads` | {Client#list_multipart_uploads} | # | `:complete_multipart_upload` | {Client#complete_multipart_upload} | # | `:abort_multipart_upload` | {Client#abort_multipart_upload} | # | `:list_parts` | {Client#list_parts} | # | `:upload_part` | {Client#upload_part} | # # @option params [Boolean] :virtual_host (false) When `true` the # presigned URL will use the bucket name as a virtual host. # # bucket = Aws::S3::Bucket.new('my.bucket.com') # bucket.object('key').presigned_request(virtual_host: true) # #=> ["http://my.bucket.com/key?...", {}] # # @option params [Integer] :expires_in (900) Number of seconds before # the pre-signed URL expires. This may not exceed one week (604800 # seconds). Note that the pre-signed URL is also only valid as long as # credentials used to sign it are. For example, when using IAM roles, # temporary tokens generated for signing also have a default expiration # which will affect the effective expiration of the pre-signed URL. # # @raise [ArgumentError] Raised if `:expires_in` exceeds one week # (604800 seconds). # # @return [String, Hash] A tuple with a presigned URL and headers that # should be included with the request. 
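      #
      # @example Sending the signed request (a minimal sketch using the stdlib)
      #
      #   # the returned headers must accompany the request; `Net::HTTP` and
      #   # `URI` come from the Ruby standard library ('net/http', 'uri')
      #   url, headers = obj.presigned_request(:get, expires_in: 3600)
      #   uri = URI(url)
      #   response = Net::HTTP.start(uri.host, uri.port, use_ssl: true) do |http|
      #     http.get(uri.request_uri, headers)
      #   end
      #   response.body #=> object data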
#
def presigned_request(method, params = {})
  presigner = Presigner.new(client: client)

  if %w(delete head get put).include?(method.to_s)
    method = "#{method}_object".to_sym
  end

  presigner.presigned_request(
    method.downcase,
    params.merge(bucket: bucket_name, key: key)
  )
end

# Returns the public (un-signed) URL for this object.
#
#     s3.bucket('bucket-name').object('obj-key').public_url
#     #=> "https://bucket-name.s3.amazonaws.com/obj-key"
#
# To use a virtual hosted bucket URL, pass `virtual_host: true`.
# HTTPS is used unless `secure: false` is set. If the bucket
# name contains dots (.), then you will need to set `secure: false`.
#
#     s3.bucket('my-bucket.com').object('key')
#       .public_url(virtual_host: true)
#     #=> "https://my-bucket.com/key"
#
# @option options [Boolean] :virtual_host (false) When `true`, the bucket
#   name will be used as the host name. This is useful when you have
#   a CNAME configured for the bucket.
#
# @option options [Boolean] :secure (true) When `false`, http
#   will be used with virtual_host. This is required when
#   the bucket name has a dot (.) in it.
#
# @return [String]
def public_url(options = {})
  url = URI.parse(bucket.url(options))
  url.path += '/' unless url.path[-1] == '/'
  url.path += key.gsub(/[^\/]+/) { |s| Seahorse::Util.uri_escape(s) }
  url.to_s
end

# Uploads a stream in a streaming fashion to the current object in S3.
#
# Passed chunks are automatically split into multipart upload parts and
# the parts are uploaded in parallel. This allows for streaming uploads
# that never touch the disk.
#
# Note that this is known to have issues in JRuby versions before
# jruby-9.1.15.0, so avoid using this with older versions of JRuby.
#
# @example Streaming chunks of data
#   obj.upload_stream do |write_stream|
#     10.times { write_stream << 'foo' }
#   end
# @example Streaming the output of another process
#   obj.upload_stream do |write_stream|
#     IO.copy_stream(IO.popen('ls'), write_stream)
#   end
# @example Streaming from STDIN
#   obj.upload_stream do |write_stream|
#     IO.copy_stream(STDIN, write_stream)
#   end
# @param [Hash] options
#   Additional options for {Client#create_multipart_upload},
#   {Client#complete_multipart_upload},
#   and {Client#upload_part} can be provided.
#
# @option options [Integer] :thread_count (10) The number of parallel
#   multipart uploads.
#
# @option options [Boolean] :tempfile (false) Normally, read data is
#   buffered in memory while the parts are built for the underlying
#   multipart upload. When `tempfile: true` is passed, read data is
#   staged in temporary files on disk instead, greatly reducing the
#   memory footprint.
#
# @option options [Integer] :part_size (5242880)
#   Defines how big each part, except the last, should be.
#   The default `:part_size` is `5 * 1024 * 1024`.
#
# @raise [MultipartUploadError] If an object is being uploaded in
#   parts, and the upload can not be completed, then the upload is
#   aborted and this error is raised. The raised error has a `#errors`
#   method that returns the failures that caused the upload to be
#   aborted.
#
# @return [Boolean] Returns `true` when the object is uploaded
#   without any errors.
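#
# A minimal sketch of combining `upload_stream` with on-the-fly
# compression; `obj` and the use of gzip are illustrative assumptions:
#
# @example Hypothetical: streaming gzip-compressed data
#
#   require 'zlib'
#   obj.upload_stream(tempfile: true) do |write_stream|
#     gz = Zlib::GzipWriter.new(write_stream)
#     gz.write('many log lines...')
#     gz.finish # flush the gzip trailer without closing the pipe
#   end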
#
# @see Client#create_multipart_upload
# @see Client#complete_multipart_upload
# @see Client#upload_part
def upload_stream(options = {}, &block)
  uploading_options = options.dup
  uploader = MultipartStreamUploader.new(
    client: client,
    thread_count: uploading_options.delete(:thread_count),
    tempfile: uploading_options.delete(:tempfile),
    part_size: uploading_options.delete(:part_size)
  )
  Aws::Plugins::UserAgent.feature('resource') do
    uploader.upload(
      uploading_options.merge(bucket: bucket_name, key: key),
      &block
    )
  end
  true
end

# Uploads a file from disk to the current object in S3.
#
#     # small files are uploaded in a single API call
#     obj.upload_file('/path/to/file')
#
# Files larger than or equal to `:multipart_threshold` are uploaded
# using the Amazon S3 multipart upload APIs.
#
#     # large files are automatically split into parts
#     # and the parts are uploaded in parallel
#     obj.upload_file('/path/to/very_large_file')
#
# The response of the S3 upload API is yielded if a block is given.
#
#     # the API response contains the etag value of the file
#     obj.upload_file('/path/to/file') do |response|
#       etag = response.etag
#     end
#
# You can provide a callback to monitor progress of the upload:
#
#     # bytes and totals are each an array with 1 entry per part
#     progress = Proc.new do |bytes, totals|
#       puts bytes.map.with_index { |b, i| "Part #{i+1}: #{b} / #{totals[i]}" }.join(' ') + " Total: #{100.0 * bytes.sum / totals.sum}%"
#     end
#     obj.upload_file('/path/to/file', progress_callback: progress)
#
# @param [String, Pathname, File, Tempfile] source A file on the local
#   file system that will be uploaded as this object. This can either be
#   a String or Pathname to the file, an open File object, or an open
#   Tempfile object. If you pass an open File or Tempfile object, then
#   you are responsible for closing it after the upload completes. When
#   using an open Tempfile, rewind it before uploading or else the object
#   will be empty.
#
# @param [Hash] options
#   Additional options for {Client#put_object}
#   when the file size is below the multipart threshold. For files larger
#   than the multipart threshold, options for
#   {Client#create_multipart_upload}, {Client#complete_multipart_upload},
#   and {Client#upload_part} can be provided.
#
# @option options [Integer] :multipart_threshold (104857600) Files larger
#   than or equal to `:multipart_threshold` are uploaded using the S3
#   multipart APIs.
#   The default threshold is 100MB.
#
# @option options [Integer] :thread_count (10) The number of parallel
#   multipart uploads. This option is not used if the file is smaller than
#   `:multipart_threshold`.
#
# @option options [Proc] :progress_callback
#   A Proc that will be called when each chunk of the upload is sent.
#   It will be invoked with `[bytes_read]` and `[total_sizes]`.
#
# @raise [MultipartUploadError] If an object is being uploaded in
#   parts, and the upload can not be completed, then the upload is
#   aborted and this error is raised. The raised error has a `#errors`
#   method that returns the failures that caused the upload to be
#   aborted.
#
# @return [Boolean] Returns `true` when the object is uploaded
#   without any errors.
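#
# A minimal sketch of tuning the multipart settings; the 8 MB threshold
# and thread count are illustrative assumptions:
#
# @example Hypothetical: forcing multipart uploads for smaller files
#
#   # files of 8 MB or more are uploaded with the multipart APIs
#   obj.upload_file('/path/to/file',
#     multipart_threshold: 8 * 1024 * 1024,
#     thread_count: 4
#   )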
#
# @see Client#put_object
# @see Client#create_multipart_upload
# @see Client#complete_multipart_upload
# @see Client#upload_part
def upload_file(source, options = {})
  uploading_options = options.dup
  uploader = FileUploader.new(
    multipart_threshold: uploading_options.delete(:multipart_threshold),
    client: client
  )
  response = Aws::Plugins::UserAgent.feature('resource') do
    uploader.upload(
      source,
      uploading_options.merge(bucket: bucket_name, key: key)
    )
  end
  yield response if block_given?
  true
end

# Downloads a file in S3 to a path on disk.
#
#     # small files (< 5MB) are downloaded in a single API call
#     obj.download_file('/path/to/file')
#
# Files larger than 5MB are downloaded using the multipart method:
#
#     # large files are split into parts
#     # and the parts are downloaded in parallel
#     obj.download_file('/path/to/very_large_file')
#
# You can provide a callback to monitor progress of the download:
#
#     # bytes and part_sizes are each an array with 1 entry per part
#     # part_sizes may not be known until the first bytes are retrieved
#     progress = Proc.new do |bytes, part_sizes, file_size|
#       puts bytes.map.with_index { |b, i| "Part #{i+1}: #{b} / #{part_sizes[i]}" }.join(' ') + " Total: #{100.0 * bytes.sum / file_size}%"
#     end
#     obj.download_file('/path/to/file', progress_callback: progress)
#
# @param [String] destination Where to download the file to.
#
# @param [Hash] options
#   Additional options for {Client#get_object} and {Client#head_object}
#   may be provided.
#
# @option options [String] mode (auto) One of `auto`, `single_request`,
#   or `get_range`. `single_request` mode forces the download to be made
#   in a single GET request, `get_range` mode allows the `chunk_size`
#   parameter to customize the size of each range, and the default `auto`
#   mode performs a multipart download.
#
# @option options [Integer] chunk_size Required in `get_range` mode.
#
# @option options [Integer] thread_count (10) Customize the number of
#   threads used in the multipart download.
#
# @option options [String] version_id The object version id used to
#   retrieve the object. For more about object versioning, see:
#   https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html
#
# @option options [String] checksum_mode (ENABLED) When `ENABLED` and
#   the object has a stored checksum, it will be used to validate the
#   download and will raise an `Aws::Errors::ChecksumError` if
#   checksum validation fails. You may provide an `on_checksum_validated`
#   callback if you need to verify that validation occurred and which
#   algorithm was used. To disable checksum validation, set
#   `checksum_mode` to "DISABLED".
#
# @option options [Callable] on_checksum_validated Called each time a
#   request's checksum is validated with the checksum algorithm and the
#   response. For multipart downloads, this will be called for each
#   part that is downloaded and validated.
#
# @option options [Proc] :progress_callback
#   A Proc that will be called when each chunk of the download is received.
#   It will be invoked with `[bytes_read]`, `[part_sizes]`, `file_size`.
#   When the object is downloaded as parts (rather than by ranges), the
#   part_sizes will not be known ahead of time and will be nil in the
#   callback until the first bytes in the part are received.
#
# @return [Boolean] Returns `true` when the file is downloaded without
#   any errors.
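#
# A minimal sketch of observing checksum validation; it assumes the
# object was uploaded with a stored checksum:
#
# @example Hypothetical: recording validated checksum algorithms
#
#   algorithms = []
#   obj.download_file('/path/to/file',
#     checksum_mode: 'ENABLED',
#     on_checksum_validated: ->(algorithm, _response) { algorithms << algorithm }
#   )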
# # @see Client#get_object # @see Client#head_object def download_file(destination, options = {}) downloader = FileDownloader.new(client: client) Aws::Plugins::UserAgent.feature('resource') do downloader.download( destination, options.merge(bucket: bucket_name, key: key) ) end true end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/customizations/object_summary.rb0000644000004100000410000000457314563445240024731 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 class ObjectSummary alias content_length size # Make the method redefinable alias_method :copy_from, :copy_from # @param (see Object#copy_from) # @options (see Object#copy_from) # @return (see Object#copy_from) # @see Object#copy_from def copy_from(source, options = {}) object.copy_from(source, options) end # @param (see Object#copy_to) # @options (see Object#copy_to) # @return (see Object#copy_to) # @see Object#copy_to def copy_to(target, options = {}) object.copy_to(target, options) end # @param (see Object#move_to) # @options (see Object#move_to) # @return (see Object#move_to) # @see Object#move_to def move_to(target, options = {}) object.move_to(target, options) end # @param (see Object#presigned_post) # @options (see Object#presigned_post) # @return (see Object#presigned_post) # @see Object#presigned_post def presigned_post(options = {}) object.presigned_post(options) end # @param (see Object#presigned_url) # @options (see Object#presigned_url) # @return (see Object#presigned_url) # @see Object#presigned_url def presigned_url(http_method, params = {}) object.presigned_url(http_method, params) end # @param (see Object#public_url) # @options (see Object#public_url) # @return (see Object#public_url) # @see Object#public_url def public_url(options = {}) object.public_url(options) end # @param (see Object#upload_file) # @options (see Object#upload_file) # @return (see Object#upload_file) # @see Object#upload_file def upload_file(source, options = {}) object.upload_file(source, options) end # @options (see Object#upload_stream) # @return (see Object#upload_stream) # @see Object#upload_stream def upload_stream(options = {}, &block) object.upload_stream(options, &block) end # @param (see Object#download_file) # @options (see Object#download_file) # @return (see Object#download_file) # @see Object#download_file def download_file(destination, options = {}) object.download_file(destination, options) end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/customizations/bucket.rb0000644000004100000410000001105414563445240023153 0ustar www-datawww-data# frozen_string_literal: true require 'uri' module Aws module S3 class Bucket # Deletes all objects and versioned objects from this bucket # # @example # # bucket.clear! # # @return [void] def clear! object_versions.batch_delete! end # Deletes all objects and versioned objects from this bucket and # then deletes the bucket. # # @example # # bucket.delete! # # @option options [Integer] :max_attempts (3) Maximum number of times to # attempt to delete the empty bucket before raising # `Aws::S3::Errors::BucketNotEmpty`. # # @option options [Float] :initial_wait (1.3) Seconds to wait before # retrying the call to delete the bucket, exponentially increased for # each attempt. # # @return [void] def delete!(options = {}) options = { initial_wait: 1.3, max_attempts: 3 }.merge(options) attempts = 0 begin clear! 
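# the bucket delete below can still fail while the object deletions
# above settle; each retry waits initial_wait**attempts seconds, an
# exponentially increasing backoff, up to :max_attempts attempts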
delete
rescue Errors::BucketNotEmpty
  attempts += 1
  raise if attempts >= options[:max_attempts]

  Kernel.sleep(options[:initial_wait]**attempts)
  retry
end
end

# Returns a public URL for this bucket.
#
# @example
#
#   bucket = s3.bucket('bucket-name')
#   bucket.url
#   #=> "https://bucket-name.s3.amazonaws.com"
#
# It will also work when provided with an access point ARN.
#
# @example
#
#   bucket = s3.bucket(
#     'arn:aws:s3:us-east-1:123456789012:accesspoint:myendpoint'
#   )
#   bucket.url
#   #=> "https://myendpoint-123456789012.s3-accesspoint.us-east-1.amazonaws.com"
#
# You can pass `virtual_host: true` to use the bucket name as the
# host name.
#
#     bucket = s3.bucket('my-bucket.com')
#     bucket.url(virtual_host: true)
#     #=> "http://my-bucket.com"
#
# @option options [Boolean] :virtual_host (false) When `true`,
#   the bucket name will be used as the host name. This is useful
#   when you have a CNAME configured for this bucket.
#
# @option options [Boolean] :secure (true) When `false`, http
#   will be used with virtual_host. This is required when
#   the bucket name has a dot (.) in it.
#
# @return [String] the URL for this bucket.
def url(options = {})
  if options[:virtual_host]
    scheme = options.fetch(:secure, true) ? 'https' : 'http'
    "#{scheme}://#{name}"
  else
    # Taken from Aws::S3::Endpoints module
    unless client.config.regional_endpoint
      endpoint = client.config.endpoint.to_s
    end
    params = Aws::S3::EndpointParameters.new(
      bucket: name,
      region: client.config.region,
      use_fips: client.config.use_fips_endpoint,
      use_dual_stack: client.config.use_dualstack_endpoint,
      endpoint: endpoint,
      force_path_style: client.config.force_path_style,
      accelerate: client.config.use_accelerate_endpoint,
      use_global_endpoint: client.config.s3_us_east_1_regional_endpoint == 'legacy',
      use_object_lambda_endpoint: nil,
      disable_access_points: nil,
      disable_multi_region_access_points: client.config.s3_disable_multiregion_access_points,
      use_arn_region: client.config.s3_use_arn_region,
    )
    endpoint = Aws::S3::EndpointProvider.new.resolve_endpoint(params)
    endpoint.url
  end
end

# Creates a {PresignedPost} that makes it easy to upload a file from
# a web browser directly to Amazon S3 using an HTML post form with
# a file field.
#
# See the {PresignedPost} documentation for more information.
# @note You must specify `:key` or `:key_starts_with`. All other options
#   are optional.
# @option (see PresignedPost#initialize)
# @return [PresignedPost]
# @see PresignedPost
def presigned_post(options = {})
  PresignedPost.new(
    client.config.credentials,
    client.config.region,
    name,
    { url: url }.merge(options)
  )
end

# @api private
def load
  @data = Aws::Plugins::UserAgent.feature('resource') do
    client.list_buckets.buckets.find { |b| b.name == name }
  end
  raise "unable to load bucket #{name}" if @data.nil?
  self
end
end
end
end
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/customizations/types/0000755000004100000410000000000014563445240022514 5ustar www-datawww-dataaws-sdk-s3-1.143.0/lib/aws-sdk-s3/customizations/types/list_object_versions_output.rb0000644000004100000410000000054314563445240030714 0ustar www-datawww-data# frozen_string_literal: true

class Aws::S3::Types::ListObjectVersionsOutput
  # TODO : Remove this customization once the resource code
  # generator correctly handles the JMESPath || expression.
  # Only used by the Bucket#object_versions collection.
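  # Returns the concatenation of the `versions` and `delete_markers`
  # arrays so the collection can enumerate both in a single pass.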
  # @api private
  def versions_delete_markers
    versions + delete_markers
  end
end
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/customizations/types/permanent_redirect.rb0000644000004100000410000000120414563445240026710 0ustar www-datawww-data# frozen_string_literal: true

module Aws
  module S3
    module Types
      # This error is not modeled.
      #
      # The bucket you are attempting to access must be addressed using the
      # specified endpoint. Please send all future requests to this endpoint.
      #
      # @!attribute [rw] endpoint
      #   @return [String]
      #
      # @!attribute [rw] bucket
      #   @return [String]
      #
      # @!attribute [rw] region
      #   @return [String]
      #
      # @!attribute [rw] message
      #   @return [String]
      #
      class PermanentRedirect < Struct.new(:endpoint, :bucket, :region, :message)
        SENSITIVE = []
        include Aws::Structure
      end
    end
  end
end
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/customizations/errors.rb0000644000004100000410000000167114563445240023216 0ustar www-datawww-data# frozen_string_literal: true

module Aws
  module S3
    module Errors
      # Hijack PermanentRedirect dynamic error to also include the endpoint,
      # bucket, and region.
      class PermanentRedirect < ServiceError
        # @param [Seahorse::Client::RequestContext] context
        # @param [String] message
        # @param [Aws::S3::Types::PermanentRedirect] _data
        def initialize(context, message, _data = Aws::EmptyStructure.new)
          data = Aws::S3::Types::PermanentRedirect.new(message: message)
          body = context.http_response.body_contents
          if (endpoint = body.match(/<Endpoint>(.+?)<\/Endpoint>/))
            data.endpoint = endpoint[1]
          end
          if (bucket = body.match(/<Bucket>(.+?)<\/Bucket>/))
            data.bucket = bucket[1]
          end
          data.region = context.http_response.headers['x-amz-bucket-region']
          super(context, message, data)
        end
      end
    end
  end
end
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/customizations/multipart_upload.rb0000644000004100000410000000244414563445240025266 0ustar www-datawww-data# frozen_string_literal: true

module Aws
  module S3
    class MultipartUpload
      alias_method :basic_complete, :complete

      # Completes the upload. Requires a list of completed parts. You can
      # provide the list of parts with `:part_number` and `:etag` values.
      #
      #     upload.complete(multipart_upload: { parts: [
      #       { part_number: 1, etag: 'etag1' },
      #       { part_number: 2, etag: 'etag2' },
      #       ...
      #     ]})
      #
      # Alternatively, you can pass **`compute_parts: true`** and the part
      # list will be computed by calling {Client#list_parts}.
      #
      #     upload.complete(compute_parts: true)
      #
      # @option options [Boolean] :compute_parts (false) When `true`,
      #   the {Client#list_parts} method will be called to determine
      #   the list of required part numbers and their ETags.
      #
      def complete(options = {})
        if options.delete(:compute_parts)
          options[:multipart_upload] = { parts: compute_parts }
        end
        basic_complete(options)
      end

      private

      def compute_parts
        parts.sort_by(&:part_number).each.with_object([]) do |part, part_list|
          part_list << { part_number: part.part_number, etag: part.etag }
        end
      end
    end
  end
end
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/errors.rb0000644000004100000410000000775014563445240020127 0ustar www-datawww-data# frozen_string_literal: true

# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE

module Aws::S3

  # When S3 returns an error response, the Ruby SDK constructs and raises an error.
# These errors all extend Aws::S3::Errors::ServiceError < {Aws::Errors::ServiceError} # # You can rescue all S3 errors using ServiceError: # # begin # # do stuff # rescue Aws::S3::Errors::ServiceError # # rescues all S3 API errors # end # # # ## Request Context # ServiceError objects have a {Aws::Errors::ServiceError#context #context} method that returns # information about the request that generated the error. # See {Seahorse::Client::RequestContext} for more information. # # ## Error Classes # * {BucketAlreadyExists} # * {BucketAlreadyOwnedByYou} # * {InvalidObjectState} # * {NoSuchBucket} # * {NoSuchKey} # * {NoSuchUpload} # * {ObjectAlreadyInActiveTierError} # * {ObjectNotInActiveTierError} # # Additionally, error classes are dynamically generated for service errors based on the error code # if they are not defined above. module Errors extend Aws::Errors::DynamicErrors class BucketAlreadyExists < ServiceError # @param [Seahorse::Client::RequestContext] context # @param [String] message # @param [Aws::S3::Types::BucketAlreadyExists] data def initialize(context, message, data = Aws::EmptyStructure.new) super(context, message, data) end end class BucketAlreadyOwnedByYou < ServiceError # @param [Seahorse::Client::RequestContext] context # @param [String] message # @param [Aws::S3::Types::BucketAlreadyOwnedByYou] data def initialize(context, message, data = Aws::EmptyStructure.new) super(context, message, data) end end class InvalidObjectState < ServiceError # @param [Seahorse::Client::RequestContext] context # @param [String] message # @param [Aws::S3::Types::InvalidObjectState] data def initialize(context, message, data = Aws::EmptyStructure.new) super(context, message, data) end # @return [String] def storage_class @data[:storage_class] end # @return [String] def access_tier @data[:access_tier] end end class NoSuchBucket < ServiceError # @param [Seahorse::Client::RequestContext] context # @param [String] message # @param [Aws::S3::Types::NoSuchBucket] data def initialize(context, message, data = Aws::EmptyStructure.new) super(context, message, data) end end class NoSuchKey < ServiceError # @param [Seahorse::Client::RequestContext] context # @param [String] message # @param [Aws::S3::Types::NoSuchKey] data def initialize(context, message, data = Aws::EmptyStructure.new) super(context, message, data) end end class NoSuchUpload < ServiceError # @param [Seahorse::Client::RequestContext] context # @param [String] message # @param [Aws::S3::Types::NoSuchUpload] data def initialize(context, message, data = Aws::EmptyStructure.new) super(context, message, data) end end class ObjectAlreadyInActiveTierError < ServiceError # @param [Seahorse::Client::RequestContext] context # @param [String] message # @param [Aws::S3::Types::ObjectAlreadyInActiveTierError] data def initialize(context, message, data = Aws::EmptyStructure.new) super(context, message, data) end end class ObjectNotInActiveTierError < ServiceError # @param [Seahorse::Client::RequestContext] context # @param [String] message # @param [Aws::S3::Types::ObjectNotInActiveTierError] data def initialize(context, message, data = Aws::EmptyStructure.new) super(context, message, data) end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/multipart_stream_uploader.rb0000644000004100000410000001374114563445240024077 0ustar www-datawww-data# frozen_string_literal: true require 'thread' require 'set' require 'tempfile' require 'stringio' module Aws module S3 # @api private class MultipartStreamUploader # api private PART_SIZE = 5 * 1024 * 1024 # 5MB # 
@api private
THREAD_COUNT = 10

# @api private
TEMPFILE_PREIX = 'aws-sdk-s3-upload_stream'.freeze

# @api private
CREATE_OPTIONS = Set.new(Client.api.operation(:create_multipart_upload).input.shape.member_names)

# @api private
UPLOAD_PART_OPTIONS = Set.new(Client.api.operation(:upload_part).input.shape.member_names)

# @api private
COMPLETE_UPLOAD_OPTIONS = Set.new(Client.api.operation(:complete_multipart_upload).input.shape.member_names)

# @option options [Client] :client
def initialize(options = {})
  @client = options[:client] || Client.new
  @tempfile = options[:tempfile]
  @part_size = options[:part_size] || PART_SIZE
  @thread_count = options[:thread_count] || THREAD_COUNT
end

# @return [Client]
attr_reader :client

# @option options [required,String] :bucket
# @option options [required,String] :key
# @return [Seahorse::Client::Response] - the CompleteMultipartUploadResponse
def upload(options = {}, &block)
  Aws::Plugins::UserAgent.feature('s3-transfer') do
    upload_id = initiate_upload(options)
    parts = upload_parts(upload_id, options, &block)
    complete_upload(upload_id, parts, options)
  end
end

private

def initiate_upload(options)
  @client.create_multipart_upload(create_opts(options)).upload_id
end

def complete_upload(upload_id, parts, options)
  @client.complete_multipart_upload(
    **complete_opts(options).merge(
      upload_id: upload_id,
      multipart_upload: { parts: parts }
    )
  )
end

def upload_parts(upload_id, options, &block)
  completed = Queue.new
  thread_errors = []
  errors = begin
    IO.pipe do |read_pipe, write_pipe|
      threads = upload_in_threads(
        read_pipe, completed,
        upload_part_opts(options).merge(upload_id: upload_id),
        thread_errors)
      begin
        block.call(write_pipe)
      ensure
        # Ensure the pipe is closed to avoid https://github.com/jruby/jruby/issues/6111
        write_pipe.close
      end
      threads.map(&:value).compact
    end
  rescue => e
    thread_errors + [e]
  end

  if errors.empty?
    Array.new(completed.size) { completed.pop }.sort_by { |part| part[:part_number] }
  else
    abort_upload(upload_id, options, errors)
  end
end

def abort_upload(upload_id, options, errors)
  @client.abort_multipart_upload(
    bucket: options[:bucket],
    key: options[:key],
    upload_id: upload_id
  )
  msg = "multipart upload failed: #{errors.map(&:message).join("; ")}"
  raise MultipartUploadError.new(msg, errors)
rescue MultipartUploadError => error
  raise error
rescue => error
  msg = "failed to abort multipart upload: #{error.message}"
  raise MultipartUploadError.new(msg, errors + [error])
end

def create_opts(options)
  CREATE_OPTIONS.inject({}) do |hash, key|
    hash[key] = options[key] if options.key?(key)
    hash
  end
end

def upload_part_opts(options)
  UPLOAD_PART_OPTIONS.inject({}) do |hash, key|
    hash[key] = options[key] if options.key?(key)
    hash
  end
end

def complete_opts(options)
  COMPLETE_UPLOAD_OPTIONS.inject({}) do |hash, key|
    hash[key] = options[key] if options.key?(key)
    hash
  end
end

def read_to_part_body(read_pipe)
  return if read_pipe.closed?
  temp_io = @tempfile ?
Tempfile.new(TEMPFILE_PREIX) : StringIO.new(String.new) temp_io.binmode bytes_copied = IO.copy_stream(read_pipe, temp_io, @part_size) temp_io.rewind if bytes_copied == 0 if Tempfile === temp_io temp_io.close temp_io.unlink end nil else temp_io end end def upload_in_threads(read_pipe, completed, options, thread_errors) mutex = Mutex.new part_number = 0 @thread_count.times.map do thread = Thread.new do begin loop do body, thread_part_number = mutex.synchronize do [read_to_part_body(read_pipe), part_number += 1] end break unless (body || thread_part_number == 1) begin part = options.merge( body: body, part_number: thread_part_number, ) resp = @client.upload_part(part) completed_part = {etag: resp.etag, part_number: part[:part_number]} # get the requested checksum from the response if part[:checksum_algorithm] k = "checksum_#{part[:checksum_algorithm].downcase}".to_sym completed_part[k] = resp[k] end completed.push(completed_part) ensure if Tempfile === body body.close body.unlink elsif StringIO === body body.string.clear end end end nil rescue => error # keep other threads from uploading other parts mutex.synchronize do thread_errors.push(error) read_pipe.close_read unless read_pipe.closed? end error end end thread end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/types.rb0000644000004100000410000244203314563445240017756 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 module Types # Specifies the days since the initiation of an incomplete multipart # upload that Amazon S3 will wait before permanently removing all parts # of the upload. For more information, see [ Aborting Incomplete # Multipart Uploads Using a Bucket Lifecycle Configuration][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config # # @!attribute [rw] days_after_initiation # Specifies the number of days after which Amazon S3 aborts an # incomplete multipart upload. # @return [Integer] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload AWS API Documentation # class AbortIncompleteMultipartUpload < Struct.new( :days_after_initiation) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput AWS API Documentation # class AbortMultipartUploadOutput < Struct.new( :request_charged) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name to which the upload was taking place. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. 
# # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] key # Key of the object for which the multipart upload was initiated. # @return [String] # # @!attribute [rw] upload_id # Upload ID that identifies the multipart upload. # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest AWS API Documentation # class AbortMultipartUploadRequest < Struct.new( :bucket, :key, :upload_id, :request_payer, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # Configures the transfer acceleration state for an Amazon S3 bucket. # For more information, see [Amazon S3 Transfer Acceleration][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html # # @!attribute [rw] status # Specifies the transfer acceleration status of the bucket. 
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration AWS API Documentation # class AccelerateConfiguration < Struct.new( :status) SENSITIVE = [] include Aws::Structure end # Contains the elements that set the ACL permissions for an object per # grantee. # # @!attribute [rw] grants # A list of grants. # @return [Array] # # @!attribute [rw] owner # Container for the bucket owner's display name and ID. # @return [Types::Owner] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy AWS API Documentation # class AccessControlPolicy < Struct.new( :grants, :owner) SENSITIVE = [] include Aws::Structure end # A container for information about access control for replicas. # # @!attribute [rw] owner # Specifies the replica ownership. For default and valid values, see # [PUT bucket replication][1] in the *Amazon S3 API Reference*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlTranslation AWS API Documentation # class AccessControlTranslation < Struct.new( :owner) SENSITIVE = [] include Aws::Structure end # A conjunction (logical AND) of predicates, which is used in evaluating # a metrics filter. The operator must have at least two predicates in # any combination, and an object must match all of the predicates for # the filter to apply. # # @!attribute [rw] prefix # The prefix to use when evaluating an AND predicate: The prefix that # an object must have to be included in the metrics results. # @return [String] # # @!attribute [rw] tags # The list of tags to use when evaluating an AND predicate. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator AWS API Documentation # class AnalyticsAndOperator < Struct.new( :prefix, :tags) SENSITIVE = [] include Aws::Structure end # Specifies the configuration and any analyses for the analytics filter # of an Amazon S3 bucket. # # @!attribute [rw] id # The ID that identifies the analytics configuration. # @return [String] # # @!attribute [rw] filter # The filter used to describe a set of objects for analyses. A filter # must have exactly one prefix, one tag, or one conjunction # (AnalyticsAndOperator). If no filter is provided, all objects will # be considered in any analysis. # @return [Types::AnalyticsFilter] # # @!attribute [rw] storage_class_analysis # Contains data related to access patterns to be collected and made # available to analyze the tradeoffs between different storage # classes. # @return [Types::StorageClassAnalysis] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration AWS API Documentation # class AnalyticsConfiguration < Struct.new( :id, :filter, :storage_class_analysis) SENSITIVE = [] include Aws::Structure end # Where to publish the analytics results. # # @!attribute [rw] s3_bucket_destination # A destination signifying output to an S3 bucket. # @return [Types::AnalyticsS3BucketDestination] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination AWS API Documentation # class AnalyticsExportDestination < Struct.new( :s3_bucket_destination) SENSITIVE = [] include Aws::Structure end # The filter used to describe a set of objects for analyses. A filter # must have exactly one prefix, one tag, or one conjunction # (AnalyticsAndOperator). If no filter is provided, all objects will be # considered in any analysis. 
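    #
    # A minimal sketch of the hash form of a filter as it could be sent
    # through the client (for example via
    # {Client#put_bucket_analytics_configuration}); the prefix and tag
    # values are illustrative assumptions:
    #
    # @example Hypothetical: a filter combining a prefix with one tag
    #
    #   {
    #     and: {
    #       prefix: 'documents/',
    #       tags: [{ key: 'priority', value: 'high' }]
    #     }
    #   }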
# # @!attribute [rw] prefix # The prefix to use when evaluating an analytics filter. # @return [String] # # @!attribute [rw] tag # The tag to use when evaluating an analytics filter. # @return [Types::Tag] # # @!attribute [rw] and # A conjunction (logical AND) of predicates, which is used in # evaluating an analytics filter. The operator must have at least two # predicates. # @return [Types::AnalyticsAndOperator] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter AWS API Documentation # class AnalyticsFilter < Struct.new( :prefix, :tag, :and) SENSITIVE = [] include Aws::Structure end # Contains information about where to publish the analytics results. # # @!attribute [rw] format # Specifies the file format used when exporting data to Amazon S3. # @return [String] # # @!attribute [rw] bucket_account_id # The account ID that owns the destination S3 bucket. If no account ID # is provided, the owner is not validated before exporting data. # # Although this value is optional, we strongly recommend that you set # it to help prevent problems if the destination bucket ownership # changes. # # # @return [String] # # @!attribute [rw] bucket # The Amazon Resource Name (ARN) of the bucket to which data is # exported. # @return [String] # # @!attribute [rw] prefix # The prefix to use when exporting data. The prefix is prepended to # all results. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination AWS API Documentation # class AnalyticsS3BucketDestination < Struct.new( :format, :bucket_account_id, :bucket, :prefix) SENSITIVE = [] include Aws::Structure end # In terms of implementation, a Bucket is a resource. # # @!attribute [rw] name # The name of the bucket. # @return [String] # # @!attribute [rw] creation_date # Date the bucket was created. This date can change when making # changes to your bucket, such as editing its bucket policy. # @return [Time] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket AWS API Documentation # class Bucket < Struct.new( :name, :creation_date) SENSITIVE = [] include Aws::Structure end # The requested bucket name is not available. The bucket namespace is # shared by all users of the system. Select a different name and try # again. # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketAlreadyExists AWS API Documentation # class BucketAlreadyExists < Aws::EmptyStructure; end # The bucket you tried to create already exists, and you own it. Amazon # S3 returns this error in all Amazon Web Services Regions except in the # North Virginia Region. For legacy compatibility, if you re-create an # existing bucket that you already own in the North Virginia Region, # Amazon S3 returns 200 OK and resets the bucket access control lists # (ACLs). # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketAlreadyOwnedByYou AWS API Documentation # class BucketAlreadyOwnedByYou < Aws::EmptyStructure; end # Specifies the information about the bucket that will be created. For # more information about directory buckets, see [Directory buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is only supported by directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html # # @!attribute [rw] data_redundancy # The number of Availability Zone that's used for redundancy for the # bucket. # @return [String] # # @!attribute [rw] type # The type of bucket. 
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketInfo AWS API Documentation # class BucketInfo < Struct.new( :data_redundancy, :type) SENSITIVE = [] include Aws::Structure end # Specifies the lifecycle configuration for objects in an Amazon S3 # bucket. For more information, see [Object Lifecycle Management][1] in # the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html # # @!attribute [rw] rules # A lifecycle rule for individual objects in an Amazon S3 bucket. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration AWS API Documentation # class BucketLifecycleConfiguration < Struct.new( :rules) SENSITIVE = [] include Aws::Structure end # Container for logging status information. # # @!attribute [rw] logging_enabled # Describes where logs are stored and the prefix that Amazon S3 # assigns to all log object keys for a bucket. For more information, # see [PUT Bucket logging][1] in the *Amazon S3 API Reference*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html # @return [Types::LoggingEnabled] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus AWS API Documentation # class BucketLoggingStatus < Struct.new( :logging_enabled) SENSITIVE = [] include Aws::Structure end # Describes the cross-origin access configuration for objects in an # Amazon S3 bucket. For more information, see [Enabling Cross-Origin # Resource Sharing][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html # # @!attribute [rw] cors_rules # A set of origins and methods (cross-origin access that you want to # allow). You can add up to 100 rules to the configuration. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration AWS API Documentation # class CORSConfiguration < Struct.new( :cors_rules) SENSITIVE = [] include Aws::Structure end # Specifies a cross-origin access rule for an Amazon S3 bucket. # # @!attribute [rw] id # Unique identifier for the rule. The value cannot be longer than 255 # characters. # @return [String] # # @!attribute [rw] allowed_headers # Headers that are specified in the `Access-Control-Request-Headers` # header. These headers are allowed in a preflight OPTIONS request. In # response to any preflight OPTIONS request, Amazon S3 returns any # requested headers that are allowed. # @return [Array] # # @!attribute [rw] allowed_methods # An HTTP method that you allow the origin to execute. Valid values # are `GET`, `PUT`, `HEAD`, `POST`, and `DELETE`. # @return [Array] # # @!attribute [rw] allowed_origins # One or more origins you want customers to be able to access the # bucket from. # @return [Array] # # @!attribute [rw] expose_headers # One or more headers in the response that you want customers to be # able to access from their applications (for example, from a # JavaScript `XMLHttpRequest` object). # @return [Array] # # @!attribute [rw] max_age_seconds # The time in seconds that your browser is to cache the preflight # response for the specified resource. 
# @return [Integer] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule AWS API Documentation # class CORSRule < Struct.new( :id, :allowed_headers, :allowed_methods, :allowed_origins, :expose_headers, :max_age_seconds) SENSITIVE = [] include Aws::Structure end # Describes how an uncompressed comma-separated values (CSV)-formatted # input object is formatted. # # @!attribute [rw] file_header_info # Describes the first line of input. Valid values are: # # * `NONE`: First line is not a header. # # * `IGNORE`: First line is a header, but you can't use the header # values to indicate the column in an expression. You can use column # position (such as \_1, \_2, …) to indicate the column (`SELECT # s._1 FROM OBJECT s`). # # * `Use`: First line is a header, and you can use the header value to # identify a column in an expression (`SELECT "name" FROM OBJECT`). # @return [String] # # @!attribute [rw] comments # A single character used to indicate that a row should be ignored # when the character is present at the start of that row. You can # specify any character to indicate a comment line. The default # character is `#`. # # Default: `#` # @return [String] # # @!attribute [rw] quote_escape_character # A single character used for escaping the quotation mark character # inside an already escaped value. For example, the value `""" a , b # """` is parsed as `" a , b "`. # @return [String] # # @!attribute [rw] record_delimiter # A single character used to separate individual records in the input. # Instead of the default value, you can specify an arbitrary # delimiter. # @return [String] # # @!attribute [rw] field_delimiter # A single character used to separate individual fields in a record. # You can specify an arbitrary delimiter. # @return [String] # # @!attribute [rw] quote_character # A single character used for escaping when the field delimiter is # part of the value. For example, if the value is `a, b`, Amazon S3 # wraps this field value in quotation marks, as follows: `" a , b "`. # # Type: String # # Default: `"` # # Ancestors: `CSV` # @return [String] # # @!attribute [rw] allow_quoted_record_delimiter # Specifies that CSV field values may contain quoted record delimiters # and such records should be allowed. Default value is FALSE. Setting # this value to TRUE may lower performance. # @return [Boolean] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CSVInput AWS API Documentation # class CSVInput < Struct.new( :file_header_info, :comments, :quote_escape_character, :record_delimiter, :field_delimiter, :quote_character, :allow_quoted_record_delimiter) SENSITIVE = [] include Aws::Structure end # Describes how uncompressed comma-separated values (CSV)-formatted # results are formatted. # # @!attribute [rw] quote_fields # Indicates whether to use quotation marks around output fields. # # * `ALWAYS`: Always use quotation marks for output fields. # # * `ASNEEDED`: Use quotation marks for output fields when needed. # @return [String] # # @!attribute [rw] quote_escape_character # The single character used for escaping the quote character inside an # already escaped value. # @return [String] # # @!attribute [rw] record_delimiter # A single character used to separate individual records in the # output. Instead of the default value, you can specify an arbitrary # delimiter. # @return [String] # # @!attribute [rw] field_delimiter # The value used to separate individual fields in a record. You can # specify an arbitrary delimiter. 
# @return [String] # # @!attribute [rw] quote_character # A single character used for escaping when the field delimiter is # part of the value. For example, if the value is `a, b`, Amazon S3 # wraps this field value in quotation marks, as follows: `" a , b "`. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CSVOutput AWS API Documentation # class CSVOutput < Struct.new( :quote_fields, :quote_escape_character, :record_delimiter, :field_delimiter, :quote_character) SENSITIVE = [] include Aws::Structure end # Contains all the possible checksum or digest values for an object. # # @!attribute [rw] checksum_crc32 # The base64-encoded, 32-bit CRC32 checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_crc32c # The base64-encoded, 32-bit CRC32C checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha1 # The base64-encoded, 160-bit SHA-1 digest of the object. This will # only be present if it was uploaded with the object. When you use the # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha256 # The base64-encoded, 256-bit SHA-256 digest of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Checksum AWS API Documentation # class Checksum < Struct.new( :checksum_crc32, :checksum_crc32c, :checksum_sha1, :checksum_sha256) SENSITIVE = [] include Aws::Structure end # Container for specifying the Lambda notification configuration. # # @!attribute [rw] id # An optional unique identifier for configurations in a notification # configuration. If you don't provide one, Amazon S3 will assign an # ID. # @return [String] # # @!attribute [rw] event # The bucket event for which to send notifications. # @return [String] # # @!attribute [rw] events # Bucket events for which to send notifications. # @return [Array] # # @!attribute [rw] cloud_function # Lambda cloud function ARN that Amazon S3 can invoke when it detects # events of the specified type. # @return [String] # # @!attribute [rw] invocation_role # The role supporting the invocation of the Lambda function # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration AWS API Documentation # class CloudFunctionConfiguration < Struct.new( :id, :event, :events, :cloud_function, :invocation_role) SENSITIVE = [] include Aws::Structure end # Container for all (if there are any) keys between Prefix and the next # occurrence of the string specified by a delimiter. CommonPrefixes # lists keys that act like subdirectories in the directory specified by # Prefix. For example, if the prefix is notes/ and the delimiter is a # slash (/) as in notes/summer/july, the common prefix is notes/summer/. # # @!attribute [rw] prefix # Container for the specified common prefix. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix AWS API Documentation # class CommonPrefix < Struct.new( :prefix) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] location # The URI that identifies the newly created object. # @return [String] # # @!attribute [rw] bucket # The name of the bucket that contains the newly created object. Does # not return the access point ARN or access point alias if used. # # Access points are not supported by directory buckets. # # # @return [String] # # @!attribute [rw] key # The object key of the newly created object. # @return [String] # # @!attribute [rw] expiration # If the object expiration is configured, this will contain the # expiration date (`expiry-date`) and rule ID (`rule-id`). The value # of `rule-id` is URL-encoded. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] etag # Entity tag that identifies the newly created object's data. Objects # with different object data will have different entity tags. The # entity tag is an opaque string. The entity tag may or may not be an # MD5 digest of the object data. If the entity tag is not an MD5 # digest of the object data, it will contain one or more # nonhexadecimal characters and/or will consist of less than 32 or # more than 32 hexadecimal digits. For more information about how the # entity tag is calculated, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_crc32 # The base64-encoded, 32-bit CRC32 checksum of the object. 
This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_crc32c # The base64-encoded, 32-bit CRC32C checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha1 # The base64-encoded, 160-bit SHA-1 digest of the object. This will # only be present if it was uploaded with the object. When you use the # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha256 # The base64-encoded, 256-bit SHA-256 digest of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] server_side_encryption # The server-side encryption algorithm used when storing this object # in Amazon S3 (for example, `AES256`, `aws:kms`). # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # @return [String] # # @!attribute [rw] version_id # Version ID of the newly created object, in case the bucket has # versioning turned on. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] ssekms_key_id # If present, indicates the ID of the Key Management Service (KMS) # symmetric encryption customer managed key that was used for the # object. # # This functionality is not supported for directory buckets. 
# # # @return [String] # # @!attribute [rw] bucket_key_enabled # Indicates whether the multipart upload uses an S3 Bucket Key for # server-side encryption with Key Management Service (KMS) keys # (SSE-KMS). # # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput AWS API Documentation # class CompleteMultipartUploadOutput < Struct.new( :location, :bucket, :key, :expiration, :etag, :checksum_crc32, :checksum_crc32c, :checksum_sha1, :checksum_sha256, :server_side_encryption, :version_id, :ssekms_key_id, :bucket_key_enabled, :request_charged) SENSITIVE = [:ssekms_key_id] include Aws::Structure end # @!attribute [rw] bucket # Name of the bucket to which the multipart upload was initiated. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] key # Object key for which the multipart upload was initiated. # @return [String] # # @!attribute [rw] multipart_upload # The container for the multipart upload request information. # @return [Types::CompletedMultipartUpload] # # @!attribute [rw] upload_id # ID for the initiated multipart upload. 
# @return [String] # # @!attribute [rw] checksum_crc32 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32 checksum of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_crc32c # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32C checksum of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_sha1 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 160-bit SHA-1 digest of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_sha256 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 256-bit SHA-256 digest of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] sse_customer_algorithm # The server-side encryption (SSE) algorithm used to encrypt the # object. This parameter is required only when the object was created # using a checksum algorithm or if your bucket policy requires the use # of SSE-C. For more information, see [Protecting data using SSE-C # keys][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key # @return [String] # # @!attribute [rw] sse_customer_key # The server-side encryption (SSE) customer managed key. This # parameter is needed only when the object was created using a # checksum algorithm. 
For more information, see [Protecting data using # SSE-C keys][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @return [String] # # @!attribute [rw] sse_customer_key_md5 # The MD5 digest of the server-side encryption (SSE) customer managed # key. This parameter is needed only when the object was created using # a checksum algorithm. For more information, see [Protecting data using # SSE-C keys][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest AWS API Documentation # class CompleteMultipartUploadRequest < Struct.new( :bucket, :key, :multipart_upload, :upload_id, :checksum_crc32, :checksum_crc32c, :checksum_sha1, :checksum_sha256, :request_payer, :expected_bucket_owner, :sse_customer_algorithm, :sse_customer_key, :sse_customer_key_md5) SENSITIVE = [:sse_customer_key] include Aws::Structure end # The container for the completed multipart upload details. # # @!attribute [rw] parts # Array of CompletedPart data types. # # If you do not supply a valid `Part` with your request, the service # sends back an HTTP 400 response. # @return [Array<Types::CompletedPart>] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload AWS API Documentation # class CompletedMultipartUpload < Struct.new( :parts) SENSITIVE = [] include Aws::Structure end # Details of the parts that were uploaded. # # @!attribute [rw] etag # Entity tag returned when the part was uploaded. # @return [String] # # @!attribute [rw] checksum_crc32 # The base64-encoded, 32-bit CRC32 checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_crc32c # The base64-encoded, 32-bit CRC32C checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha1 # The base64-encoded, 160-bit SHA-1 digest of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object.
Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha256 # The base64-encoded, 256-bit SHA-256 digest of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] part_number # Part number that identifies the part. This is a positive integer # between 1 and 10,000. # # * **General purpose buckets** - In `CompleteMultipartUpload`, when an # additional checksum (including `x-amz-checksum-crc32`, # `x-amz-checksum-crc32c`, `x-amz-checksum-sha1`, or # `x-amz-checksum-sha256`) is applied to each part, the `PartNumber` # must start at 1 and the part numbers must be consecutive. # Otherwise, Amazon S3 generates an HTTP `400 Bad Request` status # code and an `InvalidPartOrder` error code. # # * **Directory buckets** - In `CompleteMultipartUpload`, the # `PartNumber` must start at 1 and the part numbers must be # consecutive. # # # @return [Integer] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart AWS API Documentation # class CompletedPart < Struct.new( :etag, :checksum_crc32, :checksum_crc32c, :checksum_sha1, :checksum_sha256, :part_number) SENSITIVE = [] include Aws::Structure end # A container for describing a condition that must be met for the # specified redirect to apply. For example: 1. If a request is for pages # in the `/docs` folder, redirect to the `/documents` folder. 2. If a # request results in an HTTP 4xx error, redirect the request to another # host where you might process the error. # # @!attribute [rw] http_error_code_returned_equals # The HTTP error code when the redirect is applied. In the event of an # error, if the error code equals this value, then the specified # redirect is applied. Required when parent element `Condition` is # specified and sibling `KeyPrefixEquals` is not specified. If both # are specified, then both must be true for the redirect to be # applied. # @return [String] # # @!attribute [rw] key_prefix_equals # The object key name prefix when the redirect is applied. For # example, to redirect requests for `ExamplePage.html`, the key prefix # will be `ExamplePage.html`. To redirect requests for all pages with # the prefix `docs/`, the key prefix will be `docs/`, which identifies # all objects in the `docs/` folder. Required when the parent element # `Condition` is specified and sibling `HttpErrorCodeReturnedEquals` # is not specified. If both conditions are specified, both must be # true for the redirect to be applied. # # Replacement must be made for object keys containing special # characters (such as carriage returns) when using XML requests. For # more information, see [ XML related object key constraints][1].
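#
#     A short sketch of a `Condition` in context, assuming a hypothetical
#     bucket configured as a website (either condition field may also be
#     used alone):
#
#         client.put_bucket_website(
#           bucket: "amzn-s3-demo-bucket", # hypothetical
#           website_configuration: {
#             index_document: { suffix: "index.html" },
#             routing_rules: [{
#               condition: {
#                 key_prefix_equals: "docs/",
#                 http_error_code_returned_equals: "404"
#               },
#               redirect: { replace_key_prefix_with: "documents/" }
#             }]
#           }
#         )
#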
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition AWS API Documentation # class Condition < Struct.new( :http_error_code_returned_equals, :key_prefix_equals) SENSITIVE = [] include Aws::Structure end # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ContinuationEvent AWS API Documentation # class ContinuationEvent < Struct.new( :event_type) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] copy_object_result # Container for all response elements. # @return [Types::CopyObjectResult] # # @!attribute [rw] expiration # If the object expiration is configured, the response includes this # header. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] copy_source_version_id # Version ID of the source object that was copied. # # This functionality is not supported when the source object is in a # directory bucket. # # # @return [String] # # @!attribute [rw] version_id # Version ID of the newly created copy. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] server_side_encryption # The server-side encryption algorithm used when you store this object # in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # @return [String] # # @!attribute [rw] sse_customer_algorithm # If server-side encryption with a customer-provided encryption key # was requested, the response will include this header to confirm the # encryption algorithm that's used. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] sse_customer_key_md5 # If server-side encryption with a customer-provided encryption key # was requested, the response will include this header to provide the # round-trip message integrity verification of the customer-provided # encryption key. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] ssekms_key_id # If present, indicates the ID of the Key Management Service (KMS) # symmetric encryption customer managed key that was used for the # object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] ssekms_encryption_context # If present, indicates the Amazon Web Services KMS Encryption Context # to use for object encryption. The value of this header is a # base64-encoded UTF-8 string holding JSON with the encryption context # key-value pairs. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] bucket_key_enabled # Indicates whether the copied object uses an S3 Bucket Key for # server-side encryption with Key Management Service (KMS) keys # (SSE-KMS). # # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. 
# # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput AWS API Documentation # class CopyObjectOutput < Struct.new( :copy_object_result, :expiration, :copy_source_version_id, :version_id, :server_side_encryption, :sse_customer_algorithm, :sse_customer_key_md5, :ssekms_key_id, :ssekms_encryption_context, :bucket_key_enabled, :request_charged) SENSITIVE = [:ssekms_key_id, :ssekms_encryption_context] include Aws::Structure end # @!attribute [rw] acl # The canned access control list (ACL) to apply to the object. # # When you copy an object, the ACL metadata is not preserved and is # set to `private` by default. Only the owner has full access control. # To override the default ACL setting, specify a new ACL when you # generate a copy request. For more information, see [Using ACLs][1]. # # If the destination bucket that you're copying objects to uses the # bucket owner enforced setting for S3 Object Ownership, ACLs are # disabled and no longer affect permissions. Buckets that use this # setting only accept `PUT` requests that don't specify an ACL or # `PUT` requests that specify bucket owner full control ACLs, such as # the `bucket-owner-full-control` canned ACL or an equivalent form of # this ACL expressed in the XML format. For more information, see # [Controlling ownership of objects and disabling ACLs][2] in the # *Amazon S3 User Guide*. # # * If your destination bucket uses the bucket owner enforced setting # for Object Ownership, all objects written to the bucket by any # account will be owned by the bucket owner. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # @return [String] # # @!attribute [rw] bucket # The name of the destination bucket. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. 
# When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] cache_control # Specifies the caching behavior along the request/reply chain. # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm that you want Amazon S3 to use to create the # checksum for the object. For more information, see [Checking object # integrity][1] in the *Amazon S3 User Guide*. # # When you copy an object, if the source object has a checksum, that # checksum value will be copied to the new object by default. If the # `CopyObject` request does not include this # `x-amz-checksum-algorithm` header, the checksum algorithm will be # copied from the source object to the destination object (if it's # present on the source object). You can optionally specify a # different checksum algorithm to use with the # `x-amz-checksum-algorithm` header. Unrecognized or unsupported # values will respond with the HTTP status code `400 Bad Request`. # # For directory buckets, when you use Amazon Web Services SDKs, # `CRC32` is the default checksum algorithm that's used for # performance. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] content_disposition # Specifies presentational information for the object. Indicates # whether an object should be displayed in a web browser or downloaded # as a file. It allows specifying the desired filename for the # downloaded file. # @return [String] # # @!attribute [rw] content_encoding # Specifies what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the # media-type referenced by the Content-Type header field. # # For directory buckets, only the `aws-chunked` value is supported in # this header field. # # # @return [String] # # @!attribute [rw] content_language # The language the content is in. # @return [String] # # @!attribute [rw] content_type # A standard MIME type that describes the format of the object data. # @return [String] # # @!attribute [rw] copy_source # Specifies the source object for the copy operation. The source # object can be up to 5 GB. If the source object is an object that was # uploaded by using a multipart upload, the object copy will be a # single part object after the source object is copied to the # destination bucket. # # You specify the value of the copy source in one of two formats, # depending on whether you want to access the source object through an # [access point][1]: # # * For objects not accessed through an access point, specify the name # of the source bucket and the key of the source object, separated # by a slash (/). For example, to copy the object # `reports/january.pdf` from the general purpose bucket # `awsexamplebucket`, use `awsexamplebucket/reports/january.pdf`. # The value must be URL-encoded. 
To copy the object # `reports/january.pdf` from the directory bucket # `awsexamplebucket--use1-az5--x-s3`, use # `awsexamplebucket--use1-az5--x-s3/reports/january.pdf`. The value # must be URL-encoded. # # * For objects accessed through access points, specify the Amazon # Resource Name (ARN) of the object as accessed through the access # point, in the format # `arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>`. # For example, to copy the object `reports/january.pdf` through # access point `my-access-point` owned by account `123456789012` in # Region `us-west-2`, use the URL encoding of # `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`. # The value must be URL-encoded. # # * Amazon S3 supports copy operations using Access points only when # the source and destination buckets are in the same Amazon Web # Services Region. # # * Access points are not supported by directory buckets. # # # # Alternatively, for objects accessed through Amazon S3 on Outposts, # specify the ARN of the object as accessed in the format # `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>`. # For example, to copy the object `reports/january.pdf` through # outpost `my-outpost` owned by account `123456789012` in Region # `us-west-2`, use the URL encoding of # `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`. # The value must be URL-encoded. # # If your source bucket versioning is enabled, the `x-amz-copy-source` # header by default identifies the current version of an object to # copy. If the current version is a delete marker, Amazon S3 behaves # as if the object was deleted. To copy a different version, use the # `versionId` query parameter. Specifically, append # `?versionId=<version-id>` to the value (for example, # `awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`). # If you don't specify a version ID, Amazon S3 copies the latest # version of the source object. # # If you enable versioning on the destination bucket, Amazon S3 # generates a unique version ID for the copied object. This version ID # is different from the version ID of the source object. Amazon S3 # returns the version ID of the copied object in the # `x-amz-version-id` response header in the response. # # If you do not enable versioning or suspend it on the destination # bucket, the version ID that Amazon S3 generates in the # `x-amz-version-id` response header is always null. # # **Directory buckets** - S3 Versioning isn't enabled or supported # for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html # @return [String] # # @!attribute [rw] copy_source_if_match # Copies the object if its entity tag (ETag) matches the specified # tag. # # If both the `x-amz-copy-source-if-match` and # `x-amz-copy-source-if-unmodified-since` headers are present in the # request and evaluate as follows, Amazon S3 returns `200 OK` and # copies the data: # # * `x-amz-copy-source-if-match` condition evaluates to true # # * `x-amz-copy-source-if-unmodified-since` condition evaluates to # false # @return [String] # # @!attribute [rw] copy_source_if_modified_since # Copies the object if it has been modified since the specified time.
# # If both the `x-amz-copy-source-if-none-match` and # `x-amz-copy-source-if-modified-since` headers are present in the # request and evaluate as follows, Amazon S3 returns the `412 # Precondition Failed` response code: # # * `x-amz-copy-source-if-none-match` condition evaluates to false # # * `x-amz-copy-source-if-modified-since` condition evaluates to true # @return [Time] # # @!attribute [rw] copy_source_if_none_match # Copies the object if its entity tag (ETag) is different than the # specified ETag. # # If both the `x-amz-copy-source-if-none-match` and # `x-amz-copy-source-if-modified-since` headers are present in the # request and evaluate as follows, Amazon S3 returns the `412 # Precondition Failed` response code: # # * `x-amz-copy-source-if-none-match` condition evaluates to false # # * `x-amz-copy-source-if-modified-since` condition evaluates to true # @return [String] # # @!attribute [rw] copy_source_if_unmodified_since # Copies the object if it hasn't been modified since the specified # time. # # If both the `x-amz-copy-source-if-match` and # `x-amz-copy-source-if-unmodified-since` headers are present in the # request and evaluate as follows, Amazon S3 returns `200 OK` and # copies the data: # # * `x-amz-copy-source-if-match` condition evaluates to true # # * `x-amz-copy-source-if-unmodified-since` condition evaluates to # false # @return [Time] # # @!attribute [rw] expires # The date and time at which the object is no longer cacheable. # @return [Time] # # @!attribute [rw] grant_full_control # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the # object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @return [String] # # @!attribute [rw] grant_read # Allows grantee to read the object data and its metadata. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @return [String] # # @!attribute [rw] grant_read_acp # Allows grantee to read the object ACL. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @return [String] # # @!attribute [rw] grant_write_acp # Allows grantee to write the ACL for the applicable object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @return [String] # # @!attribute [rw] key # The key of the destination object. # @return [String] # # @!attribute [rw] metadata # A map of metadata to store with the object in S3. # @return [Hash<String,String>] # # @!attribute [rw] metadata_directive # Specifies whether the metadata is copied from the source object or # replaced with metadata that's provided in the request. When copying # an object, you can preserve all metadata (the default) or specify # new metadata. If this header isn’t specified, `COPY` is the default # behavior. # # **General purpose bucket** - For general purpose buckets, when you # grant permissions, you can use the `s3:x-amz-metadata-directive` # condition key to enforce certain metadata behavior when objects are # uploaded. For more information, see [Amazon S3 condition key # examples][1] in the *Amazon S3 User Guide*. # # `x-amz-website-redirect-location` is unique to each object and is # not copied when using the `x-amz-metadata-directive` header.
To copy # the value, you must specify `x-amz-website-redirect-location` in the # request header. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html # @return [String] # # @!attribute [rw] tagging_directive # Specifies whether the object tag-set is copied from the source # object or replaced with the tag-set that's provided in the request. # # The default value is `COPY`. # # **Directory buckets** - For directory buckets in a `CopyObject` # operation, only the empty tag-set is supported. Any requests that # attempt to write non-empty tags into directory buckets will receive # a `501 Not Implemented` status code. When the destination bucket is # a directory bucket, you will receive a `501 Not Implemented` # response in any of the following situations: # # * When you attempt to `COPY` the tag-set from an S3 source object # that has non-empty tags. # # * When you attempt to `REPLACE` the tag-set of a source object and # set a non-empty value to `x-amz-tagging`. # # * When you don't set the `x-amz-tagging-directive` header and the # source object has non-empty tags. This is because the default # value of `x-amz-tagging-directive` is `COPY`. # # Because only the empty tag-set is supported for directory buckets in # a `CopyObject` operation, the following situations are allowed: # # * When you attempt to `COPY` the tag-set from a directory bucket # source object that has no tags to a general purpose bucket. It # copies an empty tag-set to the destination object. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and set the `x-amz-tagging` value of the directory # bucket destination object to empty. # # * When you attempt to `REPLACE` the tag-set of a general purpose # bucket source object that has non-empty tags and set the # `x-amz-tagging` value of the directory bucket destination object # to empty. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and don't set the `x-amz-tagging` value of the # directory bucket destination object. This is because the default # value of `x-amz-tagging` is the empty value. # # # @return [String] # # @!attribute [rw] server_side_encryption # The server-side encryption algorithm used when storing this object # in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). # Unrecognized or unsupported values won’t write a destination object # and will receive a `400 Bad Request` response. # # Amazon S3 automatically encrypts all new objects that are copied to # an S3 bucket. When copying an object, if you don't specify # encryption information in your copy request, the encryption setting # of the target object is set to the default encryption configuration # of the destination bucket. By default, all buckets have a base level # of encryption configuration that uses server-side encryption with # Amazon S3 managed keys (SSE-S3). If the destination bucket has a # default encryption configuration that uses server-side encryption # with Key Management Service (KMS) keys (SSE-KMS), dual-layer # server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), # or server-side encryption with customer-provided encryption keys # (SSE-C), Amazon S3 uses the corresponding KMS key, or a # customer-provided key to encrypt the target object copy. 
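#
#     A sketch of explicitly requesting SSE-KMS for the object copy
#     (bucket names and the KMS key ID are hypothetical; as described
#     below, the request setting takes precedence over the bucket
#     default):
#
#         client.copy_object(
#           bucket: "amzn-s3-demo-dest-bucket",
#           key: "report-copy.pdf",
#           copy_source: "amzn-s3-demo-source-bucket/report.pdf",
#           server_side_encryption: "aws:kms",
#           ssekms_key_id: "1234abcd-12ab-34cd-56ef-1234567890ab" # hypothetical
#         )
#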
# # When you perform a `CopyObject` operation, if you want to use a # different type of encryption setting for the target object, you can # specify appropriate encryption-related headers to encrypt the target # object with an Amazon S3 managed key, a KMS key, or a # customer-provided key. If the encryption setting in your request is # different from the default encryption configuration of the # destination bucket, the encryption setting in your request takes # precedence. # # With server-side encryption, Amazon S3 encrypts your data as it # writes your data to disks in its data centers and decrypts the data # when you access it. For more information about server-side # encryption, see [Using Server-Side Encryption][1] in the *Amazon S3 # User Guide*. # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html # @return [String] # # @!attribute [rw] storage_class # If the `x-amz-storage-class` header is not used, the copied object # will be stored in the `STANDARD` Storage Class by default. The # `STANDARD` storage class provides high durability and high # availability. Depending on performance needs, you can specify a # different Storage Class. # # * Directory buckets - For directory buckets, only the S3 # Express One Zone storage class is supported to store newly created # objects. Unsupported storage class values won't write a # destination object and will respond with the HTTP status code `400 # Bad Request`. # # * Amazon S3 on Outposts - S3 on Outposts only uses the # `OUTPOSTS` Storage Class. # # # # You can use the `CopyObject` action to change the storage class of # an object that is already stored in Amazon S3 by using the # `x-amz-storage-class` header. For more information, see [Storage # Classes][1] in the *Amazon S3 User Guide*. # # Before using an object as a source object for the copy operation, # you must restore a copy of it if it meets any of the following # conditions: # # * The storage class of the source object is `GLACIER` or # `DEEP_ARCHIVE`. # # * The storage class of the source object is `INTELLIGENT_TIERING` # and its [S3 Intelligent-Tiering access tier][2] is `Archive # Access` or `Deep Archive Access`. # # For more information, see [RestoreObject][3] and [Copying # Objects][4] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html # @return [String] # # @!attribute [rw] website_redirect_location # If the destination bucket is configured as a website, redirects # requests for this object copy to another object in the same bucket # or to an external URL. Amazon S3 stores the value of this header in # the object metadata. This value is unique to each object and is not # copied when using the `x-amz-metadata-directive` header. Instead, # you may opt to provide this header in combination with the # `x-amz-metadata-directive` header. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, `AES256`).
# # When you perform a `CopyObject` operation, if you want to use a # different type of encryption setting for the target object, you can # specify appropriate encryption-related headers to encrypt the target # object with an Amazon S3 managed key, a KMS key, or a # customer-provided key. If the encryption setting in your request is # different from the default encryption configuration of the # destination bucket, the encryption setting in your request takes # precedence. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @return [String] # # @!attribute [rw] sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use # in encrypting data. This value is used to store the object and then # it is discarded. Amazon S3 does not store the encryption key. The # key must be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @return [String] # # @!attribute [rw] sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check # to ensure that the encryption key was transmitted without error. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @return [String] # # @!attribute [rw] ssekms_key_id # Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for # object encryption. All GET and PUT requests for an object protected # by KMS will fail if they're not made via SSL or using SigV4. For # information about configuring any of the officially supported Amazon # Web Services SDKs and Amazon Web Services CLI, see [Specifying the # Signature Version in Request Authentication][1] in the *Amazon S3 # User Guide*. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version # @return [String] # # @!attribute [rw] ssekms_encryption_context # Specifies the Amazon Web Services KMS Encryption Context to use for # object encryption. The value of this header is a base64-encoded # UTF-8 string holding JSON with the encryption context key-value # pairs. This value must be explicitly added to specify encryption # context for `CopyObject` requests. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @return [String] # # @!attribute [rw] bucket_key_enabled # Specifies whether Amazon S3 should use an S3 Bucket Key for object # encryption with server-side encryption using Key Management Service # (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can # enable an S3 Bucket Key for the object. # # Setting this header to `true` causes Amazon S3 to use an S3 Bucket # Key for object encryption with SSE-KMS. Specifying this header with # a COPY action doesn’t affect bucket-level settings for S3 Bucket # Key. # # For more information, see [Amazon S3 Bucket Keys][1] in the *Amazon # S3 User Guide*. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html # @return [Boolean] # # @!attribute [rw] copy_source_sse_customer_algorithm # Specifies the algorithm to use when decrypting the source object # (for example, `AES256`). 
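#
#     A sketch of supplying the source object's SSE-C information on a
#     copy (names and key material are hypothetical; the key must be the
#     same 256-bit key that was used when the source object was created):
#
#         client.copy_object(
#           bucket: "amzn-s3-demo-dest-bucket",
#           key: "copied.bin",
#           copy_source: "amzn-s3-demo-source-bucket/original.bin",
#           copy_source_sse_customer_algorithm: "AES256",
#           copy_source_sse_customer_key: secret_key # hypothetical 32-byte key
#         )
#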
# # If the source object for the copy is stored in Amazon S3 using # SSE-C, you must provide the necessary encryption information in your # request so that Amazon S3 can decrypt the object for copying. # # This functionality is not supported when the source object is in a # directory bucket. # # # @return [String] # # @!attribute [rw] copy_source_sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use # to decrypt the source object. The encryption key provided in this # header must be the same one that was used when the source object was # created. # # If the source object for the copy is stored in Amazon S3 using # SSE-C, you must provide the necessary encryption information in your # request so that Amazon S3 can decrypt the object for copying. # # This functionality is not supported when the source object is in a # directory bucket. # # # @return [String] # # @!attribute [rw] copy_source_sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check # to ensure that the encryption key was transmitted without error. # # If the source object for the copy is stored in Amazon S3 using # SSE-C, you must provide the necessary encryption information in your # request so that Amazon S3 can decrypt the object for copying. # # This functionality is not supported when the source object is in a # directory bucket. # # # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] tagging # The tag-set for the object copy in the destination bucket. This # value must be used in conjunction with the `x-amz-tagging-directive` # if you choose `REPLACE` for the `x-amz-tagging-directive`. If you # choose `COPY` for the `x-amz-tagging-directive`, you don't need to # set the `x-amz-tagging` header, because the tag-set will be copied # from the source object directly. The tag-set must be encoded as URL # Query parameters. # # The default value is the empty value. # # **Directory buckets** - For directory buckets in a `CopyObject` # operation, only the empty tag-set is supported. Any requests that # attempt to write non-empty tags into directory buckets will receive # a `501 Not Implemented` status code. When the destination bucket is # a directory bucket, you will receive a `501 Not Implemented` # response in any of the following situations: # # * When you attempt to `COPY` the tag-set from an S3 source object # that has non-empty tags. # # * When you attempt to `REPLACE` the tag-set of a source object and # set a non-empty value to `x-amz-tagging`. # # * When you don't set the `x-amz-tagging-directive` header and the # source object has non-empty tags. This is because the default # value of `x-amz-tagging-directive` is `COPY`. 
# # Because only the empty tag-set is supported for directory buckets in # a `CopyObject` operation, the following situations are allowed: # # * When you attempt to `COPY` the tag-set from a directory bucket # source object that has no tags to a general purpose bucket. It # copies an empty tag-set to the destination object. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and set the `x-amz-tagging` value of the directory # bucket destination object to empty. # # * When you attempt to `REPLACE` the tag-set of a general purpose # bucket source object that has non-empty tags and set the # `x-amz-tagging` value of the directory bucket destination object # to empty. # # * When you attempt to `REPLACE` the tag-set of a directory bucket # source object and don't set the `x-amz-tagging` value of the # directory bucket destination object. This is because the default # value of `x-amz-tagging` is the empty value. # # # @return [String] # # @!attribute [rw] object_lock_mode # The Object Lock mode that you want to apply to the object copy. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] object_lock_retain_until_date # The date and time when you want the Object Lock of the object copy # to expire. # # This functionality is not supported for directory buckets. # # # @return [Time] # # @!attribute [rw] object_lock_legal_hold_status # Specifies whether you want to apply a legal hold to the object copy. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected destination bucket owner. If the # account ID that you provide does not match the actual owner of the # destination bucket, the request fails with the HTTP status code `403 # Forbidden` (access denied). # @return [String] # # @!attribute [rw] expected_source_bucket_owner # The account ID of the expected source bucket owner. If the account # ID that you provide does not match the actual owner of the source # bucket, the request fails with the HTTP status code `403 Forbidden` # (access denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest AWS API Documentation # class CopyObjectRequest < Struct.new( :acl, :bucket, :cache_control, :checksum_algorithm, :content_disposition, :content_encoding, :content_language, :content_type, :copy_source, :copy_source_if_match, :copy_source_if_modified_since, :copy_source_if_none_match, :copy_source_if_unmodified_since, :expires, :grant_full_control, :grant_read, :grant_read_acp, :grant_write_acp, :key, :metadata, :metadata_directive, :tagging_directive, :server_side_encryption, :storage_class, :website_redirect_location, :sse_customer_algorithm, :sse_customer_key, :sse_customer_key_md5, :ssekms_key_id, :ssekms_encryption_context, :bucket_key_enabled, :copy_source_sse_customer_algorithm, :copy_source_sse_customer_key, :copy_source_sse_customer_key_md5, :request_payer, :tagging, :object_lock_mode, :object_lock_retain_until_date, :object_lock_legal_hold_status, :expected_bucket_owner, :expected_source_bucket_owner) SENSITIVE = [:sse_customer_key, :ssekms_key_id, :ssekms_encryption_context, :copy_source_sse_customer_key] include Aws::Structure end # Container for all response elements. # # @!attribute [rw] etag # Returns the ETag of the new object. The ETag reflects only changes # to the contents of an object, not its metadata. 
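#
#     A minimal sketch of reading this result type from a copy (bucket
#     and key names are hypothetical):
#
#         resp = client.copy_object(
#           bucket: "amzn-s3-demo-dest-bucket",
#           key: "january-copy.pdf",
#           copy_source: "amzn-s3-demo-source-bucket/reports/january.pdf"
#         )
#         resp.copy_object_result.etag          # ETag of the new copy
#         resp.copy_object_result.last_modified # creation date of the copy
#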
# @return [String] # # @!attribute [rw] last_modified # Creation date of the object. # @return [Time] # # @!attribute [rw] checksum_crc32 # The base64-encoded, 32-bit CRC32 checksum of the object. This will # only be present if it was uploaded with the object. For more # information, see [ Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_crc32c # The base64-encoded, 32-bit CRC32C checksum of the object. This will # only be present if it was uploaded with the object. For more # information, see [ Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_sha1 # The base64-encoded, 160-bit SHA-1 digest of the object. This will # only be present if it was uploaded with the object. For more # information, see [ Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_sha256 # The base64-encoded, 256-bit SHA-256 digest of the object. This will # only be present if it was uploaded with the object. For more # information, see [ Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult AWS API Documentation # class CopyObjectResult < Struct.new( :etag, :last_modified, :checksum_crc32, :checksum_crc32c, :checksum_sha1, :checksum_sha256) SENSITIVE = [] include Aws::Structure end # Container for all response elements. # # @!attribute [rw] etag # Entity tag of the object. # @return [String] # # @!attribute [rw] last_modified # Date and time at which the object was uploaded. # @return [Time] # # @!attribute [rw] checksum_crc32 # The base64-encoded, 32-bit CRC32 checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_crc32c # The base64-encoded, 32-bit CRC32C checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha1 # The base64-encoded, 160-bit SHA-1 digest of the object. 
This will # only be present if it was uploaded with the object. When you use the # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha256 # The base64-encoded, 256-bit SHA-256 digest of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult AWS API Documentation # class CopyPartResult < Struct.new( :etag, :last_modified, :checksum_crc32, :checksum_crc32c, :checksum_sha1, :checksum_sha256) SENSITIVE = [] include Aws::Structure end # The configuration information for the bucket. # # @!attribute [rw] location_constraint # Specifies the Region where the bucket will be created. You might # choose a Region to optimize latency, minimize costs, or address # regulatory requirements. For example, if you reside in Europe, you # will probably find it advantageous to create buckets in the Europe # (Ireland) Region. For more information, see [Accessing a bucket][1] # in the *Amazon S3 User Guide*. # # If you don't specify a Region, the bucket is created in the US East # (N. Virginia) Region (us-east-1) by default. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro # @return [String] # # @!attribute [rw] location # Specifies the location where the bucket will be created. # # For directory buckets, the location type is Availability Zone. # # This functionality is only supported by directory buckets. # # # @return [Types::LocationInfo] # # @!attribute [rw] bucket # Specifies the information about the bucket that will be created. # # This functionality is only supported by directory buckets. # # # @return [Types::BucketInfo] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration AWS API Documentation # class CreateBucketConfiguration < Struct.new( :location_constraint, :location, :bucket) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] location # A forward slash followed by the name of the bucket. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput AWS API Documentation # class CreateBucketOutput < Struct.new( :location) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] acl # The canned ACL to apply to the bucket. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] bucket # The name of the bucket to create. 
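#
#     A minimal sketch of creating a bucket with the configuration type
#     documented above (bucket name and Region are hypothetical):
#
#         resp = client.create_bucket(
#           bucket: "amzn-s3-demo-bucket",
#           create_bucket_configuration: { location_constraint: "eu-west-1" }
#         )
#         resp.location # => "/amzn-s3-demo-bucket"
#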
# # **General purpose buckets** - For information about bucket naming # restrictions, see [Bucket naming rules][1] in the *Amazon S3 User # Guide*. # # Directory buckets - When you use this operation with a # directory bucket, you must use path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. Directory bucket # names must be unique in the chosen Availability Zone. Bucket names # must also follow the format ` bucket_base_name--az_id--x-s3` (for # example, ` DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information # about bucket naming restrictions, see [Directory bucket naming # rules][2] in the *Amazon S3 User Guide* # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # @return [String] # # @!attribute [rw] create_bucket_configuration # The configuration information for the bucket. # @return [Types::CreateBucketConfiguration] # # @!attribute [rw] grant_full_control # Allows grantee the read, write, read ACP, and write ACP permissions # on the bucket. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] grant_read # Allows grantee to list the objects in the bucket. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] grant_read_acp # Allows grantee to read the bucket ACL. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] grant_write # Allows grantee to create new objects in the bucket. # # For the bucket and object owners of existing objects, also allows # deletions and overwrites of those objects. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] grant_write_acp # Allows grantee to write the ACL for the applicable bucket. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] object_lock_enabled_for_bucket # Specifies whether you want S3 Object Lock to be enabled for the new # bucket. # # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @!attribute [rw] object_ownership # The container element for object ownership for a bucket's ownership # controls. # # `BucketOwnerPreferred` - Objects uploaded to the bucket change # ownership to the bucket owner if the objects are uploaded with the # `bucket-owner-full-control` canned ACL. # # `ObjectWriter` - The uploading account will own the object if the # object is uploaded with the `bucket-owner-full-control` canned ACL. # # `BucketOwnerEnforced` - Access control lists (ACLs) are disabled and # no longer affect permissions. The bucket owner automatically owns # and has full control over every object in the bucket. The bucket # only accepts PUT requests that don't specify an ACL or specify # bucket owner full control ACLs (such as the predefined # `bucket-owner-full-control` canned ACL or a custom ACL in XML format # that grants the same permissions). # # By default, `ObjectOwnership` is set to `BucketOwnerEnforced` and # ACLs are disabled. We recommend keeping ACLs disabled, except in # uncommon use cases where you must control access for each object # individually. 
For more information about S3 Object Ownership, see # [Controlling ownership of objects and disabling ACLs for your # bucket][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. Directory # buckets use the bucket owner enforced setting for S3 Object # Ownership. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest AWS API Documentation # class CreateBucketRequest < Struct.new( :acl, :bucket, :create_bucket_configuration, :grant_full_control, :grant_read, :grant_read_acp, :grant_write, :grant_write_acp, :object_lock_enabled_for_bucket, :object_ownership) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] abort_date # If the bucket has a lifecycle rule configured with an action to # abort incomplete multipart uploads and the prefix in the lifecycle # rule matches the object name in the request, the response includes # this header. The header indicates when the initiated multipart # upload becomes eligible for an abort operation. For more # information, see [ Aborting Incomplete Multipart Uploads Using a # Bucket Lifecycle Configuration][1] in the *Amazon S3 User Guide*. # # The response also includes the `x-amz-abort-rule-id` header that # provides the ID of the lifecycle configuration rule that defines the # abort action. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config # @return [Time] # # @!attribute [rw] abort_rule_id # This header is returned along with the `x-amz-abort-date` header. It # identifies the applicable lifecycle configuration rule that defines # the action to abort incomplete multipart uploads. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] bucket # The name of the bucket to which the multipart upload was initiated. # Does not return the access point ARN or access point alias if used. # # Access points are not supported by directory buckets. # # # @return [String] # # @!attribute [rw] key # Object key for which the multipart upload was initiated. # @return [String] # # @!attribute [rw] upload_id # ID for the initiated multipart upload. # @return [String] # # @!attribute [rw] server_side_encryption # The server-side encryption algorithm used when you store this object # in Amazon S3 (for example, `AES256`, `aws:kms`). # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # @return [String] # # @!attribute [rw] sse_customer_algorithm # If server-side encryption with a customer-provided encryption key # was requested, the response will include this header to confirm the # encryption algorithm that's used. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] sse_customer_key_md5 # If server-side encryption with a customer-provided encryption key # was requested, the response will include this header to provide the # round-trip message integrity verification of the customer-provided # encryption key. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] ssekms_key_id # If present, indicates the ID of the Key Management Service (KMS) # symmetric encryption customer managed key that was used for the # object. 
# # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] ssekms_encryption_context # If present, indicates the Amazon Web Services KMS Encryption Context # to use for object encryption. The value of this header is a # base64-encoded UTF-8 string holding JSON with the encryption context # key-value pairs. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] bucket_key_enabled # Indicates whether the multipart upload uses an S3 Bucket Key for # server-side encryption with Key Management Service (KMS) keys # (SSE-KMS). # # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] checksum_algorithm # The algorithm that was used to create a checksum of the object. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput AWS API Documentation # class CreateMultipartUploadOutput < Struct.new( :abort_date, :abort_rule_id, :bucket, :key, :upload_id, :server_side_encryption, :sse_customer_algorithm, :sse_customer_key_md5, :ssekms_key_id, :ssekms_encryption_context, :bucket_key_enabled, :request_charged, :checksum_algorithm) SENSITIVE = [:ssekms_key_id, :ssekms_encryption_context] include Aws::Structure end # @!attribute [rw] acl # The canned ACL to apply to the object. Amazon S3 supports a set of # predefined ACLs, known as *canned ACLs*. Each canned ACL has a # predefined set of grantees and permissions. For more information, # see [Canned ACL][1] in the *Amazon S3 User Guide*. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can grant access permissions # to individual Amazon Web Services accounts or to predefined groups # defined by Amazon S3. These permissions are then added to the access # control list (ACL) on the new object. For more information, see # [Using ACLs][2]. One way to grant the permissions using the request # headers is to specify a canned ACL with the `x-amz-acl` request # header. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html # @return [String] # # @!attribute [rw] bucket # The name of the bucket where the multipart upload is initiated and # where the object is uploaded. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. 
When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] cache_control # Specifies caching behavior along the request/reply chain. # @return [String] # # @!attribute [rw] content_disposition # Specifies presentational information for the object. # @return [String] # # @!attribute [rw] content_encoding # Specifies what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the # media-type referenced by the Content-Type header field. # # For directory buckets, only the `aws-chunked` value is supported in # this header field. # # # @return [String] # # @!attribute [rw] content_language # The language that the content is in. # @return [String] # # @!attribute [rw] content_type # A standard MIME type describing the format of the object data. # @return [String] # # @!attribute [rw] expires # The date and time at which the object is no longer cacheable. # @return [Time] # # @!attribute [rw] grant_full_control # Specify access permissions explicitly to give the grantee READ, # READ\_ACP, and WRITE\_ACP permissions on the object. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*. # # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an # Amazon Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in # the following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. 
California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, # see [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the # Amazon Web Services accounts identified by account IDs permissions # to read object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666" ` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # @return [String] # # @!attribute [rw] grant_read # Specify access permissions explicitly to allow the grantee to read # the object data and its metadata. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*. # # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an # Amazon Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in # the following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, # see [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the # Amazon Web Services accounts identified by account IDs permissions # to read object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666" ` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # @return [String] # # @!attribute [rw] grant_read_acp # Specify access permissions explicitly to allow the grantee to read # the object ACL. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*.
# # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an # Amazon Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in # the following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, # see [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the # Amazon Web Services accounts identified by account IDs permissions # to read object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666" ` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # @return [String] # # @!attribute [rw] grant_write_acp # Specify access permissions explicitly to allow the grantee to write # the ACL for the applicable object. # # By default, all objects are private. Only the owner has full access # control. When uploading an object, you can use this header to # explicitly grant access permissions to specific Amazon Web Services # accounts or groups. This header maps to specific permissions that # Amazon S3 supports in an ACL. For more information, see [Access # Control List (ACL) Overview][1] in the *Amazon S3 User Guide*. # # You specify each grantee as a type=value pair, where the type is one # of the following: # # * `id` – if the value specified is the canonical user ID of an # Amazon Web Services account # # * `uri` – if you are granting permissions to a predefined group # # * `emailAddress` – if the value specified is the email address of an # Amazon Web Services account # # Using email addresses to specify a grantee is only supported in # the following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, # see [Regions and Endpoints][2] in the Amazon Web Services General # Reference. # # # # For example, the following `x-amz-grant-read` header grants the # Amazon Web Services accounts identified by account IDs permissions # to read object data and its metadata: # # `x-amz-grant-read: id="11112222333", id="444455556666" ` # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [2]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # @return [String] # # @!attribute [rw] key # Object key for which the multipart upload is to be initiated. # @return [String] # # @!attribute [rw] metadata # A map of metadata to store with the object in S3.
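#   A minimal sketch (keys and values are illustrative): metadata is
#   passed as a plain string-to-string hash and stored with the object:
#
#       s3 = Aws::S3::Client.new
#       s3.create_multipart_upload(
#         bucket: 'doc-example-bucket',
#         key: 'file.bin',
#         metadata: { 'project' => 'alpha', 'owner' => 'data-team' }
#       )
#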
# @return [Hash] # # @!attribute [rw] server_side_encryption # The server-side encryption algorithm used when you store this object # in Amazon S3 (for example, `AES256`, `aws:kms`). # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # @return [String] # # @!attribute [rw] storage_class # By default, Amazon S3 uses the STANDARD Storage Class to store newly # created objects. The STANDARD storage class provides high durability # and high availability. Depending on performance needs, you can # specify a different Storage Class. For more information, see # [Storage Classes][1] in the *Amazon S3 User Guide*. # # * For directory buckets, only the S3 Express One Zone storage class # is supported to store newly created objects. # # * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # @return [String] # # @!attribute [rw] website_redirect_location # If the bucket is configured as a website, redirects requests for # this object to another object in the same bucket or to an external # URL. Amazon S3 stores the value of this header in the object # metadata. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, AES256). # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use # in encrypting data. This value is used to store the object and then # it is discarded; Amazon S3 does not store the encryption key. The # key must be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the customer-provided encryption # key according to RFC 1321. Amazon S3 uses this header for a message # integrity check to ensure that the encryption key was transmitted # without error. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] ssekms_key_id # Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric # encryption customer managed key to use for object encryption. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] ssekms_encryption_context # Specifies the Amazon Web Services KMS Encryption Context to use for # object encryption. The value of this header is a base64-encoded # UTF-8 string holding JSON with the encryption context key-value # pairs. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] bucket_key_enabled # Specifies whether Amazon S3 should use an S3 Bucket Key for object # encryption with server-side encryption using Key Management Service # (KMS) keys (SSE-KMS). Setting this header to `true` causes Amazon S3 # to use an S3 Bucket Key for object encryption with SSE-KMS. # # Specifying this header with an object action doesn’t affect # bucket-level settings for S3 Bucket Key. # # This functionality is not supported for directory buckets. 
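#   A hedged sketch of requesting SSE-KMS with an S3 Bucket Key (the
#   KMS key ARN below is a placeholder, not a real key):
#
#       s3 = Aws::S3::Client.new
#       s3.create_multipart_upload(
#         bucket: 'doc-example-bucket',
#         key: 'file.bin',
#         server_side_encryption: 'aws:kms',
#         ssekms_key_id: 'arn:aws:kms:us-east-1:111122223333:key/key-id',
#         bucket_key_enabled: true
#       )
#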
# # # @return [Boolean] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] tagging # The tag-set for the object. The tag-set must be encoded as URL Query # parameters. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] object_lock_mode # Specifies the Object Lock mode that you want to apply to the # uploaded object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] object_lock_retain_until_date # Specifies the date and time when you want the Object Lock to expire. # # This functionality is not supported for directory buckets. # # # @return [Time] # # @!attribute [rw] object_lock_legal_hold_status # Specifies whether you want to apply a legal hold to the uploaded # object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm that you want Amazon S3 to use to create the # checksum for the object. For more information, see [Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest AWS API Documentation # class CreateMultipartUploadRequest < Struct.new( :acl, :bucket, :cache_control, :content_disposition, :content_encoding, :content_language, :content_type, :expires, :grant_full_control, :grant_read, :grant_read_acp, :grant_write_acp, :key, :metadata, :server_side_encryption, :storage_class, :website_redirect_location, :sse_customer_algorithm, :sse_customer_key, :sse_customer_key_md5, :ssekms_key_id, :ssekms_encryption_context, :bucket_key_enabled, :request_payer, :tagging, :object_lock_mode, :object_lock_retain_until_date, :object_lock_legal_hold_status, :expected_bucket_owner, :checksum_algorithm) SENSITIVE = [:sse_customer_key, :ssekms_key_id, :ssekms_encryption_context] include Aws::Structure end # @!attribute [rw] credentials # The established temporary security credentials for the created # session. # @return [Types::SessionCredentials] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateSessionOutput AWS API Documentation # class CreateSessionOutput < Struct.new( :credentials) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] session_mode # Specifies the mode of the session that will be created, either # `ReadWrite` or `ReadOnly`. By default, a `ReadWrite` session is # created.
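#   An illustrative sketch (the directory-bucket name is a placeholder
#   following the required naming format):
#
#       s3 = Aws::S3::Client.new
#       resp = s3.create_session(
#         bucket: 'doc-example-bucket--usw2-az2--x-s3',
#         session_mode: 'ReadOnly'
#       )
#       resp.credentials.expiration #=> time the temporary credentials expire
#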
A `ReadWrite` session is capable of executing all the Zonal # endpoint APIs on a directory bucket. A `ReadOnly` session is # constrained to execute the following Zonal endpoint APIs: # `GetObject`, `HeadObject`, `ListObjectsV2`, `GetObjectAttributes`, # `ListParts`, and `ListMultipartUploads`. # @return [String] # # @!attribute [rw] bucket # The name of the bucket that you create a session for. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateSessionRequest AWS API Documentation # class CreateSessionRequest < Struct.new( :session_mode, :bucket) SENSITIVE = [] include Aws::Structure end # The container element for specifying the default Object Lock retention # settings for new objects placed in the specified bucket. # # * The `DefaultRetention` settings require both a mode and a period. # # * The `DefaultRetention` period can be either `Days` or `Years` but # you must select one. You cannot specify `Days` and `Years` at the # same time. # # # # @!attribute [rw] mode # The default Object Lock retention mode you want to apply to new # objects placed in the specified bucket. Must be used with either # `Days` or `Years`. # @return [String] # # @!attribute [rw] days # The number of days that you want to specify for the default # retention period. Must be used with `Mode`. # @return [Integer] # # @!attribute [rw] years # The number of years that you want to specify for the default # retention period. Must be used with `Mode`. # @return [Integer] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DefaultRetention AWS API Documentation # class DefaultRetention < Struct.new( :mode, :days, :years) SENSITIVE = [] include Aws::Structure end # Container for the objects to delete. # # @!attribute [rw] objects # The object to delete. # # **Directory buckets** - For directory buckets, an object that's # composed entirely of whitespace characters is not supported by the # `DeleteObjects` API operation. The request will receive a `400 Bad # Request` error and none of the objects in the request will be # deleted. # # # @return [Array] # # @!attribute [rw] quiet # Element to enable quiet mode for the request. When you add this # element, you must set its value to `true`. # @return [Boolean] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete AWS API Documentation # class Delete < Struct.new( :objects, :quiet) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket from which an analytics configuration is # deleted. # @return [String] # # @!attribute [rw] id # The ID that identifies the analytics configuration. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest AWS API Documentation # class DeleteBucketAnalyticsConfigurationRequest < Struct.new( :bucket, :id, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # Specifies the bucket whose `cors` configuration is being deleted. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. 
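#   For example (the bucket name and account ID are illustrative):
#
#       s3 = Aws::S3::Client.new
#       s3.delete_bucket_cors(
#         bucket: 'doc-example-bucket',
#         expected_bucket_owner: '111122223333'
#       )
#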
If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest AWS API Documentation # class DeleteBucketCorsRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket containing the server-side encryption # configuration to delete. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryptionRequest AWS API Documentation # class DeleteBucketEncryptionRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the Amazon S3 bucket whose S3 Intelligent-Tiering # configuration you want to delete. # @return [String] # # @!attribute [rw] id # The ID used to identify the S3 Intelligent-Tiering configuration. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfigurationRequest AWS API Documentation # class DeleteBucketIntelligentTieringConfigurationRequest < Struct.new( :bucket, :id) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket containing the inventory configuration to # delete. # @return [String] # # @!attribute [rw] id # The ID used to identify the inventory configuration. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest AWS API Documentation # class DeleteBucketInventoryConfigurationRequest < Struct.new( :bucket, :id, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket containing the lifecycle configuration to # delete. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest AWS API Documentation # class DeleteBucketLifecycleRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket containing the metrics configuration to # delete. # @return [String] # # @!attribute [rw] id # The ID used to identify the metrics configuration. The ID has a # 64-character limit and can only contain letters, numbers, periods, # dashes, and underscores. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied).
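#   A minimal sketch (the configuration ID is illustrative):
#
#       s3 = Aws::S3::Client.new
#       s3.delete_bucket_metrics_configuration(
#         bucket: 'doc-example-bucket',
#         id: 'EntireBucketMetrics'
#       )
#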
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest AWS API Documentation # class DeleteBucketMetricsConfigurationRequest < Struct.new( :bucket, :id, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The Amazon S3 bucket whose `OwnershipControls` you want to delete. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControlsRequest AWS API Documentation # class DeleteBucketOwnershipControlsRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name. # # Directory buckets - When you use this operation with a # directory bucket, you must use path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. Directory bucket # names must be unique in the chosen Availability Zone. Bucket names # must also follow the format ` bucket_base_name--az_id--x-s3` (for # example, ` DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information # about bucket naming restrictions, see [Directory bucket naming # rules][1] in the *Amazon S3 User Guide* # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # # For directory buckets, this header is not supported in this API # operation. If you specify this header, the request fails with the # HTTP status code `501 Not Implemented`. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest AWS API Documentation # class DeleteBucketPolicyRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest AWS API Documentation # class DeleteBucketReplicationRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # Specifies the bucket being deleted. # # Directory buckets - When you use this operation with a # directory bucket, you must use path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. Directory bucket # names must be unique in the chosen Availability Zone. Bucket names # must also follow the format ` bucket_base_name--az_id--x-s3` (for # example, ` DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). 
For information # about bucket naming restrictions, see [Directory bucket naming # rules][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # # For directory buckets, this header is not supported in this API # operation. If you specify this header, the request fails with the # HTTP status code `501 Not Implemented`. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest AWS API Documentation # class DeleteBucketRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket that has the tag set to be removed. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest AWS API Documentation # class DeleteBucketTaggingRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name for which you want to remove the website # configuration. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest AWS API Documentation # class DeleteBucketWebsiteRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # Information about the delete marker. # # @!attribute [rw] owner # The account that created the delete marker. # @return [Types::Owner] # # @!attribute [rw] key # The object key. # @return [String] # # @!attribute [rw] version_id # Version ID of an object. # @return [String] # # @!attribute [rw] is_latest # Specifies whether the object is (true) or is not (false) the latest # version of an object. # @return [Boolean] # # @!attribute [rw] last_modified # Date and time when the object was last modified. # @return [Time] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry AWS API Documentation # class DeleteMarkerEntry < Struct.new( :owner, :key, :version_id, :is_latest, :last_modified) SENSITIVE = [] include Aws::Structure end # Specifies whether Amazon S3 replicates delete markers. If you specify # a `Filter` in your replication configuration, you must also include a # `DeleteMarkerReplication` element. If your `Filter` includes a `Tag` # element, the `DeleteMarkerReplication` `Status` must be set to # Disabled, because Amazon S3 does not support replicating delete # markers for tag-based rules. For an example configuration, see [Basic # Rule Configuration][1]. # # For more information about delete marker replication, see [Basic Rule # Configuration][2].
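#   As a hedged sketch (the role ARN and bucket names are placeholders),
#   the element is supplied inside a replication rule; with a `Tag`
#   filter, `Status` must be `Disabled` as noted above:
#
#       s3 = Aws::S3::Client.new
#       s3.put_bucket_replication(
#         bucket: 'doc-example-source',
#         replication_configuration: {
#           role: 'arn:aws:iam::111122223333:role/replication-role',
#           rules: [{
#             priority: 1,
#             status: 'Enabled',
#             filter: { tag: { key: 'stage', value: 'logs' } },
#             delete_marker_replication: { status: 'Disabled' },
#             destination: { bucket: 'arn:aws:s3:::doc-example-destination' }
#           }]
#         }
#       )
#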
# # If you are using an earlier version of the replication configuration, # Amazon S3 handles replication of delete markers differently. For more # information, see [Backward Compatibility][3]. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations # # @!attribute [rw] status # Indicates whether to replicate delete markers. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerReplication AWS API Documentation # class DeleteMarkerReplication < Struct.new( :status) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] delete_marker # Indicates whether the specified object version that was permanently # deleted was (true) or was not (false) a delete marker before # deletion. In a simple DELETE, this header indicates whether (true) # or not (false) the current version of the object is a delete marker. # # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @!attribute [rw] version_id # Returns the version ID of the delete marker created as a result of # the DELETE operation. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput AWS API Documentation # class DeleteObjectOutput < Struct.new( :delete_marker, :version_id, :request_charged) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket containing the object. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname.
# The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] key # Key name of the object to delete. # @return [String] # # @!attribute [rw] mfa # The concatenation of the authentication device's serial number, a # space, and the value that is displayed on your authentication # device. Required to permanently delete a versioned object if # versioning is configured with MFA delete enabled. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] version_id # Version ID used to reference a specific version of the object. # # For directory buckets in this API operation, only the `null` value # of the version ID is supported. # # # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] bypass_governance_retention # Indicates whether S3 Object Lock should bypass Governance-mode # restrictions to process this operation. To use this header, you must # have the `s3:BypassGovernanceRetention` permission. # # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest AWS API Documentation # class DeleteObjectRequest < Struct.new( :bucket, :key, :mfa, :version_id, :request_payer, :bypass_governance_retention, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] version_id # The versionId of the object the tag-set was removed from. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput AWS API Documentation # class DeleteObjectTaggingOutput < Struct.new( :version_id) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name containing the objects from which to remove the # tags. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. 
When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][1] in the *Amazon S3 User Guide*. # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][2] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] key # The key that identifies the object in the bucket from which to # remove all tags. # @return [String] # # @!attribute [rw] version_id # The versionId of the object that the tag-set will be removed from. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest AWS API Documentation # class DeleteObjectTaggingRequest < Struct.new( :bucket, :key, :version_id, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] deleted # Container element for a successful delete. It identifies the object # that was successfully deleted. # @return [Array] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] errors # Container for a failed delete action that describes the object that # Amazon S3 attempted to delete and the error it encountered. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput AWS API Documentation # class DeleteObjectsOutput < Struct.new( :deleted, :request_charged, :errors) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name containing the objects to delete. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. 
When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] delete # Container for the request. # @return [Types::Delete] # # @!attribute [rw] mfa # The concatenation of the authentication device's serial number, a # space, and the value that is displayed on your authentication # device. Required to permanently delete a versioned object if # versioning is configured with MFA delete enabled. # # When performing the `DeleteObjects` operation on an MFA delete # enabled bucket, which attempts to delete the specified versioned # objects, you must include an MFA token. If you don't provide an MFA # token, the entire request will fail, even if there are non-versioned # objects that you are trying to delete. If you provide an invalid # token, whether there are versioned object keys in the request or # not, the entire Multi-Object Delete request will fail. For # information about MFA Delete, see [ MFA Delete][1] in the *Amazon S3 # User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] bypass_governance_retention # Specifies whether you want to delete this object even if it has a # Governance-type Object Lock in place. To use this header, you must # have the `s3:BypassGovernanceRetention` permission. # # This functionality is not supported for directory buckets. 
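#   A hedged sketch of a multi-object delete (the keys are illustrative);
#   `bypass_governance_retention` requires the permission noted above:
#
#       s3 = Aws::S3::Client.new
#       s3.delete_objects(
#         bucket: 'doc-example-bucket',
#         delete: {
#           objects: [{ key: 'a.txt' }, { key: 'b.txt' }],
#           quiet: true
#         },
#         bypass_governance_retention: true
#       )
#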
# # # @return [Boolean] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum-algorithm ` or # `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request # with the HTTP status code `400 Bad Request`. # # For the `x-amz-checksum-algorithm ` header, replace ` algorithm ` # with the supported algorithm from the following list: # # * CRC32 # # * CRC32C # # * SHA1 # # * SHA256 # # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # If the individual checksum value you provide through # `x-amz-checksum-algorithm ` doesn't match the checksum algorithm # you set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores # any provided `ChecksumAlgorithm` parameter and uses the checksum # algorithm that matches the provided value in # `x-amz-checksum-algorithm `. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest AWS API Documentation # class DeleteObjectsRequest < Struct.new( :bucket, :delete, :mfa, :request_payer, :bypass_governance_retention, :expected_bucket_owner, :checksum_algorithm) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The Amazon S3 bucket whose `PublicAccessBlock` configuration you # want to delete. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlockRequest AWS API Documentation # class DeletePublicAccessBlockRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # Information about the deleted object. # # @!attribute [rw] key # The name of the deleted object. # @return [String] # # @!attribute [rw] version_id # The version ID of the deleted object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] delete_marker # Indicates whether the specified object version that was permanently # deleted was (true) or was not (false) a delete marker before # deletion. In a simple DELETE, this header indicates whether (true) # or not (false) the current version of the object is a delete marker. # # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @!attribute [rw] delete_marker_version_id # The version ID of the delete marker created as a result of the # DELETE operation. If you delete a specific object version, the value # returned by this header is the version ID of the object version # deleted. # # This functionality is not supported for directory buckets. 
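#   Illustrative sketch of reading these fields off a `DeleteObjectsOutput`
#   (the `resp` variable is an assumption):
#
#       resp.deleted.each { |d| puts "deleted #{d.key} #{d.delete_marker_version_id}" }
#       resp.errors.each  { |e| warn "#{e.key}: #{e.code} - #{e.message}" }
#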
# # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject AWS API Documentation # class DeletedObject < Struct.new( :key, :version_id, :delete_marker, :delete_marker_version_id) SENSITIVE = [] include Aws::Structure end # Specifies information about where to publish analysis or configuration # results for an Amazon S3 bucket and S3 Replication Time Control (S3 # RTC). # # @!attribute [rw] bucket # The Amazon Resource Name (ARN) of the bucket where you want Amazon # S3 to store the results. # @return [String] # # @!attribute [rw] account # Destination bucket owner account ID. In a cross-account scenario, if # you direct Amazon S3 to change replica ownership to the Amazon Web # Services account that owns the destination bucket by specifying the # `AccessControlTranslation` property, this is the account ID of the # destination bucket owner. For more information, see [Replication # Additional Configuration: Changing the Replica Owner][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html # @return [String] # # @!attribute [rw] storage_class # The storage class to use when replicating objects, such as S3 # Standard or reduced redundancy. By default, Amazon S3 uses the # storage class of the source object to create the object replica. # # For valid values, see the `StorageClass` element of the [PUT Bucket # replication][1] action in the *Amazon S3 API Reference*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html # @return [String] # # @!attribute [rw] access_control_translation # Specify this only in a cross-account scenario (where source and # destination bucket owners are not the same) when you want to change # replica ownership to the Amazon Web Services account that owns the # destination bucket. If this is not specified in the replication # configuration, the replicas are owned by the same Amazon Web # Services account that owns the source object. # @return [Types::AccessControlTranslation] # # @!attribute [rw] encryption_configuration # A container that provides information about encryption. If # `SourceSelectionCriteria` is specified, you must specify this # element. # @return [Types::EncryptionConfiguration] # # @!attribute [rw] replication_time # A container specifying S3 Replication Time Control (S3 RTC), # including whether S3 RTC is enabled and the time when all objects # and operations on objects must be replicated. Must be specified # together with a `Metrics` block. # @return [Types::ReplicationTime] # # @!attribute [rw] metrics # A container specifying replication metrics-related settings enabling # replication metrics and events. # @return [Types::Metrics] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination AWS API Documentation # class Destination < Struct.new( :bucket, :account, :storage_class, :access_control_translation, :encryption_configuration, :replication_time, :metrics) SENSITIVE = [] include Aws::Structure end # Contains the type of server-side encryption used. # # @!attribute [rw] encryption_type # The server-side encryption algorithm used when storing job results # in Amazon S3 (for example, AES256, `aws:kms`). # @return [String] # # @!attribute [rw] kms_key_id # If the encryption type is `aws:kms`, this optional value specifies # the ID of the symmetric encryption customer managed key to use for # encryption of job results. Amazon S3 only supports symmetric # encryption KMS keys.
For more information, see [Asymmetric keys in # KMS][1] in the *Amazon Web Services Key Management Service Developer # Guide*. # # # # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html # @return [String] # # @!attribute [rw] kms_context # If the encryption type is `aws:kms`, this optional value can be used # to specify the encryption context for the restore results. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Encryption AWS API Documentation # class Encryption < Struct.new( :encryption_type, :kms_key_id, :kms_context) SENSITIVE = [:kms_key_id] include Aws::Structure end # Specifies encryption-related information for an Amazon S3 bucket that # is a destination for replicated objects. # # @!attribute [rw] replica_kms_key_id # Specifies the ID (Key ARN or Alias ARN) of the customer managed # Amazon Web Services KMS key stored in Amazon Web Services Key # Management Service (KMS) for the destination bucket. Amazon S3 uses # this key to encrypt replica objects. Amazon S3 only supports # symmetric encryption KMS keys. For more information, see [Asymmetric # keys in Amazon Web Services KMS][1] in the *Amazon Web Services Key # Management Service Developer Guide*. # # # # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/EncryptionConfiguration AWS API Documentation # class EncryptionConfiguration < Struct.new( :replica_kms_key_id) SENSITIVE = [] include Aws::Structure end # A message that indicates the request is complete and no more messages # will be sent. You should not assume that the request is complete until # the client receives an `EndEvent`. # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/EndEvent AWS API Documentation # class EndEvent < Struct.new( :event_type) SENSITIVE = [] include Aws::Structure end # Container for all error elements. # # @!attribute [rw] key # The error key. # @return [String] # # @!attribute [rw] version_id # The version ID of the error. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] code # The error code is a string that uniquely identifies an error # condition. It is meant to be read and understood by programs that # detect and handle errors by type. The following is a list of Amazon # S3 error codes. For more information, see [Error responses][1]. # # * * *Code:* AccessDenied # # * *Description:* Access Denied # # * *HTTP Status Code:* 403 Forbidden # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* AccountProblem # # * *Description:* There is a problem with your Amazon Web Services # account that prevents the action from completing successfully. # Contact Amazon Web Services Support for further assistance. # # * *HTTP Status Code:* 403 Forbidden # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* AllAccessDisabled # # * *Description:* All access to this Amazon S3 resource has been # disabled. Contact Amazon Web Services Support for further # assistance. # # * *HTTP Status Code:* 403 Forbidden # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* AmbiguousGrantByEmailAddress # # * *Description:* The email address you provided is associated with # more than one account. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* AuthorizationHeaderMalformed # # * *Description:* The authorization header you provided is invalid. 
# # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* N/A # # * * *Code:* BadDigest # # * *Description:* The Content-MD5 you specified did not match what # we received. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* BucketAlreadyExists # # * *Description:* The requested bucket name is not available. The # bucket namespace is shared by all users of the system. Please # select a different name and try again. # # * *HTTP Status Code:* 409 Conflict # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* BucketAlreadyOwnedByYou # # * *Description:* The bucket you tried to create already exists, # and you own it. Amazon S3 returns this error in all Amazon Web # Services Regions except in the North Virginia Region. For legacy # compatibility, if you re-create an existing bucket that you # already own in the North Virginia Region, Amazon S3 returns 200 # OK and resets the bucket access control lists (ACLs). # # * *HTTP Status Code:* 409 Conflict (in all Regions except the North # Virginia Region) # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* BucketNotEmpty # # * *Description:* The bucket you tried to delete is not empty. # # * *HTTP Status Code:* 409 Conflict # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* CredentialsNotSupported # # * *Description:* This request does not support credentials. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* CrossLocationLoggingProhibited # # * *Description:* Cross-location logging not allowed. Buckets in # one geographic location cannot log information to a bucket in # another location. # # * *HTTP Status Code:* 403 Forbidden # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* EntityTooSmall # # * *Description:* Your proposed upload is smaller than the minimum # allowed object size. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* EntityTooLarge # # * *Description:* Your proposed upload exceeds the maximum allowed # object size. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* ExpiredToken # # * *Description:* The provided token has expired. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* IllegalVersioningConfigurationException # # * *Description:* Indicates that the versioning configuration # specified in the request is invalid. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* IncompleteBody # # * *Description:* You did not provide the number of bytes specified # by the Content-Length HTTP header. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* IncorrectNumberOfFilesInPostRequest # # * *Description:* POST requires exactly one file upload per # request. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InlineDataTooLarge # # * *Description:* Inline data exceeds the maximum allowed size. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InternalError # # * *Description:* We encountered an internal error. Please try # again. # # * *HTTP Status Code:* 500 Internal Server Error # # * *SOAP Fault Code Prefix:* Server # # * * *Code:* InvalidAccessKeyId # # * *Description:* The Amazon Web Services access key ID you # provided does not exist in our records.
# # * *HTTP Status Code:* 403 Forbidden # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidAddressingHeader # # * *Description:* You must specify the Anonymous role. # # * *HTTP Status Code:* N/A # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidArgument # # * *Description:* Invalid Argument # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidBucketName # # * *Description:* The specified bucket is not valid. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidBucketState # # * *Description:* The request is not valid with the current state # of the bucket. # # * *HTTP Status Code:* 409 Conflict # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidDigest # # * *Description:* The Content-MD5 you specified is not valid. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidEncryptionAlgorithmError # # * *Description:* The encryption request you specified is not # valid. The valid value is AES256. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidLocationConstraint # # * *Description:* The specified location constraint is not valid. # For more information about Regions, see [How to Select a Region # for Your Buckets][2]. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidObjectState # # * *Description:* The action is not valid for the current state of # the object. # # * *HTTP Status Code:* 403 Forbidden # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidPart # # * *Description:* One or more of the specified parts could not be # found. The part might not have been uploaded, or the specified # entity tag might not have matched the part's entity tag. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidPartOrder # # * *Description:* The list of parts was not in ascending order. # The parts list must be specified in order by part number. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidPayer # # * *Description:* All access to this object has been disabled. # Please contact Amazon Web Services Support for further # assistance. # # * *HTTP Status Code:* 403 Forbidden # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidPolicyDocument # # * *Description:* The content of the form does not meet the # conditions specified in the policy document. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidRange # # * *Description:* The requested range cannot be satisfied. # # * *HTTP Status Code:* 416 Requested Range Not Satisfiable # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidRequest # # * *Description:* Please use `AWS4-HMAC-SHA256`. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* N/A # # * * *Code:* InvalidRequest # # * *Description:* SOAP requests must be made over an HTTPS # connection. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidRequest # # * *Description:* Amazon S3 Transfer Acceleration is not supported # for buckets with non-DNS compliant names. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* N/A # # * * *Code:* InvalidRequest # # * *Description:* Amazon S3 Transfer Acceleration is not supported # for buckets with periods (.) in their names.
# # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* N/A # # * * *Code:* InvalidRequest # # * *Description:* Amazon S3 Transfer Accelerate endpoint only # supports virtual style requests. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* N/A # # * * *Code:* InvalidRequest # # * *Description:* Amazon S3 Transfer Accelerate is not configured # on this bucket. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* N/A # # * * *Code:* InvalidRequest # # * *Description:* Amazon S3 Transfer Accelerate is disabled on this # bucket. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* N/A # # * * *Code:* InvalidRequest # # * *Description:* Amazon S3 Transfer Acceleration is not supported # on this bucket. Contact Amazon Web Services Support for more # information. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* N/A # # * * *Code:* InvalidRequest # # * *Description:* Amazon S3 Transfer Acceleration cannot be enabled # on this bucket. Contact Amazon Web Services Support for more # information. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* N/A # # * * *Code:* InvalidSecurity # # * *Description:* The provided security credentials are not valid. # # * *HTTP Status Code:* 403 Forbidden # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidSOAPRequest # # * *Description:* The SOAP request body is invalid. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidStorageClass # # * *Description:* The storage class you specified is not valid. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidTargetBucketForLogging # # * *Description:* The target bucket for logging does not exist, is # not owned by you, or does not have the appropriate grants for # the log-delivery group. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidToken # # * *Description:* The provided token is malformed or otherwise # invalid. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* InvalidURI # # * *Description:* Couldn't parse the specified URI. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* KeyTooLongError # # * *Description:* Your key is too long. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* MalformedACLError # # * *Description:* The XML you provided was not well-formed or did # not validate against our published schema. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* MalformedPOSTRequest # # * *Description:* The body of your POST request is not well-formed # multipart/form-data. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* MalformedXML # # * *Description:* This happens when the user sends malformed XML # (XML that doesn't conform to the published XSD) for the # configuration. The error message is, "The XML you provided was # not well-formed or did not validate against our published # schema." # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* MaxMessageLengthExceeded # # * *Description:* Your request was too big. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* MaxPostPreDataLengthExceededError # # * *Description:* Your POST request fields preceding the upload # file were too large.
# # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* MetadataTooLarge # # * *Description:* Your metadata headers exceed the maximum allowed # metadata size. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* MethodNotAllowed # # * *Description:* The specified method is not allowed against this # resource. # # * *HTTP Status Code:* 405 Method Not Allowed # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* MissingAttachment # # * *Description:* A SOAP attachment was expected, but none were # found. # # * *HTTP Status Code:* N/A # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* MissingContentLength # # * *Description:* You must provide the Content-Length HTTP header. # # * *HTTP Status Code:* 411 Length Required # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* MissingRequestBodyError # # * *Description:* This happens when the user sends an empty XML # document as a request. The error message is, "Request body is # empty." # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* MissingSecurityElement # # * *Description:* The SOAP 1.1 request is missing a security # element. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* MissingSecurityHeader # # * *Description:* Your request is missing a required header. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* NoLoggingStatusForKey # # * *Description:* There is no such thing as a logging status # subresource for a key. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* NoSuchBucket # # * *Description:* The specified bucket does not exist. # # * *HTTP Status Code:* 404 Not Found # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* NoSuchBucketPolicy # # * *Description:* The specified bucket does not have a bucket # policy. # # * *HTTP Status Code:* 404 Not Found # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* NoSuchKey # # * *Description:* The specified key does not exist. # # * *HTTP Status Code:* 404 Not Found # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* NoSuchLifecycleConfiguration # # * *Description:* The lifecycle configuration does not exist. # # * *HTTP Status Code:* 404 Not Found # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* NoSuchUpload # # * *Description:* The specified multipart upload does not exist. # The upload ID might be invalid, or the multipart upload might # have been aborted or completed. # # * *HTTP Status Code:* 404 Not Found # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* NoSuchVersion # # * *Description:* Indicates that the version ID specified in the # request does not match an existing version. # # * *HTTP Status Code:* 404 Not Found # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* NotImplemented # # * *Description:* A header you provided implies functionality that # is not implemented. # # * *HTTP Status Code:* 501 Not Implemented # # * *SOAP Fault Code Prefix:* Server # # * * *Code:* NotSignedUp # # * *Description:* Your account is not signed up for the Amazon S3 # service. You must sign up before you can use Amazon S3. You can # sign up at the following URL: [Amazon S3][3] # # * *HTTP Status Code:* 403 Forbidden # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* OperationAborted # # * *Description:* A conflicting conditional action is currently in # progress against this resource. Try again. 
# # * *HTTP Status Code:* 409 Conflict # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* PermanentRedirect # # * *Description:* The bucket you are attempting to access must be # addressed using the specified endpoint. Send all future requests # to this endpoint. # # * *HTTP Status Code:* 301 Moved Permanently # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* PreconditionFailed # # * *Description:* At least one of the preconditions you specified # did not hold. # # * *HTTP Status Code:* 412 Precondition Failed # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* Redirect # # * *Description:* Temporary redirect. # # * *HTTP Status Code:* 307 Moved Temporarily # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* RestoreAlreadyInProgress # # * *Description:* Object restore is already in progress. # # * *HTTP Status Code:* 409 Conflict # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* RequestIsNotMultiPartContent # # * *Description:* Bucket POST must be of the enclosure-type # multipart/form-data. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* RequestTimeout # # * *Description:* Your socket connection to the server was not read # from or written to within the timeout period. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* RequestTimeTooSkewed # # * *Description:* The difference between the request time and the # server's time is too large. # # * *HTTP Status Code:* 403 Forbidden # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* RequestTorrentOfBucketError # # * *Description:* Requesting the torrent file of a bucket is not # permitted. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* SignatureDoesNotMatch # # * *Description:* The request signature we calculated does not # match the signature you provided. Check your Amazon Web Services # secret access key and signing method. For more information, see # [REST Authentication][4] and [SOAP Authentication][5] for # details. # # * *HTTP Status Code:* 403 Forbidden # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* ServiceUnavailable # # * *Description:* Service is unable to handle request. # # * *HTTP Status Code:* 503 Service Unavailable # # * *SOAP Fault Code Prefix:* Server # # * * *Code:* SlowDown # # * *Description:* Reduce your request rate. # # * *HTTP Status Code:* 503 Slow Down # # * *SOAP Fault Code Prefix:* Server # # * * *Code:* TemporaryRedirect # # * *Description:* You are being redirected to the bucket while DNS # updates. # # * *HTTP Status Code:* 307 Moved Temporarily # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* TokenRefreshRequired # # * *Description:* The provided token must be refreshed. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* TooManyBuckets # # * *Description:* You have attempted to create more buckets than # allowed. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* UnexpectedContent # # * *Description:* This request does not support content. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* UnresolvableGrantByEmailAddress # # * *Description:* The email address you provided does not match any # account on record. 
# # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # * * *Code:* UserKeyMustBeSpecified # # * *Description:* The bucket POST must contain the specified field # name. If it is specified, check the order of the fields. # # * *HTTP Status Code:* 400 Bad Request # # * *SOAP Fault Code Prefix:* Client # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro # [3]: http://aws.amazon.com/s3 # [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html # [5]: https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html # @return [String] # # @!attribute [rw] message # The error message contains a generic description of the error # condition in English. It is intended for a human audience. Simple # programs display the message directly to the end user if they # encounter an error condition they don't know how or don't care to # handle. Sophisticated programs with more exhaustive error handling # and proper internationalization are more likely to ignore the error # message. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error AWS API Documentation # class Error < Struct.new( :key, :version_id, :code, :message) SENSITIVE = [] include Aws::Structure end # The error information. # # @!attribute [rw] key # The object key name to use when a 4XX class error occurs. # # Replacement must be made for object keys containing special # characters (such as carriage returns) when using XML requests. For # more information, see [ XML related object key constraints][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument AWS API Documentation # class ErrorDocument < Struct.new( :key) SENSITIVE = [] include Aws::Structure end # A container for specifying the configuration for Amazon EventBridge. # # @api private # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/EventBridgeConfiguration AWS API Documentation # class EventBridgeConfiguration < Aws::EmptyStructure; end # Optional configuration to replicate existing source bucket objects. # For more information, see [Replicating Existing Objects][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication # # @!attribute [rw] status # Specifies whether Amazon S3 replicates existing source bucket # objects. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ExistingObjectReplication AWS API Documentation # class ExistingObjectReplication < Struct.new( :status) SENSITIVE = [] include Aws::Structure end # Specifies the Amazon S3 object key name to filter on and whether to # filter on the suffix or prefix of the key name. # # @!attribute [rw] name # The object key name prefix or suffix identifying one or more objects # to which the filtering rule applies. The maximum length is 1,024 # characters. Overlapping prefixes and suffixes are not supported. For # more information, see [Configuring Event Notifications][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html # @return [String] # # @!attribute [rw] value # The value that the filter searches for in object key names. 
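# A minimal, hypothetical sketch of where `Types::DeletedObject` and
# `Types::Error` (carrying the codes tabulated above) surface in
# practice; the bucket and keys are placeholders.
require 'aws-sdk-s3'

s3 = Aws::S3::Client.new(region: 'us-east-1')
resp = s3.delete_objects(
  bucket: 'example-bucket',
  delete: { objects: [{ key: 'a.txt' }, { key: 'b.txt' }], quiet: false }
)
resp.deleted.each do |d|
  puts "deleted #{d.key} (delete marker: #{d.delete_marker.inspect})"
end
resp.errors.each do |e|
  # `code` is the machine-readable condition from the list above;
  # `message` is the human-readable English description.
  warn "#{e.key}: #{e.code} - #{e.message}"
end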
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule AWS API Documentation # class FilterRule < Struct.new( :name, :value) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] status # The accelerate configuration of the bucket. # @return [String] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput AWS API Documentation # class GetBucketAccelerateConfigurationOutput < Struct.new( :status, :request_charged) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket for which the accelerate configuration is # retrieved. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest AWS API Documentation # class GetBucketAccelerateConfigurationRequest < Struct.new( :bucket, :expected_bucket_owner, :request_payer) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] owner # Container for the bucket owner's display name and ID. # @return [Types::Owner] # # @!attribute [rw] grants # A list of grants. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput AWS API Documentation # class GetBucketAclOutput < Struct.new( :owner, :grants) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # Specifies the S3 bucket whose ACL is being requested. # # When you use this API operation with an access point, provide the # alias of the access point in place of the bucket name. # # When you use this API operation with an Object Lambda access point, # provide the alias of the Object Lambda access point in place of the # bucket name. If the Object Lambda access point alias in a request is # not valid, the error code `InvalidAccessPointAliasError` is # returned. For more information about `InvalidAccessPointAliasError`, # see [List of Error Codes][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). 
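# A minimal, hypothetical sketch of the request/output pairs above
# (`GetBucketAccelerateConfiguration` and `GetBucketAcl`); the bucket
# name is a placeholder.
require 'aws-sdk-s3'

s3 = Aws::S3::Client.new(region: 'us-east-1')

accel = s3.get_bucket_accelerate_configuration(bucket: 'example-bucket')
puts accel.status # "Enabled", "Suspended", or nil if never configured

acl = s3.get_bucket_acl(bucket: 'example-bucket')
puts acl.owner.id
acl.grants.each { |g| puts "#{g.grantee.type}: #{g.permission}" }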
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest AWS API Documentation # class GetBucketAclRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] analytics_configuration # The configuration and any analyses for the analytics filter. # @return [Types::AnalyticsConfiguration] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput AWS API Documentation # class GetBucketAnalyticsConfigurationOutput < Struct.new( :analytics_configuration) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket from which an analytics configuration is # retrieved. # @return [String] # # @!attribute [rw] id # The ID that identifies the analytics configuration. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest AWS API Documentation # class GetBucketAnalyticsConfigurationRequest < Struct.new( :bucket, :id, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] cors_rules # A set of origins and methods (cross-origin access that you want to # allow). You can add up to 100 rules to the configuration. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput AWS API Documentation # class GetBucketCorsOutput < Struct.new( :cors_rules) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name for which to get the cors configuration. # # When you use this API operation with an access point, provide the # alias of the access point in place of the bucket name. # # When you use this API operation with an Object Lambda access point, # provide the alias of the Object Lambda access point in place of the # bucket name. If the Object Lambda access point alias in a request is # not valid, the error code `InvalidAccessPointAliasError` is # returned. For more information about `InvalidAccessPointAliasError`, # see [List of Error Codes][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest AWS API Documentation # class GetBucketCorsRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] server_side_encryption_configuration # Specifies the default server-side-encryption configuration. # @return [Types::ServerSideEncryptionConfiguration] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryptionOutput AWS API Documentation # class GetBucketEncryptionOutput < Struct.new( :server_side_encryption_configuration) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket from which the server-side encryption # configuration is retrieved. 
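# A minimal, hypothetical sketch of reading the CORS rules described
# above; a bucket without a CORS configuration raises a service error,
# so the call is wrapped in a rescue. The bucket name is a placeholder.
require 'aws-sdk-s3'

s3 = Aws::S3::Client.new(region: 'us-east-1')
begin
  resp = s3.get_bucket_cors(bucket: 'example-bucket')
  resp.cors_rules.each do |rule|
    puts "origins=#{rule.allowed_origins.inspect} methods=#{rule.allowed_methods.inspect}"
  end
rescue Aws::S3::Errors::ServiceError => e
  warn "#{e.code}: #{e.message}"
end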
# @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryptionRequest AWS API Documentation # class GetBucketEncryptionRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] intelligent_tiering_configuration # Container for S3 Intelligent-Tiering configuration. # @return [Types::IntelligentTieringConfiguration] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfigurationOutput AWS API Documentation # class GetBucketIntelligentTieringConfigurationOutput < Struct.new( :intelligent_tiering_configuration) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the Amazon S3 bucket whose configuration you want to # modify or retrieve. # @return [String] # # @!attribute [rw] id # The ID used to identify the S3 Intelligent-Tiering configuration. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfigurationRequest AWS API Documentation # class GetBucketIntelligentTieringConfigurationRequest < Struct.new( :bucket, :id) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] inventory_configuration # Specifies the inventory configuration. # @return [Types::InventoryConfiguration] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput AWS API Documentation # class GetBucketInventoryConfigurationOutput < Struct.new( :inventory_configuration) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket containing the inventory configuration to # retrieve. # @return [String] # # @!attribute [rw] id # The ID used to identify the inventory configuration. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest AWS API Documentation # class GetBucketInventoryConfigurationRequest < Struct.new( :bucket, :id, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] rules # Container for a lifecycle rule. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput AWS API Documentation # class GetBucketLifecycleConfigurationOutput < Struct.new( :rules) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket for which to get the lifecycle information. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). 
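# A minimal, hypothetical sketch of the encryption and lifecycle
# getters documented above; the bucket name is a placeholder.
require 'aws-sdk-s3'

s3 = Aws::S3::Client.new(region: 'us-east-1')

enc = s3.get_bucket_encryption(bucket: 'example-bucket')
enc.server_side_encryption_configuration.rules.each do |rule|
  puts rule.apply_server_side_encryption_by_default.sse_algorithm
end

lc = s3.get_bucket_lifecycle_configuration(bucket: 'example-bucket')
lc.rules.each { |r| puts "#{r.id}: #{r.status}" }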
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest AWS API Documentation # class GetBucketLifecycleConfigurationRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] rules # Container for a lifecycle rule. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput AWS API Documentation # class GetBucketLifecycleOutput < Struct.new( :rules) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket for which to get the lifecycle information. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest AWS API Documentation # class GetBucketLifecycleRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] location_constraint # Specifies the Region where the bucket resides. For a list of all the # Amazon S3 supported location constraints by Region, see [Regions and # Endpoints][1]. Buckets in Region `us-east-1` have a # LocationConstraint of `null`. # # # # [1]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput AWS API Documentation # class GetBucketLocationOutput < Struct.new( :location_constraint) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket for which to get the location. # # When you use this API operation with an access point, provide the # alias of the access point in place of the bucket name. # # When you use this API operation with an Object Lambda access point, # provide the alias of the Object Lambda access point in place of the # bucket name. If the Object Lambda access point alias in a request is # not valid, the error code `InvalidAccessPointAliasError` is # returned. For more information about `InvalidAccessPointAliasError`, # see [List of Error Codes][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest AWS API Documentation # class GetBucketLocationRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] logging_enabled # Describes where logs are stored and the prefix that Amazon S3 # assigns to all log object keys for a bucket. For more information, # see [PUT Bucket logging][1] in the *Amazon S3 API Reference*. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html # @return [Types::LoggingEnabled] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput AWS API Documentation # class GetBucketLoggingOutput < Struct.new( :logging_enabled) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name for which to get the logging information. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest AWS API Documentation # class GetBucketLoggingRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] metrics_configuration # Specifies the metrics configuration. # @return [Types::MetricsConfiguration] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput AWS API Documentation # class GetBucketMetricsConfigurationOutput < Struct.new( :metrics_configuration) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket containing the metrics configuration to # retrieve. # @return [String] # # @!attribute [rw] id # The ID used to identify the metrics configuration. The ID has a 64 # character limit and can only contain letters, numbers, periods, # dashes, and underscores. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest AWS API Documentation # class GetBucketMetricsConfigurationRequest < Struct.new( :bucket, :id, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket for which to get the notification # configuration. # # When you use this API operation with an access point, provide the # alias of the access point in place of the bucket name. # # When you use this API operation with an Object Lambda access point, # provide the alias of the Object Lambda access point in place of the # bucket name. If the Object Lambda access point alias in a request is # not valid, the error code `InvalidAccessPointAliasError` is # returned. For more information about `InvalidAccessPointAliasError`, # see [List of Error Codes][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). 
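# A minimal, hypothetical sketch of the location and logging getters;
# note the `LocationConstraint` quirk above, where buckets in
# `us-east-1` report a null (empty) constraint. The bucket name is a
# placeholder.
require 'aws-sdk-s3'

s3 = Aws::S3::Client.new(region: 'us-east-1')

loc = s3.get_bucket_location(bucket: 'example-bucket')
region = loc.location_constraint.to_s.empty? ? 'us-east-1' : loc.location_constraint
puts region

logging = s3.get_bucket_logging(bucket: 'example-bucket')
if (enabled = logging.logging_enabled)
  puts "access logs -> s3://#{enabled.target_bucket}/#{enabled.target_prefix}"
end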
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest AWS API Documentation # class GetBucketNotificationConfigurationRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] ownership_controls # The `OwnershipControls` (BucketOwnerEnforced, BucketOwnerPreferred, # or ObjectWriter) currently in effect for this Amazon S3 bucket. # @return [Types::OwnershipControls] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControlsOutput AWS API Documentation # class GetBucketOwnershipControlsOutput < Struct.new( :ownership_controls) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the Amazon S3 bucket whose `OwnershipControls` you want # to retrieve. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControlsRequest AWS API Documentation # class GetBucketOwnershipControlsRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] policy # The bucket policy as a JSON document. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput AWS API Documentation # class GetBucketPolicyOutput < Struct.new( :policy) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name to get the bucket policy for. # # Directory buckets - When you use this operation with a # directory bucket, you must use path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. Directory bucket # names must be unique in the chosen Availability Zone. Bucket names # must also follow the format ` bucket_base_name--az_id--x-s3` (for # example, ` DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information # about bucket naming restrictions, see [Directory bucket naming # rules][1] in the *Amazon S3 User Guide* # # **Access points** - When you use this API operation with an access # point, provide the alias of the access point in place of the bucket # name. # # **Object Lambda access points** - When you use this API operation # with an Object Lambda access point, provide the alias of the Object # Lambda access point in place of the bucket name. If the Object # Lambda access point alias in a request is not valid, the error code # `InvalidAccessPointAliasError` is returned. For more information # about `InvalidAccessPointAliasError`, see [List of Error Codes][2]. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # # For directory buckets, this header is not supported in this API # operation. 
If you specify this header, the request fails with the # HTTP status code `501 Not Implemented`. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest AWS API Documentation # class GetBucketPolicyRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] policy_status # The policy status for the specified bucket. # @return [Types::PolicyStatus] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatusOutput AWS API Documentation # class GetBucketPolicyStatusOutput < Struct.new( :policy_status) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the Amazon S3 bucket whose policy status you want to # retrieve. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatusRequest AWS API Documentation # class GetBucketPolicyStatusRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] replication_configuration # A container for replication rules. You can add up to 1,000 rules. # The maximum size of a replication configuration is 2 MB. # @return [Types::ReplicationConfiguration] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput AWS API Documentation # class GetBucketReplicationOutput < Struct.new( :replication_configuration) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name for which to get the replication information. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest AWS API Documentation # class GetBucketReplicationRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] payer # Specifies who pays for the download and request fees. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput AWS API Documentation # class GetBucketRequestPaymentOutput < Struct.new( :payer) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket for which to get the request payment # configuration. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest AWS API Documentation # class GetBucketRequestPaymentRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] tag_set # Contains the tag set.
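# A minimal, hypothetical sketch of the policy-related getters above.
# In this SDK the policy body comes back as an IO-like object, so it is
# read before JSON parsing; the bucket name is a placeholder.
require 'aws-sdk-s3'
require 'json'

s3 = Aws::S3::Client.new(region: 'us-east-1')

policy = s3.get_bucket_policy(bucket: 'example-bucket')
doc = JSON.parse(policy.policy.read)
puts doc['Statement'].length

status = s3.get_bucket_policy_status(bucket: 'example-bucket')
puts "public? #{status.policy_status.is_public}"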
# @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput AWS API Documentation # class GetBucketTaggingOutput < Struct.new( :tag_set) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket for which to get the tagging information. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest AWS API Documentation # class GetBucketTaggingRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] status # The versioning state of the bucket. # @return [String] # # @!attribute [rw] mfa_delete # Specifies whether MFA delete is enabled in the bucket versioning # configuration. This element is only returned if the bucket has been # configured with MFA delete. If the bucket has never been so # configured, this element is not returned. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput AWS API Documentation # class GetBucketVersioningOutput < Struct.new( :status, :mfa_delete) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket for which to get the versioning information. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest AWS API Documentation # class GetBucketVersioningRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] redirect_all_requests_to # Specifies the redirect behavior of all requests to a website # endpoint of an Amazon S3 bucket. # @return [Types::RedirectAllRequestsTo] # # @!attribute [rw] index_document # The name of the index document for the website (for example # `index.html`). # @return [Types::IndexDocument] # # @!attribute [rw] error_document # The object key name of the website error document to use for 4XX # class errors. # @return [Types::ErrorDocument] # # @!attribute [rw] routing_rules # Rules that define when a redirect is applied and the redirect # behavior. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput AWS API Documentation # class GetBucketWebsiteOutput < Struct.new( :redirect_all_requests_to, :index_document, :error_document, :routing_rules) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name for which to get the website configuration. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). 
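# A minimal, hypothetical sketch of the tagging and versioning getters
# above; `get_bucket_tagging` raises a service error when the bucket
# has no tag set. The bucket name is a placeholder.
require 'aws-sdk-s3'

s3 = Aws::S3::Client.new(region: 'us-east-1')

tags = s3.get_bucket_tagging(bucket: 'example-bucket')
tags.tag_set.each { |t| puts "#{t.key}=#{t.value}" }

ver = s3.get_bucket_versioning(bucket: 'example-bucket')
puts ver.status     # "Enabled", "Suspended", or nil
puts ver.mfa_delete # only returned if MFA delete was ever configured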
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest AWS API Documentation # class GetBucketWebsiteRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] owner # Container for the bucket owner's display name and ID. # @return [Types::Owner] # # @!attribute [rw] grants # A list of grants. # @return [Array] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput AWS API Documentation # class GetObjectAclOutput < Struct.new( :owner, :grants, :request_charged) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name that contains the object for which to get the ACL # information. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # @return [String] # # @!attribute [rw] key # The key of the object for which to get the ACL information. # @return [String] # # @!attribute [rw] version_id # Version ID used to reference a specific version of the object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest AWS API Documentation # class GetObjectAclRequest < Struct.new( :bucket, :key, :version_id, :request_payer, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] delete_marker # Specifies whether the object retrieved was (`true`) or was not # (`false`) a delete marker. If `false`, this response header does not # appear in the response. # # This functionality is not supported for directory buckets. 
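# A minimal, hypothetical sketch of `GetObjectAcl` as documented above;
# the bucket and key are placeholders.
require 'aws-sdk-s3'

s3 = Aws::S3::Client.new(region: 'us-east-1')
resp = s3.get_object_acl(bucket: 'example-bucket', key: 'photos/cat.jpg')
resp.grants.each do |grant|
  who = grant.grantee.display_name || grant.grantee.uri || grant.grantee.id
  puts "#{who}: #{grant.permission}"
end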
# # # @return [Boolean] # # @!attribute [rw] last_modified # The creation date of the object. # @return [Time] # # @!attribute [rw] version_id # The version ID of the object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] etag # An ETag is an opaque identifier assigned by a web server to a # specific version of a resource found at a URL. # @return [String] # # @!attribute [rw] checksum # The checksum or digest of the object. # @return [Types::Checksum] # # @!attribute [rw] object_parts # A collection of parts associated with a multipart upload. # @return [Types::GetObjectAttributesParts] # # @!attribute [rw] storage_class # Provides the storage class information of the object. Amazon S3 # returns this header for all objects except for S3 Standard storage # class objects. # # For more information, see [Storage Classes][1]. # # **Directory buckets** - Only the S3 Express One Zone storage class # is supported by directory buckets to store objects. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # @return [String] # # @!attribute [rw] object_size # The size of the object in bytes. # @return [Integer] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributesOutput AWS API Documentation # class GetObjectAttributesOutput < Struct.new( :delete_marker, :last_modified, :version_id, :request_charged, :etag, :checksum, :object_parts, :storage_class, :object_size) SENSITIVE = [] include Aws::Structure end # A collection of parts associated with a multipart upload. # # @!attribute [rw] total_parts_count # The total number of parts. # @return [Integer] # # @!attribute [rw] part_number_marker # The marker for the current part. # @return [Integer] # # @!attribute [rw] next_part_number_marker # When a list is truncated, this element specifies the last part in # the list, as well as the value to use for the `PartNumberMarker` # request parameter in a subsequent request. # @return [Integer] # # @!attribute [rw] max_parts # The maximum number of parts allowed in the response. # @return [Integer] # # @!attribute [rw] is_truncated # Indicates whether the returned list of parts is truncated. A value # of `true` indicates that the list was truncated. A list can be # truncated if the number of parts exceeds the limit returned in the # `MaxParts` element. # @return [Boolean] # # @!attribute [rw] parts # A container for elements related to a particular part. A response # can contain zero or more `Parts` elements. # # * **General purpose buckets** - For `GetObjectAttributes`, if an # additional checksum (including `x-amz-checksum-crc32`, # `x-amz-checksum-crc32c`, `x-amz-checksum-sha1`, or # `x-amz-checksum-sha256`) isn't applied to the object specified in # the request, the response doesn't return `Part`. # # * **Directory buckets** - For `GetObjectAttributes`, regardless of # whether an additional checksum is applied to the object specified # in the request, the response returns `Part`.
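# A minimal, hypothetical sketch of `GetObjectAttributes`, including
# the paginated part listing described by `GetObjectAttributesParts`
# above; the bucket and key are placeholders.
require 'aws-sdk-s3'

s3 = Aws::S3::Client.new(region: 'us-east-1')
resp = s3.get_object_attributes(
  bucket: 'example-bucket',
  key: 'big-upload.bin',
  object_attributes: %w[ETag Checksum ObjectParts StorageClass ObjectSize],
  max_parts: 100
)
puts "#{resp.object_size} bytes, storage class #{resp.storage_class}"
if (parts = resp.object_parts)
  puts "#{parts.total_parts_count} parts (truncated: #{parts.is_truncated})"
  # When is_truncated is true, pass parts.next_part_number_marker as
  # part_number_marker in a follow-up request.
end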
# # # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributesParts AWS API Documentation # class GetObjectAttributesParts < Struct.new( :total_parts_count, :part_number_marker, :next_part_number_marker, :max_parts, :is_truncated, :parts) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket that contains the object. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] key # The object key. # @return [String] # # @!attribute [rw] version_id # The version ID used to reference a specific version of the object. # # S3 Versioning isn't enabled and supported for directory buckets. # For this API operation, only the `null` value of the version ID is # supported by directory buckets. You can only specify `null` to the # `versionId` query parameter in the request. # # # @return [String] # # @!attribute [rw] max_parts # Sets the maximum number of parts to return. # @return [Integer] # # @!attribute [rw] part_number_marker # Specifies the part after which listing should begin. Only parts with # higher part numbers will be listed. # @return [Integer] # # @!attribute [rw] sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, AES256). # # This functionality is not supported for directory buckets. 
# # # @return [String] # # @!attribute [rw] sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use # in encrypting data. This value is used to store the object and then # it is discarded; Amazon S3 does not store the encryption key. The # key must be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check # to ensure that the encryption key was transmitted without error. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] object_attributes # Specifies the fields at the root level that you want returned in the # response. Fields that you do not specify are not returned. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributesRequest AWS API Documentation # class GetObjectAttributesRequest < Struct.new( :bucket, :key, :version_id, :max_parts, :part_number_marker, :sse_customer_algorithm, :sse_customer_key, :sse_customer_key_md5, :request_payer, :expected_bucket_owner, :object_attributes) SENSITIVE = [:sse_customer_key] include Aws::Structure end # @!attribute [rw] legal_hold # The current legal hold status for the specified object. # @return [Types::ObjectLockLegalHold] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHoldOutput AWS API Documentation # class GetObjectLegalHoldOutput < Struct.new( :legal_hold) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name containing the object whose legal hold status you # want to retrieve. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][1] in the *Amazon S3 User Guide*. 
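#
# A minimal sketch of reading a legal hold status with
# `Aws::S3::Client#get_object_legal_hold` (bucket and key are assumed
# names, not values from this file):
#
# @example Checking an object's legal hold status
#   require 'aws-sdk-s3'
#
#   client = Aws::S3::Client.new(region: 'us-east-1')
#   resp = client.get_object_legal_hold(
#     bucket: 'DOC-EXAMPLE-BUCKET',  # assumed bucket name
#     key: 'contract.pdf'            # assumed object key
#   )
#   puts resp.legal_hold.status  # "ON" or "OFF"
#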
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # @return [String] # # @!attribute [rw] key # The key name for the object whose legal hold status you want to # retrieve. # @return [String] # # @!attribute [rw] version_id # The version ID of the object whose legal hold status you want to # retrieve. # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHoldRequest AWS API Documentation # class GetObjectLegalHoldRequest < Struct.new( :bucket, :key, :version_id, :request_payer, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] object_lock_configuration # The specified bucket's Object Lock configuration. # @return [Types::ObjectLockConfiguration] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfigurationOutput AWS API Documentation # class GetObjectLockConfigurationOutput < Struct.new( :object_lock_configuration) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket whose Object Lock configuration you want to retrieve. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfigurationRequest AWS API Documentation # class GetObjectLockConfigurationRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] body # Object data. # @return [IO] # # @!attribute [rw] delete_marker # Indicates whether the object retrieved was (true) or was not (false) # a Delete Marker. 
If false, this response header does not appear in # the response. # # * If the current version of the object is a delete marker, Amazon S3 # behaves as if the object was deleted and includes # `x-amz-delete-marker: true` in the response. # # * If the specified version in the request is a delete marker, the # response returns a `405 Method Not Allowed` error and the # `Last-Modified: timestamp` response header. # # # @return [Boolean] # # @!attribute [rw] accept_ranges # Indicates that a range of bytes was specified in the request. # @return [String] # # @!attribute [rw] expiration # If the object expiration is configured (see [ # `PutBucketLifecycleConfiguration` ][1]), the response includes this # header. It includes the `expiry-date` and `rule-id` key-value pairs # providing object expiration information. The value of the `rule-id` # is URL-encoded. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html # @return [String] # # @!attribute [rw] restore # Provides information about object restoration action and expiration # time of the restored object copy. # # This functionality is not supported for directory buckets. Only the # S3 Express One Zone storage class is supported by directory buckets # to store objects. # # # @return [String] # # @!attribute [rw] last_modified # Date and time when the object was last modified. # # General purpose buckets - When you specify a `versionId` of # the object in your request, if the specified version in the request # is a delete marker, the response returns a `405 Method Not Allowed` # error and the `Last-Modified: timestamp` response header. # @return [Time] # # @!attribute [rw] content_length # Size of the body in bytes. # @return [Integer] # # @!attribute [rw] etag # An entity tag (ETag) is an opaque identifier assigned by a web # server to a specific version of a resource found at a URL. # @return [String] # # @!attribute [rw] checksum_crc32 # The base64-encoded, 32-bit CRC32 checksum of the object. This will # only be present if it was uploaded with the object. For more # information, see [ Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_crc32c # The base64-encoded, 32-bit CRC32C checksum of the object. This will # only be present if it was uploaded with the object. For more # information, see [ Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_sha1 # The base64-encoded, 160-bit SHA-1 digest of the object. This will # only be present if it was uploaded with the object. For more # information, see [ Checking object integrity][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_sha256 # The base64-encoded, 256-bit SHA-256 digest of the object. This will # only be present if it was uploaded with the object. For more # information, see [ Checking object integrity][1] in the *Amazon S3 # User Guide*. 
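#
# When an object was uploaded with an additional checksum, enabling
# `checksum_mode` returns it on `GetObject`, and for a single-part
# upload it can be re-derived locally (multipart checksums are composed
# from per-part checksums instead). A hedged sketch; the bucket and key
# are assumptions:
#
# @example Verifying a CRC32 checksum after download
#   require 'aws-sdk-s3'
#   require 'base64'
#   require 'zlib'
#
#   client = Aws::S3::Client.new(region: 'us-east-1')
#   resp = client.get_object(
#     bucket: 'DOC-EXAMPLE-BUCKET',  # assumed bucket name
#     key: 'data.bin',               # assumed object key
#     checksum_mode: 'ENABLED'
#   )
#   data  = resp.body.read
#   local = Base64.strict_encode64([Zlib.crc32(data)].pack('N'))
#   puts(local == resp.checksum_crc32 ? 'checksum OK' : 'checksum mismatch')
#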
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] missing_meta # This is set to the number of metadata entries not returned in the # headers that are prefixed with `x-amz-meta-`. This can happen if you # create metadata using an API like SOAP that supports more flexible # metadata than the REST API. For example, using SOAP, you can create # metadata whose values are not legal HTTP headers. # # This functionality is not supported for directory buckets. # # # @return [Integer] # # @!attribute [rw] version_id # Version ID of the object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] cache_control # Specifies caching behavior along the request/reply chain. # @return [String] # # @!attribute [rw] content_disposition # Specifies presentational information for the object. # @return [String] # # @!attribute [rw] content_encoding # Indicates what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the # media-type referenced by the Content-Type header field. # @return [String] # # @!attribute [rw] content_language # The language the content is in. # @return [String] # # @!attribute [rw] content_range # The portion of the object returned in the response. # @return [String] # # @!attribute [rw] content_type # A standard MIME type describing the format of the object data. # @return [String] # # @!attribute [rw] expires # The date and time at which the object is no longer cacheable. # @return [Time] # # @!attribute [rw] expires_string # @return [String] # # @!attribute [rw] website_redirect_location # If the bucket is configured as a website, redirects requests for # this object to another object in the same bucket or to an external # URL. Amazon S3 stores the value of this header in the object # metadata. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] server_side_encryption # The server-side encryption algorithm used when you store this object # in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # @return [String] # # @!attribute [rw] metadata # A map of metadata to store with the object in S3. # @return [Hash] # # @!attribute [rw] sse_customer_algorithm # If server-side encryption with a customer-provided encryption key # was requested, the response will include this header to confirm the # encryption algorithm that's used. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] sse_customer_key_md5 # If server-side encryption with a customer-provided encryption key # was requested, the response will include this header to provide the # round-trip message integrity verification of the customer-provided # encryption key. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] ssekms_key_id # If present, indicates the ID of the Key Management Service (KMS) # symmetric encryption customer managed key that was used for the # object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] bucket_key_enabled # Indicates whether the object uses an S3 Bucket Key for server-side # encryption with Key Management Service (KMS) keys (SSE-KMS). 
# # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @!attribute [rw] storage_class # Provides storage class information of the object. Amazon S3 returns # this header for all objects except for S3 Standard storage class # objects. # # Directory buckets - Only the S3 Express One Zone storage # class is supported by directory buckets to store objects. # # # @return [String] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] replication_status # Amazon S3 can return this if your request involves a bucket that is # either a source or destination in a replication rule. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] parts_count # The count of parts this object has. This value is only returned if # you specify `partNumber` in your request and the object was uploaded # as a multipart upload. # @return [Integer] # # @!attribute [rw] tag_count # The number of tags, if any, on the object, when you have the # relevant permission to read object tags. # # You can use [GetObjectTagging][1] to retrieve the tag set associated # with an object. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html # @return [Integer] # # @!attribute [rw] object_lock_mode # The Object Lock mode that's currently in place for this object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] object_lock_retain_until_date # The date and time when this object's Object Lock will expire. # # This functionality is not supported for directory buckets. # # # @return [Time] # # @!attribute [rw] object_lock_legal_hold_status # Indicates whether this object has an active legal hold. This field # is only returned if you have permission to view an object's legal # hold status. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput AWS API Documentation # class GetObjectOutput < Struct.new( :body, :delete_marker, :accept_ranges, :expiration, :restore, :last_modified, :content_length, :etag, :checksum_crc32, :checksum_crc32c, :checksum_sha1, :checksum_sha256, :missing_meta, :version_id, :cache_control, :content_disposition, :content_encoding, :content_language, :content_range, :content_type, :expires, :expires_string, :website_redirect_location, :server_side_encryption, :metadata, :sse_customer_algorithm, :sse_customer_key_md5, :ssekms_key_id, :bucket_key_enabled, :storage_class, :request_charged, :replication_status, :parts_count, :tag_count, :object_lock_mode, :object_lock_retain_until_date, :object_lock_legal_hold_status) SENSITIVE = [:ssekms_key_id] include Aws::Structure end # @!attribute [rw] bucket # The bucket name containing the object. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). 
For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # **Object Lambda access points** - When you use this action with an # Object Lambda access point, you must direct requests to the Object # Lambda access point hostname. The Object Lambda access point # hostname takes the form # *AccessPointName*-*AccountId*.s3-object-lambda.*Region*.amazonaws.com. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] if_match # Return the object only if its entity tag (ETag) is the same as the # one specified in this header; otherwise, return a `412 Precondition # Failed` error. # # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: `If-Match` condition evaluates to # `true`, and; `If-Unmodified-Since` condition evaluates to `false`; # then S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @return [String] # # @!attribute [rw] if_modified_since # Return the object only if it has been modified since the specified # time; otherwise, return a `304 Not Modified` error. # # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows: `If-None-Match` condition # evaluates to `false`, and; `If-Modified-Since` condition evaluates # to `true`; then S3 returns the `304 Not Modified` status code. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @return [Time] # # @!attribute [rw] if_none_match # Return the object only if its entity tag (ETag) is different from # the one specified in this header; otherwise, return a `304 Not # Modified` error.
# # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows: `If-None-Match` condition # evaluates to `false`, and; `If-Modified-Since` condition evaluates # to `true`; then S3 returns the `304 Not Modified` HTTP status code. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @return [String] # # @!attribute [rw] if_unmodified_since # Return the object only if it has not been modified since the # specified time; otherwise, return a `412 Precondition Failed` error. # # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: `If-Match` condition evaluates to # `true`, and; `If-Unmodified-Since` condition evaluates to `false`; # then S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @return [Time] # # @!attribute [rw] key # Key of the object to get. # @return [String] # # @!attribute [rw] range # Downloads the specified byte range of an object. For more # information about the HTTP Range header, see # [https://www.rfc-editor.org/rfc/rfc9110.html#name-range][1]. # # Amazon S3 doesn't support retrieving multiple ranges of data per # `GET` request. # # # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-range # @return [String] # # @!attribute [rw] response_cache_control # Sets the `Cache-Control` header of the response. # @return [String] # # @!attribute [rw] response_content_disposition # Sets the `Content-Disposition` header of the response. # @return [String] # # @!attribute [rw] response_content_encoding # Sets the `Content-Encoding` header of the response. # @return [String] # # @!attribute [rw] response_content_language # Sets the `Content-Language` header of the response. # @return [String] # # @!attribute [rw] response_content_type # Sets the `Content-Type` header of the response. # @return [String] # # @!attribute [rw] response_expires # Sets the `Expires` header of the response. # @return [Time] # # @!attribute [rw] version_id # Version ID used to reference a specific version of the object. # # By default, the `GetObject` operation returns the current version of # an object. To return a different version, use the `versionId` # subresource. # # * If you include a `versionId` in your request header, you must have # the `s3:GetObjectVersion` permission to access a specific version # of an object. The `s3:GetObject` permission is not required in # this scenario. # # * If you request the current version of an object without a specific # `versionId` in the request header, only the `s3:GetObject` # permission is required. The `s3:GetObjectVersion` permission is # not required in this scenario. # # * **Directory buckets** - S3 Versioning isn't enabled or supported # for directory buckets. For this API operation, only the `null` # value of the version ID is supported by directory buckets. You can # only specify `null` to the `versionId` query parameter in the # request. # # # # For more information about versioning, see [PutBucketVersioning][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html # @return [String] # # @!attribute [rw] sse_customer_algorithm # Specifies the algorithm to use when decrypting the object (for # example, `AES256`).
# # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object # in Amazon S3, then when you GET the object, you must use the # following headers: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User # Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @return [String] # # @!attribute [rw] sse_customer_key # Specifies the customer-provided encryption key that you originally # provided for Amazon S3 to encrypt the data before storing it. This # value is used to decrypt the object when recovering it and must # match the one used when storing the data. The key must be # appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object # in Amazon S3, then when you GET the object, you must use the # following headers: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User # Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @return [String] # # @!attribute [rw] sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the customer-provided encryption # key according to RFC 1321. Amazon S3 uses this header for a message # integrity check to ensure that the encryption key was transmitted # without error. # # If you encrypt an object by using server-side encryption with # customer-provided encryption keys (SSE-C) when you store the object # in Amazon S3, then when you GET the object, you must use the # following headers: # # * `x-amz-server-side-encryption-customer-algorithm` # # * `x-amz-server-side-encryption-customer-key` # # * `x-amz-server-side-encryption-customer-key-MD5` # # For more information about SSE-C, see [Server-Side Encryption (Using # Customer-Provided Encryption Keys)][1] in the *Amazon S3 User # Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. 
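#
# Pulling the SSE-C headers together: a hedged sketch of a `GetObject`
# call for an object stored with a customer-provided key. The key and
# names are illustrative assumptions, not values from this file.
#
# @example Downloading an SSE-C encrypted object
#   require 'aws-sdk-s3'
#   require 'base64'
#   require 'openssl'
#
#   client = Aws::S3::Client.new(region: 'us-east-1')
#   customer_key = 'k' * 32  # placeholder 256-bit AES key
#   resp = client.get_object(
#     bucket: 'DOC-EXAMPLE-BUCKET',                 # assumed bucket name
#     key: 'encrypted-object',                      # assumed object key
#     sse_customer_algorithm: 'AES256',
#     sse_customer_key: customer_key,
#     sse_customer_key_md5: Base64.strict_encode64(
#       OpenSSL::Digest::MD5.digest(customer_key)
#     )
#   )
#   puts resp.content_length
#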
# # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] part_number # Part number of the object being read. This is a positive integer # between 1 and 10,000. Effectively performs a 'ranged' GET request # for the part specified. Useful for downloading just a part of an # object. # @return [Integer] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] checksum_mode # To retrieve the checksum, this mode must be enabled. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest AWS API Documentation # class GetObjectRequest < Struct.new( :bucket, :if_match, :if_modified_since, :if_none_match, :if_unmodified_since, :key, :range, :response_cache_control, :response_content_disposition, :response_content_encoding, :response_content_language, :response_content_type, :response_expires, :version_id, :sse_customer_algorithm, :sse_customer_key, :sse_customer_key_md5, :request_payer, :part_number, :expected_bucket_owner, :checksum_mode) SENSITIVE = [:sse_customer_key] include Aws::Structure end # @!attribute [rw] retention # The container element for an object's retention settings. # @return [Types::ObjectLockRetention] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetentionOutput AWS API Documentation # class GetObjectRetentionOutput < Struct.new( :retention) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name containing the object whose retention settings you # want to retrieve. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # @return [String] # # @!attribute [rw] key # The key name for the object whose retention settings you want to # retrieve. # @return [String] # # @!attribute [rw] version_id # The version ID for the object whose retention settings you want to # retrieve. # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. 
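#
# A minimal sketch of reading retention settings with
# `Aws::S3::Client#get_object_retention` (the bucket and key are
# assumed names):
#
# @example Reading an object's retention mode and expiry
#   require 'aws-sdk-s3'
#
#   client = Aws::S3::Client.new(region: 'us-east-1')
#   resp = client.get_object_retention(
#     bucket: 'DOC-EXAMPLE-BUCKET',  # assumed bucket name
#     key: 'audit-log.gz'            # assumed object key
#   )
#   puts "#{resp.retention.mode} until #{resp.retention.retain_until_date}"
#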
# # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetentionRequest AWS API Documentation # class GetObjectRetentionRequest < Struct.new( :bucket, :key, :version_id, :request_payer, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] version_id # The versionId of the object for which you got the tagging # information. # @return [String] # # @!attribute [rw] tag_set # Contains the tag set. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput AWS API Documentation # class GetObjectTaggingOutput < Struct.new( :version_id, :tag_set) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name containing the object for which to get the tagging # information. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][1] in the *Amazon S3 User Guide*. # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][2] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] key # Object key for which to get the tagging information. # @return [String] # # @!attribute [rw] version_id # The versionId of the object for which to get the tagging # information. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. 
# # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest AWS API Documentation # class GetObjectTaggingRequest < Struct.new( :bucket, :key, :version_id, :expected_bucket_owner, :request_payer) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] body # A Bencoded dictionary as defined by the BitTorrent specification. # @return [IO] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput AWS API Documentation # class GetObjectTorrentOutput < Struct.new( :body, :request_charged) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket containing the object for which to get the # torrent files. # @return [String] # # @!attribute [rw] key # The object key for which to get the information. # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest AWS API Documentation # class GetObjectTorrentRequest < Struct.new( :bucket, :key, :request_payer, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] public_access_block_configuration # The `PublicAccessBlock` configuration currently in effect for this # Amazon S3 bucket. # @return [Types::PublicAccessBlockConfiguration] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlockOutput AWS API Documentation # class GetPublicAccessBlockOutput < Struct.new( :public_access_block_configuration) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the Amazon S3 bucket whose `PublicAccessBlock` # configuration you want to retrieve. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlockRequest AWS API Documentation # class GetPublicAccessBlockRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # Container for S3 Glacier job parameters.
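#
# `GlacierJobParameters` is supplied inside a `RestoreRequest`. A
# hedged sketch of initiating a restore; the bucket, key, and one-day
# window are illustrative assumptions.
#
# @example Restoring an archived object at the Standard tier
#   require 'aws-sdk-s3'
#
#   client = Aws::S3::Client.new(region: 'us-east-1')
#   client.restore_object(
#     bucket: 'DOC-EXAMPLE-BUCKET',   # assumed bucket name
#     key: 'archived-report.csv',     # assumed object key
#     restore_request: {
#       days: 1,
#       glacier_job_parameters: { tier: 'Standard' }
#     }
#   )
#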
# # @!attribute [rw] tier # Retrieval tier at which the restore will be processed. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters AWS API Documentation # class GlacierJobParameters < Struct.new( :tier) SENSITIVE = [] include Aws::Structure end # Container for grant information. # # @!attribute [rw] grantee # The person being granted permissions. # @return [Types::Grantee] # # @!attribute [rw] permission # Specifies the permission given to the grantee. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant AWS API Documentation # class Grant < Struct.new( :grantee, :permission) SENSITIVE = [] include Aws::Structure end # Container for the person being granted permissions. # # @!attribute [rw] display_name # Screen name of the grantee. # @return [String] # # @!attribute [rw] email_address # Email address of the grantee. # # Using email addresses to specify a grantee is only supported in the # following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # For a list of all the Amazon S3 supported Regions and endpoints, see # [Regions and Endpoints][1] in the Amazon Web Services General # Reference. # # # # # # [1]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region # @return [String] # # @!attribute [rw] id # The canonical user ID of the grantee. # @return [String] # # @!attribute [rw] type # Type of grantee. # @return [String] # # @!attribute [rw] uri # URI of the grantee group. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee AWS API Documentation # class Grantee < Struct.new( :display_name, :email_address, :id, :type, :uri) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket_location_type # The type of location where the bucket is created. # # This functionality is only supported by directory buckets. # # # @return [String] # # @!attribute [rw] bucket_location_name # The name of the location where the bucket will be created. # # For directory buckets, the AZ ID of the Availability Zone where the # bucket is created. An example AZ ID value is `usw2-az2`. # # This functionality is only supported by directory buckets. # # # @return [String] # # @!attribute [rw] bucket_region # The Region where the bucket is located. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] access_point_alias # Indicates whether the bucket name used in the request is an access # point alias. # # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput AWS API Documentation # class HeadBucketOutput < Struct.new( :bucket_location_type, :bucket_location_name, :bucket_region, :access_point_alias) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`).
For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # **Object Lambda access points** - When you use this API operation # with an Object Lambda access point, provide the alias of the Object # Lambda access point in place of the bucket name. If the Object # Lambda access point alias in a request is not valid, the error code # `InvalidAccessPointAliasError` is returned. For more information # about `InvalidAccessPointAliasError`, see [List of Error Codes][3]. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][4] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest AWS API Documentation # class HeadBucketRequest < Struct.new( :bucket, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] delete_marker # Specifies whether the object retrieved was (true) or was not (false) # a Delete Marker. If false, this response header does not appear in # the response. # # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @!attribute [rw] accept_ranges # Indicates that a range of bytes was specified. # @return [String] # # @!attribute [rw] expiration # If the object expiration is configured (see [ # `PutBucketLifecycleConfiguration` ][1]), the response includes this # header. It includes the `expiry-date` and `rule-id` key-value pairs # providing object expiration information. The value of the `rule-id` # is URL-encoded. # # This functionality is not supported for directory buckets. 
# # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html # @return [String] # # @!attribute [rw] restore # If the object is an archived object (an object whose storage class # is GLACIER), the response includes this header if either the archive # restoration is in progress (see [RestoreObject][1]) or an archive # copy is already restored. # # If an archive copy is already restored, the header value indicates # when Amazon S3 is scheduled to delete the object copy. For example: # # `x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec # 2012 00:00:00 GMT"` # # If the object restoration is in progress, the header returns the # value `ongoing-request="true"`. # # For more information about archiving objects, see [Transitioning # Objects: General Considerations][2]. # # This functionality is not supported for directory buckets. Only the # S3 Express One Zone storage class is supported by directory buckets # to store objects. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations # @return [String] # # @!attribute [rw] archive_status # The archive state of the head object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] last_modified # Date and time when the object was last modified. # @return [Time] # # @!attribute [rw] content_length # Size of the body in bytes. # @return [Integer] # # @!attribute [rw] checksum_crc32 # The base64-encoded, 32-bit CRC32 checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_crc32c # The base64-encoded, 32-bit CRC32C checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha1 # The base64-encoded, 160-bit SHA-1 digest of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*.
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha256 # The base64-encoded, 256-bit SHA-256 digest of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] etag # An entity tag (ETag) is an opaque identifier assigned by a web # server to a specific version of a resource found at a URL. # @return [String] # # @!attribute [rw] missing_meta # This is set to the number of metadata entries not returned in # `x-amz-meta` headers. This can happen if you create metadata using # an API like SOAP that supports more flexible metadata than the REST # API. For example, using SOAP, you can create metadata whose values # are not legal HTTP headers. # # This functionality is not supported for directory buckets. # # # @return [Integer] # # @!attribute [rw] version_id # Version ID of the object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] cache_control # Specifies caching behavior along the request/reply chain. # @return [String] # # @!attribute [rw] content_disposition # Specifies presentational information for the object. # @return [String] # # @!attribute [rw] content_encoding # Indicates what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the # media-type referenced by the Content-Type header field. # @return [String] # # @!attribute [rw] content_language # The language the content is in. # @return [String] # # @!attribute [rw] content_type # A standard MIME type describing the format of the object data. # @return [String] # # @!attribute [rw] expires # The date and time at which the object is no longer cacheable. # @return [Time] # # @!attribute [rw] expires_string # @return [String] # # @!attribute [rw] website_redirect_location # If the bucket is configured as a website, redirects requests for # this object to another object in the same bucket or to an external # URL. Amazon S3 stores the value of this header in the object # metadata. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] server_side_encryption # The server-side encryption algorithm used when you store this object # in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # @return [String] # # @!attribute [rw] metadata # A map of metadata to store with the object in S3. # @return [Hash] # # @!attribute [rw] sse_customer_algorithm # If server-side encryption with a customer-provided encryption key # was requested, the response will include this header to confirm the # encryption algorithm that's used. # # This functionality is not supported for directory buckets. 
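#
# These response members are populated by `Aws::S3::Client#head_object`,
# which retrieves metadata without the body. A minimal sketch; the
# bucket and key are assumed names.
#
# @example Inspecting object metadata with HeadObject
#   require 'aws-sdk-s3'
#
#   client = Aws::S3::Client.new(region: 'us-east-1')
#   resp = client.head_object(
#     bucket: 'DOC-EXAMPLE-BUCKET',  # assumed bucket name
#     key: 'report.pdf'              # assumed object key
#   )
#   puts resp.content_length
#   puts resp.content_type
#   puts resp.etag
#   resp.metadata.each { |k, v| puts "x-amz-meta-#{k}: #{v}" }
#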
# # # @return [String] # # @!attribute [rw] sse_customer_key_md5 # If server-side encryption with a customer-provided encryption key # was requested, the response will include this header to provide the # round-trip message integrity verification of the customer-provided # encryption key. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] ssekms_key_id # If present, indicates the ID of the Key Management Service (KMS) # symmetric encryption customer managed key that was used for the # object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] bucket_key_enabled # Indicates whether the object uses an S3 Bucket Key for server-side # encryption with Key Management Service (KMS) keys (SSE-KMS). # # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @!attribute [rw] storage_class # Provides storage class information of the object. Amazon S3 returns # this header for all objects except for S3 Standard storage class # objects. # # For more information, see [Storage Classes][1]. # # Directory buckets - Only the S3 Express One Zone storage # class is supported by directory buckets to store objects. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # @return [String] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] replication_status # Amazon S3 can return this header if your request involves a bucket # that is either a source or a destination in a replication rule. # # In replication, you have a source bucket on which you configure # replication and a destination bucket or buckets where Amazon S3 stores # object replicas. When you request an object (`GetObject`) or object # metadata (`HeadObject`) from these buckets, Amazon S3 will return # the `x-amz-replication-status` header in the response as follows: # # * **If requesting an object from the source bucket**, Amazon S3 will # return the `x-amz-replication-status` header if the object in your # request is eligible for replication. # # For example, suppose that in your replication configuration, you # specify the object prefix `TaxDocs`, requesting Amazon S3 to replicate # objects with key prefix `TaxDocs`. Any objects you upload with # this key name prefix, for example `TaxDocs/document1.pdf`, are # eligible for replication. For any object request with this key # name prefix, Amazon S3 will return the `x-amz-replication-status` # header with value PENDING, COMPLETED or FAILED indicating object # replication status. # # * **If requesting an object from a destination bucket**, Amazon S3 # will return the `x-amz-replication-status` header with value # REPLICA if the object in your request is a replica that Amazon S3 # created and there is no replica modification replication in # progress. # # * **When replicating objects to multiple destination buckets**, the # `x-amz-replication-status` header acts differently. The header of # the source object will only return a value of COMPLETED when # replication is successful to all destinations. The header will # remain at value PENDING until replication has completed for all # destinations. If one or more destinations fails replication, the # header will return FAILED. # # For more information, see [Replication][1].
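#
# A short, hedged sketch of polling this header via `head_object`;
# the bucket is an assumed name and the key reuses the prefix from the
# example above.
#
# @example Checking x-amz-replication-status
#   require 'aws-sdk-s3'
#
#   client = Aws::S3::Client.new(region: 'us-east-1')
#   resp = client.head_object(
#     bucket: 'DOC-EXAMPLE-BUCKET',   # assumed source bucket
#     key: 'TaxDocs/document1.pdf'    # key prefix from the example above
#   )
#   puts resp.replication_status  # "PENDING", "COMPLETED", "FAILED", or "REPLICA"
#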
# # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html # @return [String] # # @!attribute [rw] parts_count # The count of parts this object has. This value is only returned if # you specify `partNumber` in your request and the object was uploaded # as a multipart upload. # @return [Integer] # # @!attribute [rw] object_lock_mode # The Object Lock mode, if any, that's in effect for this object. # This header is only returned if the requester has the # `s3:GetObjectRetention` permission. For more information about S3 # Object Lock, see [Object Lock][1]. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html # @return [String] # # @!attribute [rw] object_lock_retain_until_date # The date and time when the Object Lock retention period expires. # This header is only returned if the requester has the # `s3:GetObjectRetention` permission. # # This functionality is not supported for directory buckets. # # # @return [Time] # # @!attribute [rw] object_lock_legal_hold_status # Specifies whether a legal hold is in effect for this object. This # header is only returned if the requester has the # `s3:GetObjectLegalHold` permission. This header is not returned if # the specified version of this object has never had a legal hold # applied. For more information about S3 Object Lock, see [Object # Lock][1]. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput AWS API Documentation # class HeadObjectOutput < Struct.new( :delete_marker, :accept_ranges, :expiration, :restore, :archive_status, :last_modified, :content_length, :checksum_crc32, :checksum_crc32c, :checksum_sha1, :checksum_sha256, :etag, :missing_meta, :version_id, :cache_control, :content_disposition, :content_encoding, :content_language, :content_type, :expires, :expires_string, :website_redirect_location, :server_side_encryption, :metadata, :sse_customer_algorithm, :sse_customer_key_md5, :ssekms_key_id, :bucket_key_enabled, :storage_class, :request_charged, :replication_status, :parts_count, :object_lock_mode, :object_lock_retain_until_date, :object_lock_legal_hold_status) SENSITIVE = [:ssekms_key_id] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket that contains the object. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com.
# When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] if_match # Return the object only if its entity tag (ETag) is the same as the # one specified; otherwise, return a 412 (precondition failed) error. # # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: # # * `If-Match` condition evaluates to `true`, and; # # * `If-Unmodified-Since` condition evaluates to `false`; # # Then Amazon S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @return [String] # # @!attribute [rw] if_modified_since # Return the object only if it has been modified since the specified # time; otherwise, return a 304 (not modified) error. # # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows: # # * `If-None-Match` condition evaluates to `false`, and; # # * `If-Modified-Since` condition evaluates to `true`; # # Then Amazon S3 returns the `304 Not Modified` response code. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @return [Time] # # @!attribute [rw] if_none_match # Return the object only if its entity tag (ETag) is different from # the one specified; otherwise, return a 304 (not modified) error. # # If both of the `If-None-Match` and `If-Modified-Since` headers are # present in the request as follows: # # * `If-None-Match` condition evaluates to `false`, and; # # * `If-Modified-Since` condition evaluates to `true`; # # Then Amazon S3 returns the `304 Not Modified` response code. # # For more information about conditional requests, see [RFC 7232][1]. # # # # [1]: https://tools.ietf.org/html/rfc7232 # @return [String] # # @!attribute [rw] if_unmodified_since # Return the object only if it has not been modified since the # specified time; otherwise, return a 412 (precondition failed) error. # # If both of the `If-Match` and `If-Unmodified-Since` headers are # present in the request as follows: # # * `If-Match` condition evaluates to `true`, and; # # * `If-Unmodified-Since` condition evaluates to `false`; # # Then Amazon S3 returns `200 OK` and the data requested. # # For more information about conditional requests, see [RFC 7232][1]. 
# # # # [1]: https://tools.ietf.org/html/rfc7232 # @return [Time] # # @!attribute [rw] key # The object key. # @return [String] # # @!attribute [rw] range # HeadObject returns only the metadata for an object. If the Range is # satisfiable, only the `ContentLength` is affected in the response. # If the Range is not satisfiable, S3 returns a `416 - Requested Range # Not Satisfiable` error. # @return [String] # # @!attribute [rw] version_id # Version ID used to reference a specific version of the object. # # For directory buckets in this API operation, only the `null` value # of the version ID is supported. # # # @return [String] # # @!attribute [rw] sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, AES256). # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use # in encrypting data. This value is used to store the object and then # it is discarded; Amazon S3 does not store the encryption key. The # key must be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check # to ensure that the encryption key was transmitted without error. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] part_number # Part number of the object being read. This is a positive integer # between 1 and 10,000. Effectively performs a 'ranged' HEAD request # for the part specified. Useful for querying the size of the part # and the number of parts in this object. # @return [Integer] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] checksum_mode # To retrieve the checksum, this parameter must be enabled. # # In addition, if you enable `ChecksumMode` and the object is # encrypted with Amazon Web Services Key Management Service (Amazon # Web Services KMS), you must have permission to use the `kms:Decrypt` # action for the request to succeed.
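  #
  # A short sketch (hypothetical names; assumes a configured
  # `Aws::S3::Client`) combining the request parameters documented above:
  # a ranged HEAD for a single part, checksum retrieval, and a
  # conditional HEAD. The SDK surfaces a `304` as a raised error class
  # generated from the error code (typically `Aws::S3::Errors::NotModified`):
  #
  #     client = Aws::S3::Client.new(region: 'us-east-1')
  #
  #     # HEAD part 1 of a multipart object; ContentLength reflects the part.
  #     part = client.head_object(bucket: 'amzn-s3-demo-bucket',
  #                               key: 'large-object',
  #                               part_number: 1,
  #                               checksum_mode: 'ENABLED')
  #     part.parts_count    #=> total number of parts
  #     part.content_length #=> size of part 1, in bytes
  #
  #     begin
  #       client.head_object(bucket: 'amzn-s3-demo-bucket',
  #                          key: 'large-object',
  #                          if_none_match: part.etag)
  #     rescue Aws::S3::Errors::NotModified
  #       # The object's ETag still matches; nothing to re-download.
  #     end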
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest AWS API Documentation # class HeadObjectRequest < Struct.new( :bucket, :if_match, :if_modified_since, :if_none_match, :if_unmodified_since, :key, :range, :version_id, :sse_customer_algorithm, :sse_customer_key, :sse_customer_key_md5, :request_payer, :part_number, :expected_bucket_owner, :checksum_mode) SENSITIVE = [:sse_customer_key] include Aws::Structure end # Container for the `Suffix` element. # # @!attribute [rw] suffix # A suffix that is appended to a request that is for a directory on # the website endpoint (for example, if the suffix is `index.html` and # you make a request to `samplebucket/images/`, the data that is returned # will be for the object with the key name `images/index.html`). The # suffix must not be empty and must not include a slash character. # # Replacement must be made for object keys containing special # characters (such as carriage returns) when using XML requests. For # more information, see [ XML related object key constraints][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument AWS API Documentation # class IndexDocument < Struct.new( :suffix) SENSITIVE = [] include Aws::Structure end # Container element that identifies who initiated the multipart upload. # # @!attribute [rw] id # If the principal is an Amazon Web Services account, it provides the # Canonical User ID. If the principal is an IAM User, it provides a # user ARN value. # # **Directory buckets** - If the principal is an Amazon Web Services # account, it provides the Amazon Web Services account ID. If the # principal is an IAM User, it provides a user ARN value. # # # @return [String] # # @!attribute [rw] display_name # Name of the Principal. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator AWS API Documentation # class Initiator < Struct.new( :id, :display_name) SENSITIVE = [] include Aws::Structure end # Describes the serialization format of the object. # # @!attribute [rw] csv # Describes the serialization of a CSV-encoded object. # @return [Types::CSVInput] # # @!attribute [rw] compression_type # Specifies the object's compression format. Valid values: NONE, GZIP, # BZIP2. Default Value: NONE. # @return [String] # # @!attribute [rw] json # Specifies JSON as the object's input serialization format. # @return [Types::JSONInput] # # @!attribute [rw] parquet # Specifies Parquet as the object's input serialization format. # @return [Types::ParquetInput] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InputSerialization AWS API Documentation # class InputSerialization < Struct.new( :csv, :compression_type, :json, :parquet) SENSITIVE = [] include Aws::Structure end # A container for specifying S3 Intelligent-Tiering filters. The filters # determine the subset of objects to which the rule applies. # # @!attribute [rw] prefix # An object key name prefix that identifies the subset of objects to # which the configuration applies. # @return [String] # # @!attribute [rw] tags # All of these tags must exist in the object's tag set in order for # the configuration to apply.
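  #
  # `InputSerialization` is consumed by the `SelectObjectContent`
  # operation. A minimal sketch (hypothetical bucket, key, and query;
  # event-stream handling follows the block form generated for this
  # client):
  #
  #     client.select_object_content(
  #       bucket: 'amzn-s3-demo-bucket',
  #       key: 'data.csv.gz',
  #       expression: 'SELECT * FROM S3Object s LIMIT 5',
  #       expression_type: 'SQL',
  #       input_serialization: {
  #         csv: { file_header_info: 'USE' },
  #         compression_type: 'GZIP'
  #       },
  #       output_serialization: { json: { record_delimiter: "\n" } }
  #     ) do |stream|
  #       stream.on_records_event { |event| print event.payload.read }
  #     end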
# @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IntelligentTieringAndOperator AWS API Documentation # class IntelligentTieringAndOperator < Struct.new( :prefix, :tags) SENSITIVE = [] include Aws::Structure end # Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 # bucket. # # For information about the S3 Intelligent-Tiering storage class, see # [Storage class for automatically optimizing frequently and # infrequently accessed objects][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access # # @!attribute [rw] id # The ID used to identify the S3 Intelligent-Tiering configuration. # @return [String] # # @!attribute [rw] filter # Specifies a bucket filter. The configuration only includes objects # that meet the filter's criteria. # @return [Types::IntelligentTieringFilter] # # @!attribute [rw] status # Specifies the status of the configuration. # @return [String] # # @!attribute [rw] tierings # Specifies the S3 Intelligent-Tiering storage class tier of the # configuration. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IntelligentTieringConfiguration AWS API Documentation # class IntelligentTieringConfiguration < Struct.new( :id, :filter, :status, :tierings) SENSITIVE = [] include Aws::Structure end # The `Filter` is used to identify objects that the S3 # Intelligent-Tiering configuration applies to. # # @!attribute [rw] prefix # An object key name prefix that identifies the subset of objects to # which the rule applies. # # Replacement must be made for object keys containing special # characters (such as carriage returns) when using XML requests. For # more information, see [ XML related object key constraints][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints # @return [String] # # @!attribute [rw] tag # A container of a key value name pair. # @return [Types::Tag] # # @!attribute [rw] and # A conjunction (logical AND) of predicates, which is used in # evaluating a metrics filter. The operator must have at least two # predicates, and an object must match all of the predicates in order # for the filter to apply. # @return [Types::IntelligentTieringAndOperator] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IntelligentTieringFilter AWS API Documentation # class IntelligentTieringFilter < Struct.new( :prefix, :tag, :and) SENSITIVE = [] include Aws::Structure end # Object is archived and inaccessible until restored. # # If the object you are retrieving is stored in the S3 Glacier Flexible # Retrieval storage class, the S3 Glacier Deep Archive storage class, # the S3 Intelligent-Tiering Archive Access tier, or the S3 # Intelligent-Tiering Deep Archive Access tier, before you can retrieve # the object you must first restore a copy using [RestoreObject][1]. # Otherwise, this operation returns an `InvalidObjectState` error. For # information about restoring archived objects, see [Restoring Archived # Objects][2] in the *Amazon S3 User Guide*. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html # # @!attribute [rw] storage_class # @return [String] # # @!attribute [rw] access_tier # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InvalidObjectState AWS API Documentation # class InvalidObjectState < Struct.new( :storage_class, :access_tier) SENSITIVE = [] include Aws::Structure end # Specifies the inventory configuration for an Amazon S3 bucket. For # more information, see [GET Bucket inventory][1] in the *Amazon S3 API # Reference*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html # # @!attribute [rw] destination # Contains information about where to publish the inventory results. # @return [Types::InventoryDestination] # # @!attribute [rw] is_enabled # Specifies whether the inventory is enabled or disabled. If set to # `True`, an inventory list is generated. If set to `False`, no # inventory list is generated. # @return [Boolean] # # @!attribute [rw] filter # Specifies an inventory filter. The inventory only includes objects # that meet the filter's criteria. # @return [Types::InventoryFilter] # # @!attribute [rw] id # The ID used to identify the inventory configuration. # @return [String] # # @!attribute [rw] included_object_versions # Object versions to include in the inventory list. If set to `All`, # the list includes all the object versions, which adds the # version-related fields `VersionId`, `IsLatest`, and `DeleteMarker` # to the list. If set to `Current`, the list does not contain these # version-related fields. # @return [String] # # @!attribute [rw] optional_fields # Contains the optional fields that are included in the inventory # results. # @return [Array] # # @!attribute [rw] schedule # Specifies the schedule for generating inventory results. # @return [Types::InventorySchedule] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration AWS API Documentation # class InventoryConfiguration < Struct.new( :destination, :is_enabled, :filter, :id, :included_object_versions, :optional_fields, :schedule) SENSITIVE = [] include Aws::Structure end # Specifies the inventory configuration for an Amazon S3 bucket. # # @!attribute [rw] s3_bucket_destination # Contains the bucket name, file format, bucket owner (optional), and # prefix (optional) where inventory results are published. # @return [Types::InventoryS3BucketDestination] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination AWS API Documentation # class InventoryDestination < Struct.new( :s3_bucket_destination) SENSITIVE = [] include Aws::Structure end # Contains the type of server-side encryption used to encrypt the # inventory results. # # @!attribute [rw] sses3 # Specifies the use of SSE-S3 to encrypt delivered inventory reports. # @return [Types::SSES3] # # @!attribute [rw] ssekms # Specifies the use of SSE-KMS to encrypt delivered inventory reports. # @return [Types::SSEKMS] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryEncryption AWS API Documentation # class InventoryEncryption < Struct.new( :sses3, :ssekms) SENSITIVE = [] include Aws::Structure end # Specifies an inventory filter. The inventory only includes objects # that meet the filter's criteria. # # @!attribute [rw] prefix # The prefix that an object must have to be included in the inventory # results. 
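  #
  # A sketch of wiring these inventory structures together with
  # `put_bucket_inventory_configuration` (hypothetical bucket names,
  # configuration ID, and account ID):
  #
  #     client.put_bucket_inventory_configuration(
  #       bucket: 'amzn-s3-demo-bucket',
  #       id: 'daily-inventory',
  #       inventory_configuration: {
  #         id: 'daily-inventory',
  #         is_enabled: true,
  #         included_object_versions: 'Current',
  #         schedule: { frequency: 'Daily' },
  #         optional_fields: ['Size', 'LastModifiedDate', 'StorageClass'],
  #         destination: {
  #           s3_bucket_destination: {
  #             bucket: 'arn:aws:s3:::amzn-s3-demo-destination-bucket',
  #             format: 'CSV',
  #             account_id: '111122223333',
  #             prefix: 'inventory'
  #           }
  #         }
  #       }
  #     )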
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter AWS API Documentation # class InventoryFilter < Struct.new( :prefix) SENSITIVE = [] include Aws::Structure end # Contains the bucket name, file format, bucket owner (optional), and # prefix (optional) where inventory results are published. # # @!attribute [rw] account_id # The account ID that owns the destination S3 bucket. If no account ID # is provided, the owner is not validated before exporting data. # # Although this value is optional, we strongly recommend that you set # it to help prevent problems if the destination bucket ownership # changes. # # # @return [String] # # @!attribute [rw] bucket # The Amazon Resource Name (ARN) of the bucket where inventory results # will be published. # @return [String] # # @!attribute [rw] format # Specifies the output format of the inventory results. # @return [String] # # @!attribute [rw] prefix # The prefix that is prepended to all inventory results. # @return [String] # # @!attribute [rw] encryption # Contains the type of server-side encryption used to encrypt the # inventory results. # @return [Types::InventoryEncryption] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination AWS API Documentation # class InventoryS3BucketDestination < Struct.new( :account_id, :bucket, :format, :prefix, :encryption) SENSITIVE = [] include Aws::Structure end # Specifies the schedule for generating inventory results. # # @!attribute [rw] frequency # Specifies how frequently inventory results are produced. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule AWS API Documentation # class InventorySchedule < Struct.new( :frequency) SENSITIVE = [] include Aws::Structure end # Specifies JSON as object's input serialization format. # # @!attribute [rw] type # The type of JSON. Valid values: Document, Lines. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/JSONInput AWS API Documentation # class JSONInput < Struct.new( :type) SENSITIVE = [] include Aws::Structure end # Specifies JSON as request's output serialization format. # # @!attribute [rw] record_delimiter # The value used to separate individual records in the output. If no # value is specified, Amazon S3 uses a newline character ('\\n'). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/JSONOutput AWS API Documentation # class JSONOutput < Struct.new( :record_delimiter) SENSITIVE = [] include Aws::Structure end # A container for specifying the configuration for Lambda notifications. # # @!attribute [rw] id # An optional unique identifier for configurations in a notification # configuration. If you don't provide one, Amazon S3 will assign an # ID. # @return [String] # # @!attribute [rw] lambda_function_arn # The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 # invokes when the specified event type occurs. # @return [String] # # @!attribute [rw] events # The Amazon S3 bucket event for which to invoke the Lambda function. # For more information, see [Supported Event Types][1] in the *Amazon # S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html # @return [Array] # # @!attribute [rw] filter # Specifies object key name filtering rules. For information about key # name filtering, see [Configuring event notifications using object # key name filtering][1] in the *Amazon S3 User Guide*. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html # @return [Types::NotificationConfigurationFilter] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration AWS API Documentation # class LambdaFunctionConfiguration < Struct.new( :id, :lambda_function_arn, :events, :filter) SENSITIVE = [] include Aws::Structure end # Container for lifecycle rules. You can add as many as 1,000 rules. # # For more information, see [Managing your storage lifecycle][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html # # @!attribute [rw] rules # Specifies lifecycle configuration rules for an Amazon S3 bucket. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration AWS API Documentation # class LifecycleConfiguration < Struct.new( :rules) SENSITIVE = [] include Aws::Structure end # Container for the expiration for the lifecycle of the object. # # For more information, see [Managing your storage lifecycle][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html # # @!attribute [rw] date # Indicates at what date the object is to be moved or deleted. The # date value must conform to the ISO 8601 format. The time is always # midnight UTC. # @return [Time] # # @!attribute [rw] days # Indicates the lifetime, in days, of the objects that are subject to # the rule. The value must be a non-zero positive integer. # @return [Integer] # # @!attribute [rw] expired_object_delete_marker # Indicates whether Amazon S3 will remove a delete marker with no # noncurrent versions. If set to true, the delete marker will be # expired; if set to false, the policy takes no action. This cannot be # specified with Days or Date in a Lifecycle Expiration Policy. # @return [Boolean] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration AWS API Documentation # class LifecycleExpiration < Struct.new( :date, :days, :expired_object_delete_marker) SENSITIVE = [] include Aws::Structure end # A lifecycle rule for individual objects in an Amazon S3 bucket. # # For more information, see [Managing your storage lifecycle][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html # # @!attribute [rw] expiration # Specifies the expiration for the lifecycle of the object in the form # of a date, days, and whether the object has a delete marker. # @return [Types::LifecycleExpiration] # # @!attribute [rw] id # Unique identifier for the rule. The value cannot be longer than 255 # characters. # @return [String] # # @!attribute [rw] prefix # Prefix identifying one or more objects to which the rule applies. # This is no longer used; use `Filter` instead. # # Replacement must be made for object keys containing special # characters (such as carriage returns) when using XML requests. For # more information, see [ XML related object key constraints][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints # @return [String] # # @!attribute [rw] filter # The `Filter` is used to identify objects that a Lifecycle Rule # applies to. A `Filter` must have exactly one of `Prefix`, `Tag`, or # `And` specified. `Filter` is required if the `LifecycleRule` does # not contain a `Prefix` element.
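  #
  # A sketch of a complete rule set passed to
  # `put_bucket_lifecycle_configuration` (hypothetical bucket and
  # prefix), combining the expiration, filter, and incomplete-upload
  # structures described here:
  #
  #     client.put_bucket_lifecycle_configuration(
  #       bucket: 'amzn-s3-demo-bucket',
  #       lifecycle_configuration: {
  #         rules: [{
  #           id: 'ExpireTempAfter30Days',
  #           status: 'Enabled',
  #           filter: { prefix: 'tmp/' },
  #           expiration: { days: 30 },
  #           abort_incomplete_multipart_upload: { days_after_initiation: 7 }
  #         }]
  #       }
  #     )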
# @return [Types::LifecycleRuleFilter] # # @!attribute [rw] status # If 'Enabled', the rule is currently being applied. If # 'Disabled', the rule is not currently being applied. # @return [String] # # @!attribute [rw] transitions # Specifies when an Amazon S3 object transitions to a specified # storage class. # @return [Array] # # @!attribute [rw] noncurrent_version_transitions # Specifies the transition rule for the lifecycle rule that describes # when noncurrent objects transition to a specific storage class. If # your bucket is versioning-enabled (or versioning is suspended), you # can set this action to request that Amazon S3 transition noncurrent # object versions to a specific storage class at a set period in the # object's lifetime. # @return [Array] # # @!attribute [rw] noncurrent_version_expiration # Specifies when noncurrent object versions expire. Upon expiration, # Amazon S3 permanently deletes the noncurrent object versions. You # set this lifecycle configuration action on a bucket that has # versioning enabled (or suspended) to request that Amazon S3 delete # noncurrent object versions at a specific period in the object's # lifetime. # @return [Types::NoncurrentVersionExpiration] # # @!attribute [rw] abort_incomplete_multipart_upload # Specifies the days since the initiation of an incomplete multipart # upload that Amazon S3 will wait before permanently removing all # parts of the upload. For more information, see [ Aborting Incomplete # Multipart Uploads Using a Bucket Lifecycle Configuration][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config # @return [Types::AbortIncompleteMultipartUpload] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule AWS API Documentation # class LifecycleRule < Struct.new( :expiration, :id, :prefix, :filter, :status, :transitions, :noncurrent_version_transitions, :noncurrent_version_expiration, :abort_incomplete_multipart_upload) SENSITIVE = [] include Aws::Structure end # This is used in a Lifecycle Rule Filter to apply a logical AND to two # or more predicates. The Lifecycle Rule will apply to any object # matching all of the predicates configured inside the And operator. # # @!attribute [rw] prefix # Prefix identifying one or more objects to which the rule applies. # @return [String] # # @!attribute [rw] tags # All of these tags must exist in the object's tag set in order for # the rule to apply. # @return [Array] # # @!attribute [rw] object_size_greater_than # Minimum object size to which the rule applies. # @return [Integer] # # @!attribute [rw] object_size_less_than # Maximum object size to which the rule applies. # @return [Integer] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator AWS API Documentation # class LifecycleRuleAndOperator < Struct.new( :prefix, :tags, :object_size_greater_than, :object_size_less_than) SENSITIVE = [] include Aws::Structure end # The `Filter` is used to identify objects that a Lifecycle Rule applies # to. A `Filter` must have exactly one of `Prefix`, `Tag`, or `And` # specified. # # @!attribute [rw] prefix # Prefix identifying one or more objects to which the rule applies. # # Replacement must be made for object keys containing special # characters (such as carriage returns) when using XML requests. For # more information, see [ XML related object key constraints][1]. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints # @return [String] # # @!attribute [rw] tag # This tag must exist in the object's tag set in order for the rule # to apply. # @return [Types::Tag] # # @!attribute [rw] object_size_greater_than # Minimum object size to which the rule applies. # @return [Integer] # # @!attribute [rw] object_size_less_than # Maximum object size to which the rule applies. # @return [Integer] # # @!attribute [rw] and # This is used in a Lifecycle Rule Filter to apply a logical AND to # two or more predicates. The Lifecycle Rule will apply to any object # matching all of the predicates configured inside the And operator. # @return [Types::LifecycleRuleAndOperator] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter AWS API Documentation # class LifecycleRuleFilter < Struct.new( :prefix, :tag, :object_size_greater_than, :object_size_less_than, :and) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] is_truncated # Indicates whether the returned list of analytics configurations is # complete. A value of true indicates that the list is not complete # and the NextContinuationToken will be provided for a subsequent # request. # @return [Boolean] # # @!attribute [rw] continuation_token # The marker that is used as a starting point for this analytics # configuration list response. This value is present if it was sent in # the request. # @return [String] # # @!attribute [rw] next_continuation_token # `NextContinuationToken` is sent when `isTruncated` is true, which # indicates that there are more analytics configurations to list. The # next request must include this `NextContinuationToken`. The token is # obfuscated and is not a usable value. # @return [String] # # @!attribute [rw] analytics_configuration_list # The list of analytics configurations for a bucket. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput AWS API Documentation # class ListBucketAnalyticsConfigurationsOutput < Struct.new( :is_truncated, :continuation_token, :next_continuation_token, :analytics_configuration_list) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket from which analytics configurations are # retrieved. # @return [String] # # @!attribute [rw] continuation_token # The `ContinuationToken` that represents a placeholder from where # this request should begin. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest AWS API Documentation # class ListBucketAnalyticsConfigurationsRequest < Struct.new( :bucket, :continuation_token, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] is_truncated # Indicates whether the returned list of analytics configurations is # complete. A value of `true` indicates that the list is not complete # and the `NextContinuationToken` will be provided for a subsequent # request. # @return [Boolean] # # @!attribute [rw] continuation_token # The `ContinuationToken` that represents a placeholder from where # this request should begin. 
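  #
  # The truncation flags above drive a simple pagination loop. A sketch
  # (hypothetical bucket name; assumes a configured `Aws::S3::Client`):
  #
  #     resp = client.list_bucket_analytics_configurations(bucket: 'amzn-s3-demo-bucket')
  #     configs = resp.analytics_configuration_list.to_a
  #     while resp.is_truncated
  #       resp = client.list_bucket_analytics_configurations(
  #         bucket: 'amzn-s3-demo-bucket',
  #         continuation_token: resp.next_continuation_token
  #       )
  #       configs.concat(resp.analytics_configuration_list)
  #     end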
# @return [String] # # @!attribute [rw] next_continuation_token # The marker used to continue this inventory configuration listing. # Use the `NextContinuationToken` from this response to continue the # listing in a subsequent request. The continuation token is an opaque # value that Amazon S3 understands. # @return [String] # # @!attribute [rw] intelligent_tiering_configuration_list # The list of S3 Intelligent-Tiering configurations for a bucket. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurationsOutput AWS API Documentation # class ListBucketIntelligentTieringConfigurationsOutput < Struct.new( :is_truncated, :continuation_token, :next_continuation_token, :intelligent_tiering_configuration_list) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the Amazon S3 bucket whose configuration you want to # modify or retrieve. # @return [String] # # @!attribute [rw] continuation_token # The `ContinuationToken` that represents a placeholder from where # this request should begin. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurationsRequest AWS API Documentation # class ListBucketIntelligentTieringConfigurationsRequest < Struct.new( :bucket, :continuation_token) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] continuation_token # If sent in the request, the marker that is used as a starting point # for this inventory configuration list response. # @return [String] # # @!attribute [rw] inventory_configuration_list # The list of inventory configurations for a bucket. # @return [Array] # # @!attribute [rw] is_truncated # Tells whether the returned list of inventory configurations is # complete. A value of true indicates that the list is not complete # and the NextContinuationToken is provided for a subsequent request. # @return [Boolean] # # @!attribute [rw] next_continuation_token # The marker used to continue this inventory configuration listing. # Use the `NextContinuationToken` from this response to continue the # listing in a subsequent request. The continuation token is an opaque # value that Amazon S3 understands. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput AWS API Documentation # class ListBucketInventoryConfigurationsOutput < Struct.new( :continuation_token, :inventory_configuration_list, :is_truncated, :next_continuation_token) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket containing the inventory configurations to # retrieve. # @return [String] # # @!attribute [rw] continuation_token # The marker used to continue an inventory configuration listing that # has been truncated. Use the `NextContinuationToken` from a # previously truncated list response to continue the listing. The # continuation token is an opaque value that Amazon S3 understands. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). 
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest AWS API Documentation # class ListBucketInventoryConfigurationsRequest < Struct.new( :bucket, :continuation_token, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] is_truncated # Indicates whether the returned list of metrics configurations is # complete. A value of true indicates that the list is not complete # and the NextContinuationToken will be provided for a subsequent # request. # @return [Boolean] # # @!attribute [rw] continuation_token # The marker that is used as a starting point for this metrics # configuration list response. This value is present if it was sent in # the request. # @return [String] # # @!attribute [rw] next_continuation_token # The marker used to continue a metrics configuration listing that has # been truncated. Use the `NextContinuationToken` from a previously # truncated list response to continue the listing. The continuation # token is an opaque value that Amazon S3 understands. # @return [String] # # @!attribute [rw] metrics_configuration_list # The list of metrics configurations for a bucket. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput AWS API Documentation # class ListBucketMetricsConfigurationsOutput < Struct.new( :is_truncated, :continuation_token, :next_continuation_token, :metrics_configuration_list) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket containing the metrics configurations to # retrieve. # @return [String] # # @!attribute [rw] continuation_token # The marker that is used to continue a metrics configuration listing # that has been truncated. Use the `NextContinuationToken` from a # previously truncated list response to continue the listing. The # continuation token is an opaque value that Amazon S3 understands. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest AWS API Documentation # class ListBucketMetricsConfigurationsRequest < Struct.new( :bucket, :continuation_token, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] buckets # The list of buckets owned by the requester. # @return [Array] # # @!attribute [rw] owner # The owner of the buckets listed. # @return [Types::Owner] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput AWS API Documentation # class ListBucketsOutput < Struct.new( :buckets, :owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] buckets # The list of buckets owned by the requester. # @return [Array] # # @!attribute [rw] continuation_token # If `ContinuationToken` was sent with the request, it is included in # the response. You can use the returned `ContinuationToken` for # pagination of the list response. 
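  #
  # A sketch of fetching one page of `list_directory_buckets` with these
  # fields (hypothetical page size):
  #
  #     resp = client.list_directory_buckets(max_directory_buckets: 100)
  #     resp.buckets.map(&:name) #=> ["bucket-base-name--usw2-az1--x-s3", ...]
  #     # When resp.continuation_token is non-nil, pass it back via
  #     # `continuation_token:` to fetch the next page.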
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListDirectoryBucketsOutput AWS API Documentation # class ListDirectoryBucketsOutput < Struct.new( :buckets, :continuation_token) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] continuation_token # `ContinuationToken` indicates to Amazon S3 that the list is being # continued on this bucket with a token. `ContinuationToken` is # obfuscated and is not a real key. You can use this # `ContinuationToken` for pagination of the list results. # @return [String] # # @!attribute [rw] max_directory_buckets # Maximum number of buckets to be returned in response. When the # number is more than the count of buckets that are owned by an Amazon # Web Services account, return all the buckets in response. # @return [Integer] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListDirectoryBucketsRequest AWS API Documentation # class ListDirectoryBucketsRequest < Struct.new( :continuation_token, :max_directory_buckets) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket to which the multipart upload was initiated. # Does not return the access point ARN or access point alias if used. # @return [String] # # @!attribute [rw] key_marker # The key at or after which the listing began. # @return [String] # # @!attribute [rw] upload_id_marker # Upload ID after which listing began. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] next_key_marker # When a list is truncated, this element specifies the value that # should be used for the key-marker request parameter in a subsequent # request. # @return [String] # # @!attribute [rw] prefix # When a prefix is provided in the request, this field contains the # specified prefix. The result contains only keys starting with the # specified prefix. # # **Directory buckets** - For directory buckets, only prefixes that # end in a delimiter (`/`) are supported. # # # @return [String] # # @!attribute [rw] delimiter # Contains the delimiter you specified in the request. If you don't # specify a delimiter in your request, this element is absent from the # response. # # **Directory buckets** - For directory buckets, `/` is the only # supported delimiter. # # # @return [String] # # @!attribute [rw] next_upload_id_marker # When a list is truncated, this element specifies the value that # should be used for the `upload-id-marker` request parameter in a # subsequent request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] max_uploads # Maximum number of multipart uploads that could have been included in # the response. # @return [Integer] # # @!attribute [rw] is_truncated # Indicates whether the returned list of multipart uploads is # truncated. A value of true indicates that the list was truncated. # The list can be truncated if the number of multipart uploads exceeds # the limit allowed or specified by max uploads. # @return [Boolean] # # @!attribute [rw] uploads # Container for elements related to a particular multipart upload. A # response can contain zero or more `Upload` elements. # @return [Array] # # @!attribute [rw] common_prefixes # If you specify a delimiter in the request, then the result returns # each distinct key prefix containing the delimiter in a # `CommonPrefixes` element. The distinct key prefixes are returned in # the `Prefix` child element. 
# # **Directory buckets** - For directory buckets, only prefixes that # end in a delimiter (`/`) are supported. # # # @return [Array] # # @!attribute [rw] encoding_type # Encoding type used by Amazon S3 to encode object keys in the # response. # # If you specify the `encoding-type` request parameter, Amazon S3 # includes this element in the response, and returns encoded key name # values in the following response elements: # # `Delimiter`, `KeyMarker`, `Prefix`, `NextKeyMarker`, `Key`. # @return [String] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput AWS API Documentation # class ListMultipartUploadsOutput < Struct.new( :bucket, :key_marker, :upload_id_marker, :next_key_marker, :prefix, :delimiter, :next_upload_id_marker, :max_uploads, :is_truncated, :uploads, :common_prefixes, :encoding_type, :request_charged) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket to which the multipart upload was initiated. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] delimiter # Character you use to group keys. 
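  #
  # A sketch of listing in-progress uploads with a delimiter and
  # following the pagination markers described above (hypothetical bucket
  # and prefix):
  #
  #     resp = client.list_multipart_uploads(bucket: 'amzn-s3-demo-bucket',
  #                                          prefix: 'photos/', delimiter: '/')
  #     resp.uploads.map { |u| [u.key, u.upload_id] }
  #     if resp.is_truncated
  #       resp = client.list_multipart_uploads(
  #         bucket: 'amzn-s3-demo-bucket',
  #         prefix: 'photos/', delimiter: '/',
  #         key_marker: resp.next_key_marker,
  #         upload_id_marker: resp.next_upload_id_marker
  #       )
  #     end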
# # All keys that contain the same string between the prefix, if # specified, and the first occurrence of the delimiter after the # prefix are grouped under a single result element, `CommonPrefixes`. # If you don't specify the prefix parameter, then the substring # starts at the beginning of the key. The keys that are grouped under # `CommonPrefixes` result element are not returned elsewhere in the # response. # # **Directory buckets** - For directory buckets, `/` is the only # supported delimiter. # # # @return [String] # # @!attribute [rw] encoding_type # Requests Amazon S3 to encode the object keys in the response and # specifies the encoding method to use. An object key can contain any # Unicode character; however, the XML 1.0 parser cannot parse some # characters, such as characters with an ASCII value from 0 to 10. For # characters that are not supported in XML 1.0, you can add this # parameter to request that Amazon S3 encode the keys in the response. # @return [String] # # @!attribute [rw] key_marker # Specifies the multipart upload after which listing should begin. # # * **General purpose buckets** - For general purpose buckets, # `key-marker` is an object key. Together with `upload-id-marker`, # this parameter specifies the multipart upload after which listing # should begin. # # If `upload-id-marker` is not specified, only the keys # lexicographically greater than the specified `key-marker` will be # included in the list. # # If `upload-id-marker` is specified, any multipart uploads for a # key equal to the `key-marker` might also be included, provided # those multipart uploads have upload IDs lexicographically greater # than the specified `upload-id-marker`. # # * **Directory buckets** - For directory buckets, `key-marker` is # obfuscated and isn't a real object key. The `upload-id-marker` # parameter isn't supported by directory buckets. To list the # additional multipart uploads, you only need to set the value of # `key-marker` to the `NextKeyMarker` value from the previous # response. # # In the `ListMultipartUploads` response, the multipart uploads # aren't sorted lexicographically based on the object keys. # # # @return [String] # # @!attribute [rw] max_uploads # Sets the maximum number of multipart uploads, from 1 to 1,000, to # return in the response body. 1,000 is the maximum number of uploads # that can be returned in a response. # @return [Integer] # # @!attribute [rw] prefix # Lists in-progress uploads only for those keys that begin with the # specified prefix. You can use prefixes to separate a bucket into # different grouping of keys. (You can think of using `prefix` to make # groups in the same way that you'd use a folder in a file system.) # # **Directory buckets** - For directory buckets, only prefixes that # end in a delimiter (`/`) are supported. # # # @return [String] # # @!attribute [rw] upload_id_marker # Together with key-marker, specifies the multipart upload after which # listing should begin. If key-marker is not specified, the # upload-id-marker parameter is ignored. Otherwise, any multipart # uploads for a key equal to the key-marker might be included in the # list only if they have an upload ID lexicographically greater than # the specified `upload-id-marker`. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. 
If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest AWS API Documentation # class ListMultipartUploadsRequest < Struct.new( :bucket, :delimiter, :encoding_type, :key_marker, :max_uploads, :prefix, :upload_id_marker, :expected_bucket_owner, :request_payer) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] is_truncated # A flag that indicates whether Amazon S3 returned all of the results # that satisfied the search criteria. If your results were truncated, # you can make a follow-up paginated request by using the # `NextKeyMarker` and `NextVersionIdMarker` response parameters as a # starting place in another request to return the rest of the results. # @return [Boolean] # # @!attribute [rw] key_marker # Marks the last key returned in a truncated response. # @return [String] # # @!attribute [rw] version_id_marker # Marks the last version of the key returned in a truncated response. # @return [String] # # @!attribute [rw] next_key_marker # When the number of responses exceeds the value of `MaxKeys`, # `NextKeyMarker` specifies the first key not returned that satisfies # the search criteria. Use this value for the key-marker request # parameter in a subsequent request. # @return [String] # # @!attribute [rw] next_version_id_marker # When the number of responses exceeds the value of `MaxKeys`, # `NextVersionIdMarker` specifies the first object version not # returned that satisfies the search criteria. Use this value for the # `version-id-marker` request parameter in a subsequent request. # @return [String] # # @!attribute [rw] versions # Container for version information. # @return [Array] # # @!attribute [rw] delete_markers # Container for an object that is a delete marker. # @return [Array] # # @!attribute [rw] name # The bucket name. # @return [String] # # @!attribute [rw] prefix # Selects objects that start with the value supplied by this # parameter. # @return [String] # # @!attribute [rw] delimiter # The delimiter grouping the included keys. A delimiter is a character # that you specify to group keys. All keys that contain the same # string between the prefix and the first occurrence of the delimiter # are grouped under a single result element in `CommonPrefixes`. These # groups are counted as one result against the `max-keys` limitation. # These keys are not returned elsewhere in the response. # @return [String] # # @!attribute [rw] max_keys # Specifies the maximum number of objects to return. # @return [Integer] # # @!attribute [rw] common_prefixes # All of the keys rolled up into a common prefix count as a single # return when calculating the number of returns. 
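  #
  # A sketch of paging through versions with the markers described above
  # (hypothetical bucket and prefix):
  #
  #     resp = client.list_object_versions(bucket: 'amzn-s3-demo-bucket',
  #                                        prefix: 'TaxDocs/')
  #     resp.versions.map { |v| [v.key, v.version_id, v.is_latest] }
  #     resp.delete_markers.map(&:key)
  #     if resp.is_truncated
  #       resp = client.list_object_versions(
  #         bucket: 'amzn-s3-demo-bucket',
  #         prefix: 'TaxDocs/',
  #         key_marker: resp.next_key_marker,
  #         version_id_marker: resp.next_version_id_marker
  #       )
  #     end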
# @return [Array] # # @!attribute [rw] encoding_type # Encoding type used by Amazon S3 to encode object key names in the # XML response. # # If you specify the `encoding-type` request parameter, Amazon S3 # includes this element in the response, and returns encoded key name # values in the following response elements: # # `KeyMarker, NextKeyMarker, Prefix, Key`, and `Delimiter`. # @return [String] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput AWS API Documentation # class ListObjectVersionsOutput < Struct.new( :is_truncated, :key_marker, :version_id_marker, :next_key_marker, :next_version_id_marker, :versions, :delete_markers, :name, :prefix, :delimiter, :max_keys, :common_prefixes, :encoding_type, :request_charged) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name that contains the objects. # @return [String] # # @!attribute [rw] delimiter # A delimiter is a character that you specify to group keys. All keys # that contain the same string between the `prefix` and the first # occurrence of the delimiter are grouped under a single result # element in `CommonPrefixes`. These groups are counted as one result # against the `max-keys` limitation. These keys are not returned # elsewhere in the response. # @return [String] # # @!attribute [rw] encoding_type # Requests Amazon S3 to encode the object keys in the response and # specifies the encoding method to use. An object key can contain any # Unicode character; however, the XML 1.0 parser cannot parse some # characters, such as characters with an ASCII value from 0 to 10. For # characters that are not supported in XML 1.0, you can add this # parameter to request that Amazon S3 encode the keys in the response. # @return [String] # # @!attribute [rw] key_marker # Specifies the key to start with when listing objects in a bucket. # @return [String] # # @!attribute [rw] max_keys # Sets the maximum number of keys returned in the response. By # default, the action returns up to 1,000 key names. The response # might contain fewer keys but will never contain more. If additional # keys satisfy the search criteria, but were not returned because # `max-keys` was exceeded, the response contains # `<isTruncated>true</isTruncated>`. To return the additional keys, # see `key-marker` and `version-id-marker`. # @return [Integer] # # @!attribute [rw] prefix # Use this parameter to select only those keys that begin with the # specified prefix. You can use prefixes to separate a bucket into # different groupings of keys. (You can think of using `prefix` to # make groups in the same way that you'd use a folder in a file # system.) You can use `prefix` with `delimiter` to roll up numerous # objects into a single result under `CommonPrefixes`. # @return [String] # # @!attribute [rw] version_id_marker # Specifies the object version you want to start listing from. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request.
Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] optional_object_attributes # Specifies the optional fields that you want returned in the # response. Fields that you do not specify are not returned. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest AWS API Documentation # class ListObjectVersionsRequest < Struct.new( :bucket, :delimiter, :encoding_type, :key_marker, :max_keys, :prefix, :version_id_marker, :expected_bucket_owner, :request_payer, :optional_object_attributes) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] is_truncated # A flag that indicates whether Amazon S3 returned all of the results # that satisfied the search criteria. # @return [Boolean] # # @!attribute [rw] marker # Indicates where in the bucket listing begins. Marker is included in # the response if it was sent with the request. # @return [String] # # @!attribute [rw] next_marker # When the response is truncated (the `IsTruncated` element value in # the response is `true`), you can use the key name in this field as # the `marker` parameter in the subsequent request to get the next set # of objects. Amazon S3 lists objects in alphabetical order. # # This element is returned only if you have the `delimiter` request # parameter specified. If the response does not include the # `NextMarker` element and it is truncated, you can use the value of # the last `Key` element in the response as the `marker` parameter in # the subsequent request to get the next set of object keys. # # # @return [String] # # @!attribute [rw] contents # Metadata about each object returned. # @return [Array] # # @!attribute [rw] name # The bucket name. # @return [String] # # @!attribute [rw] prefix # Keys that begin with the indicated prefix. # @return [String] # # @!attribute [rw] delimiter # Causes keys that contain the same string between the prefix and the # first occurrence of the delimiter to be rolled up into a single # result element in the `CommonPrefixes` collection. These rolled-up # keys are not returned elsewhere in the response. Each rolled-up # result counts as only one return against the `MaxKeys` value. # @return [String] # # @!attribute [rw] max_keys # The maximum number of keys returned in the response body. # @return [Integer] # # @!attribute [rw] common_prefixes # All of the keys (up to 1,000) rolled up in a common prefix count as # a single return when calculating the number of returns. # # A response can contain `CommonPrefixes` only if you specify a # delimiter. # # `CommonPrefixes` contains all (if there are any) keys between # `Prefix` and the next occurrence of the string specified by the # delimiter. # # `CommonPrefixes` lists keys that act like subdirectories in the # directory specified by `Prefix`. # # For example, if the prefix is `notes/` and the delimiter is a slash # (`/`), as in `notes/summer/july`, the common prefix is # `notes/summer/`. 
All of the keys that roll up into a common prefix # count as a single return when calculating the number of returns. # @return [Array] # # @!attribute [rw] encoding_type # Encoding type used by Amazon S3 to encode object keys in the # response. # @return [String] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput AWS API Documentation # class ListObjectsOutput < Struct.new( :is_truncated, :marker, :next_marker, :contents, :name, :prefix, :delimiter, :max_keys, :common_prefixes, :encoding_type, :request_charged) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket containing the objects. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] delimiter # A delimiter is a character that you use to group keys. # @return [String] # # @!attribute [rw] encoding_type # Requests Amazon S3 to encode the object keys in the response and # specifies the encoding method to use. An object key can contain any # Unicode character; however, the XML 1.0 parser cannot parse some # characters, such as characters with an ASCII value from 0 to 10. For # characters that are not supported in XML 1.0, you can add this # parameter to request that Amazon S3 encode the keys in the response. 
# @return [String] # # @!attribute [rw] marker # Marker is where you want Amazon S3 to start listing from. Amazon S3 # starts listing after this specified key. Marker can be any key in # the bucket. # @return [String] # # @!attribute [rw] max_keys # Sets the maximum number of keys returned in the response. By # default, the action returns up to 1,000 key names. The response # might contain fewer keys but will never contain more. # @return [Integer] # # @!attribute [rw] prefix # Limits the response to keys that begin with the specified prefix. # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for # the list objects request. Bucket owners need not specify this # parameter in their requests. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] optional_object_attributes # Specifies the optional fields that you want returned in the # response. Fields that you do not specify are not returned. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest AWS API Documentation # class ListObjectsRequest < Struct.new( :bucket, :delimiter, :encoding_type, :marker, :max_keys, :prefix, :request_payer, :expected_bucket_owner, :optional_object_attributes) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] is_truncated # Set to `false` if all of the results were returned. Set to `true` if # more keys are available to return. If the number of results exceeds # that specified by `MaxKeys`, all of the results might not be # returned. # @return [Boolean] # # @!attribute [rw] contents # Metadata about each object returned. # @return [Array] # # @!attribute [rw] name # The bucket name. # @return [String] # # @!attribute [rw] prefix # Keys that begin with the indicated prefix. # # **Directory buckets** - For directory buckets, only prefixes that # end in a delimiter (`/`) are supported. # # # @return [String] # # @!attribute [rw] delimiter # Causes keys that contain the same string between the `prefix` and # the first occurrence of the delimiter to be rolled up into a single # result element in the `CommonPrefixes` collection. These rolled-up # keys are not returned elsewhere in the response. Each rolled-up # result counts as only one return against the `MaxKeys` value. # # **Directory buckets** - For directory buckets, `/` is the only # supported delimiter. # # # @return [String] # # @!attribute [rw] max_keys # Sets the maximum number of keys returned in the response. By # default, the action returns up to 1,000 key names. The response # might contain fewer keys but will never contain more. # @return [Integer] # # @!attribute [rw] common_prefixes # All of the keys (up to 1,000) that share the same prefix are grouped # together. When counting the total numbers of returns by this API # operation, this group of keys is considered as one item. # # A response can contain `CommonPrefixes` only if you specify a # delimiter. # # `CommonPrefixes` contains all (if there are any) keys between # `Prefix` and the next occurrence of the string specified by a # delimiter. # # `CommonPrefixes` lists keys that act like subdirectories in the # directory specified by `Prefix`.
# # For example, if the prefix is `notes/` and the delimiter is a slash # (`/`) as in `notes/summer/july`, the common prefix is # `notes/summer/`. All of the keys that roll up into a common prefix # count as a single return when calculating the number of returns. # # * **Directory buckets** - For directory buckets, only prefixes that # end in a delimiter (`/`) are supported. # # * Directory buckets - When you query `ListObjectsV2` with a # delimiter during in-progress multipart uploads, the # `CommonPrefixes` response parameter contains the prefixes that are # associated with the in-progress multipart uploads. For more # information about multipart uploads, see [Multipart Upload # Overview][1] in the *Amazon S3 User Guide*. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html # @return [Array] # # @!attribute [rw] encoding_type # Encoding type used by Amazon S3 to encode object key names in the # XML response. # # If you specify the `encoding-type` request parameter, Amazon S3 # includes this element in the response, and returns encoded key name # values in the following response elements: # # `Delimiter, Prefix, Key,` and `StartAfter`. # @return [String] # # @!attribute [rw] key_count # `KeyCount` is the number of keys returned with this request. # `KeyCount` will always be less than or equal to the `MaxKeys` field. # For example, if you ask for 50 keys, your result will include 50 # keys or fewer. # @return [Integer] # # @!attribute [rw] continuation_token # If `ContinuationToken` was sent with the request, it is included in # the response. You can use the returned `ContinuationToken` for # pagination of the list results. # @return [String] # # @!attribute [rw] next_continuation_token # `NextContinuationToken` is sent when `IsTruncated` is `true`, which # means there are more keys in the bucket that can be listed. The next # list request to Amazon S3 can be continued with this # `NextContinuationToken`. `NextContinuationToken` is obfuscated and # is not a real key. # @return [String] # # @!attribute [rw] start_after # If StartAfter was sent with the request, it is included in the # response. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output AWS API Documentation # class ListObjectsV2Output < Struct.new( :is_truncated, :contents, :name, :prefix, :delimiter, :max_keys, :common_prefixes, :encoding_type, :key_count, :continuation_token, :next_continuation_token, :start_after, :request_charged) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*.
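#
# An illustrative sketch (not part of the generated docs) of continuing a
# `ListObjectsV2` listing with the `NextContinuationToken` described
# above; the bucket and prefix names are placeholders:
#
#     require 'aws-sdk-s3'
#
#     client = Aws::S3::Client.new(region: 'us-east-1')
#     params = { bucket: 'example-bucket', prefix: 'logs/' }
#     loop do
#       resp = client.list_objects_v2(params)
#       resp.contents.each { |obj| puts "#{obj.key} (#{obj.size} bytes)" }
#       break unless resp.is_truncated
#       # Continue the listing from the obfuscated token.
#       params[:continuation_token] = resp.next_continuation_token
#     end
#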
# # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] delimiter # A delimiter is a character that you use to group keys. # # * **Directory buckets** - For directory buckets, `/` is the only # supported delimiter. # # * Directory buckets - When you query `ListObjectsV2` with a # delimiter during in-progress multipart uploads, the # `CommonPrefixes` response parameter contains the prefixes that are # associated with the in-progress multipart uploads. For more # information about multipart uploads, see [Multipart Upload # Overview][1] in the *Amazon S3 User Guide*. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html # @return [String] # # @!attribute [rw] encoding_type # Encoding type used by Amazon S3 to encode object keys in the # response. # @return [String] # # @!attribute [rw] max_keys # Sets the maximum number of keys returned in the response. By # default, the action returns up to 1,000 key names. The response # might contain fewer keys but will never contain more. # @return [Integer] # # @!attribute [rw] prefix # Limits the response to keys that begin with the specified prefix. # # **Directory buckets** - For directory buckets, only prefixes that # end in a delimiter (`/`) are supported. # # # @return [String] # # @!attribute [rw] continuation_token # `ContinuationToken` indicates to Amazon S3 that the list is being # continued on this bucket with a token. `ContinuationToken` is # obfuscated and is not a real key. You can use this # `ContinuationToken` for pagination of the list results. # @return [String] # # @!attribute [rw] fetch_owner # The owner field is not present in `ListObjectsV2` by default. If you # want to return the owner field with each key in the result, then set # the `FetchOwner` field to `true`. # # **Directory buckets** - For directory buckets, the bucket owner is # returned as the object owner for all objects. # # # @return [Boolean] # # @!attribute [rw] start_after # StartAfter is where you want Amazon S3 to start listing from. 
Amazon # S3 starts listing after this specified key. StartAfter can be any # key in the bucket. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for # the list objects request in V2 style. Bucket owners need not specify # this parameter in their requests. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] optional_object_attributes # Specifies the optional fields that you want returned in the # response. Fields that you do not specify are not returned. # # This functionality is not supported for directory buckets. # # # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request AWS API Documentation # class ListObjectsV2Request < Struct.new( :bucket, :delimiter, :encoding_type, :max_keys, :prefix, :continuation_token, :fetch_owner, :start_after, :request_payer, :expected_bucket_owner, :optional_object_attributes) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] abort_date # If the bucket has a lifecycle rule configured with an action to # abort incomplete multipart uploads and the prefix in the lifecycle # rule matches the object name in the request, then the response # includes this header indicating when the initiated multipart upload # will become eligible for abort operation. For more information, see # [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle # Configuration][1]. # # The response will also include the `x-amz-abort-rule-id` header that # will provide the ID of the lifecycle configuration rule that defines # this action. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config # @return [Time] # # @!attribute [rw] abort_rule_id # This header is returned along with the `x-amz-abort-date` header. It # identifies the applicable lifecycle configuration rule that defines # the action to abort incomplete multipart uploads. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] bucket # The name of the bucket to which the multipart upload was initiated. # Does not return the access point ARN or access point alias if used. # @return [String] # # @!attribute [rw] key # Object key for which the multipart upload was initiated. # @return [String] # # @!attribute [rw] upload_id # Upload ID identifying the multipart upload whose parts are being # listed. # @return [String] # # @!attribute [rw] part_number_marker # When a list is truncated, this element specifies the last part in # the list, as well as the value to use for the part-number-marker # request parameter in a subsequent request. # @return [Integer] # # @!attribute [rw] next_part_number_marker # When a list is truncated, this element specifies the last part in # the list, as well as the value to use for the `part-number-marker` # request parameter in a subsequent request. # @return [Integer] # # @!attribute [rw] max_parts # Maximum number of parts that were allowed in the response.
# @return [Integer] # # @!attribute [rw] is_truncated # Indicates whether the returned list of parts is truncated. A true # value indicates that the list was truncated. A list can be truncated # if the number of parts exceeds the limit returned in the MaxParts # element. # @return [Boolean] # # @!attribute [rw] parts # Container for elements related to a particular part. A response can # contain zero or more `Part` elements. # @return [Array] # # @!attribute [rw] initiator # Container element that identifies who initiated the multipart # upload. If the initiator is an Amazon Web Services account, this # element provides the same information as the `Owner` element. If the # initiator is an IAM User, this element provides the user ARN and # display name. # @return [Types::Initiator] # # @!attribute [rw] owner # Container element that identifies the object owner, after the object # is created. If multipart upload is initiated by an IAM user, this # element provides the parent account ID and display name. # # **Directory buckets** - The bucket owner is returned as the object # owner for all the parts. # # # @return [Types::Owner] # # @!attribute [rw] storage_class # The class of storage used to store the uploaded object. # # **Directory buckets** - Only the S3 Express One Zone storage class # is supported by directory buckets to store objects. # # # @return [String] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] checksum_algorithm # The algorithm that was used to create a checksum of the object. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput AWS API Documentation # class ListPartsOutput < Struct.new( :abort_date, :abort_rule_id, :bucket, :key, :upload_id, :part_number_marker, :next_part_number_marker, :max_parts, :is_truncated, :parts, :initiator, :owner, :storage_class, :request_charged, :checksum_algorithm) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket to which the parts are being uploaded. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. 
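#
# A minimal sketch (illustrative only) of listing the parts of an
# in-progress multipart upload with the markers described above; the
# bucket, key, and upload ID are placeholders:
#
#     require 'aws-sdk-s3'
#
#     client = Aws::S3::Client.new(region: 'us-east-1')
#     params = {
#       bucket: 'example-bucket',
#       key: 'large-object.bin',
#       upload_id: 'EXAMPLE_UPLOAD_ID' # returned by create_multipart_upload
#     }
#     loop do
#       resp = client.list_parts(params)
#       resp.parts.each { |part| puts "part #{part.part_number}: #{part.etag}" }
#       break unless resp.is_truncated
#       params[:part_number_marker] = resp.next_part_number_marker
#     end
#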
# # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] key # Object key for which the multipart upload was initiated. # @return [String] # # @!attribute [rw] max_parts # Sets the maximum number of parts to return. # @return [Integer] # # @!attribute [rw] part_number_marker # Specifies the part after which listing should begin. Only parts with # higher part numbers will be listed. # @return [Integer] # # @!attribute [rw] upload_id # Upload ID identifying the multipart upload whose parts are being # listed. # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] sse_customer_algorithm # The server-side encryption (SSE) algorithm used to encrypt the # object. This parameter is needed only when the object was created # using a checksum algorithm. For more information, see [Protecting # data using SSE-C keys][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @return [String] # # @!attribute [rw] sse_customer_key # The server-side encryption (SSE) customer managed key. This # parameter is needed only when the object was created using a # checksum algorithm. For more information, see [Protecting data using # SSE-C keys][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @return [String] # # @!attribute [rw] sse_customer_key_md5 # The MD5 server-side encryption (SSE) customer managed key. This # parameter is needed only when the object was created using a # checksum algorithm. For more information, see [Protecting data using # SSE-C keys][1] in the *Amazon S3 User Guide*. 
# # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest AWS API Documentation # class ListPartsRequest < Struct.new( :bucket, :key, :max_parts, :part_number_marker, :upload_id, :request_payer, :expected_bucket_owner, :sse_customer_algorithm, :sse_customer_key, :sse_customer_key_md5) SENSITIVE = [:sse_customer_key] include Aws::Structure end # Specifies the location where the bucket will be created. # # For directory buckets, the location type is Availability Zone. For # more information about directory buckets, see [Directory buckets][1] # in the *Amazon S3 User Guide*. # # This functionality is only supported by directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html # # @!attribute [rw] type # The type of location where the bucket will be created. # @return [String] # # @!attribute [rw] name # The name of the location where the bucket will be created. # # For directory buckets, the AZ ID of the Availability Zone where the # bucket will be created. An example AZ ID value is `usw2-az2`. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LocationInfo AWS API Documentation # class LocationInfo < Struct.new( :type, :name) SENSITIVE = [] include Aws::Structure end # Describes where logs are stored and the prefix that Amazon S3 assigns # to all log object keys for a bucket. For more information, see [PUT # Bucket logging][1] in the *Amazon S3 API Reference*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html # # @!attribute [rw] target_bucket # Specifies the bucket where you want Amazon S3 to store server access # logs. You can have your logs delivered to any bucket that you own, # including the same bucket that is being logged. You can also # configure multiple buckets to deliver their logs to the same target # bucket. In this case, you should choose a different `TargetPrefix` # for each source bucket so that the delivered log files can be # distinguished by key. # @return [String] # # @!attribute [rw] target_grants # Container for granting information. # # Buckets that use the bucket owner enforced setting for Object # Ownership don't support target grants. For more information, see # [Permissions for server access log delivery][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general # @return [Array] # # @!attribute [rw] target_prefix # A prefix for all log object keys. If you store log files from # multiple Amazon S3 buckets in a single bucket, you can use a prefix # to distinguish which log files came from which bucket. # @return [String] # # @!attribute [rw] target_object_key_format # Amazon S3 key format for log objects. # @return [Types::TargetObjectKeyFormat] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled AWS API Documentation # class LoggingEnabled < Struct.new( :target_bucket, :target_grants, :target_prefix, :target_object_key_format) SENSITIVE = [] include Aws::Structure end # A metadata key-value pair to store with an object. # # @!attribute [rw] name # Name of the object. # @return [String] # # @!attribute [rw] value # Value of the object. 
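#
# An illustrative sketch (not from the generated docs) of applying the
# `LoggingEnabled` settings described above; both bucket names and the
# prefix are placeholders:
#
#     require 'aws-sdk-s3'
#
#     client = Aws::S3::Client.new(region: 'us-east-1')
#     client.put_bucket_logging(
#       bucket: 'example-source-bucket',
#       bucket_logging_status: {
#         logging_enabled: {
#           target_bucket: 'example-log-bucket',
#           # A per-source prefix keeps delivered log files distinguishable by key.
#           target_prefix: 'logs/example-source-bucket/'
#         }
#       }
#     )
#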
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetadataEntry AWS API Documentation # class MetadataEntry < Struct.new( :name, :value) SENSITIVE = [] include Aws::Structure end # A container specifying replication metrics-related settings enabling # replication metrics and events. # # @!attribute [rw] status # Specifies whether the replication metrics are enabled. # @return [String] # # @!attribute [rw] event_threshold # A container specifying the time threshold for emitting the # `s3:Replication:OperationMissedThreshold` event. # @return [Types::ReplicationTimeValue] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Metrics AWS API Documentation # class Metrics < Struct.new( :status, :event_threshold) SENSITIVE = [] include Aws::Structure end # A conjunction (logical AND) of predicates, which is used in evaluating # a metrics filter. The operator must have at least two predicates, and # an object must match all of the predicates in order for the filter to # apply. # # @!attribute [rw] prefix # The prefix used when evaluating an AND predicate. # @return [String] # # @!attribute [rw] tags # The list of tags used when evaluating an AND predicate. # @return [Array] # # @!attribute [rw] access_point_arn # The access point ARN used when evaluating an `AND` predicate. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator AWS API Documentation # class MetricsAndOperator < Struct.new( :prefix, :tags, :access_point_arn) SENSITIVE = [] include Aws::Structure end # Specifies a metrics configuration for the CloudWatch request metrics # (specified by the metrics configuration ID) from an Amazon S3 bucket. # If you're updating an existing metrics configuration, note that this # is a full replacement of the existing metrics configuration. If you # don't include the elements you want to keep, they are erased. For # more information, see [PutBucketMetricsConfiguration][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html # # @!attribute [rw] id # The ID used to identify the metrics configuration. The ID has a 64 # character limit and can only contain letters, numbers, periods, # dashes, and underscores. # @return [String] # # @!attribute [rw] filter # Specifies a metrics configuration filter. The metrics configuration # will only include objects that meet the filter's criteria. A filter # must be a prefix, an object tag, an access point ARN, or a # conjunction (MetricsAndOperator). # @return [Types::MetricsFilter] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration AWS API Documentation # class MetricsConfiguration < Struct.new( :id, :filter) SENSITIVE = [] include Aws::Structure end # Specifies a metrics configuration filter. The metrics configuration # only includes objects that meet the filter's criteria. A filter must # be a prefix, an object tag, an access point ARN, or a conjunction # (MetricsAndOperator). For more information, see # [PutBucketMetricsConfiguration][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html # # @!attribute [rw] prefix # The prefix used when evaluating a metrics filter. # @return [String] # # @!attribute [rw] tag # The tag used when evaluating a metrics filter. # @return [Types::Tag] # # @!attribute [rw] access_point_arn # The access point ARN used when evaluating a metrics filter. 
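#
# An illustrative sketch of a `MetricsConfiguration` whose filter is a
# `MetricsAndOperator` (a prefix plus one tag, satisfying the
# two-predicate minimum described above); the bucket, ID, and tag values
# are placeholders:
#
#     require 'aws-sdk-s3'
#
#     client = Aws::S3::Client.new(region: 'us-east-1')
#     client.put_bucket_metrics_configuration(
#       bucket: 'example-bucket',
#       id: 'docs-filter', # 64-character limit
#       metrics_configuration: {
#         id: 'docs-filter',
#         filter: {
#           and: {
#             prefix: 'documents/',
#             tags: [{ key: 'team', value: 'analytics' }]
#           }
#         }
#       }
#     )
#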
# @return [String] # # @!attribute [rw] and # A conjunction (logical AND) of predicates, which is used in # evaluating a metrics filter. The operator must have at least two # predicates, and an object must match all of the predicates in order # for the filter to apply. # @return [Types::MetricsAndOperator] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter AWS API Documentation # class MetricsFilter < Struct.new( :prefix, :tag, :access_point_arn, :and) SENSITIVE = [] include Aws::Structure end # Container for the `MultipartUpload` for the Amazon S3 object. # # @!attribute [rw] upload_id # Upload ID that identifies the multipart upload. # @return [String] # # @!attribute [rw] key # Key of the object for which the multipart upload was initiated. # @return [String] # # @!attribute [rw] initiated # Date and time at which the multipart upload was initiated. # @return [Time] # # @!attribute [rw] storage_class # The class of storage used to store the object. # # **Directory buckets** - Only the S3 Express One Zone storage class # is supported by directory buckets to store objects. # # # @return [String] # # @!attribute [rw] owner # Specifies the owner of the object that is part of the multipart # upload. # # **Directory buckets** - The bucket owner is returned as the object # owner for all the objects. # # # @return [Types::Owner] # # @!attribute [rw] initiator # Identifies who initiated the multipart upload. # @return [Types::Initiator] # # @!attribute [rw] checksum_algorithm # The algorithm that was used to create a checksum of the object. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload AWS API Documentation # class MultipartUpload < Struct.new( :upload_id, :key, :initiated, :storage_class, :owner, :initiator, :checksum_algorithm) SENSITIVE = [] include Aws::Structure end # The specified bucket does not exist. # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoSuchBucket AWS API Documentation # class NoSuchBucket < Aws::EmptyStructure; end # The specified key does not exist. # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoSuchKey AWS API Documentation # class NoSuchKey < Aws::EmptyStructure; end # The specified multipart upload does not exist. # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoSuchUpload AWS API Documentation # class NoSuchUpload < Aws::EmptyStructure; end # Specifies when noncurrent object versions expire. Upon expiration, # Amazon S3 permanently deletes the noncurrent object versions. You set # this lifecycle configuration action on a bucket that has versioning # enabled (or suspended) to request that Amazon S3 delete noncurrent # object versions at a specific period in the object's lifetime. # # @!attribute [rw] noncurrent_days # Specifies the number of days an object is noncurrent before Amazon # S3 can perform the associated action. The value must be a non-zero # positive integer. For information about the noncurrent days # calculations, see [How Amazon S3 Calculates When an Object Became # Noncurrent][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations # @return [Integer] # # @!attribute [rw] newer_noncurrent_versions # Specifies how many newer noncurrent versions must exist before # Amazon S3 can perform the associated action on a given version. If # there are this many more recent noncurrent versions, Amazon S3 will # take the associated action. 
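#
# A minimal, illustrative lifecycle sketch (not from the generated docs)
# using `NoncurrentVersionExpiration`: a version expires once it has been
# noncurrent for 30 days and at least 5 newer noncurrent versions exist.
# The bucket name and rule ID are placeholders:
#
#     require 'aws-sdk-s3'
#
#     client = Aws::S3::Client.new(region: 'us-east-1')
#     client.put_bucket_lifecycle_configuration(
#       bucket: 'example-versioned-bucket',
#       lifecycle_configuration: {
#         rules: [{
#           id: 'expire-old-noncurrent-versions',
#           status: 'Enabled',
#           filter: { prefix: '' }, # apply to the whole bucket
#           noncurrent_version_expiration: {
#             noncurrent_days: 30,
#             newer_noncurrent_versions: 5
#           }
#         }]
#       }
#     )
#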
For more information about noncurrent # versions, see [Lifecycle configuration elements][1] in the *Amazon # S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html # @return [Integer] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration AWS API Documentation # class NoncurrentVersionExpiration < Struct.new( :noncurrent_days, :newer_noncurrent_versions) SENSITIVE = [] include Aws::Structure end # Container for the transition rule that describes when noncurrent # objects transition to the `STANDARD_IA`, `ONEZONE_IA`, # `INTELLIGENT_TIERING`, `GLACIER_IR`, `GLACIER`, or `DEEP_ARCHIVE` # storage class. If your bucket is versioning-enabled (or versioning is # suspended), you can set this action to request that Amazon S3 # transition noncurrent object versions to the `STANDARD_IA`, # `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER_IR`, `GLACIER`, or # `DEEP_ARCHIVE` storage class at a specific period in the object's # lifetime. # # @!attribute [rw] noncurrent_days # Specifies the number of days an object is noncurrent before Amazon # S3 can perform the associated action. For information about the # noncurrent days calculations, see [How Amazon S3 Calculates How Long # an Object Has Been Noncurrent][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations # @return [Integer] # # @!attribute [rw] storage_class # The class of storage used to store the object. # @return [String] # # @!attribute [rw] newer_noncurrent_versions # Specifies how many newer noncurrent versions must exist before # Amazon S3 can perform the associated action on a given version. If # there are this many more recent noncurrent versions, Amazon S3 will # take the associated action. For more information about noncurrent # versions, see [Lifecycle configuration elements][1] in the *Amazon # S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html # @return [Integer] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition AWS API Documentation # class NoncurrentVersionTransition < Struct.new( :noncurrent_days, :storage_class, :newer_noncurrent_versions) SENSITIVE = [] include Aws::Structure end # A container for specifying the notification configuration of the # bucket. If this element is empty, notifications are turned off for the # bucket. # # @!attribute [rw] topic_configurations # The topic to which notifications are sent and the events for which # notifications are generated. # @return [Array] # # @!attribute [rw] queue_configurations # The Amazon Simple Queue Service queues to publish messages to and # the events for which to publish messages. # @return [Array] # # @!attribute [rw] lambda_function_configurations # Describes the Lambda functions to invoke and the events for which to # invoke them. # @return [Array] # # @!attribute [rw] event_bridge_configuration # Enables delivery of events to Amazon EventBridge. # @return [Types::EventBridgeConfiguration] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration AWS API Documentation # class NotificationConfiguration < Struct.new( :topic_configurations, :queue_configurations, :lambda_function_configurations, :event_bridge_configuration) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] topic_configuration # This data type is deprecated. 
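#
# An illustrative sketch (not part of the generated docs) of the
# `NotificationConfiguration` container described above, publishing
# `s3:ObjectCreated:*` events to an SQS queue and enabling EventBridge
# delivery; the bucket name and queue ARN are placeholders:
#
#     require 'aws-sdk-s3'
#
#     client = Aws::S3::Client.new(region: 'us-east-1')
#     client.put_bucket_notification_configuration(
#       bucket: 'example-bucket',
#       notification_configuration: {
#         queue_configurations: [{
#           queue_arn: 'arn:aws:sqs:us-east-1:123456789012:example-queue',
#           events: ['s3:ObjectCreated:*'],
#           filter: { key: { filter_rules: [{ name: 'prefix', value: 'uploads/' }] } }
#         }],
#         event_bridge_configuration: {} # presence enables EventBridge delivery
#       }
#     )
#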
A container for specifying the # configuration for publication of messages to an Amazon Simple # Notification Service (Amazon SNS) topic when Amazon S3 detects # specified events. # @return [Types::TopicConfigurationDeprecated] # # @!attribute [rw] queue_configuration # This data type is deprecated. This data type specifies the # configuration for publishing messages to an Amazon Simple Queue # Service (Amazon SQS) queue when Amazon S3 detects specified events. # @return [Types::QueueConfigurationDeprecated] # # @!attribute [rw] cloud_function_configuration # Container for specifying the Lambda notification configuration. # @return [Types::CloudFunctionConfiguration] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated AWS API Documentation # class NotificationConfigurationDeprecated < Struct.new( :topic_configuration, :queue_configuration, :cloud_function_configuration) SENSITIVE = [] include Aws::Structure end # Specifies object key name filtering rules. For information about key # name filtering, see [Configuring event notifications using object key # name filtering][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html # # @!attribute [rw] key # A container for object key name prefix and suffix filtering rules. # @return [Types::S3KeyFilter] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter AWS API Documentation # class NotificationConfigurationFilter < Struct.new( :key) SENSITIVE = [] include Aws::Structure end # An object consists of data and its descriptive metadata. # # @!attribute [rw] key # The name that you assign to an object. You use the object key to # retrieve the object. # @return [String] # # @!attribute [rw] last_modified # Creation date of the object. # @return [Time] # # @!attribute [rw] etag # The entity tag is a hash of the object. The ETag reflects changes # only to the contents of an object, not its metadata. The ETag may or # may not be an MD5 digest of the object data. Whether or not it is # depends on how the object was created and how it is encrypted as # described below: # # * Objects created by the PUT Object, POST Object, or Copy operation, # or through the Amazon Web Services Management Console, and are # encrypted by SSE-S3 or plaintext, have ETags that are an MD5 # digest of their object data. # # * Objects created by the PUT Object, POST Object, or Copy operation, # or through the Amazon Web Services Management Console, and are # encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5 # digest of their object data. # # * If an object is created by either the Multipart Upload or Part # Copy operation, the ETag is not an MD5 digest, regardless of the # method of encryption. If an object is larger than 16 MB, the # Amazon Web Services Management Console will upload or copy that # object as a Multipart Upload, and therefore the ETag will not be # an MD5 digest. # # **Directory buckets** - MD5 is not supported by directory buckets. # # # @return [String] # # @!attribute [rw] checksum_algorithm # The algorithm that was used to create a checksum of the object. # @return [Array] # # @!attribute [rw] size # Size in bytes of the object # @return [Integer] # # @!attribute [rw] storage_class # The class of storage used to store the object. # # **Directory buckets** - Only the S3 Express One Zone storage class # is supported by directory buckets to store objects. 
# # # @return [String] # # @!attribute [rw] owner # The owner of the object. # # **Directory buckets** - The bucket owner is returned as the object # owner. # # # @return [Types::Owner] # # @!attribute [rw] restore_status # Specifies the restoration status of an object. Objects in certain # storage classes must be restored before they can be retrieved. For # more information about these storage classes and how to work with # archived objects, see [ Working with archived objects][1] in the # *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. Only the # S3 Express One Zone storage class is supported by directory buckets # to store objects. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html # @return [Types::RestoreStatus] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object AWS API Documentation # class Object < Struct.new( :key, :last_modified, :etag, :checksum_algorithm, :size, :storage_class, :owner, :restore_status) SENSITIVE = [] include Aws::Structure end # This action is not allowed against this storage tier. # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectAlreadyInActiveTierError AWS API Documentation # class ObjectAlreadyInActiveTierError < Aws::EmptyStructure; end # Object Identifier is a unique value that identifies objects. # # @!attribute [rw] key # Key name of the object. # # Replacement must be made for object keys containing special # characters (such as carriage returns) when using XML requests. For # more information, see [ XML related object key constraints][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints # @return [String] # # @!attribute [rw] version_id # Version ID for the specific version of the object to delete. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier AWS API Documentation # class ObjectIdentifier < Struct.new( :key, :version_id) SENSITIVE = [] include Aws::Structure end # The container element for Object Lock configuration parameters. # # @!attribute [rw] object_lock_enabled # Indicates whether this bucket has an Object Lock configuration # enabled. Enable `ObjectLockEnabled` when you apply # `ObjectLockConfiguration` to a bucket. # @return [String] # # @!attribute [rw] rule # Specifies the Object Lock rule for the specified object. Enable # this rule when you apply `ObjectLockConfiguration` to a bucket. # Bucket settings require both a mode and a period. The period can be # either `Days` or `Years` but you must select one. You cannot specify # `Days` and `Years` at the same time. # @return [Types::ObjectLockRule] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectLockConfiguration AWS API Documentation # class ObjectLockConfiguration < Struct.new( :object_lock_enabled, :rule) SENSITIVE = [] include Aws::Structure end # A legal hold configuration for an object. # # @!attribute [rw] status # Indicates whether the specified object has a legal hold in place. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectLockLegalHold AWS API Documentation # class ObjectLockLegalHold < Struct.new( :status) SENSITIVE = [] include Aws::Structure end # A Retention configuration for an object. # # @!attribute [rw] mode # Indicates the Retention mode for the specified object.
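#
# An illustrative sketch of the `ObjectLockConfiguration` rule described
# above, setting a default 30-day `GOVERNANCE` retention; the bucket name
# is a placeholder, and the bucket must have been created with Object
# Lock enabled:
#
#     require 'aws-sdk-s3'
#
#     client = Aws::S3::Client.new(region: 'us-east-1')
#     client.put_object_lock_configuration(
#       bucket: 'example-lock-bucket',
#       object_lock_configuration: {
#         object_lock_enabled: 'Enabled',
#         rule: {
#           # The period is either `days` or `years`, never both.
#           default_retention: { mode: 'GOVERNANCE', days: 30 }
#         }
#       }
#     )
#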
# @return [String] # # @!attribute [rw] retain_until_date # The date on which this Object Lock Retention will expire. # @return [Time] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectLockRetention AWS API Documentation # class ObjectLockRetention < Struct.new( :mode, :retain_until_date) SENSITIVE = [] include Aws::Structure end # The container element for an Object Lock rule. # # @!attribute [rw] default_retention # The default Object Lock retention mode and period that you want to # apply to new objects placed in the specified bucket. Bucket settings # require both a mode and a period. The period can be either `Days` or # `Years` but you must select one. You cannot specify `Days` and # `Years` at the same time. # @return [Types::DefaultRetention] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectLockRule AWS API Documentation # class ObjectLockRule < Struct.new( :default_retention) SENSITIVE = [] include Aws::Structure end # The source object of the COPY action is not in the active tier and is # only stored in Amazon S3 Glacier. # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectNotInActiveTierError AWS API Documentation # class ObjectNotInActiveTierError < Aws::EmptyStructure; end # A container for elements related to an individual part. # # @!attribute [rw] part_number # The part number identifying the part. This value is a positive # integer between 1 and 10,000. # @return [Integer] # # @!attribute [rw] size # The size of the uploaded part in bytes. # @return [Integer] # # @!attribute [rw] checksum_crc32 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32 checksum of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_crc32c # The base64-encoded, 32-bit CRC32C checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha1 # The base64-encoded, 160-bit SHA-1 digest of the object. This will # only be present if it was uploaded with the object. When you use the # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha256 # The base64-encoded, 256-bit SHA-256 digest of the object. This will # only be present if it was uploaded with the object. 
When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectPart AWS API Documentation # class ObjectPart < Struct.new( :part_number, :size, :checksum_crc32, :checksum_crc32c, :checksum_sha1, :checksum_sha256) SENSITIVE = [] include Aws::Structure end # The version of an object. # # @!attribute [rw] etag # The entity tag is an MD5 hash of that version of the object. # @return [String] # # @!attribute [rw] checksum_algorithm # The algorithm that was used to create a checksum of the object. # @return [Array] # # @!attribute [rw] size # Size in bytes of the object. # @return [Integer] # # @!attribute [rw] storage_class # The class of storage used to store the object. # @return [String] # # @!attribute [rw] key # The object key. # @return [String] # # @!attribute [rw] version_id # Version ID of an object. # @return [String] # # @!attribute [rw] is_latest # Specifies whether the object is (true) or is not (false) the latest # version of an object. # @return [Boolean] # # @!attribute [rw] last_modified # Date and time when the object was last modified. # @return [Time] # # @!attribute [rw] owner # Specifies the owner of the object. # @return [Types::Owner] # # @!attribute [rw] restore_status # Specifies the restoration status of an object. Objects in certain # storage classes must be restored before they can be retrieved. For # more information about these storage classes and how to work with # archived objects, see [ Working with archived objects][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html # @return [Types::RestoreStatus] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion AWS API Documentation # class ObjectVersion < Struct.new( :etag, :checksum_algorithm, :size, :storage_class, :key, :version_id, :is_latest, :last_modified, :owner, :restore_status) SENSITIVE = [] include Aws::Structure end # Describes the location where the restore job's output is stored. # # @!attribute [rw] s3 # Describes an S3 location that will receive the results of the # restore request. # @return [Types::S3Location] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OutputLocation AWS API Documentation # class OutputLocation < Struct.new( :s3) SENSITIVE = [] include Aws::Structure end # Describes how results of the Select job are serialized. # # @!attribute [rw] csv # Describes the serialization of CSV-encoded Select results. # @return [Types::CSVOutput] # # @!attribute [rw] json # Specifies JSON as request's output serialization format. # @return [Types::JSONOutput] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OutputSerialization AWS API Documentation # class OutputSerialization < Struct.new( :csv, :json) SENSITIVE = [] include Aws::Structure end # Container for the owner's display name and ID. # # @!attribute [rw] display_name # Container for the display name of the owner. 
This value is only # supported in the following Amazon Web Services Regions: # # * US East (N. Virginia) # # * US West (N. California) # # * US West (Oregon) # # * Asia Pacific (Singapore) # # * Asia Pacific (Sydney) # # * Asia Pacific (Tokyo) # # * Europe (Ireland) # # * South America (São Paulo) # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] id # Container for the ID of the owner. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner AWS API Documentation # class Owner < Struct.new( :display_name, :id) SENSITIVE = [] include Aws::Structure end # The container element for a bucket's ownership controls. # # @!attribute [rw] rules # The container element for an ownership control rule. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OwnershipControls AWS API Documentation # class OwnershipControls < Struct.new( :rules) SENSITIVE = [] include Aws::Structure end # The container element for an ownership control rule. # # @!attribute [rw] object_ownership # The container element for object ownership for a bucket's ownership # controls. # # `BucketOwnerPreferred` - Objects uploaded to the bucket change # ownership to the bucket owner if the objects are uploaded with the # `bucket-owner-full-control` canned ACL. # # `ObjectWriter` - The uploading account will own the object if the # object is uploaded with the `bucket-owner-full-control` canned ACL. # # `BucketOwnerEnforced` - Access control lists (ACLs) are disabled and # no longer affect permissions. The bucket owner automatically owns # and has full control over every object in the bucket. The bucket # only accepts PUT requests that don't specify an ACL or specify # bucket owner full control ACLs (such as the predefined # `bucket-owner-full-control` canned ACL or a custom ACL in XML format # that grants the same permissions). # # By default, `ObjectOwnership` is set to `BucketOwnerEnforced` and # ACLs are disabled. We recommend keeping ACLs disabled, except in # uncommon use cases where you must control access for each object # individually. For more information about S3 Object Ownership, see # [Controlling ownership of objects and disabling ACLs for your # bucket][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. Directory # buckets use the bucket owner enforced setting for S3 Object # Ownership. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OwnershipControlsRule AWS API Documentation # class OwnershipControlsRule < Struct.new( :object_ownership) SENSITIVE = [] include Aws::Structure end # Container for Parquet. # # @api private # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ParquetInput AWS API Documentation # class ParquetInput < Aws::EmptyStructure; end # Container for elements related to a part. # # @!attribute [rw] part_number # Part number identifying the part. This is a positive integer between # 1 and 10,000. # @return [Integer] # # @!attribute [rw] last_modified # Date and time at which the part was uploaded. # @return [Time] # # @!attribute [rw] etag # Entity tag returned when the part was uploaded. # @return [String] # # @!attribute [rw] size # Size in bytes of the uploaded part data. 
# @return [Integer] # # @!attribute [rw] checksum_crc32 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32 checksum of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_crc32c # The base64-encoded, 32-bit CRC32C checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha1 # The base64-encoded, 160-bit SHA-1 digest of the object. This will # only be present if it was uploaded with the object. When you use the # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha256 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 256-bit SHA-256 digest of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part AWS API Documentation # class Part < Struct.new( :part_number, :last_modified, :etag, :size, :checksum_crc32, :checksum_crc32c, :checksum_sha1, :checksum_sha256) SENSITIVE = [] include Aws::Structure end # Amazon S3 keys for log objects are partitioned in the following # format: # # `[DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]` # # PartitionedPrefix defaults to EventTime delivery when server access # logs are delivered. # # @!attribute [rw] partition_date_source # Specifies the partition date source for the partitioned prefix. # PartitionDateSource can be EventTime or DeliveryTime. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PartitionedPrefix AWS API Documentation # class PartitionedPrefix < Struct.new( :partition_date_source) SENSITIVE = [] include Aws::Structure end # The container element for a bucket's policy status. # # @!attribute [rw] is_public # The policy status for this bucket. `TRUE` indicates that this bucket # is public. `FALSE` indicates that the bucket is not public. 
# @return [Boolean] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PolicyStatus AWS API Documentation # class PolicyStatus < Struct.new( :is_public) SENSITIVE = [] include Aws::Structure end # This data type contains information about progress of an operation. # # @!attribute [rw] bytes_scanned # The current number of object bytes scanned. # @return [Integer] # # @!attribute [rw] bytes_processed # The current number of uncompressed object bytes processed. # @return [Integer] # # @!attribute [rw] bytes_returned # The current number of bytes of records payload data returned. # @return [Integer] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Progress AWS API Documentation # class Progress < Struct.new( :bytes_scanned, :bytes_processed, :bytes_returned) SENSITIVE = [] include Aws::Structure end # This data type contains information about the progress event of an # operation. # # @!attribute [rw] details # The Progress event details. # @return [Types::Progress] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ProgressEvent AWS API Documentation # class ProgressEvent < Struct.new( :details, :event_type) SENSITIVE = [] include Aws::Structure end # The PublicAccessBlock configuration that you want to apply to this # Amazon S3 bucket. You can enable the configuration options in any # combination. For more information about when Amazon S3 considers a # bucket or object public, see [The Meaning of "Public"][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status # # @!attribute [rw] block_public_acls # Specifies whether Amazon S3 should block public access control lists # (ACLs) for this bucket and objects in this bucket. Setting this # element to `TRUE` causes the following behavior: # # * PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL # is public. # # * PUT Object calls fail if the request includes a public ACL. # # * PUT Bucket calls fail if the request includes a public ACL. # # Enabling this setting doesn't affect existing policies or ACLs. # @return [Boolean] # # @!attribute [rw] ignore_public_acls # Specifies whether Amazon S3 should ignore public ACLs for this # bucket and objects in this bucket. Setting this element to `TRUE` # causes Amazon S3 to ignore all public ACLs on this bucket and # objects in this bucket. # # Enabling this setting doesn't affect the persistence of any # existing ACLs and doesn't prevent new public ACLs from being set. # @return [Boolean] # # @!attribute [rw] block_public_policy # Specifies whether Amazon S3 should block public bucket policies for # this bucket. Setting this element to `TRUE` causes Amazon S3 to # reject calls to PUT Bucket policy if the specified bucket policy # allows public access. # # Enabling this setting doesn't affect existing bucket policies. # @return [Boolean] # # @!attribute [rw] restrict_public_buckets # Specifies whether Amazon S3 should restrict public bucket policies # for this bucket. Setting this element to `TRUE` restricts access to # this bucket to only Amazon Web Service principals and authorized # users within this account if the bucket has a public policy. # # Enabling this setting doesn't affect previously stored bucket # policies, except that public and cross-account access within any # public bucket policy, including non-public delegation to specific # accounts, is blocked. 
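    #
    # A minimal usage sketch (editor's illustrative addition): applying the
    # PublicAccessBlock configuration described above with every option
    # enabled. The region and bucket name are assumed placeholders.
    #
    #     require 'aws-sdk-s3'
    #
    #     s3 = Aws::S3::Client.new(region: 'us-east-1')
    #     s3.put_public_access_block(
    #       bucket: 'example-bucket',
    #       public_access_block_configuration: {
    #         block_public_acls: true,
    #         ignore_public_acls: true,
    #         block_public_policy: true,
    #         restrict_public_buckets: true
    #       }
    #     )
    #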
# @return [Boolean] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PublicAccessBlockConfiguration AWS API Documentation # class PublicAccessBlockConfiguration < Struct.new( :block_public_acls, :ignore_public_acls, :block_public_policy, :restrict_public_buckets) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket for which the accelerate configuration is # set. # @return [String] # # @!attribute [rw] accelerate_configuration # Container for setting the transfer acceleration state. # @return [Types::AccelerateConfiguration] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest AWS API Documentation # class PutBucketAccelerateConfigurationRequest < Struct.new( :bucket, :accelerate_configuration, :expected_bucket_owner, :checksum_algorithm) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] acl # The canned ACL to apply to the bucket. # @return [String] # # @!attribute [rw] access_control_policy # Contains the elements that set the ACL permissions for an object per # grantee. # @return [Types::AccessControlPolicy] # # @!attribute [rw] bucket # The bucket to which to apply the ACL. # @return [String] # # @!attribute [rw] content_md5 # The base64-encoded 128-bit MD5 digest of the data. This header must # be used as a message integrity check to verify that the request body # was not corrupted in transit. For more information, go to [RFC # 1864.][1] # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. 
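    #
    # A minimal usage sketch (editor's illustrative addition): sending the
    # bucket ACL request documented here with a canned ACL, which avoids
    # spelling out individual grants. The region and bucket name are assumed.
    #
    #     require 'aws-sdk-s3'
    #
    #     s3 = Aws::S3::Client.new(region: 'us-east-1')
    #     s3.put_bucket_acl(bucket: 'example-bucket', acl: 'private')
    #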
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] grant_full_control # Allows grantee the read, write, read ACP, and write ACP permissions # on the bucket. # @return [String] # # @!attribute [rw] grant_read # Allows grantee to list the objects in the bucket. # @return [String] # # @!attribute [rw] grant_read_acp # Allows grantee to read the bucket ACL. # @return [String] # # @!attribute [rw] grant_write # Allows grantee to create new objects in the bucket. # # For the bucket and object owners of existing objects, also allows # deletions and overwrites of those objects. # @return [String] # # @!attribute [rw] grant_write_acp # Allows grantee to write the ACL for the applicable bucket. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest AWS API Documentation # class PutBucketAclRequest < Struct.new( :acl, :access_control_policy, :bucket, :content_md5, :checksum_algorithm, :grant_full_control, :grant_read, :grant_read_acp, :grant_write, :grant_write_acp, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket to which an analytics configuration is # stored. # @return [String] # # @!attribute [rw] id # The ID that identifies the analytics configuration. # @return [String] # # @!attribute [rw] analytics_configuration # The configuration and any analyses for the analytics filter. # @return [Types::AnalyticsConfiguration] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest AWS API Documentation # class PutBucketAnalyticsConfigurationRequest < Struct.new( :bucket, :id, :analytics_configuration, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # Specifies the bucket impacted by the `cors` configuration. # @return [String] # # @!attribute [rw] cors_configuration # Describes the cross-origin access configuration for objects in an # Amazon S3 bucket. For more information, see [Enabling Cross-Origin # Resource Sharing][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html # @return [Types::CORSConfiguration] # # @!attribute [rw] content_md5 # The base64-encoded 128-bit MD5 digest of the data. This header must # be used as a message integrity check to verify that the request body # was not corrupted in transit. For more information, go to [RFC # 1864.][1] # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK.
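    #
    # A minimal usage sketch (editor's illustrative addition) for the CORS
    # request documented here: putting a single CORS rule on a bucket. The
    # region, bucket name, and origin are assumed values.
    #
    #     require 'aws-sdk-s3'
    #
    #     s3 = Aws::S3::Client.new(region: 'us-east-1')
    #     s3.put_bucket_cors(
    #       bucket: 'example-bucket',
    #       cors_configuration: {
    #         cors_rules: [{
    #           allowed_methods: ['GET', 'PUT'],
    #           allowed_origins: ['https://www.example.com'],
    #           allowed_headers: ['*'],
    #           max_age_seconds: 3000
    #         }]
    #       }
    #     )
    #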
When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest AWS API Documentation # class PutBucketCorsRequest < Struct.new( :bucket, :cors_configuration, :content_md5, :checksum_algorithm, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # Specifies default encryption for a bucket using server-side # encryption with different key options. By default, all buckets have # a default encryption configuration that uses server-side encryption # with Amazon S3 managed keys (SSE-S3). You can optionally configure # default encryption for a bucket by using server-side encryption with # an Amazon Web Services KMS key (SSE-KMS) or a customer-provided key # (SSE-C). For information about the bucket default encryption # feature, see [Amazon S3 Bucket Default Encryption][1] in the *Amazon # S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html # @return [String] # # @!attribute [rw] content_md5 # The base64-encoded 128-bit MD5 digest of the server-side encryption # configuration. # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] server_side_encryption_configuration # Specifies the default server-side-encryption configuration. # @return [Types::ServerSideEncryptionConfiguration] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). 
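    #
    # A minimal usage sketch (editor's illustrative addition): configuring
    # default SSE-KMS encryption for a bucket as described above. The
    # region, bucket name, and KMS key ID are assumed placeholders.
    #
    #     require 'aws-sdk-s3'
    #
    #     s3 = Aws::S3::Client.new(region: 'us-east-1')
    #     s3.put_bucket_encryption(
    #       bucket: 'example-bucket',
    #       server_side_encryption_configuration: {
    #         rules: [{
    #           apply_server_side_encryption_by_default: {
    #             sse_algorithm: 'aws:kms',
    #             kms_master_key_id: '1234abcd-12ab-34cd-56ef-1234567890ab'
    #           },
    #           bucket_key_enabled: true
    #         }]
    #       }
    #     )
    #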
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryptionRequest AWS API Documentation # class PutBucketEncryptionRequest < Struct.new( :bucket, :content_md5, :checksum_algorithm, :server_side_encryption_configuration, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the Amazon S3 bucket whose configuration you want to # modify or retrieve. # @return [String] # # @!attribute [rw] id # The ID used to identify the S3 Intelligent-Tiering configuration. # @return [String] # # @!attribute [rw] intelligent_tiering_configuration # Container for S3 Intelligent-Tiering configuration. # @return [Types::IntelligentTieringConfiguration] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfigurationRequest AWS API Documentation # class PutBucketIntelligentTieringConfigurationRequest < Struct.new( :bucket, :id, :intelligent_tiering_configuration) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket where the inventory configuration will be # stored. # @return [String] # # @!attribute [rw] id # The ID used to identify the inventory configuration. # @return [String] # # @!attribute [rw] inventory_configuration # Specifies the inventory configuration. # @return [Types::InventoryConfiguration] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest AWS API Documentation # class PutBucketInventoryConfigurationRequest < Struct.new( :bucket, :id, :inventory_configuration, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket for which to set the configuration. # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] lifecycle_configuration # Container for lifecycle rules. You can add as many as 1,000 rules. # @return [Types::BucketLifecycleConfiguration] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). 
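    #
    # A minimal usage sketch (editor's illustrative addition): a lifecycle
    # configuration with one rule that expires objects under an assumed
    # prefix after 365 days. The region and bucket name are placeholders.
    #
    #     require 'aws-sdk-s3'
    #
    #     s3 = Aws::S3::Client.new(region: 'us-east-1')
    #     s3.put_bucket_lifecycle_configuration(
    #       bucket: 'example-bucket',
    #       lifecycle_configuration: {
    #         rules: [{
    #           id: 'expire-old-logs',
    #           status: 'Enabled',
    #           filter: { prefix: 'logs/' },
    #           expiration: { days: 365 }
    #         }]
    #       }
    #     )
    #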
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest AWS API Documentation # class PutBucketLifecycleConfigurationRequest < Struct.new( :bucket, :checksum_algorithm, :lifecycle_configuration, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # @return [String] # # @!attribute [rw] content_md5 # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] lifecycle_configuration # @return [Types::LifecycleConfiguration] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest AWS API Documentation # class PutBucketLifecycleRequest < Struct.new( :bucket, :content_md5, :checksum_algorithm, :lifecycle_configuration, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket for which to set the logging parameters. # @return [String] # # @!attribute [rw] bucket_logging_status # Container for logging status information. # @return [Types::BucketLoggingStatus] # # @!attribute [rw] content_md5 # The MD5 hash of the `PutBucketLogging` request body. # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). 
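    #
    # A minimal usage sketch (editor's illustrative addition): enabling
    # server access logging, delivering logs to an assumed target bucket
    # using the partitioned prefix format described earlier.
    #
    #     require 'aws-sdk-s3'
    #
    #     s3 = Aws::S3::Client.new(region: 'us-east-1')
    #     s3.put_bucket_logging(
    #       bucket: 'example-bucket',
    #       bucket_logging_status: {
    #         logging_enabled: {
    #           target_bucket: 'example-log-bucket',
    #           target_prefix: 'access-logs/',
    #           target_object_key_format: {
    #             partitioned_prefix: { partition_date_source: 'EventTime' }
    #           }
    #         }
    #       }
    #     )
    #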
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest AWS API Documentation # class PutBucketLoggingRequest < Struct.new( :bucket, :bucket_logging_status, :content_md5, :checksum_algorithm, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket for which the metrics configuration is set. # @return [String] # # @!attribute [rw] id # The ID used to identify the metrics configuration. The ID has a 64 # character limit and can only contain letters, numbers, periods, # dashes, and underscores. # @return [String] # # @!attribute [rw] metrics_configuration # Specifies the metrics configuration. # @return [Types::MetricsConfiguration] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest AWS API Documentation # class PutBucketMetricsConfigurationRequest < Struct.new( :bucket, :id, :metrics_configuration, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket. # @return [String] # # @!attribute [rw] notification_configuration # A container for specifying the notification configuration of the # bucket. If this element is empty, notifications are turned off for # the bucket. # @return [Types::NotificationConfiguration] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] skip_destination_validation # Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. # True or false value. # @return [Boolean] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest AWS API Documentation # class PutBucketNotificationConfigurationRequest < Struct.new( :bucket, :notification_configuration, :expected_bucket_owner, :skip_destination_validation) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket. # @return [String] # # @!attribute [rw] content_md5 # The MD5 hash of the `PutBucketNotification` request body. # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] notification_configuration # The container for the configuration.
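    #
    # A minimal usage sketch (editor's illustrative addition) for the
    # PutBucketNotificationConfigurationRequest defined above: routing
    # object-created events to an assumed SQS queue ARN.
    #
    #     require 'aws-sdk-s3'
    #
    #     s3 = Aws::S3::Client.new(region: 'us-east-1')
    #     s3.put_bucket_notification_configuration(
    #       bucket: 'example-bucket',
    #       notification_configuration: {
    #         queue_configurations: [{
    #           queue_arn: 'arn:aws:sqs:us-east-1:123456789012:example-queue',
    #           events: ['s3:ObjectCreated:*']
    #         }]
    #       }
    #     )
    #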
# @return [Types::NotificationConfigurationDeprecated] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest AWS API Documentation # class PutBucketNotificationRequest < Struct.new( :bucket, :content_md5, :checksum_algorithm, :notification_configuration, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the Amazon S3 bucket whose `OwnershipControls` you want # to set. # @return [String] # # @!attribute [rw] content_md5 # The MD5 hash of the `OwnershipControls` request body. # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] ownership_controls # The `OwnershipControls` (BucketOwnerEnforced, BucketOwnerPreferred, # or ObjectWriter) that you want to apply to this Amazon S3 bucket. # @return [Types::OwnershipControls] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControlsRequest AWS API Documentation # class PutBucketOwnershipControlsRequest < Struct.new( :bucket, :content_md5, :expected_bucket_owner, :ownership_controls) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket. # # Directory buckets - When you use this operation with a # directory bucket, you must use path-style requests in the format # `https://s3express-control.region_code.amazonaws.com/bucket-name `. # Virtual-hosted-style requests aren't supported. Directory bucket # names must be unique in the chosen Availability Zone. Bucket names # must also follow the format ` bucket_base_name--az_id--x-s3` (for # example, ` DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information # about bucket naming restrictions, see [Directory bucket naming # rules][1] in the *Amazon S3 User Guide* # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # @return [String] # # @!attribute [rw] content_md5 # The MD5 hash of the request body. # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum-algorithm ` or # `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request # with the HTTP status code `400 Bad Request`. # # For the `x-amz-checksum-algorithm ` header, replace ` algorithm ` # with the supported algorithm from the following list: # # * CRC32 # # * CRC32C # # * SHA1 # # * SHA256 # # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. 
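    #
    # A minimal usage sketch (editor's illustrative addition): attaching a
    # bucket policy as a JSON document, per the `policy` attribute below.
    # The region, bucket name, account ID, and statement are assumed values.
    #
    #     require 'aws-sdk-s3'
    #     require 'json'
    #
    #     s3 = Aws::S3::Client.new(region: 'us-east-1')
    #     policy = {
    #       'Version' => '2012-10-17',
    #       'Statement' => [{
    #         'Effect' => 'Allow',
    #         'Principal' => { 'AWS' => 'arn:aws:iam::123456789012:root' },
    #         'Action' => 's3:GetObject',
    #         'Resource' => 'arn:aws:s3:::example-bucket/*'
    #       }]
    #     }
    #     s3.put_bucket_policy(bucket: 'example-bucket', policy: JSON.generate(policy))
    #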
# # If the individual checksum value you provide through # `x-amz-checksum-algorithm ` doesn't match the checksum algorithm # you set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores # any provided `ChecksumAlgorithm` parameter and uses the checksum # algorithm that matches the provided value in # `x-amz-checksum-algorithm `. # # For directory buckets, when you use Amazon Web Services SDKs, # `CRC32` is the default checksum algorithm that's used for # performance. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] confirm_remove_self_bucket_access # Set this parameter to true to confirm that you want to remove your # permissions to change this bucket policy in the future. # # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @!attribute [rw] policy # The bucket policy as a JSON document. # # For directory buckets, the only IAM action supported in the bucket # policy is `s3express:CreateSession`. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # # For directory buckets, this header is not supported in this API # operation. If you specify this header, the request fails with the # HTTP status code `501 Not Implemented`. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest AWS API Documentation # class PutBucketPolicyRequest < Struct.new( :bucket, :content_md5, :checksum_algorithm, :confirm_remove_self_bucket_access, :policy, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the bucket # @return [String] # # @!attribute [rw] content_md5 # The base64-encoded 128-bit MD5 digest of the data. You must use this # header as a message integrity check to verify that the request body # was not corrupted in transit. For more information, see [RFC # 1864][1]. # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] replication_configuration # A container for replication rules. You can add up to 1,000 rules. # The maximum size of a replication configuration is 2 MB. # @return [Types::ReplicationConfiguration] # # @!attribute [rw] token # A token to allow Object Lock to be enabled for an existing bucket. # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. 
If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest AWS API Documentation # class PutBucketReplicationRequest < Struct.new( :bucket, :content_md5, :checksum_algorithm, :replication_configuration, :token, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name. # @return [String] # # @!attribute [rw] content_md5 # The base64-encoded 128-bit MD5 digest of the data. You must use this # header as a message integrity check to verify that the request body # was not corrupted in transit. For more information, see [RFC # 1864][1]. # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] request_payment_configuration # Container for Payer. # @return [Types::RequestPaymentConfiguration] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest AWS API Documentation # class PutBucketRequestPaymentRequest < Struct.new( :bucket, :content_md5, :checksum_algorithm, :request_payment_configuration, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name. # @return [String] # # @!attribute [rw] content_md5 # The base64-encoded 128-bit MD5 digest of the data. You must use this # header as a message integrity check to verify that the request body # was not corrupted in transit. For more information, see [RFC # 1864][1]. # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. 
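    #
    # A minimal usage sketch (editor's illustrative addition): replacing a
    # bucket's tag set with the request documented here. The region, bucket
    # name, and tags are assumed values.
    #
    #     require 'aws-sdk-s3'
    #
    #     s3 = Aws::S3::Client.new(region: 'us-east-1')
    #     s3.put_bucket_tagging(
    #       bucket: 'example-bucket',
    #       tagging: { tag_set: [{ key: 'environment', value: 'production' }] }
    #     )
    #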
# # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] tagging # Container for the `TagSet` and `Tag` elements. # @return [Types::Tagging] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest AWS API Documentation # class PutBucketTaggingRequest < Struct.new( :bucket, :content_md5, :checksum_algorithm, :tagging, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name. # @return [String] # # @!attribute [rw] content_md5 # The base64-encoded 128-bit MD5 digest of the data. You must use # this header as a message integrity check to verify that the request # body was not corrupted in transit. For more information, see [RFC # 1864][1]. # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] mfa # The concatenation of the authentication device's serial number, a # space, and the value that is displayed on your authentication # device. # @return [String] # # @!attribute [rw] versioning_configuration # Container for setting the versioning state. # @return [Types::VersioningConfiguration] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest AWS API Documentation # class PutBucketVersioningRequest < Struct.new( :bucket, :content_md5, :checksum_algorithm, :mfa, :versioning_configuration, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name. # @return [String] # # @!attribute [rw] content_md5 # The base64-encoded 128-bit MD5 digest of the data. You must use this # header as a message integrity check to verify that the request body # was not corrupted in transit. For more information, see [RFC # 1864][1]. # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically.
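    #
    # A minimal usage sketch (editor's illustrative addition) for the
    # website request documented here: configuring static website hosting
    # with assumed index and error documents.
    #
    #     require 'aws-sdk-s3'
    #
    #     s3 = Aws::S3::Client.new(region: 'us-east-1')
    #     s3.put_bucket_website(
    #       bucket: 'example-bucket',
    #       website_configuration: {
    #         index_document: { suffix: 'index.html' },
    #         error_document: { key: 'error.html' }
    #       }
    #     )
    #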
# # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] website_configuration # Container for the request. # @return [Types::WebsiteConfiguration] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest AWS API Documentation # class PutBucketWebsiteRequest < Struct.new( :bucket, :content_md5, :checksum_algorithm, :website_configuration, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput AWS API Documentation # class PutObjectAclOutput < Struct.new( :request_charged) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] acl # The canned ACL to apply to the object. For more information, see # [Canned ACL][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL # @return [String] # # @!attribute [rw] access_control_policy # Contains the elements that set the ACL permissions for an object per # grantee. # @return [Types::AccessControlPolicy] # # @!attribute [rw] bucket # The bucket name that contains the object to which you want to attach # the ACL. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][1] in the *Amazon S3 User Guide*. # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][2] in the *Amazon S3 User Guide*. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] content_md5 # The base64-encoded 128-bit MD5 digest of the data. This header must # be used as a message integrity check to verify that the request body # was not corrupted in transit. For more information, go to [RFC # 1864.][1] # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] grant_full_control # Allows grantee the read, write, read ACP, and write ACP permissions # on the bucket. # # This functionality is not supported for Amazon S3 on Outposts. # @return [String] # # @!attribute [rw] grant_read # Allows grantee to list the objects in the bucket. # # This functionality is not supported for Amazon S3 on Outposts. # @return [String] # # @!attribute [rw] grant_read_acp # Allows grantee to read the bucket ACL. # # This functionality is not supported for Amazon S3 on Outposts. # @return [String] # # @!attribute [rw] grant_write # Allows grantee to create new objects in the bucket. # # For the bucket and object owners of existing objects, also allows # deletions and overwrites of those objects. # @return [String] # # @!attribute [rw] grant_write_acp # Allows grantee to write the ACL for the applicable bucket. # # This functionality is not supported for Amazon S3 on Outposts. # @return [String] # # @!attribute [rw] key # Key for which the PUT action was initiated. # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] version_id # Version ID used to reference a specific version of the object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied).
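    #
    # A minimal usage sketch (editor's illustrative addition): granting the
    # bucket owner full control of an object via a canned ACL, per the
    # request documented here. The region, bucket, and key are assumed.
    #
    #     require 'aws-sdk-s3'
    #
    #     s3 = Aws::S3::Client.new(region: 'us-east-1')
    #     s3.put_object_acl(
    #       bucket: 'example-bucket',
    #       key: 'shared/report.csv',
    #       acl: 'bucket-owner-full-control'
    #     )
    #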
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest AWS API Documentation # class PutObjectAclRequest < Struct.new( :acl, :access_control_policy, :bucket, :content_md5, :checksum_algorithm, :grant_full_control, :grant_read, :grant_read_acp, :grant_write, :grant_write_acp, :key, :request_payer, :version_id, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHoldOutput AWS API Documentation # class PutObjectLegalHoldOutput < Struct.new( :request_charged) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name containing the object that you want to place a legal # hold on. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # @return [String] # # @!attribute [rw] key # The key name for the object that you want to place a legal hold on. # @return [String] # # @!attribute [rw] legal_hold # Container element for the legal hold configuration you want to apply # to the specified object. # @return [Types::ObjectLockLegalHold] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] version_id # The version ID of the object that you want to place a legal hold on. # @return [String] # # @!attribute [rw] content_md5 # The MD5 hash for the request body. # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. 
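    #
    # A minimal usage sketch (editor's illustrative addition): placing a
    # legal hold on an assumed object. The region, bucket, and key are
    # placeholders; the bucket must have Object Lock enabled.
    #
    #     require 'aws-sdk-s3'
    #
    #     s3 = Aws::S3::Client.new(region: 'us-east-1')
    #     s3.put_object_legal_hold(
    #       bucket: 'example-bucket',
    #       key: 'records/case-123.pdf',
    #       legal_hold: { status: 'ON' }
    #     )
    #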
# # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHoldRequest AWS API Documentation # class PutObjectLegalHoldRequest < Struct.new( :bucket, :key, :legal_hold, :request_payer, :version_id, :content_md5, :checksum_algorithm, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfigurationOutput AWS API Documentation # class PutObjectLockConfigurationOutput < Struct.new( :request_charged) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket whose Object Lock configuration you want to create or # replace. # @return [String] # # @!attribute [rw] object_lock_configuration # The Object Lock configuration that you want to apply to the # specified bucket. # @return [Types::ObjectLockConfiguration] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] token # A token to allow Object Lock to be enabled for an existing bucket. # @return [String] # # @!attribute [rw] content_md5 # The MD5 hash for the request body. # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. 
If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfigurationRequest AWS API Documentation # class PutObjectLockConfigurationRequest < Struct.new( :bucket, :object_lock_configuration, :request_payer, :token, :content_md5, :checksum_algorithm, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] expiration # If the expiration is configured for the object (see # [PutBucketLifecycleConfiguration][1]) in the *Amazon S3 User Guide*, # the response includes this header. It includes the `expiry-date` and # `rule-id` key-value pairs that provide information about object # expiration. The value of the `rule-id` is URL-encoded. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html # @return [String] # # @!attribute [rw] etag # Entity tag for the uploaded object. # # General purpose buckets - To ensure that data is not # corrupted traversing the network, for objects where the ETag is the # MD5 digest of the object, you can calculate the MD5 while putting an # object to Amazon S3 and compare the returned ETag to the calculated # MD5 value. # # Directory buckets - The ETag for the object in a directory # bucket isn't the MD5 digest of the object. # @return [String] # # @!attribute [rw] checksum_crc32 # The base64-encoded, 32-bit CRC32 checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_crc32c # The base64-encoded, 32-bit CRC32C checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha1 # The base64-encoded, 160-bit SHA-1 digest of the object. This will # only be present if it was uploaded with the object. When you use the # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. 
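    #
    # A minimal usage sketch (editor's illustrative addition): uploading an
    # object with a requested CRC32 checksum and reading the fields of this
    # output structure. The region, bucket, and key are assumed values.
    #
    #     require 'aws-sdk-s3'
    #
    #     s3 = Aws::S3::Client.new(region: 'us-east-1')
    #     resp = s3.put_object(
    #       bucket: 'example-bucket',
    #       key: 'hello.txt',
    #       body: 'Hello, S3!',
    #       checksum_algorithm: 'CRC32'
    #     )
    #     puts resp.etag
    #     puts resp.checksum_crc32  # base64-encoded CRC32 of the object
    #     puts resp.version_id      # present when bucket versioning is enabled
    #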
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha256 # The base64-encoded, 256-bit SHA-256 digest of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] server_side_encryption # The server-side encryption algorithm used when you store this object # in Amazon S3 (for example, `AES256`, `aws:kms`, `aws:kms:dsse`). # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # @return [String] # # @!attribute [rw] version_id # Version ID of the object. # # If you enable versioning for a bucket, Amazon S3 automatically # generates a unique version ID for the object being stored. Amazon S3 # returns this ID in the response. When you enable versioning for a # bucket, if Amazon S3 receives multiple write requests for the same # object simultaneously, it stores all of the objects. For more # information about versioning, see [Adding Objects to # Versioning-Enabled Buckets][1] in the *Amazon S3 User Guide*. For # information about returning the versioning state of a bucket, see # [GetBucketVersioning][2]. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html # @return [String] # # @!attribute [rw] sse_customer_algorithm # If server-side encryption with a customer-provided encryption key # was requested, the response will include this header to confirm the # encryption algorithm that's used. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] sse_customer_key_md5 # If server-side encryption with a customer-provided encryption key # was requested, the response will include this header to provide the # round-trip message integrity verification of the customer-provided # encryption key. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] ssekms_key_id # If `x-amz-server-side-encryption` has a valid value of `aws:kms` or # `aws:kms:dsse`, this header indicates the ID of the Key Management # Service (KMS) symmetric encryption customer managed key that was # used for the object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] ssekms_encryption_context # If present, indicates the Amazon Web Services KMS Encryption Context # to use for object encryption. The value of this header is a # base64-encoded UTF-8 string holding JSON with the encryption context # key-value pairs. This value is stored as object metadata and # automatically gets passed on to Amazon Web Services KMS for future # `GetObject` or `CopyObject` operations on this object. 
# # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] bucket_key_enabled # Indicates whether the uploaded object uses an S3 Bucket Key for # server-side encryption with Key Management Service (KMS) keys # (SSE-KMS). # # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput AWS API Documentation # class PutObjectOutput < Struct.new( :expiration, :etag, :checksum_crc32, :checksum_crc32c, :checksum_sha1, :checksum_sha256, :server_side_encryption, :version_id, :sse_customer_algorithm, :sse_customer_key_md5, :ssekms_key_id, :ssekms_encryption_context, :bucket_key_enabled, :request_charged) SENSITIVE = [:ssekms_key_id, :ssekms_encryption_context] include Aws::Structure end # @!attribute [rw] acl # The canned ACL to apply to the object. For more information, see # [Canned ACL][1] in the *Amazon S3 User Guide*. # # When adding a new object, you can use headers to grant ACL-based # permissions to individual Amazon Web Services accounts or to # predefined groups defined by Amazon S3. These permissions are then # added to the ACL on the object. By default, all objects are private. # Only the owner has full access control. For more information, see # [Access Control List (ACL) Overview][2] and [Managing ACLs Using the # REST API][3] in the *Amazon S3 User Guide*. # # If the bucket that you're uploading objects to uses the bucket # owner enforced setting for S3 Object Ownership, ACLs are disabled # and no longer affect permissions. Buckets that use this setting only # accept PUT requests that don't specify an ACL or PUT requests that # specify bucket owner full control ACLs, such as the # `bucket-owner-full-control` canned ACL or an equivalent form of this # ACL expressed in the XML format. PUT requests that contain other # ACLs (for example, custom grants to certain Amazon Web Services # accounts) fail and return a `400` error with the error code # `AccessControlListNotSupported`. For more information, see [ # Controlling ownership of objects and disabling ACLs][4] in the # *Amazon S3 User Guide*. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html # [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html # @return [String] # # @!attribute [rw] body # Object data. # @return [IO] # # @!attribute [rw] bucket # The bucket name to which the PUT action was initiated. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). 
For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] cache_control # Can be used to specify caching behavior along the request/reply # chain. For more information, see # [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9][1]. # # # # [1]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 # @return [String] # # @!attribute [rw] content_disposition # Specifies presentational information for the object. For more # information, see # [https://www.rfc-editor.org/rfc/rfc6266#section-4][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc6266#section-4 # @return [String] # # @!attribute [rw] content_encoding # Specifies what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the # media-type referenced by the Content-Type header field. For more # information, see # [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding # @return [String] # # @!attribute [rw] content_language # The language the content is in. # @return [String] # # @!attribute [rw] content_length # Size of the body in bytes. This parameter is useful when the size of # the body cannot be determined automatically. For more information, # see # [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length # @return [Integer] # # @!attribute [rw] content_md5 # The base64-encoded 128-bit MD5 digest of the message (without the # headers) according to RFC 1864. This header can be used as a message # integrity check to verify that the data is the same data that was # originally sent. Although it is optional, we recommend using the # Content-MD5 mechanism as an end-to-end integrity check. 
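# A short sketch of the end-to-end `Content-MD5` integrity check
# described above, assuming the request body fits in memory (bucket and
# key names are placeholders):
#
#     require 'aws-sdk-s3'
#     require 'digest'
#
#     s3 = Aws::S3::Client.new
#     body = File.read('report.csv')
#     s3.put_object(
#       bucket: 'amzn-s3-demo-bucket',
#       key: 'report.csv',
#       body: body,
#       content_md5: Digest::MD5.base64digest(body) # RFC 1864 base64 MD5 of the body
#     )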
For more # information about REST request authentication, see [REST # Authentication][1]. # # The `Content-MD5` header is required for any request to upload an # object with a retention period configured using Amazon S3 Object # Lock. For more information about Amazon S3 Object Lock, see [Amazon # S3 Object Lock Overview][2] in the *Amazon S3 User Guide*. # # # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html # @return [String] # # @!attribute [rw] content_type # A standard MIME type describing the format of the contents. For more # information, see # [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum-algorithm` or # `x-amz-trailer` header sent. Otherwise, Amazon S3 fails the request # with the HTTP status code `400 Bad Request`. # # For the `x-amz-checksum-algorithm` header, replace `algorithm` # with the supported algorithm from the following list: # # * CRC32 # # * CRC32C # # * SHA1 # # * SHA256 # # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # If the individual checksum value you provide through # `x-amz-checksum-algorithm` doesn't match the checksum algorithm # you set through `x-amz-sdk-checksum-algorithm`, Amazon S3 ignores # any provided `ChecksumAlgorithm` parameter and uses the checksum # algorithm that matches the provided value in # `x-amz-checksum-algorithm`. # # For directory buckets, when you use Amazon Web Services SDKs, # `CRC32` is the default checksum algorithm that's used for # performance. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_crc32 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32 checksum of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_crc32c # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32C checksum of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_sha1 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 160-bit SHA-1 digest of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*.
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_sha256 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 256-bit SHA-256 digest of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] expires # The date and time at which the object is no longer cacheable. For # more information, see # [https://www.rfc-editor.org/rfc/rfc7234#section-5.3][1]. # # # # [1]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3 # @return [Time] # # @!attribute [rw] grant_full_control # Gives the grantee READ, READ\_ACP, and WRITE\_ACP permissions on the # object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @return [String] # # @!attribute [rw] grant_read # Allows grantee to read the object data and its metadata. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @return [String] # # @!attribute [rw] grant_read_acp # Allows grantee to read the object ACL. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @return [String] # # @!attribute [rw] grant_write_acp # Allows grantee to write the ACL for the applicable object. # # * This functionality is not supported for directory buckets. # # * This functionality is not supported for Amazon S3 on Outposts. # # # @return [String] # # @!attribute [rw] key # Object key for which the PUT action was initiated. # @return [String] # # @!attribute [rw] metadata # A map of metadata to store with the object in S3. # @return [Hash] # # @!attribute [rw] server_side_encryption # The server-side encryption algorithm that was used when you store # this object in Amazon S3 (for example, `AES256`, `aws:kms`, # `aws:kms:dsse`). # # General purpose buckets - You have four mutually exclusive # options to protect data using server-side encryption in Amazon S3, # depending on how you choose to manage the encryption keys. # Specifically, the encryption key options are Amazon S3 managed keys # (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and # customer-provided keys (SSE-C). Amazon S3 encrypts data with # server-side encryption by using Amazon S3 managed keys (SSE-S3) by # default. You can optionally tell Amazon S3 to encrypt data at rest # by using server-side encryption with other key options. For more # information, see [Using Server-Side Encryption][1] in the *Amazon S3 # User Guide*. # # Directory buckets - For directory buckets, only the # server-side encryption with Amazon S3 managed keys (SSE-S3) # (`AES256`) value is supported. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html # @return [String] # # @!attribute [rw] storage_class # By default, Amazon S3 uses the STANDARD Storage Class to store newly # created objects. The STANDARD storage class provides high durability # and high availability. Depending on performance needs, you can # specify a different Storage Class. For more information, see # [Storage Classes][1] in the *Amazon S3 User Guide*. 
# # * For directory buckets, only the S3 Express One Zone storage class # is supported to store newly created objects. # # * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # @return [String] # # @!attribute [rw] website_redirect_location # If the bucket is configured as a website, redirects requests for # this object to another object in the same bucket or to an external # URL. Amazon S3 stores the value of this header in the object # metadata. For information about object metadata, see [Object Key and # Metadata][1] in the *Amazon S3 User Guide*. # # In the following example, the request header sets the redirect to an # object (anotherPage.html) in the same bucket: # # `x-amz-website-redirect-location: /anotherPage.html` # # In the following example, the request header sets the object # redirect to another website: # # `x-amz-website-redirect-location: http://www.example.com/` # # For more information about website hosting in Amazon S3, see # [Hosting Websites on Amazon S3][2] and [How to Configure Website # Page Redirects][3] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html # @return [String] # # @!attribute [rw] sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, `AES256`). # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use # in encrypting data. This value is used to store the object and then # it is discarded; Amazon S3 does not store the encryption key. The # key must be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check # to ensure that the encryption key was transmitted without error. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] ssekms_key_id # If `x-amz-server-side-encryption` has a valid value of `aws:kms` or # `aws:kms:dsse`, this header specifies the ID (Key ID, Key ARN, or # Key Alias) of the Key Management Service (KMS) symmetric encryption # customer managed key that was used for the object. If you specify # `x-amz-server-side-encryption:aws:kms` or # `x-amz-server-side-encryption:aws:kms:dsse`, but do not provide # `x-amz-server-side-encryption-aws-kms-key-id`, Amazon S3 uses the # Amazon Web Services managed key (`aws/s3`) to protect the data. If # the KMS key does not exist in the same account that's issuing the # command, you must use the full ARN and not just the ID. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] ssekms_encryption_context # Specifies the Amazon Web Services KMS Encryption Context to use for # object encryption. The value of this header is a base64-encoded # UTF-8 string holding JSON with the encryption context key-value # pairs.
This value is stored as object metadata and automatically # gets passed on to Amazon Web Services KMS for future `GetObject` or # `CopyObject` operations on this object. This value must be # explicitly added during `CopyObject` operations. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] bucket_key_enabled # Specifies whether Amazon S3 should use an S3 Bucket Key for object # encryption with server-side encryption using Key Management Service # (KMS) keys (SSE-KMS). Setting this header to `true` causes Amazon S3 # to use an S3 Bucket Key for object encryption with SSE-KMS. # # Specifying this header with a PUT action doesn’t affect bucket-level # settings for S3 Bucket Key. # # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] tagging # The tag-set for the object. The tag-set must be encoded as URL Query # parameters. (For example, "Key1=Value1") # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] object_lock_mode # The Object Lock mode that you want to apply to this object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] object_lock_retain_until_date # The date and time when you want this object's Object Lock to # expire. Must be formatted as a timestamp parameter. # # This functionality is not supported for directory buckets. # # # @return [Time] # # @!attribute [rw] object_lock_legal_hold_status # Specifies whether a legal hold will be applied to this object. For # more information about S3 Object Lock, see [Object Lock][1] in the # *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). 
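# A hedged sketch combining several of the `PutObjectRequest` fields
# documented above (content type, storage class, metadata, website
# redirect, URL-encoded tagging, and expected bucket owner); the bucket,
# key, and account ID are placeholders:
#
#     require 'aws-sdk-s3'
#
#     s3 = Aws::S3::Client.new
#     s3.put_object(
#       bucket: 'amzn-s3-demo-bucket',
#       key: 'docs/readme.html',
#       body: '<html>...</html>',
#       content_type: 'text/html',
#       storage_class: 'STANDARD_IA',
#       metadata: { 'reviewed-by' => 'alice' },       # stored as x-amz-meta-* headers
#       website_redirect_location: '/anotherPage.html',
#       tagging: 'department=docs&year=2024',         # URL query encoding
#       expected_bucket_owner: '111122223333'
#     )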
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest AWS API Documentation # class PutObjectRequest < Struct.new( :acl, :body, :bucket, :cache_control, :content_disposition, :content_encoding, :content_language, :content_length, :content_md5, :content_type, :checksum_algorithm, :checksum_crc32, :checksum_crc32c, :checksum_sha1, :checksum_sha256, :expires, :grant_full_control, :grant_read, :grant_read_acp, :grant_write_acp, :key, :metadata, :server_side_encryption, :storage_class, :website_redirect_location, :sse_customer_algorithm, :sse_customer_key, :sse_customer_key_md5, :ssekms_key_id, :ssekms_encryption_context, :bucket_key_enabled, :request_payer, :tagging, :object_lock_mode, :object_lock_retain_until_date, :object_lock_legal_hold_status, :expected_bucket_owner) SENSITIVE = [:sse_customer_key, :ssekms_key_id, :ssekms_encryption_context] include Aws::Structure end # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetentionOutput AWS API Documentation # class PutObjectRetentionOutput < Struct.new( :request_charged) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name that contains the object you want to apply this # Object Retention configuration to. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # @return [String] # # @!attribute [rw] key # The key name for the object that you want to apply this Object # Retention configuration to. # @return [String] # # @!attribute [rw] retention # The container element for the Object Retention configuration. # @return [Types::ObjectLockRetention] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] version_id # The version ID for the object that you want to apply this Object # Retention configuration to. # @return [String] # # @!attribute [rw] bypass_governance_retention # Indicates whether this action should bypass Governance-mode # restrictions. # @return [Boolean] # # @!attribute [rw] content_md5 # The MD5 hash for the request body. 
# # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetentionRequest AWS API Documentation # class PutObjectRetentionRequest < Struct.new( :bucket, :key, :retention, :request_payer, :version_id, :bypass_governance_retention, :content_md5, :checksum_algorithm, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] version_id # The versionId of the object the tag-set was added to. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput AWS API Documentation # class PutObjectTaggingOutput < Struct.new( :version_id) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name containing the object. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][1] in the *Amazon S3 User Guide*. # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][2] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] key # Name of the object key. # @return [String] # # @!attribute [rw] version_id # The versionId of the object that the tag-set will be added to. # @return [String] # # @!attribute [rw] content_md5 # The MD5 hash for the request body. 
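# A minimal sketch of the retention request documented above, assuming
# a versioned, Object Lock enabled bucket and GOVERNANCE-mode retention
# (the bucket, key, and date are placeholders):
#
#     require 'aws-sdk-s3'
#     require 'time'
#
#     s3 = Aws::S3::Client.new
#     s3.put_object_retention(
#       bucket: 'amzn-s3-demo-bucket',
#       key: 'contracts/2024-01.pdf',
#       retention: {
#         mode: 'GOVERNANCE',
#         retain_until_date: Time.parse('2025-01-01T00:00:00Z')
#       }
#     )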
# # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] tagging # Container for the `TagSet` and `Tag` elements # @return [Types::Tagging] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest AWS API Documentation # class PutObjectTaggingRequest < Struct.new( :bucket, :key, :version_id, :content_md5, :checksum_algorithm, :tagging, :expected_bucket_owner, :request_payer) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The name of the Amazon S3 bucket whose `PublicAccessBlock` # configuration you want to set. # @return [String] # # @!attribute [rw] content_md5 # The MD5 hash of the `PutPublicAccessBlock` request body. # # For requests made using the Amazon Web Services Command Line # Interface (CLI) or Amazon Web Services SDKs, this field is # calculated automatically. # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] public_access_block_configuration # The `PublicAccessBlock` configuration that you want to apply to this # Amazon S3 bucket. 
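# A minimal sketch of the tagging request documented above; here the
# tag-set is sent as a structured `Types::Tagging` container rather
# than URL-encoded query parameters (bucket and key are placeholders):
#
#     require 'aws-sdk-s3'
#
#     s3 = Aws::S3::Client.new
#     resp = s3.put_object_tagging(
#       bucket: 'amzn-s3-demo-bucket',
#       key: 'report.csv',
#       tagging: { tag_set: [{ key: 'department', value: 'finance' }] }
#     )
#     puts resp.version_id # the versionId the tag-set was added to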
You can enable the configuration options in any # combination. For more information about when Amazon S3 considers a # bucket or object public, see [The Meaning of "Public"][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status # @return [Types::PublicAccessBlockConfiguration] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlockRequest AWS API Documentation # class PutPublicAccessBlockRequest < Struct.new( :bucket, :content_md5, :checksum_algorithm, :public_access_block_configuration, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # Specifies the configuration for publishing messages to an Amazon # Simple Queue Service (Amazon SQS) queue when Amazon S3 detects # specified events. # # @!attribute [rw] id # An optional unique identifier for configurations in a notification # configuration. If you don't provide one, Amazon S3 will assign an # ID. # @return [String] # # @!attribute [rw] queue_arn # The Amazon Resource Name (ARN) of the Amazon SQS queue to which # Amazon S3 publishes a message when it detects events of the # specified type. # @return [String] # # @!attribute [rw] events # A collection of bucket events for which to send notifications # @return [Array] # # @!attribute [rw] filter # Specifies object key name filtering rules. For information about key # name filtering, see [Configuring event notifications using object # key name filtering][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html # @return [Types::NotificationConfigurationFilter] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration AWS API Documentation # class QueueConfiguration < Struct.new( :id, :queue_arn, :events, :filter) SENSITIVE = [] include Aws::Structure end # This data type is deprecated. Use [QueueConfiguration][1] for the same # purposes. This data type specifies the configuration for publishing # messages to an Amazon Simple Queue Service (Amazon SQS) queue when # Amazon S3 detects specified events. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_QueueConfiguration.html # # @!attribute [rw] id # An optional unique identifier for configurations in a notification # configuration. If you don't provide one, Amazon S3 will assign an # ID. # @return [String] # # @!attribute [rw] event # The bucket event for which to send notifications. # @return [String] # # @!attribute [rw] events # A collection of bucket events for which to send notifications. # @return [Array] # # @!attribute [rw] queue # The Amazon Resource Name (ARN) of the Amazon SQS queue to which # Amazon S3 publishes a message when it detects events of the # specified type. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated AWS API Documentation # class QueueConfigurationDeprecated < Struct.new( :id, :event, :events, :queue) SENSITIVE = [] include Aws::Structure end # The container for the records event. # # @!attribute [rw] payload # The byte array of partial, one or more result records. 
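# A small sketch of applying the `PublicAccessBlock` configuration
# documented above; enabling all four options is the most restrictive
# combination, and the bucket name is a placeholder:
#
#     require 'aws-sdk-s3'
#
#     s3 = Aws::S3::Client.new
#     s3.put_public_access_block(
#       bucket: 'amzn-s3-demo-bucket',
#       public_access_block_configuration: {
#         block_public_acls: true,
#         ignore_public_acls: true,
#         block_public_policy: true,
#         restrict_public_buckets: true
#       }
#     )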
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RecordsEvent AWS API Documentation # class RecordsEvent < Struct.new( :payload, :event_type) SENSITIVE = [] include Aws::Structure end # Specifies how requests are redirected. In the event of an error, you # can specify a different error code to return. # # @!attribute [rw] host_name # The host name to use in the redirect request. # @return [String] # # @!attribute [rw] http_redirect_code # The HTTP redirect code to use on the response. Not required if one # of the siblings is present. # @return [String] # # @!attribute [rw] protocol # Protocol to use when redirecting requests. The default is the # protocol that is used in the original request. # @return [String] # # @!attribute [rw] replace_key_prefix_with # The object key prefix to use in the redirect request. For example, # to redirect requests for all pages with prefix `docs/` (objects in # the `docs/` folder) to `documents/`, you can set a condition block # with `KeyPrefixEquals` set to `docs/` and in the Redirect set # `ReplaceKeyPrefixWith` to `/documents`. Not required if one of the # siblings is present. Can be present only if `ReplaceKeyWith` is not # provided. # # Replacement must be made for object keys containing special # characters (such as carriage returns) when using XML requests. For # more information, see [ XML related object key constraints][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints # @return [String] # # @!attribute [rw] replace_key_with # The specific object key to use in the redirect request. For example, # redirect the request to `error.html`. Not required if one of the # siblings is present. Can be present only if `ReplaceKeyPrefixWith` # is not provided. # # Replacement must be made for object keys containing special # characters (such as carriage returns) when using XML requests. For # more information, see [ XML related object key constraints][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect AWS API Documentation # class Redirect < Struct.new( :host_name, :http_redirect_code, :protocol, :replace_key_prefix_with, :replace_key_with) SENSITIVE = [] include Aws::Structure end # Specifies the redirect behavior of all requests to a website endpoint # of an Amazon S3 bucket. # # @!attribute [rw] host_name # Name of the host where requests are redirected. # @return [String] # # @!attribute [rw] protocol # Protocol to use when redirecting requests. The default is the # protocol that is used in the original request. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo AWS API Documentation # class RedirectAllRequestsTo < Struct.new( :host_name, :protocol) SENSITIVE = [] include Aws::Structure end # A filter that you can specify to select modifications on replicas. # Amazon S3 doesn't replicate replica modifications by default. In the # latest version of replication configuration (when `Filter` is # specified), you can specify this element and set the status to # `Enabled` to replicate modifications on replicas. # # If you don't specify the `Filter` element, Amazon S3 assumes that the # replication configuration is the earlier version, V1. In the earlier # version, this element is not allowed.
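# A hedged sketch of the `Redirect` and `RoutingRule` fields documented
# above, wired into a website configuration through
# `put_bucket_website` (bucket name and prefixes are placeholders):
#
#     require 'aws-sdk-s3'
#
#     s3 = Aws::S3::Client.new
#     s3.put_bucket_website(
#       bucket: 'amzn-s3-demo-bucket',
#       website_configuration: {
#         index_document: { suffix: 'index.html' },
#         routing_rules: [{
#           condition: { key_prefix_equals: 'docs/' },
#           redirect: { replace_key_prefix_with: 'documents/' }
#         }]
#       }
#     )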
# # # # @!attribute [rw] status # Specifies whether Amazon S3 replicates modifications on replicas. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicaModifications AWS API Documentation # class ReplicaModifications < Struct.new( :status) SENSITIVE = [] include Aws::Structure end # A container for replication rules. You can add up to 1,000 rules. The # maximum size of a replication configuration is 2 MB. # # @!attribute [rw] role # The Amazon Resource Name (ARN) of the Identity and Access Management # (IAM) role that Amazon S3 assumes when replicating objects. For more # information, see [How to Set Up Replication][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html # @return [String] # # @!attribute [rw] rules # A container for one or more replication rules. A replication # configuration must have at least one rule and can contain a maximum # of 1,000 rules. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration AWS API Documentation # class ReplicationConfiguration < Struct.new( :role, :rules) SENSITIVE = [] include Aws::Structure end # Specifies which Amazon S3 objects to replicate and where to store the # replicas. # # @!attribute [rw] id # A unique identifier for the rule. The maximum value is 255 # characters. # @return [String] # # @!attribute [rw] priority # The priority indicates which rule has precedence whenever two or # more replication rules conflict. Amazon S3 will attempt to replicate # objects according to all replication rules. However, if there are # two or more rules with the same destination bucket, then objects # will be replicated according to the rule with the highest priority. # The higher the number, the higher the priority. # # For more information, see [Replication][1] in the *Amazon S3 User # Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html # @return [Integer] # # @!attribute [rw] prefix # An object key name prefix that identifies the object or objects to # which the rule applies. The maximum prefix length is 1,024 # characters. To include all objects in a bucket, specify an empty # string. # # Replacement must be made for object keys containing special # characters (such as carriage returns) when using XML requests. For # more information, see [ XML related object key constraints][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints # @return [String] # # @!attribute [rw] filter # A filter that identifies the subset of objects to which the # replication rule applies. A `Filter` must specify exactly one # `Prefix`, `Tag`, or an `And` child element. # @return [Types::ReplicationRuleFilter] # # @!attribute [rw] status # Specifies whether the rule is enabled. # @return [String] # # @!attribute [rw] source_selection_criteria # A container that describes additional filters for identifying the # source objects that you want to replicate. You can choose to enable # or disable the replication of these objects. Currently, Amazon S3 # supports only the filter that you can specify for objects created # with server-side encryption using a customer managed key stored in # Amazon Web Services Key Management Service (SSE-KMS). # @return [Types::SourceSelectionCriteria] # # @!attribute [rw] existing_object_replication # Optional configuration to replicate existing source bucket objects. 
# For more information, see [Replicating Existing Objects][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication # @return [Types::ExistingObjectReplication] # # @!attribute [rw] destination # A container for information about the replication destination and # its configurations including enabling the S3 Replication Time # Control (S3 RTC). # @return [Types::Destination] # # @!attribute [rw] delete_marker_replication # Specifies whether Amazon S3 replicates delete markers. If you # specify a `Filter` in your replication configuration, you must also # include a `DeleteMarkerReplication` element. If your `Filter` # includes a `Tag` element, the `DeleteMarkerReplication` `Status` # must be set to Disabled, because Amazon S3 does not support # replicating delete markers for tag-based rules. For an example # configuration, see [Basic Rule Configuration][1]. # # For more information about delete marker replication, see [Basic # Rule Configuration][2]. # # If you are using an earlier version of the replication # configuration, Amazon S3 handles replication of delete markers # differently. For more information, see [Backward Compatibility][3]. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config # [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations # @return [Types::DeleteMarkerReplication] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule AWS API Documentation # class ReplicationRule < Struct.new( :id, :priority, :prefix, :filter, :status, :source_selection_criteria, :existing_object_replication, :destination, :delete_marker_replication) SENSITIVE = [] include Aws::Structure end # A container for specifying rule filters. The filters determine the # subset of objects to which the rule applies. This element is required # only if you specify more than one filter. # # For example: # # * If you specify both a `Prefix` and a `Tag` filter, wrap these # filters in an `And` tag. # # * If you specify a filter based on multiple tags, wrap the `Tag` # elements in an `And` tag. # # @!attribute [rw] prefix # An object key name prefix that identifies the subset of objects to # which the rule applies. # @return [String] # # @!attribute [rw] tags # An array of tags containing key and value pairs. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRuleAndOperator AWS API Documentation # class ReplicationRuleAndOperator < Struct.new( :prefix, :tags) SENSITIVE = [] include Aws::Structure end # A filter that identifies the subset of objects to which the # replication rule applies. A `Filter` must specify exactly one # `Prefix`, `Tag`, or an `And` child element. # # @!attribute [rw] prefix # An object key name prefix that identifies the subset of objects to # which the rule applies. # # Replacement must be made for object keys containing special # characters (such as carriage returns) when using XML requests. For # more information, see [ XML related object key constraints][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints # @return [String] # # @!attribute [rw] tag # A container for specifying a tag key and value. 
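# A hedged sketch of a latest-version (filter-based) replication rule
# carrying the fields documented above; the role ARN, bucket names, and
# rule ID are placeholders, and `DeleteMarkerReplication` is included
# because it is required whenever `Filter` is specified:
#
#     require 'aws-sdk-s3'
#
#     s3 = Aws::S3::Client.new
#     s3.put_bucket_replication(
#       bucket: 'amzn-s3-demo-source',
#       replication_configuration: {
#         role: 'arn:aws:iam::111122223333:role/replication-role',
#         rules: [{
#           id: 'replicate-logs',
#           status: 'Enabled',
#           priority: 1,
#           filter: { prefix: 'logs/' },
#           destination: { bucket: 'arn:aws:s3:::amzn-s3-demo-destination' },
#           delete_marker_replication: { status: 'Disabled' }
#         }]
#       }
#     )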
# # The rule applies only to objects that have the tag in their tag set. # @return [Types::Tag] # # @!attribute [rw] and # A container for specifying rule filters. The filters determine the # subset of objects to which the rule applies. This element is # required only if you specify more than one filter. For example: # # * If you specify both a `Prefix` and a `Tag` filter, wrap these # filters in an `And` tag. # # * If you specify a filter based on multiple tags, wrap the `Tag` # elements in an `And` tag. # @return [Types::ReplicationRuleAndOperator] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRuleFilter AWS API Documentation # class ReplicationRuleFilter < Struct.new( :prefix, :tag, :and) SENSITIVE = [] include Aws::Structure end # A container specifying S3 Replication Time Control (S3 RTC) related # information, including whether S3 RTC is enabled and the time when all # objects and operations on objects must be replicated. Must be # specified together with a `Metrics` block. # # @!attribute [rw] status # Specifies whether the replication time is enabled. # @return [String] # # @!attribute [rw] time # A container specifying the time by which replication should be # complete for all objects and operations on objects. # @return [Types::ReplicationTimeValue] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationTime AWS API Documentation # class ReplicationTime < Struct.new( :status, :time) SENSITIVE = [] include Aws::Structure end # A container specifying the time value for S3 Replication Time Control # (S3 RTC) and replication metrics `EventThreshold`. # # @!attribute [rw] minutes # Contains an integer specifying time in minutes. # # Valid value: 15 # @return [Integer] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationTimeValue AWS API Documentation # class ReplicationTimeValue < Struct.new( :minutes) SENSITIVE = [] include Aws::Structure end # Container for Payer. # # @!attribute [rw] payer # Specifies who pays for the download and request fees. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration AWS API Documentation # class RequestPaymentConfiguration < Struct.new( :payer) SENSITIVE = [] include Aws::Structure end # Container for specifying if periodic `QueryProgress` messages should # be sent. # # @!attribute [rw] enabled # Specifies whether periodic QueryProgress frames should be sent. # Valid values: TRUE, FALSE. Default value: FALSE. # @return [Boolean] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestProgress AWS API Documentation # class RequestProgress < Struct.new( :enabled) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] restore_output_path # Indicates the path in the provided S3 output location where Select # results will be restored to. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput AWS API Documentation # class RestoreObjectOutput < Struct.new( :request_charged, :restore_output_path) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] bucket # The bucket name containing the object to restore. 
# # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][1] in the *Amazon S3 User Guide*. # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][2] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] key # Object key for which the action was initiated. # @return [String] # # @!attribute [rw] version_id # VersionId used to reference a specific version of the object. # @return [String] # # @!attribute [rw] restore_request # Container for restore job parameters. # @return [Types::RestoreRequest] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). 
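# A minimal sketch of the restore request documented here, assuming an
# object archived in an S3 Glacier storage class (bucket and key are
# placeholders):
#
#     require 'aws-sdk-s3'
#
#     s3 = Aws::S3::Client.new
#     s3.restore_object(
#       bucket: 'amzn-s3-demo-bucket',
#       key: 'archive/2019.tar',
#       restore_request: {
#         days: 7, # lifetime of the temporary restored copy
#         glacier_job_parameters: { tier: 'Standard' }
#       }
#     )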
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest AWS API Documentation # class RestoreObjectRequest < Struct.new( :bucket, :key, :version_id, :restore_request, :request_payer, :checksum_algorithm, :expected_bucket_owner) SENSITIVE = [] include Aws::Structure end # Container for restore job parameters. # # @!attribute [rw] days # Lifetime of the active copy in days. Do not use with restores that # specify `OutputLocation`. # # The Days element is required for regular restores, and must not be # provided for select requests. # @return [Integer] # # @!attribute [rw] glacier_job_parameters # S3 Glacier related parameters pertaining to this job. Do not use # with restores that specify `OutputLocation`. # @return [Types::GlacierJobParameters] # # @!attribute [rw] type # Type of restore request. # @return [String] # # @!attribute [rw] tier # Retrieval tier at which the restore will be processed. # @return [String] # # @!attribute [rw] description # The optional description for the job. # @return [String] # # @!attribute [rw] select_parameters # Describes the parameters for Select job types. # @return [Types::SelectParameters] # # @!attribute [rw] output_location # Describes the location where the restore job's output is stored. # @return [Types::OutputLocation] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest AWS API Documentation # class RestoreRequest < Struct.new( :days, :glacier_job_parameters, :type, :tier, :description, :select_parameters, :output_location) SENSITIVE = [] include Aws::Structure end # Specifies the restoration status of an object. Objects in certain # storage classes must be restored before they can be retrieved. For # more information about these storage classes and how to work with # archived objects, see [ Working with archived objects][1] in the # *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. Only the S3 # Express One Zone storage class is supported by directory buckets to # store objects. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html # # @!attribute [rw] is_restore_in_progress # Specifies whether the object is currently being restored. If the # object restoration is in progress, the header returns the value # `TRUE`. For example: # # `x-amz-optional-object-attributes: IsRestoreInProgress="true"` # # If the object restoration has completed, the header returns the # value `FALSE`. For example: # # `x-amz-optional-object-attributes: IsRestoreInProgress="false", # RestoreExpiryDate="2012-12-21T00:00:00.000Z"` # # If the object hasn't been restored, there is no header response. # @return [Boolean] # # @!attribute [rw] restore_expiry_date # Indicates when the restored copy will expire. This value is # populated only if the object has already been restored. For example: # # `x-amz-optional-object-attributes: IsRestoreInProgress="false", # RestoreExpiryDate="2012-12-21T00:00:00.000Z"` # @return [Time] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreStatus AWS API Documentation # class RestoreStatus < Struct.new( :is_restore_in_progress, :restore_expiry_date) SENSITIVE = [] include Aws::Structure end # Specifies the redirect behavior and when a redirect is applied. For # more information about routing rules, see [Configuring advanced # conditional redirects][1] in the *Amazon S3 User Guide*. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects # # @!attribute [rw] condition # A container for describing a condition that must be met for the # specified redirect to apply. For example, 1. If request is for pages # in the `/docs` folder, redirect to the `/documents` folder. 2. If # request results in HTTP error 4xx, redirect request to another host # where you might process the error. # @return [Types::Condition] # # @!attribute [rw] redirect # Container for redirect information. You can redirect requests to # another host, to another page, or with another protocol. In the # event of an error, you can specify a different error code to return. # @return [Types::Redirect] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule AWS API Documentation # class RoutingRule < Struct.new( :condition, :redirect) SENSITIVE = [] include Aws::Structure end # Specifies lifecycle rules for an Amazon S3 bucket. For more # information, see [Put Bucket Lifecycle Configuration][1] in the # *Amazon S3 API Reference*. For examples, see [Put Bucket Lifecycle # Configuration Examples][2]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples # # @!attribute [rw] expiration # Specifies the expiration for the lifecycle of the object. # @return [Types::LifecycleExpiration] # # @!attribute [rw] id # Unique identifier for the rule. The value can't be longer than 255 # characters. # @return [String] # # @!attribute [rw] prefix # Object key prefix that identifies one or more objects to which this # rule applies. # # Replacement must be made for object keys containing special # characters (such as carriage returns) when using XML requests. For # more information, see [ XML related object key constraints][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints # @return [String] # # @!attribute [rw] status # If `Enabled`, the rule is currently being applied. If `Disabled`, # the rule is not currently being applied. # @return [String] # # @!attribute [rw] transition # Specifies when an object transitions to a specified storage class. # For more information about Amazon S3 lifecycle configuration rules, # see [Transitioning Objects Using Amazon S3 Lifecycle][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html # @return [Types::Transition] # # @!attribute [rw] noncurrent_version_transition # Container for the transition rule that describes when noncurrent # objects transition to the `STANDARD_IA`, `ONEZONE_IA`, # `INTELLIGENT_TIERING`, `GLACIER_IR`, `GLACIER`, or `DEEP_ARCHIVE` # storage class. If your bucket is versioning-enabled (or versioning # is suspended), you can set this action to request that Amazon S3 # transition noncurrent object versions to the `STANDARD_IA`, # `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER_IR`, `GLACIER`, or # `DEEP_ARCHIVE` storage class at a specific period in the object's # lifetime. # @return [Types::NoncurrentVersionTransition] # # @!attribute [rw] noncurrent_version_expiration # Specifies when noncurrent object versions expire. Upon expiration, # Amazon S3 permanently deletes the noncurrent object versions. 
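# A hedged sketch of a lifecycle rule carrying the fields documented
# above, expressed through `put_bucket_lifecycle_configuration` (the
# newer, filter-based form of this rule); the bucket name, prefix, and
# day counts are placeholders:
#
#     require 'aws-sdk-s3'
#
#     s3 = Aws::S3::Client.new
#     s3.put_bucket_lifecycle_configuration(
#       bucket: 'amzn-s3-demo-bucket',
#       lifecycle_configuration: {
#         rules: [{
#           id: 'expire-tmp',
#           status: 'Enabled',
#           filter: { prefix: 'tmp/' },
#           expiration: { days: 30 },
#           noncurrent_version_expiration: { noncurrent_days: 7 },
#           abort_incomplete_multipart_upload: { days_after_initiation: 7 }
#         }]
#       }
#     )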
You # set this lifecycle configuration action on a bucket that has # versioning enabled (or suspended) to request that Amazon S3 delete # noncurrent object versions at a specific period in the object's # lifetime. # @return [Types::NoncurrentVersionExpiration] # # @!attribute [rw] abort_incomplete_multipart_upload # Specifies the days since the initiation of an incomplete multipart # upload that Amazon S3 will wait before permanently removing all # parts of the upload. For more information, see [ Aborting Incomplete # Multipart Uploads Using a Bucket Lifecycle Configuration][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config # @return [Types::AbortIncompleteMultipartUpload] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule AWS API Documentation # class Rule < Struct.new( :expiration, :id, :prefix, :status, :transition, :noncurrent_version_transition, :noncurrent_version_expiration, :abort_incomplete_multipart_upload) SENSITIVE = [] include Aws::Structure end # A container for object key name prefix and suffix filtering rules. # # @!attribute [rw] filter_rules # A list of containers for the key-value pair that defines the # criteria for the filter rule. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter AWS API Documentation # class S3KeyFilter < Struct.new( :filter_rules) SENSITIVE = [] include Aws::Structure end # Describes an Amazon S3 location that will receive the results of the # restore request. # # @!attribute [rw] bucket_name # The name of the bucket where the restore results will be placed. # @return [String] # # @!attribute [rw] prefix # The prefix that is prepended to the restore results for this # request. # @return [String] # # @!attribute [rw] encryption # Contains the type of server-side encryption used. # @return [Types::Encryption] # # @!attribute [rw] canned_acl # The canned ACL to apply to the restore results. # @return [String] # # @!attribute [rw] access_control_list # A list of grants that control access to the staged results. # @return [Array] # # @!attribute [rw] tagging # The tag-set that is applied to the restore results. # @return [Types::Tagging] # # @!attribute [rw] user_metadata # A list of metadata to store with the restore results in S3. # @return [Array] # # @!attribute [rw] storage_class # The class of storage used to store the restore results. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3Location AWS API Documentation # class S3Location < Struct.new( :bucket_name, :prefix, :encryption, :canned_acl, :access_control_list, :tagging, :user_metadata, :storage_class) SENSITIVE = [] include Aws::Structure end # Specifies the use of SSE-KMS to encrypt delivered inventory reports. # # @!attribute [rw] key_id # Specifies the ID of the Key Management Service (KMS) symmetric # encryption customer managed key to use for encrypting inventory # reports. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SSEKMS AWS API Documentation # class SSEKMS < Struct.new( :key_id) SENSITIVE = [:key_id] include Aws::Structure end # Specifies the use of SSE-S3 to encrypt delivered inventory reports. # # @api private # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SSES3 AWS API Documentation # class SSES3 < Aws::EmptyStructure; end # Specifies the byte range of the object to get the records from. 
A # record is processed when its first byte is contained by the range. # This parameter is optional, but when specified, it must not be empty. # See RFC 2616, Section 14.35.1 about how to specify the start and end # of the range. # # @!attribute [rw] start # Specifies the start of the byte range. This parameter is optional. # Valid values: non-negative integers. The default value is 0. If only # `start` is supplied, it means scan from that point to the end of the # file. For example, `<scanrange><start>50</start></scanrange>` means # scan from byte 50 until the end of the file. # @return [Integer] # # @!attribute [rw] end # Specifies the end of the byte range. This parameter is optional. # Valid values: non-negative integers. The default value is one less # than the size of the object being queried. If only the End parameter # is supplied, it is interpreted to mean scan the last N bytes of the # file. For example, `<scanrange><end>50</end></scanrange>` means scan # the last 50 bytes. # @return [Integer] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ScanRange AWS API Documentation # class ScanRange < Struct.new( :start, :end) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] payload # The array of results. # @return [Types::SelectObjectContentEventStream] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContentOutput AWS API Documentation # class SelectObjectContentOutput < Struct.new( :payload) SENSITIVE = [] include Aws::Structure end # Request to filter the contents of an Amazon S3 object based on a # simple Structured Query Language (SQL) statement. In the request, # along with the SQL expression, you must specify a data serialization # format (JSON or CSV) of the object. Amazon S3 uses this to parse # object data into records. It returns only records that match the # specified SQL expression. You must also specify the data serialization # format for the response. For more information, see [S3Select API # Documentation][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html # # @!attribute [rw] bucket # The S3 bucket. # @return [String] # # @!attribute [rw] key # The object key. # @return [String] # # @!attribute [rw] sse_customer_algorithm # The server-side encryption (SSE) algorithm used to encrypt the # object. This parameter is needed only when the object was created # using a checksum algorithm. For more information, see [Protecting # data using SSE-C keys][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @return [String] # # @!attribute [rw] sse_customer_key # The server-side encryption (SSE) customer managed key. This # parameter is needed only when the object was created using a # checksum algorithm. For more information, see [Protecting data using # SSE-C keys][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @return [String] # # @!attribute [rw] sse_customer_key_md5 # The 128-bit MD5 digest of the server-side encryption (SSE) customer # managed key. This # parameter is needed only when the object was created using a # checksum algorithm. For more information, see [Protecting data using # SSE-C keys][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html # @return [String] # # @!attribute [rw] expression # The expression that is used to query the object.
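#
# A minimal sketch (editorial addition) of a `SelectObjectContent` call
# that streams matched records back; the bucket, key, and SQL text are
# placeholder assumptions.
#
#     s3 = Aws::S3::Client.new
#     s3.select_object_content(
#       bucket: 'my-bucket',
#       key: 'people.csv',
#       expression: "SELECT s.name FROM S3Object s WHERE s.age > '30'",
#       expression_type: 'SQL',
#       input_serialization: { csv: { file_header_info: 'USE' } },
#       output_serialization: { csv: {} },
#       # only scan the first MiB of the object
#       scan_range: { start: 0, end: 1_048_575 }
#     ) do |stream|
#       stream.on_records_event { |event| print event.payload.read }
#     end
#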
# @return [String] # # @!attribute [rw] expression_type # The type of the provided expression (for example, SQL). # @return [String] # # @!attribute [rw] request_progress # Specifies if periodic request progress information should be # enabled. # @return [Types::RequestProgress] # # @!attribute [rw] input_serialization # Describes the format of the data in the object that is being # queried. # @return [Types::InputSerialization] # # @!attribute [rw] output_serialization # Describes the format of the data that you want Amazon S3 to return # in response. # @return [Types::OutputSerialization] # # @!attribute [rw] scan_range # Specifies the byte range of the object to get the records from. A # record is processed when its first byte is contained by the range. # This parameter is optional, but when specified, it must not be # empty. See RFC 2616, Section 14.35.1 about how to specify the start # and end of the range. # # `ScanRange` may be used in the following ways: # # * `<scanrange><start>50</start><end>100</end></scanrange>` - process # only the records starting between the bytes 50 and 100 (inclusive, # counting from zero) # # * `<scanrange><start>50</start></scanrange>` - process only the # records starting after the byte 50 # # * `<scanrange><end>50</end></scanrange>` - process only the records # within the last 50 bytes of the file. # @return [Types::ScanRange] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContentRequest AWS API Documentation # class SelectObjectContentRequest < Struct.new( :bucket, :key, :sse_customer_algorithm, :sse_customer_key, :sse_customer_key_md5, :expression, :expression_type, :request_progress, :input_serialization, :output_serialization, :scan_range, :expected_bucket_owner) SENSITIVE = [:sse_customer_key] include Aws::Structure end # Describes the parameters for Select job types. # # @!attribute [rw] input_serialization # Describes the serialization format of the object. # @return [Types::InputSerialization] # # @!attribute [rw] expression_type # The type of the provided expression (for example, SQL). # @return [String] # # @!attribute [rw] expression # The expression that is used to query the object. # @return [String] # # @!attribute [rw] output_serialization # Describes how the results of the Select job are serialized. # @return [Types::OutputSerialization] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectParameters AWS API Documentation # class SelectParameters < Struct.new( :input_serialization, :expression_type, :expression, :output_serialization) SENSITIVE = [] include Aws::Structure end # Describes the default server-side encryption to apply to new objects # in the bucket. If a PUT Object request doesn't specify any # server-side encryption, this default encryption will be applied. If # you don't specify a customer managed key at configuration, Amazon S3 # automatically creates an Amazon Web Services KMS key in your Amazon # Web Services account the first time that you add an object encrypted # with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for # SSE-KMS. For more information, see [PUT Bucket encryption][1] in the # *Amazon S3 API Reference*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html # # @!attribute [rw] sse_algorithm # Server-side encryption algorithm to use for the default encryption.
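#
# A minimal sketch (editorial addition): configuring SSE-KMS default
# encryption with `Aws::S3::Client#put_bucket_encryption`. The bucket
# name and KMS key ARN are placeholder assumptions.
#
#     s3 = Aws::S3::Client.new
#     s3.put_bucket_encryption(
#       bucket: 'my-bucket',
#       server_side_encryption_configuration: {
#         rules: [{
#           apply_server_side_encryption_by_default: {
#             sse_algorithm: 'aws:kms',
#             kms_master_key_id: 'arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab'
#           },
#           # also reduce KMS request costs with an S3 Bucket Key
#           bucket_key_enabled: true
#         }]
#       }
#     )
#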
# @return [String] # # @!attribute [rw] kms_master_key_id # Amazon Web Services Key Management Service (KMS) customer Amazon Web # Services KMS key ID to use for the default encryption. This # parameter is allowed if and only if `SSEAlgorithm` is set to # `aws:kms`. # # You can specify the key ID, key alias, or the Amazon Resource Name # (ARN) of the KMS key. # # * Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab` # # * Key ARN: # `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` # # * Key Alias: `alias/alias-name` # # If you use a key ID, you can run into a LogDestination undeliverable # error when creating a VPC flow log. # # If you are using encryption with cross-account or Amazon Web # Services service operations you must use a fully qualified KMS key # ARN. For more information, see [Using encryption for cross-account # operations][1]. # # Amazon S3 only supports symmetric encryption KMS keys. For more # information, see [Asymmetric keys in Amazon Web Services KMS][2] in # the *Amazon Web Services Key Management Service Developer Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionByDefault AWS API Documentation # class ServerSideEncryptionByDefault < Struct.new( :sse_algorithm, :kms_master_key_id) SENSITIVE = [:kms_master_key_id] include Aws::Structure end # Specifies the default server-side-encryption configuration. # # @!attribute [rw] rules # Container for information about a particular server-side encryption # configuration rule. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionConfiguration AWS API Documentation # class ServerSideEncryptionConfiguration < Struct.new( :rules) SENSITIVE = [] include Aws::Structure end # Specifies the default server-side encryption configuration. # # @!attribute [rw] apply_server_side_encryption_by_default # Specifies the default server-side encryption to apply to new objects # in the bucket. If a PUT Object request doesn't specify any # server-side encryption, this default encryption will be applied. # @return [Types::ServerSideEncryptionByDefault] # # @!attribute [rw] bucket_key_enabled # Specifies whether Amazon S3 should use an S3 Bucket Key with # server-side encryption using KMS (SSE-KMS) for new objects in the # bucket. Existing objects are not affected. Setting the # `BucketKeyEnabled` element to `true` causes Amazon S3 to use an S3 # Bucket Key. By default, S3 Bucket Key is not enabled. # # For more information, see [Amazon S3 Bucket Keys][1] in the *Amazon # S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html # @return [Boolean] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionRule AWS API Documentation # class ServerSideEncryptionRule < Struct.new( :apply_server_side_encryption_by_default, :bucket_key_enabled) SENSITIVE = [] include Aws::Structure end # The established temporary security credentials of the session. # # **Directory buckets** - These session credentials are only supported # for the authentication and authorization of Zonal endpoint APIs on # directory buckets. # # # # @!attribute [rw] access_key_id # A unique identifier that's associated with a secret access key. 
The # access key ID and the secret access key are used together to sign # programmatic Amazon Web Services requests cryptographically. # @return [String] # # @!attribute [rw] secret_access_key # A key that's used with the access key ID to cryptographically sign # programmatic Amazon Web Services requests. Signing a request # identifies the sender and prevents the request from being altered. # @return [String] # # @!attribute [rw] session_token # A part of the temporary security credentials. The session token is # used to validate the temporary security credentials. # @return [String] # # @!attribute [rw] expiration # Temporary security credentials expire after a specified interval. # After temporary credentials expire, any calls that you make with # those credentials will fail. So you must generate a new set of # temporary credentials. Temporary credentials cannot be extended or # refreshed beyond the original specified interval. # @return [Time] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SessionCredentials AWS API Documentation # class SessionCredentials < Struct.new( :access_key_id, :secret_access_key, :session_token, :expiration) SENSITIVE = [:secret_access_key, :session_token] include Aws::Structure end # To use simple format for S3 keys for log objects, set SimplePrefix to # an empty object. # # `[DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]` # # @api private # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SimplePrefix AWS API Documentation # class SimplePrefix < Aws::EmptyStructure; end # A container that describes additional filters for identifying the # source objects that you want to replicate. You can choose to enable or # disable the replication of these objects. Currently, Amazon S3 # supports only the filter that you can specify for objects created with # server-side encryption using a customer managed key stored in Amazon # Web Services Key Management Service (SSE-KMS). # # @!attribute [rw] sse_kms_encrypted_objects # A container for filter information for the selection of Amazon S3 # objects encrypted with Amazon Web Services KMS. If you include # `SourceSelectionCriteria` in the replication configuration, this # element is required. # @return [Types::SseKmsEncryptedObjects] # # @!attribute [rw] replica_modifications # A filter that you can specify for selections for modifications on # replicas. Amazon S3 doesn't replicate replica modifications by # default. In the latest version of replication configuration (when # `Filter` is specified), you can specify this element and set the # status to `Enabled` to replicate modifications on replicas. # # If you don't specify the `Filter` element, Amazon S3 assumes that # the replication configuration is the earlier version, V1. In the # earlier version, this element is not allowed # # # @return [Types::ReplicaModifications] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SourceSelectionCriteria AWS API Documentation # class SourceSelectionCriteria < Struct.new( :sse_kms_encrypted_objects, :replica_modifications) SENSITIVE = [] include Aws::Structure end # A container for filter information for the selection of S3 objects # encrypted with Amazon Web Services KMS. # # @!attribute [rw] status # Specifies whether Amazon S3 replicates objects created with # server-side encryption using an Amazon Web Services KMS key stored # in Amazon Web Services Key Management Service. 
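#
# A minimal sketch (editorial addition): enabling replication of
# SSE-KMS-encrypted objects through `SourceSelectionCriteria` in
# `Aws::S3::Client#put_bucket_replication`. All ARNs and bucket names
# are placeholder assumptions.
#
#     s3.put_bucket_replication(
#       bucket: 'source-bucket',
#       replication_configuration: {
#         role: 'arn:aws:iam::123456789012:role/replication-role',
#         rules: [{
#           status: 'Enabled',
#           priority: 1,
#           filter: { prefix: '' },
#           delete_marker_replication: { status: 'Disabled' },
#           source_selection_criteria: {
#             sse_kms_encrypted_objects: { status: 'Enabled' }
#           },
#           destination: {
#             bucket: 'arn:aws:s3:::destination-bucket',
#             encryption_configuration: {
#               replica_kms_key_id: 'arn:aws:kms:us-west-2:123456789012:key/replica-key-id'
#             }
#           }
#         }]
#       }
#     )
#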
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SseKmsEncryptedObjects AWS API Documentation # class SseKmsEncryptedObjects < Struct.new( :status) SENSITIVE = [] include Aws::Structure end # Container for the stats details. # # @!attribute [rw] bytes_scanned # The total number of object bytes scanned. # @return [Integer] # # @!attribute [rw] bytes_processed # The total number of uncompressed object bytes processed. # @return [Integer] # # @!attribute [rw] bytes_returned # The total number of bytes of records payload data returned. # @return [Integer] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Stats AWS API Documentation # class Stats < Struct.new( :bytes_scanned, :bytes_processed, :bytes_returned) SENSITIVE = [] include Aws::Structure end # Container for the Stats Event. # # @!attribute [rw] details # The Stats event details. # @return [Types::Stats] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StatsEvent AWS API Documentation # class StatsEvent < Struct.new( :details, :event_type) SENSITIVE = [] include Aws::Structure end # Specifies data related to access patterns to be collected and made # available to analyze the tradeoffs between different storage classes # for an Amazon S3 bucket. # # @!attribute [rw] data_export # Specifies how data related to the storage class analysis for an # Amazon S3 bucket should be exported. # @return [Types::StorageClassAnalysisDataExport] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis AWS API Documentation # class StorageClassAnalysis < Struct.new( :data_export) SENSITIVE = [] include Aws::Structure end # Container for data related to the storage class analysis for an Amazon # S3 bucket for export. # # @!attribute [rw] output_schema_version # The version of the output schema to use when exporting data. Must be # `V_1`. # @return [String] # # @!attribute [rw] destination # The place to store the data for an analysis. # @return [Types::AnalyticsExportDestination] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport AWS API Documentation # class StorageClassAnalysisDataExport < Struct.new( :output_schema_version, :destination) SENSITIVE = [] include Aws::Structure end # A container of a key value name pair. # # @!attribute [rw] key # Name of the object key. # @return [String] # # @!attribute [rw] value # Value of the tag. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag AWS API Documentation # class Tag < Struct.new( :key, :value) SENSITIVE = [] include Aws::Structure end # Container for `TagSet` elements. # # @!attribute [rw] tag_set # A collection for a set of tags # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging AWS API Documentation # class Tagging < Struct.new( :tag_set) SENSITIVE = [] include Aws::Structure end # Container for granting information. # # Buckets that use the bucket owner enforced setting for Object # Ownership don't support target grants. For more information, see # [Permissions server access log delivery][1] in the *Amazon S3 User # Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general # # @!attribute [rw] grantee # Container for the person being granted permissions. # @return [Types::Grantee] # # @!attribute [rw] permission # Logging permissions assigned to the grantee for the bucket. 
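#
# A minimal sketch (editorial addition): granting the log delivery group
# `READ` on delivered log objects (only meaningful for buckets that do
# not use the bucket owner enforced setting). Bucket names are
# placeholder assumptions.
#
#     s3.put_bucket_logging(
#       bucket: 'my-bucket',
#       bucket_logging_status: {
#         logging_enabled: {
#           target_bucket: 'my-log-bucket',
#           target_prefix: 'logs/',
#           target_grants: [{
#             grantee: { type: 'Group', uri: 'http://acs.amazonaws.com/groups/s3/LogDelivery' },
#             permission: 'READ'
#           }]
#         }
#       }
#     )
#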
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant AWS API Documentation # class TargetGrant < Struct.new( :grantee, :permission) SENSITIVE = [] include Aws::Structure end # Amazon S3 key format for log objects. Only one format, # PartitionedPrefix or SimplePrefix, is allowed. # # @!attribute [rw] simple_prefix # To use the simple format for S3 keys for log objects. To specify # SimplePrefix format, set SimplePrefix to \\\{\\}. # @return [Types::SimplePrefix] # # @!attribute [rw] partitioned_prefix # Partitioned S3 key for log objects. # @return [Types::PartitionedPrefix] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetObjectKeyFormat AWS API Documentation # class TargetObjectKeyFormat < Struct.new( :simple_prefix, :partitioned_prefix) SENSITIVE = [] include Aws::Structure end # The S3 Intelligent-Tiering storage class is designed to optimize # storage costs by automatically moving data to the most cost-effective # storage access tier, without additional operational overhead. # # @!attribute [rw] days # The number of consecutive days of no access after which an object # will be eligible to be transitioned to the corresponding tier. The # minimum number of days specified for Archive Access tier must be at # least 90 days and Deep Archive Access tier must be at least 180 # days. The maximum can be up to 2 years (730 days). # @return [Integer] # # @!attribute [rw] access_tier # S3 Intelligent-Tiering access tier. See [Storage class for # automatically optimizing frequently and infrequently accessed # objects][1] for a list of access tiers in the S3 Intelligent-Tiering # storage class. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tiering AWS API Documentation # class Tiering < Struct.new( :days, :access_tier) SENSITIVE = [] include Aws::Structure end # A container for specifying the configuration for publication of # messages to an Amazon Simple Notification Service (Amazon SNS) topic # when Amazon S3 detects specified events. # # @!attribute [rw] id # An optional unique identifier for configurations in a notification # configuration. If you don't provide one, Amazon S3 will assign an # ID. # @return [String] # # @!attribute [rw] topic_arn # The Amazon Resource Name (ARN) of the Amazon SNS topic to which # Amazon S3 publishes a message when it detects events of the # specified type. # @return [String] # # @!attribute [rw] events # The Amazon S3 bucket event about which to send notifications. For # more information, see [Supported Event Types][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html # @return [Array] # # @!attribute [rw] filter # Specifies object key name filtering rules. For information about key # name filtering, see [Configuring event notifications using object # key name filtering][1] in the *Amazon S3 User Guide*. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html # @return [Types::NotificationConfigurationFilter] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration AWS API Documentation # class TopicConfiguration < Struct.new( :id, :topic_arn, :events, :filter) SENSITIVE = [] include Aws::Structure end # A container for specifying the configuration for publication of # messages to an Amazon Simple Notification Service (Amazon SNS) topic # when Amazon S3 detects specified events. This data type is deprecated. # Use [TopicConfiguration][1] instead. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_TopicConfiguration.html # # @!attribute [rw] id # An optional unique identifier for configurations in a notification # configuration. If you don't provide one, Amazon S3 will assign an # ID. # @return [String] # # @!attribute [rw] events # A collection of events related to objects # @return [Array] # # @!attribute [rw] event # Bucket event for which to send notifications. # @return [String] # # @!attribute [rw] topic # Amazon SNS topic to which Amazon S3 will publish a message to report # the specified events for the bucket. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated AWS API Documentation # class TopicConfigurationDeprecated < Struct.new( :id, :events, :event, :topic) SENSITIVE = [] include Aws::Structure end # Specifies when an object transitions to a specified storage class. For # more information about Amazon S3 lifecycle configuration rules, see # [Transitioning Objects Using Amazon S3 Lifecycle][1] in the *Amazon S3 # User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html # # @!attribute [rw] date # Indicates when objects are transitioned to the specified storage # class. The date value must be in ISO 8601 format. The time is always # midnight UTC. # @return [Time] # # @!attribute [rw] days # Indicates the number of days after creation when objects are # transitioned to the specified storage class. The value must be a # positive integer. # @return [Integer] # # @!attribute [rw] storage_class # The storage class to which you want the object to transition. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition AWS API Documentation # class Transition < Struct.new( :date, :days, :storage_class) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] copy_source_version_id # The version of the source object that was copied, if you have # enabled versioning on the source bucket. # # This functionality is not supported when the source object is in a # directory bucket. # # # @return [String] # # @!attribute [rw] copy_part_result # Container for all response elements. # @return [Types::CopyPartResult] # # @!attribute [rw] server_side_encryption # The server-side encryption algorithm used when you store this object # in Amazon S3 (for example, `AES256`, `aws:kms`). # # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # @return [String] # # @!attribute [rw] sse_customer_algorithm # If server-side encryption with a customer-provided encryption key # was requested, the response will include this header to confirm the # encryption algorithm that's used. # # This functionality is not supported for directory buckets. 
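#
# A minimal sketch (editorial addition): copying one 5 MiB part and
# keeping its ETag from this output type for a later
# `CompleteMultipartUpload`. `upload_id` and `parts` are assumed to come
# from an earlier `create_multipart_upload` step; names are placeholders.
#
#     resp = s3.upload_part_copy(
#       bucket: 'dest-bucket',
#       key: 'big-object',
#       upload_id: upload_id,
#       part_number: 1,
#       copy_source: 'src-bucket/big-object',
#       copy_source_range: 'bytes=0-5242879'
#     )
#     parts << { part_number: 1, etag: resp.copy_part_result.etag }
#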
# # # @return [String] # # @!attribute [rw] sse_customer_key_md5 # If server-side encryption with a customer-provided encryption key # was requested, the response will include this header to provide the # round-trip message integrity verification of the customer-provided # encryption key. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] ssekms_key_id # If present, indicates the ID of the Key Management Service (KMS) # symmetric encryption customer managed key that was used for the # object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] bucket_key_enabled # Indicates whether the multipart upload uses an S3 Bucket Key for # server-side encryption with Key Management Service (KMS) keys # (SSE-KMS). # # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput AWS API Documentation # class UploadPartCopyOutput < Struct.new( :copy_source_version_id, :copy_part_result, :server_side_encryption, :sse_customer_algorithm, :sse_customer_key_md5, :ssekms_key_id, :bucket_key_enabled, :request_charged) SENSITIVE = [:ssekms_key_id] include Aws::Structure end # @!attribute [rw] bucket # The bucket name. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] copy_source # Specifies the source object for the copy operation. You specify the # value in one of two formats, depending on whether you want to access # the source object through an [access point][1]: # # * For objects not accessed through an access point, specify the name # of the source bucket and key of the source object, separated by a # slash (/). For example, to copy the object `reports/january.pdf` # from the bucket `awsexamplebucket`, use # `awsexamplebucket/reports/january.pdf`. The value must be # URL-encoded. # # * For objects accessed through access points, specify the Amazon # Resource Name (ARN) of the object as accessed through the access # point, in the format # `arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>`. # For example, to copy the object `reports/january.pdf` through # access point `my-access-point` owned by account `123456789012` in # Region `us-west-2`, use the URL encoding of # `arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf`. # The value must be URL-encoded. # # * Amazon S3 supports copy operations using Access points only when # the source and destination buckets are in the same Amazon Web # Services Region. # # * Access points are not supported by directory buckets. # # # # Alternatively, for objects accessed through Amazon S3 on Outposts, # specify the ARN of the object as accessed in the format # `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>`. # For example, to copy the object `reports/january.pdf` through # outpost `my-outpost` owned by account `123456789012` in Region # `us-west-2`, use the URL encoding of # `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf`. # The value must be URL-encoded. # # If your bucket has versioning enabled, you could have multiple # versions of the same object. By default, `x-amz-copy-source` # identifies the current version of the source object to copy. To copy # a specific version of the source object, append # `?versionId=<version-id>` to the `x-amz-copy-source` request header # (for example, `x-amz-copy-source: # /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893`). # # If the current version is a delete marker and you don't specify a # versionId in the `x-amz-copy-source` request header, Amazon S3 # returns a `404 Not Found` error, because the object does not exist. # If you specify versionId in the `x-amz-copy-source` and the # versionId is a delete marker, Amazon S3 returns an HTTP `400 Bad # Request` error, because you are not allowed to specify a delete # marker as a version for the `x-amz-copy-source`. # # **Directory buckets** - S3 Versioning isn't enabled or supported # for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html # @return [String] # # @!attribute [rw] copy_source_if_match # Copies the object if its entity tag (ETag) matches the specified # tag.
# # If both of the `x-amz-copy-source-if-match` and # `x-amz-copy-source-if-unmodified-since` headers are present in the # request as follows: # # `x-amz-copy-source-if-match` condition evaluates to `true`, and; # # `x-amz-copy-source-if-unmodified-since` condition evaluates to # `false`; # # Amazon S3 returns `200 OK` and copies the data. # @return [String] # # @!attribute [rw] copy_source_if_modified_since # Copies the object if it has been modified since the specified time. # # If both of the `x-amz-copy-source-if-none-match` and # `x-amz-copy-source-if-modified-since` headers are present in the # request as follows: # # `x-amz-copy-source-if-none-match` condition evaluates to `false`, # and; # # `x-amz-copy-source-if-modified-since` condition evaluates to `true`; # # Amazon S3 returns `412 Precondition Failed` response code. # @return [Time] # # @!attribute [rw] copy_source_if_none_match # Copies the object if its entity tag (ETag) is different than the # specified ETag. # # If both of the `x-amz-copy-source-if-none-match` and # `x-amz-copy-source-if-modified-since` headers are present in the # request as follows: # # `x-amz-copy-source-if-none-match` condition evaluates to `false`, # and; # # `x-amz-copy-source-if-modified-since` condition evaluates to `true`; # # Amazon S3 returns `412 Precondition Failed` response code. # @return [String] # # @!attribute [rw] copy_source_if_unmodified_since # Copies the object if it hasn't been modified since the specified # time. # # If both of the `x-amz-copy-source-if-match` and # `x-amz-copy-source-if-unmodified-since` headers are present in the # request as follows: # # `x-amz-copy-source-if-match` condition evaluates to `true`, and; # # `x-amz-copy-source-if-unmodified-since` condition evaluates to # `false`; # # Amazon S3 returns `200 OK` and copies the data. # @return [Time] # # @!attribute [rw] copy_source_range # The range of bytes to copy from the source object. The range value # must use the form bytes=first-last, where the first and last are the # zero-based byte offsets to copy. For example, bytes=0-9 indicates # that you want to copy the first 10 bytes of the source. You can copy # a range only if the source object is greater than 5 MB. # @return [String] # # @!attribute [rw] key # Object key for which the multipart upload was initiated. # @return [String] # # @!attribute [rw] part_number # Part number of part being copied. This is a positive integer between # 1 and 10,000. # @return [Integer] # # @!attribute [rw] upload_id # Upload ID identifying the multipart upload whose part is being # copied. # @return [String] # # @!attribute [rw] sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, AES256). # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @return [String] # # @!attribute [rw] sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use # in encrypting data. This value is used to store the object and then # it is discarded; Amazon S3 does not store the encryption key. The # key must be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm` header. This must # be the same encryption key specified in the initiate multipart # upload request. # # This functionality is not supported when the destination bucket is a # directory bucket. 
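#
# A minimal sketch (editorial addition) of deriving the `bytes=first-last`
# values for the `copy_source_range` attribute documented above;
# `object_size` is assumed to come from a prior `head_object` call.
#
#     part_size = 5 * 1024 * 1024  # 5 MiB, the minimum for all but the last part
#     ranges = (0...object_size).step(part_size).map do |first|
#       last = [first + part_size - 1, object_size - 1].min
#       "bytes=#{first}-#{last}"
#     end
#     # => ["bytes=0-5242879", "bytes=5242880-10485759", ...]
#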
# # # @return [String] # # @!attribute [rw] sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check # to ensure that the encryption key was transmitted without error. # # This functionality is not supported when the destination bucket is a # directory bucket. # # # @return [String] # # @!attribute [rw] copy_source_sse_customer_algorithm # Specifies the algorithm to use when decrypting the source object # (for example, `AES256`). # # This functionality is not supported when the source object is in a # directory bucket. # # # @return [String] # # @!attribute [rw] copy_source_sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use # to decrypt the source object. The encryption key provided in this # header must be one that was used when the source object was created. # # This functionality is not supported when the source object is in a # directory bucket. # # # @return [String] # # @!attribute [rw] copy_source_sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check # to ensure that the encryption key was transmitted without error. # # This functionality is not supported when the source object is in a # directory bucket. # # # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected destination bucket owner. If the # account ID that you provide does not match the actual owner of the # destination bucket, the request fails with the HTTP status code `403 # Forbidden` (access denied). # @return [String] # # @!attribute [rw] expected_source_bucket_owner # The account ID of the expected source bucket owner. If the account # ID that you provide does not match the actual owner of the source # bucket, the request fails with the HTTP status code `403 Forbidden` # (access denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest AWS API Documentation # class UploadPartCopyRequest < Struct.new( :bucket, :copy_source, :copy_source_if_match, :copy_source_if_modified_since, :copy_source_if_none_match, :copy_source_if_unmodified_since, :copy_source_range, :key, :part_number, :upload_id, :sse_customer_algorithm, :sse_customer_key, :sse_customer_key_md5, :copy_source_sse_customer_algorithm, :copy_source_sse_customer_key, :copy_source_sse_customer_key_md5, :request_payer, :expected_bucket_owner, :expected_source_bucket_owner) SENSITIVE = [:sse_customer_key, :copy_source_sse_customer_key] include Aws::Structure end # @!attribute [rw] server_side_encryption # The server-side encryption algorithm used when you store this object # in Amazon S3 (for example, `AES256`, `aws:kms`). 
# # For directory buckets, only server-side encryption with Amazon S3 # managed keys (SSE-S3) (`AES256`) is supported. # # # @return [String] # # @!attribute [rw] etag # Entity tag for the uploaded object. # @return [String] # # @!attribute [rw] checksum_crc32 # The base64-encoded, 32-bit CRC32 checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_crc32c # The base64-encoded, 32-bit CRC32C checksum of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha1 # The base64-encoded, 160-bit SHA-1 digest of the object. This will # only be present if it was uploaded with the object. When you use the # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] checksum_sha256 # The base64-encoded, 256-bit SHA-256 digest of the object. This will # only be present if it was uploaded with the object. When you use an # API operation on an object that was uploaded using multipart # uploads, this value may not be a direct checksum value of the full # object. Instead, it's a calculation based on the checksum values of # each individual part. For more information about how checksums are # calculated with multipart uploads, see [ Checking object # integrity][1] in the *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums # @return [String] # # @!attribute [rw] sse_customer_algorithm # If server-side encryption with a customer-provided encryption key # was requested, the response will include this header to confirm the # encryption algorithm that's used. # # This functionality is not supported for directory buckets. 
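#
# A minimal sketch (editorial addition): uploading a part with a
# requested CRC32 checksum and reading it back from this output type.
# `upload_id` and `chunk_io` are assumed from earlier steps; names are
# placeholders.
#
#     resp = s3.upload_part(
#       bucket: 'my-bucket',
#       key: 'big-object',
#       upload_id: upload_id,
#       part_number: 1,
#       body: chunk_io,
#       checksum_algorithm: 'CRC32'
#     )
#     resp.checksum_crc32  # base64-encoded CRC32 of this part
#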
# # # @return [String] # # @!attribute [rw] sse_customer_key_md5 # If server-side encryption with a customer-provided encryption key # was requested, the response will include this header to provide the # round-trip message integrity verification of the customer-provided # encryption key. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] ssekms_key_id # If present, indicates the ID of the Key Management Service (KMS) # symmetric encryption customer managed key that was used for the # object. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] bucket_key_enabled # Indicates whether the multipart upload uses an S3 Bucket Key for # server-side encryption with Key Management Service (KMS) keys # (SSE-KMS). # # This functionality is not supported for directory buckets. # # # @return [Boolean] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput AWS API Documentation # class UploadPartOutput < Struct.new( :server_side_encryption, :etag, :checksum_crc32, :checksum_crc32c, :checksum_sha1, :checksum_sha256, :sse_customer_algorithm, :sse_customer_key_md5, :ssekms_key_id, :bucket_key_enabled, :request_charged) SENSITIVE = [:ssekms_key_id] include Aws::Structure end # @!attribute [rw] body # Object data. # @return [IO] # # @!attribute [rw] bucket # The name of the bucket to which the multipart upload was initiated. # # **Directory buckets** - When you use this operation with a directory # bucket, you must use virtual-hosted-style requests in the format ` # Bucket_name.s3express-az_id.region.amazonaws.com`. Path-style # requests are not supported. Directory bucket names must be unique in # the chosen Availability Zone. Bucket names must follow the format ` # bucket_base_name--az-id--x-s3` (for example, ` # DOC-EXAMPLE-BUCKET--usw2-az2--x-s3`). For information about bucket # naming restrictions, see [Directory bucket naming rules][1] in the # *Amazon S3 User Guide*. # # **Access points** - When you use this action with an access point, # you must provide the alias of the access point in place of the # bucket name or specify the access point ARN. When using the access # point ARN, you must direct requests to the access point hostname. # The access point hostname takes the form # *AccessPointName*-*AccountId*.s3-accesspoint.*Region*.amazonaws.com. # When using this action with an access point through the Amazon Web # Services SDKs, you provide the access point ARN in place of the # bucket name. For more information about access point ARNs, see # [Using access points][2] in the *Amazon S3 User Guide*. # # Access points and Object Lambda access points are not supported by # directory buckets. # # # # **S3 on Outposts** - When you use this action with Amazon S3 on # Outposts, you must direct requests to the S3 on Outposts hostname. # The S3 on Outposts hostname takes the form ` # AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com`. # When you use this action with S3 on Outposts through the Amazon Web # Services SDKs, you provide the Outposts access point ARN in place of # the bucket name. For more information about S3 on Outposts ARNs, see # [What is S3 on Outposts?][3] in the *Amazon S3 User Guide*. 
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html # [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html # [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html # @return [String] # # @!attribute [rw] content_length # Size of the body in bytes. This parameter is useful when the size of # the body cannot be determined automatically. # @return [Integer] # # @!attribute [rw] content_md5 # The base64-encoded 128-bit MD5 digest of the part data. This # parameter is auto-populated when using the command from the CLI. # This parameter is required if object lock parameters are specified. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any # provided `ChecksumAlgorithm` parameter. # # This checksum algorithm must be the same for all parts and it must # match the checksum value supplied in the `CreateMultipartUpload` # request. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_crc32 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32 checksum of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_crc32c # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 32-bit CRC32C checksum of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_sha1 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 160-bit SHA-1 digest of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_sha256 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This header # specifies the base64-encoded, 256-bit SHA-256 digest of the object. # For more information, see [Checking object integrity][1] in the # *Amazon S3 User Guide*.
# # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] key # Object key for which the multipart upload was initiated. # @return [String] # # @!attribute [rw] part_number # Part number of part being uploaded. This is a positive integer # between 1 and 10,000. # @return [Integer] # # @!attribute [rw] upload_id # Upload ID identifying the multipart upload whose part is being # uploaded. # @return [String] # # @!attribute [rw] sse_customer_algorithm # Specifies the algorithm to use when encrypting the object (for # example, AES256). # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] sse_customer_key # Specifies the customer-provided encryption key for Amazon S3 to use # in encrypting data. This value is used to store the object and then # it is discarded; Amazon S3 does not store the encryption key. The # key must be appropriate for use with the algorithm specified in the # `x-amz-server-side-encryption-customer-algorithm header`. This must # be the same encryption key specified in the initiate multipart # upload request. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] sse_customer_key_md5 # Specifies the 128-bit MD5 digest of the encryption key according to # RFC 1321. Amazon S3 uses this header for a message integrity check # to ensure that the encryption key was transmitted without error. # # This functionality is not supported for directory buckets. # # # @return [String] # # @!attribute [rw] request_payer # Confirms that the requester knows that they will be charged for the # request. Bucket owners need not specify this parameter in their # requests. If either the source or destination S3 bucket has # Requester Pays enabled, the requester will pay for corresponding # charges to copy the object. For information about downloading # objects from Requester Pays buckets, see [Downloading Objects in # Requester Pays Buckets][1] in the *Amazon S3 User Guide*. # # This functionality is not supported for directory buckets. # # # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html # @return [String] # # @!attribute [rw] expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the # request fails with the HTTP status code `403 Forbidden` (access # denied). # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest AWS API Documentation # class UploadPartRequest < Struct.new( :body, :bucket, :content_length, :content_md5, :checksum_algorithm, :checksum_crc32, :checksum_crc32c, :checksum_sha1, :checksum_sha256, :key, :part_number, :upload_id, :sse_customer_algorithm, :sse_customer_key, :sse_customer_key_md5, :request_payer, :expected_bucket_owner) SENSITIVE = [:sse_customer_key] include Aws::Structure end # Describes the versioning state of an Amazon S3 bucket. For more # information, see [PUT Bucket versioning][1] in the *Amazon S3 API # Reference*. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html # # @!attribute [rw] mfa_delete # Specifies whether MFA delete is enabled in the bucket versioning # configuration. This element is only returned if the bucket has been # configured with MFA delete. If the bucket has never been so # configured, this element is not returned. 
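#
# A minimal sketch (editorial addition): enabling versioning, plus the
# extra `mfa` argument that MFA delete would require (MFA delete must be
# requested with the bucket owner's root credentials). The bucket name
# and MFA serial/code are placeholder assumptions.
#
#     s3.put_bucket_versioning(
#       bucket: 'my-bucket',
#       versioning_configuration: { status: 'Enabled' }
#     )
#     # With MFA delete:
#     # s3.put_bucket_versioning(
#     #   bucket: 'my-bucket',
#     #   mfa: 'arn:aws:iam::123456789012:mfa/root-device 123456',
#     #   versioning_configuration: { status: 'Enabled', mfa_delete: 'Enabled' }
#     # )
#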
# @return [String] # # @!attribute [rw] status # The versioning state of the bucket. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration AWS API Documentation # class VersioningConfiguration < Struct.new( :mfa_delete, :status) SENSITIVE = [] include Aws::Structure end # Specifies website configuration parameters for an Amazon S3 bucket. # # @!attribute [rw] error_document # The name of the error document for the website. # @return [Types::ErrorDocument] # # @!attribute [rw] index_document # The name of the index document for the website. # @return [Types::IndexDocument] # # @!attribute [rw] redirect_all_requests_to # The redirect behavior for every request to this bucket's website # endpoint. # # If you specify this property, you can't specify any other property. # @return [Types::RedirectAllRequestsTo] # # @!attribute [rw] routing_rules # Rules that define when a redirect is applied and the redirect # behavior. # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration AWS API Documentation # class WebsiteConfiguration < Struct.new( :error_document, :index_document, :redirect_all_requests_to, :routing_rules) SENSITIVE = [] include Aws::Structure end # @!attribute [rw] request_route # Route prefix to the HTTP URL generated. # @return [String] # # @!attribute [rw] request_token # A single use encrypted token that maps `WriteGetObjectResponse` to # the end user `GetObject` request. # @return [String] # # @!attribute [rw] body # The object data. # @return [IO] # # @!attribute [rw] status_code # The integer status code for an HTTP response of a corresponding # `GetObject` request. The following is a list of status codes. # # * `200 - OK` # # * `206 - Partial Content` # # * `304 - Not Modified` # # * `400 - Bad Request` # # * `401 - Unauthorized` # # * `403 - Forbidden` # # * `404 - Not Found` # # * `405 - Method Not Allowed` # # * `409 - Conflict` # # * `411 - Length Required` # # * `412 - Precondition Failed` # # * `416 - Range Not Satisfiable` # # * `500 - Internal Server Error` # # * `503 - Service Unavailable` # @return [Integer] # # @!attribute [rw] error_code # A string that uniquely identifies an error condition. Returned in # the <Code> tag of the error XML response for a corresponding # `GetObject` call. Cannot be used with a successful `StatusCode` # header or when the transformed object is provided in the body. All # error codes from S3 are sentence-cased. The regular expression # (regex) value is `"^[A-Z][a-zA-Z]+$"`. # @return [String] # # @!attribute [rw] error_message # Contains a generic description of the error condition. Returned in # the <Message> tag of the error XML response for a # corresponding `GetObject` call. Cannot be used with a successful # `StatusCode` header or when the transformed object is provided in # body. # @return [String] # # @!attribute [rw] accept_ranges # Indicates that a range of bytes was specified. # @return [String] # # @!attribute [rw] cache_control # Specifies caching behavior along the request/reply chain. # @return [String] # # @!attribute [rw] content_disposition # Specifies presentational information for the object. # @return [String] # # @!attribute [rw] content_encoding # Specifies what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the # media-type referenced by the Content-Type header field. # @return [String] # # @!attribute [rw] content_language # The language the content is in. 
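#
# A minimal sketch (editorial addition): answering an S3 Object Lambda
# invocation with `Aws::S3::Client#write_get_object_response`. The
# `event` hash is the one Lambda passes to the handler; `transformed` is
# a placeholder for the rewritten object bytes.
#
#     ctx = event['getObjectContext']
#     s3.write_get_object_response(
#       request_route: ctx['outputRoute'],
#       request_token: ctx['outputToken'],
#       status_code: 200,
#       body: transformed
#     )
#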
# @!attribute [rw] request_route # The route prefix to the generated HTTP URL. # @return [String] # # @!attribute [rw] request_token # A single-use encrypted token that maps `WriteGetObjectResponse` to # the end user `GetObject` request. # @return [String] # # @!attribute [rw] body # The object data. # @return [IO] # # @!attribute [rw] status_code # The integer status code for an HTTP response of a corresponding # `GetObject` request. The following is a list of status codes. # # * `200 - OK` # # * `206 - Partial Content` # # * `304 - Not Modified` # # * `400 - Bad Request` # # * `401 - Unauthorized` # # * `403 - Forbidden` # # * `404 - Not Found` # # * `405 - Method Not Allowed` # # * `409 - Conflict` # # * `411 - Length Required` # # * `412 - Precondition Failed` # # * `416 - Range Not Satisfiable` # # * `500 - Internal Server Error` # # * `503 - Service Unavailable` # @return [Integer] # # @!attribute [rw] error_code # A string that uniquely identifies an error condition. Returned in # the <Code> tag of the error XML response for a corresponding # `GetObject` call. Cannot be used with a successful `StatusCode` # header or when the transformed object is provided in the body. All # error codes from S3 are sentence-cased. The regular expression # (regex) value is `"^[A-Z][a-zA-Z]+$"`. # @return [String] # # @!attribute [rw] error_message # Contains a generic description of the error condition. Returned in # the <Message> tag of the error XML response for a # corresponding `GetObject` call. Cannot be used with a successful # `StatusCode` header or when the transformed object is provided in # the body. # @return [String] # # @!attribute [rw] accept_ranges # Indicates that a range of bytes was specified. # @return [String] # # @!attribute [rw] cache_control # Specifies caching behavior along the request/reply chain. # @return [String] # # @!attribute [rw] content_disposition # Specifies presentational information for the object. # @return [String] # # @!attribute [rw] content_encoding # Specifies what content encodings have been applied to the object and # thus what decoding mechanisms must be applied to obtain the # media-type referenced by the Content-Type header field. # @return [String] # # @!attribute [rw] content_language # The language the content is in. # @return [String] # # @!attribute [rw] content_length # The size of the content body in bytes. # @return [Integer] # # @!attribute [rw] content_range # The portion of the object returned in the response. # @return [String] # # @!attribute [rw] content_type # A standard MIME type describing the format of the object data. # @return [String] # # @!attribute [rw] checksum_crc32 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This # specifies the base64-encoded, 32-bit CRC32 checksum of the object # returned by the Object Lambda function. This may not match the # checksum for the object stored in Amazon S3. Amazon S3 will perform # validation of the checksum values only when the original `GetObject` # request required checksum validation. For more information about # checksums, see [Checking object integrity][1] in the *Amazon S3 User # Guide*. # # Only one checksum header can be specified at a time. If you supply # multiple checksum headers, this request will fail. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_crc32c # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This # specifies the base64-encoded, 32-bit CRC32C checksum of the object # returned by the Object Lambda function. This may not match the # checksum for the object stored in Amazon S3. Amazon S3 will perform # validation of the checksum values only when the original `GetObject` # request required checksum validation. For more information about # checksums, see [Checking object integrity][1] in the *Amazon S3 User # Guide*. # # Only one checksum header can be specified at a time. If you supply # multiple checksum headers, this request will fail. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_sha1 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This # specifies the base64-encoded, 160-bit SHA-1 digest of the object # returned by the Object Lambda function. This may not match the # checksum for the object stored in Amazon S3. Amazon S3 will perform # validation of the checksum values only when the original `GetObject` # request required checksum validation. For more information about # checksums, see [Checking object integrity][1] in the *Amazon S3 User # Guide*. # # Only one checksum header can be specified at a time. If you supply # multiple checksum headers, this request will fail. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] checksum_sha256 # This header can be used as a data integrity check to verify that the # data received is the same data that was originally sent. This # specifies the base64-encoded, 256-bit SHA-256 digest of the object # returned by the Object Lambda function. This may not match the # checksum for the object stored in Amazon S3. Amazon S3 will perform # validation of the checksum values only when the original `GetObject` # request required checksum validation. For more information about # checksums, see [Checking object integrity][1] in the *Amazon S3 User # Guide*. # # Only one checksum header can be specified at a time.
If you supply # multiple checksum headers, this request will fail. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @return [String] # # @!attribute [rw] delete_marker # Specifies whether an object stored in Amazon S3 is (`true`) or is # not (`false`) a delete marker. # @return [Boolean] # # @!attribute [rw] etag # An opaque identifier assigned by a web server to a specific version # of a resource found at a URL. # @return [String] # # @!attribute [rw] expires # The date and time at which the object is no longer cacheable. # @return [Time] # # @!attribute [rw] expiration # If the object expiration is configured (see PUT Bucket lifecycle), # the response includes this header. It includes the `expiry-date` and # `rule-id` key-value pairs that provide the object expiration # information. The value of the `rule-id` is URL-encoded. # @return [String] # # @!attribute [rw] last_modified # The date and time that the object was last modified. # @return [Time] # # @!attribute [rw] missing_meta # Set to the number of metadata entries not returned in `x-amz-meta` # headers. This can happen if you create metadata using an API like # SOAP that supports more flexible metadata than the REST API. For # example, using SOAP, you can create metadata whose values are not # legal HTTP headers. # @return [Integer] # # @!attribute [rw] metadata # A map of metadata to store with the object in S3. # @return [Hash<String,String>] # # @!attribute [rw] object_lock_mode # Indicates whether an object stored in Amazon S3 has Object Lock # enabled. For more information about S3 Object Lock, see [Object # Lock][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html # @return [String] # # @!attribute [rw] object_lock_legal_hold_status # Indicates whether an object stored in Amazon S3 has an active legal # hold. # @return [String] # # @!attribute [rw] object_lock_retain_until_date # The date and time when Object Lock is configured to expire. # @return [Time] # # @!attribute [rw] parts_count # The count of parts this object has. # @return [Integer] # # @!attribute [rw] replication_status # Indicates whether the request involves a bucket that is either a # source or a destination in a replication rule. For more information # about S3 Replication, see [Replication][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html # @return [String] # # @!attribute [rw] request_charged # If present, indicates that the requester was successfully charged # for the request. # # This functionality is not supported for directory buckets. # # @return [String] # # @!attribute [rw] restore # Provides information about the object restoration operation and the # expiration time of the restored object copy. # @return [String] # # @!attribute [rw] server_side_encryption # The server-side encryption algorithm used when storing the requested # object in Amazon S3 (for example, `AES256`, `aws:kms`). # @return [String] # # @!attribute [rw] sse_customer_algorithm # The encryption algorithm used if server-side encryption with a # customer-provided encryption key was specified for the object stored # in Amazon S3. # @return [String] # # @!attribute [rw] ssekms_key_id # If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the # Amazon Web Services Key Management Service (Amazon Web Services KMS) # symmetric encryption customer managed key that was used for the # object stored in Amazon S3. # @return [String] # # @!attribute [rw] sse_customer_key_md5 # The 128-bit MD5 digest of the customer-provided encryption key used # in Amazon S3 to encrypt data stored in S3. For more information, see # [Protecting data using server-side encryption with customer-provided # encryption keys (SSE-C)][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html # @return [String] # # @!attribute [rw] storage_class # Provides storage class information of the object. Amazon S3 returns # this header for all objects except for S3 Standard storage class # objects. # # For more information, see [Storage Classes][1]. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html # @return [String] # # @!attribute [rw] tag_count # The number of tags, if any, on the object. # @return [Integer] # # @!attribute [rw] version_id # An ID used to reference a specific version of the object. # @return [String] # # @!attribute [rw] bucket_key_enabled # Indicates whether the object stored in Amazon S3 uses an S3 bucket # key for server-side encryption with Amazon Web Services KMS # (SSE-KMS). # @return [Boolean] # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponseRequest AWS API Documentation # class WriteGetObjectResponseRequest < Struct.new( :request_route, :request_token, :body, :status_code, :error_code, :error_message, :accept_ranges, :cache_control, :content_disposition, :content_encoding, :content_language, :content_length, :content_range, :content_type, :checksum_crc32, :checksum_crc32c, :checksum_sha1, :checksum_sha256, :delete_marker, :etag, :expires, :expiration, :last_modified, :missing_meta, :metadata, :object_lock_mode, :object_lock_legal_hold_status, :object_lock_retain_until_date, :parts_count, :replication_status, :request_charged, :restore, :server_side_encryption, :sse_customer_algorithm, :ssekms_key_id, :sse_customer_key_md5, :storage_class, :tag_count, :version_id, :bucket_key_enabled) SENSITIVE = [:ssekms_key_id] include Aws::Structure end
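# Example (a minimal sketch for an S3 Object Lambda handler; the `event`
# hash and its `getObjectContext` fields follow the Lambda event shape and
# are assumptions here, as is the transformed payload): answering the end
# user's `GetObject` request with Aws::S3::Client#write_get_object_response,
# using the request struct modeled above.
#
#     client = Aws::S3::Client.new(region: 'us-east-1')
#     transformed = 'transformed object bytes'
#     client.write_get_object_response(
#       request_route: event['getObjectContext']['outputRoute'],
#       request_token: event['getObjectContext']['outputToken'],
#       status_code: 200,
#       body: transformed,
#       content_length: transformed.bytesize
#     )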
# The container for selecting objects from a content event stream. # # EventStream is an Enumerator of Events. # #event_types #=> Array, returns all modeled event types in the stream # # @see http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContentEventStream AWS API Documentation # class SelectObjectContentEventStream < Enumerator def event_types [ :records, :stats, :progress, :cont, :end ] end end end end
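# Example (a minimal sketch; the bucket, key, and SQL expression are
# hypothetical): consuming the event stream above in block form with
# Aws::S3::Client#select_object_content, registering one handler per event
# type before any data arrives.
#
#     client = Aws::S3::Client.new(region: 'us-east-1')
#     client.select_object_content(
#       bucket: 'my-example-bucket',
#       key: 'data.csv',
#       expression: "SELECT * FROM S3Object s WHERE s._1 = 'value'",
#       expression_type: 'SQL',
#       input_serialization: { csv: {} },
#       output_serialization: { csv: {} }
#     ) do |stream|
#       stream.on_records_event { |event| print event.payload.read }
#       stream.on_stats_event { |event| warn event.details.bytes_scanned.to_s }
#     end
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/endpoints.rb0000644000004100000410000032542614563445240020615 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated.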
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 # @api private module Endpoints class AbortMultipartUpload def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: context.params[:key], prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class CompleteMultipartUpload def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: context.params[:key], prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class CopyObject def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: true, ) end end class CreateBucket def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: true, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, 
use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class CreateMultipartUpload def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: context.params[:key], prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class CreateSession def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: true, ) end end class DeleteBucket def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class DeleteBucketAnalyticsConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: 
context.config.disable_s3_express_session_auth, ) end end class DeleteBucketCors def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class DeleteBucketEncryption def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class DeleteBucketIntelligentTieringConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class DeleteBucketInventoryConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: 
context.config.disable_s3_express_session_auth, ) end end class DeleteBucketLifecycle def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class DeleteBucketMetricsConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class DeleteBucketOwnershipControls def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class DeleteBucketPolicy def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: 
context.config.disable_s3_express_session_auth, ) end end class DeleteBucketReplication def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class DeleteBucketTagging def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class DeleteBucketWebsite def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class DeleteObject def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: context.params[:key], prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: 
context.config.disable_s3_express_session_auth, ) end end class DeleteObjectTagging def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class DeleteObjects def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class DeletePublicAccessBlock def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketAccelerateConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: 
context.config.disable_s3_express_session_auth, ) end end class GetBucketAcl def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketAnalyticsConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketCors def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketEncryption def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) 
end end class GetBucketIntelligentTieringConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketInventoryConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketLifecycle def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketLifecycleConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end 
end class GetBucketLocation def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketLogging def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketMetricsConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketNotification def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class 
GetBucketNotificationConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketOwnershipControls def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketPolicy def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketPolicyStatus def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketReplication def 
self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketRequestPayment def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketTagging def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketVersioning def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetBucketWebsite def self.build(context) unless 
context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetObject def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: context.params[:key], prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetObjectAcl def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: context.params[:key], prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetObjectAttributes def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetObjectLegalHold def self.build(context) unless context.config.regional_endpoint 
endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetObjectLockConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetObjectRetention def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetObjectTagging def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetObjectTorrent def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end 
Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class GetPublicAccessBlock def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class HeadBucket def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class HeadObject def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: context.params[:key], prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class ListBucketAnalyticsConfigurations def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( 
bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class ListBucketIntelligentTieringConfigurations def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class ListBucketInventoryConfigurations def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class ListBucketMetricsConfigurations def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class ListBuckets def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket:
nil, region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class ListDirectoryBuckets def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: nil, region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class ListMultipartUploads def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: context.params[:prefix], disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class ListObjectVersions def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: context.params[:prefix], disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class ListObjects def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, 
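# The key-listing operations (ListObjects, ListObjectsV2,
# ListObjectVersions, ListMultipartUploads) also bind `prefix:` from the
# request parameters, as seen below, so the endpoint rules can inspect
# it. Illustrative call (bucket name hypothetical):
#
#   client.list_objects(bucket: 'my-bucket', prefix: 'logs/2024/')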
use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: context.params[:prefix], disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class ListObjectsV2 def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: context.params[:prefix], disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class ListParts def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: context.params[:key], prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketAccelerateConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketAcl def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, 
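# `use_global_endpoint` is true only for clients configured with
# s3_us_east_1_regional_endpoint = 'legacy', which sends us-east-1
# requests to the global s3.amazonaws.com endpoint. A hedged sketch of
# opting into the regional endpoint instead (both config options are
# documented client options):
#
#   client = Aws::S3::Client.new(
#     region: 'us-east-1',
#     s3_us_east_1_regional_endpoint: 'regional'
#   )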
use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketAnalyticsConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketCors def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketEncryption def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketIntelligentTieringConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: 
context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketInventoryConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketLifecycle def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketLifecycleConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketLogging def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: 
context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketMetricsConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketNotification def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketNotificationConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketOwnershipControls def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: 
context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketPolicy def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketReplication def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketRequestPayment def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketTagging def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: 
context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketVersioning def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutBucketWebsite def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutObject def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: context.params[:key], prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutObjectAcl def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, 
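# Object-level operations (for example PutObject above, PutObjectAcl
# here, HeadObject, and UploadPart) additionally bind `key:` from the
# request parameters for use by the endpoint rules; bucket-level
# operations leave `key: nil`.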
force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: context.params[:key], prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutObjectLegalHold def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutObjectLockConfiguration def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutObjectRetention def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutObjectTagging def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: 
context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class PutPublicAccessBlock def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: true, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class RestoreObject def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class SelectObjectContent def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class UploadPart def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: 
context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: context.params[:key], prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end class UploadPartCopy def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: context.params[:bucket], region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: nil, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: true, ) end end class WriteGetObjectResponse def self.build(context) unless context.config.regional_endpoint endpoint = context.config.endpoint.to_s end Aws::S3::EndpointParameters.new( bucket: nil, region: context.config.region, use_fips: context.config.use_fips_endpoint, use_dual_stack: context[:use_dualstack_endpoint], endpoint: endpoint, force_path_style: context.config.force_path_style, accelerate: context[:use_accelerate_endpoint], use_global_endpoint: context.config.s3_us_east_1_regional_endpoint == 'legacy', use_object_lambda_endpoint: true, key: nil, prefix: nil, disable_access_points: nil, disable_multi_region_access_points: context.config.s3_disable_multiregion_access_points, use_arn_region: context.config.s3_use_arn_region, use_s3_express_control_endpoint: nil, disable_s3_express_session_auth: context.config.disable_s3_express_session_auth, ) end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/bucket_versioning.rb0000644000004100000410000003361114563445240022326 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 class BucketVersioning extend Aws::Deprecations # @overload def initialize(bucket_name, options = {}) # @param [String] bucket_name # @option options [Client] :client # @overload def initialize(options = {}) # @option options [required, String] :bucket_name # @option options [Client] :client def initialize(*args) options = Hash === args.last ? args.pop.dup : {} @bucket_name = extract_bucket_name(args, options) @data = options.delete(:data) @client = options.delete(:client) || Client.new(options) @waiter_block_warned = false end # @!group Read-Only Attributes # @return [String] def bucket_name @bucket_name end # The versioning state of the bucket. # @return [String] def status data[:status] end # Specifies whether MFA delete is enabled in the bucket versioning # configuration. This element is only returned if the bucket has been # configured with MFA delete. If the bucket has never been so # configured, this element is not returned. 
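#
# A short illustrative read of the versioning attributes (bucket name
# hypothetical; assumes a configured region and credentials):
#
#   versioning = Aws::S3::BucketVersioning.new('my-bucket')
#   versioning.status     #=> "Enabled", "Suspended", or nil
#   versioning.mfa_delete #=> "Enabled" or "Disabled"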
# @return [String] def mfa_delete data[:mfa_delete] end # @!endgroup # @return [Client] def client @client end # Loads, or reloads {#data} for the current {BucketVersioning}. # Returns `self` making it possible to chain methods. # # bucket_versioning.reload.data # # @return [self] def load resp = Aws::Plugins::UserAgent.feature('resource') do @client.get_bucket_versioning(bucket: @bucket_name) end @data = resp.data self end alias :reload :load # @return [Types::GetBucketVersioningOutput] # Returns the data for this {BucketVersioning}. Calls # {Client#get_bucket_versioning} if {#data_loaded?} is `false`. def data load unless @data @data end # @return [Boolean] # Returns `true` if this resource is loaded. Accessing attributes or # {#data} on an unloaded resource will trigger a call to {#load}. def data_loaded? !!@data end # @deprecated Use [Aws::S3::Client] #wait_until instead # # Waiter polls an API operation until a resource enters a desired # state. # # @note The waiting operation is performed on a copy. The original resource # remains unchanged. # # ## Basic Usage # # The waiter polls until it is successful, until it fails by # entering a terminal state, or until a maximum number of attempts # are made. # # # polls in a loop until condition is true # resource.wait_until(options) {|resource| condition} # # ## Example # # instance.wait_until(max_attempts:10, delay:5) do |instance| # instance.state.name == 'running' # end # # ## Configuration # # You can configure the maximum number of polling attempts, and the # delay (in seconds) between each polling attempt. The waiting condition is # set by passing a block to {#wait_until}: # # # poll for ~25 seconds # resource.wait_until(max_attempts:5,delay:5) {|resource|...} # # ## Callbacks # # You can be notified before each polling attempt and before each # delay. If you throw `:success` or `:failure` from these callbacks, # it will terminate the waiter. # # started_at = Time.now # # poll for 1 hour, instead of a number of attempts # proc = Proc.new do |attempts, response| # throw :failure if Time.now - started_at > 3600 # end # # # disable max attempts # instance.wait_until(before_wait:proc, max_attempts:nil) {...} # # ## Handling Errors # # When a waiter is successful, it returns the Resource. When a waiter # fails, it raises an error. # # begin # resource.wait_until(...) # rescue Aws::Waiters::Errors::WaiterFailed # # resource did not enter the desired state in time # end # # @yieldparam [Resource] resource to be used in the waiting condition. # # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter # terminates because the waiter has entered a state that it will not # transition out of, preventing success. # # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is # encountered while polling for a resource that is not expected.
# @raise [NotImplementedError] Raised when the resource does not # support waiters. # # @option options [Integer] :max_attempts (10) Maximum number of # attempts # @option options [Integer] :delay (10) Delay between each # attempt in seconds # @option options [Proc] :before_attempt (nil) Callback # invoked before each attempt # @option options [Proc] :before_wait (nil) Callback # invoked before each wait # @return [Resource] if the waiter was successful def wait_until(options = {}, &block) self_copy = self.dup attempts = 0 options[:max_attempts] = 10 unless options.key?(:max_attempts) options[:delay] ||= 10 options[:poller] = Proc.new do attempts += 1 if block.call(self_copy) [:success, self_copy] else self_copy.reload unless attempts == options[:max_attempts] :retry end end Aws::Plugins::UserAgent.feature('resource') do Aws::Waiters::Waiter.new(options).wait({}) end end # @!group Actions # @example Request syntax with placeholder values # # bucket_versioning.enable({ # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # mfa: "MFA", # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the data. You must use # this header as a message integrity check to verify that the request # body was not corrupted in transit. For more information, see [RFC # 1864][1]. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :mfa # The concatenation of the authentication device's serial number, a # space, and the value that is displayed on your authentication device. # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [EmptyStructure] def enable(options = {}) options = Aws::Util.deep_merge(options, bucket: @bucket_name, versioning_configuration: { status: "Enabled" } ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_bucket_versioning(options) end resp.data end # @example Request syntax with placeholder values # # bucket_versioning.put({ # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # mfa: "MFA", # versioning_configuration: { # required # mfa_delete: "Enabled", # accepts Enabled, Disabled # status: "Enabled", # accepts Enabled, Suspended # }, # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the data.
You must use # this header as a message integrity check to verify that the request # body was not corrupted in transit. For more information, see [RFC # 1864][1]. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :mfa # The concatenation of the authentication device's serial number, a # space, and the value that is displayed on your authentication device. # @option options [required, Types::VersioningConfiguration] :versioning_configuration # Container for setting the versioning state. # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [EmptyStructure] def put(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_bucket_versioning(options) end resp.data end # @example Request syntax with placeholder values # # bucket_versioning.suspend({ # content_md5: "ContentMD5", # checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256 # mfa: "MFA", # expected_bucket_owner: "AccountId", # }) # @param [Hash] options ({}) # @option options [String] :content_md5 # The base64-encoded 128-bit MD5 digest of the data. You must use # this header as a message integrity check to verify that the request # body was not corrupted in transit. For more information, see [RFC # 1864][1]. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [String] :mfa # The concatenation of the authentication device's serial number, a # space, and the value that is displayed on your authentication device.
# @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [EmptyStructure] def suspend(options = {}) options = Aws::Util.deep_merge(options, bucket: @bucket_name, versioning_configuration: { status: "Suspended" } ) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_bucket_versioning(options) end resp.data end # @!group Associations # @return [Bucket] def bucket Bucket.new( name: @bucket_name, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end class Collection < Aws::Resources::Collection; end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/express_credentials_cache.rb0000644000004100000410000000115214563445240023772 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 # @api private class ExpressCredentialsCache def initialize @credentials = {} @mutex = Mutex.new end def [](bucket_name) @mutex.synchronize { @credentials[bucket_name] } end def []=(bucket_name, credential_provider) @mutex.synchronize do @credentials[bucket_name] = credential_provider end end def clear @mutex.synchronize { @credentials = {} } end end # @api private EXPRESS_CREDENTIALS_CACHE = ExpressCredentialsCache.new end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/file_uploader.rb0000644000004100000410000000470414563445240021421 0ustar www-datawww-data# frozen_string_literal: true require 'pathname' module Aws module S3 # @api private class FileUploader ONE_HUNDRED_MEGABYTES = 100 * 1024 * 1024 # @param [Hash] options # @option options [Client] :client # @option options [Integer] :multipart_threshold (104857600) def initialize(options = {}) @options = options @client = options[:client] || Client.new @multipart_threshold = options[:multipart_threshold] || ONE_HUNDRED_MEGABYTES end # @return [Client] attr_reader :client # @return [Integer] Files larger than or equal to this in bytes are uploaded # using a {MultipartFileUploader}. attr_reader :multipart_threshold # @param [String, Pathname, File, Tempfile] source The file to upload. # @option options [required, String] :bucket The bucket to upload to. # @option options [required, String] :key The key for the object. # @option options [Proc] :progress_callback # A Proc that will be called when each chunk of the upload is sent. # It will be invoked with [bytes_read], [total_sizes] # @option options [Integer] :thread_count # The thread count to use for multipart uploads. Ignored for # objects smaller than the multipart threshold. 
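#
# Although this class is marked `@api private`, it is the uploader used
# by `Object#upload_file`; a minimal sketch of calling it directly
# (bucket name and path are hypothetical; assumes configured
# credentials):
#
#   uploader = Aws::S3::FileUploader.new(multipart_threshold: 8 * 1024 * 1024)
#   uploader.upload('/tmp/report.csv', bucket: 'my-bucket', key: 'report.csv')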
# @return [void] def upload(source, options = {}) Aws::Plugins::UserAgent.feature('s3-transfer') do if File.size(source) >= multipart_threshold MultipartFileUploader.new(@options).upload(source, options) else # remove multipart parameters not supported by put_object options.delete(:thread_count) put_object(source, options) end end end private def open_file(source) if String === source || Pathname === source File.open(source, 'rb') { |file| yield(file) } else yield(source) end end def put_object(source, options) if (callback = options.delete(:progress_callback)) options[:on_chunk_sent] = single_part_progress(callback) end open_file(source) do |file| @client.put_object(options.merge(body: file)) end end def single_part_progress(progress_callback) proc do |_chunk, bytes_read, total_size| progress_callback.call([bytes_read], [total_size]) end end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/file_part.rb0000644000004100000410000000337314563445240020555 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 # A utility class that provides an IO-like interface to a portion of a file # on disk. # @api private class FilePart # @option options [required, String, Pathname, File, Tempfile] :source # The file to upload. # # @option options [required, Integer] :offset The file part will read # starting at this byte offset. # # @option options [required, Integer] :size The maximum number of bytes to # read from the `:offset`. def initialize(options = {}) @source = options[:source] @first_byte = options[:offset] @last_byte = @first_byte + options[:size] @size = options[:size] @file = nil end # @return [String, Pathname, File, Tempfile] attr_reader :source # @return [Integer] attr_reader :first_byte # @return [Integer] attr_reader :last_byte # @return [Integer] attr_reader :size def read(bytes = nil, output_buffer = nil) open_file unless @file read_from_file(bytes, output_buffer) end def rewind if @file @file.seek(@first_byte) @position = @first_byte end 0 end def close @file.close if @file end private def open_file @file = File.open(@source, 'rb') rewind end def read_from_file(bytes, output_buffer) length = [remaining_bytes, *bytes].min data = @file.read(length, output_buffer) @position += data ? data.bytesize : 0 data.to_s unless bytes && (data.nil? || data.empty?) end def remaining_bytes @last_byte - @position end end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/multipart_upload.rb0000644000004100000410000005025514563445240022176 0ustar www-datawww-data# frozen_string_literal: true # WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 class MultipartUpload extend Aws::Deprecations # @overload def initialize(bucket_name, object_key, id, options = {}) # @param [String] bucket_name # @param [String] object_key # @param [String] id # @option options [Client] :client # @overload def initialize(options = {}) # @option options [required, String] :bucket_name # @option options [required, String] :object_key # @option options [required, String] :id # @option options [Client] :client def initialize(*args) options = Hash === args.last ? 
args.pop.dup : {} @bucket_name = extract_bucket_name(args, options) @object_key = extract_object_key(args, options) @id = extract_id(args, options) @data = options.delete(:data) @client = options.delete(:client) || Client.new(options) @waiter_block_warned = false end # @!group Read-Only Attributes # @return [String] def bucket_name @bucket_name end # @return [String] def object_key @object_key end # @return [String] def id @id end # Upload ID that identifies the multipart upload. # @return [String] def upload_id data[:upload_id] end # Key of the object for which the multipart upload was initiated. # @return [String] def key data[:key] end # Date and time at which the multipart upload was initiated. # @return [Time] def initiated data[:initiated] end # The class of storage used to store the object. # # **Directory buckets** - Only the S3 Express One Zone storage class is # supported by directory buckets to store objects. # # # @return [String] def storage_class data[:storage_class] end # Specifies the owner of the object that is part of the multipart # upload. # # **Directory buckets** - The bucket owner is returned as the object # owner for all the objects. # # # @return [Types::Owner] def owner data[:owner] end # Identifies who initiated the multipart upload. # @return [Types::Initiator] def initiator data[:initiator] end # The algorithm that was used to create a checksum of the object. # @return [String] def checksum_algorithm data[:checksum_algorithm] end # @!endgroup # @return [Client] def client @client end # @raise [NotImplementedError] # @api private def load msg = "#load is not implemented, data only available via enumeration" raise NotImplementedError, msg end alias :reload :load # @raise [NotImplementedError] Raised when {#data_loaded?} is `false`. # @return [Types::MultipartUpload] # Returns the data for this {MultipartUpload}. def data load unless @data @data end # @return [Boolean] # Returns `true` if this resource is loaded. Accessing attributes or # {#data} on an unloaded resource will trigger a call to {#load}. def data_loaded? !!@data end # @deprecated Use [Aws::S3::Client] #wait_until instead # # Waiter polls an API operation until a resource enters a desired # state. # # @note The waiting operation is performed on a copy. The original resource # remains unchanged. # # ## Basic Usage # # The waiter polls until it is successful, until it fails by # entering a terminal state, or until a maximum number of attempts # are made. # # # polls in a loop until condition is true # resource.wait_until(options) {|resource| condition} # # ## Example # # instance.wait_until(max_attempts:10, delay:5) do |instance| # instance.state.name == 'running' # end # # ## Configuration # # You can configure the maximum number of polling attempts, and the # delay (in seconds) between each polling attempt. The waiting condition is # set by passing a block to {#wait_until}: # # # poll for ~25 seconds # resource.wait_until(max_attempts:5,delay:5) {|resource|...} # # ## Callbacks # # You can be notified before each polling attempt and before each # delay. If you throw `:success` or `:failure` from these callbacks, # it will terminate the waiter. # # started_at = Time.now # # poll for 1 hour, instead of a number of attempts # proc = Proc.new do |attempts, response| # throw :failure if Time.now - started_at > 3600 # end # # # disable max attempts # instance.wait_until(before_wait:proc, max_attempts:nil) {...} # # ## Handling Errors # # When a waiter is successful, it returns the Resource.
    # @deprecated Use [Aws::S3::Client] #wait_until instead
    #
    # Waiter polls an API operation until a resource enters a desired
    # state.
    #
    # @note The waiting operation is performed on a copy. The original resource
    #   remains unchanged.
    #
    # ## Basic Usage
    #
    # The waiter polls until it is successful, until it fails by entering a
    # terminal state, or until a maximum number of attempts have been made.
    #
    #     # polls in a loop until condition is true
    #     resource.wait_until(options) {|resource| condition}
    #
    # ## Example
    #
    #     instance.wait_until(max_attempts:10, delay:5) do |instance|
    #       instance.state.name == 'running'
    #     end
    #
    # ## Configuration
    #
    # You can configure the maximum number of polling attempts, and the
    # delay (in seconds) between each polling attempt. The waiting condition is
    # set by passing a block to {#wait_until}:
    #
    #     # poll for ~25 seconds
    #     resource.wait_until(max_attempts:5,delay:5) {|resource|...}
    #
    # ## Callbacks
    #
    # You can be notified before each polling attempt and before each
    # delay. If you throw `:success` or `:failure` from these callbacks,
    # it will terminate the waiter.
    #
    #     started_at = Time.now
    #     # poll for 1 hour, instead of a number of attempts
    #     proc = Proc.new do |attempts, response|
    #       throw :failure if Time.now - started_at > 3600
    #     end
    #
    #     # disable max attempts
    #     instance.wait_until(before_wait:proc, max_attempts:nil) {...}
    #
    # ## Handling Errors
    #
    # When a waiter is successful, it returns the Resource. When a waiter
    # fails, it raises an error.
    #
    #     begin
    #       resource.wait_until(...)
    #     rescue Aws::Waiters::Errors::WaiterFailed
    #       # resource did not enter the desired state in time
    #     end
    #
    # @yieldparam [Resource] resource to be used in the waiting condition.
    #
    # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter
    #   terminates because the waiter has entered a state that it will not
    #   transition out of, preventing success.
    #
    # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the
    #   configured maximum number of attempts have been made and the waiter
    #   is not yet successful.
    #
    # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is
    #   encountered while polling for a resource that is not expected.
    #
    # @raise [NotImplementedError] Raised when the resource does not
    #   support waiting, i.e. when {#reload} is not implemented.
    #
    # @option options [Integer] :max_attempts (10) Maximum number of
    #   attempts
    # @option options [Integer] :delay (10) Delay between each
    #   attempt in seconds
    # @option options [Proc] :before_attempt (nil) Callback
    #   invoked before each attempt
    # @option options [Proc] :before_wait (nil) Callback
    #   invoked before each wait
    # @return [Resource] if the waiter was successful
    def wait_until(options = {}, &block)
      self_copy = self.dup
      attempts = 0
      options[:max_attempts] = 10 unless options.key?(:max_attempts)
      options[:delay] ||= 10
      options[:poller] = Proc.new do
        attempts += 1
        if block.call(self_copy)
          [:success, self_copy]
        else
          self_copy.reload unless attempts == options[:max_attempts]
          :retry
        end
      end
      Aws::Plugins::UserAgent.feature('resource') do
        Aws::Waiters::Waiter.new(options).wait({})
      end
    end

    # @!group Actions

    # @example Request syntax with placeholder values
    #
    #   multipart_upload.abort({
    #     request_payer: "requester", # accepts requester
    #     expected_bucket_owner: "AccountId",
    #   })
    # @param [Hash] options ({})
    # @option options [String] :request_payer
    #   Confirms that the requester knows that they will be charged for the
    #   request. Bucket owners need not specify this parameter in their
    #   requests. If either the source or destination S3 bucket has Requester
    #   Pays enabled, the requester will pay for corresponding charges to copy
    #   the object. For information about downloading objects from Requester
    #   Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
    #   in the *Amazon S3 User Guide*.
    #
    #   This functionality is not supported for directory buckets.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
    # @option options [String] :expected_bucket_owner
    #   The account ID of the expected bucket owner. If the account ID that
    #   you provide does not match the actual owner of the bucket, the request
    #   fails with the HTTP status code `403 Forbidden` (access denied).
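    # @example A hedged sketch of aborting an upload; bucket, key, and upload
    #   ID below are hypothetical values:
    #
    #     upload = Aws::S3::MultipartUpload.new(
    #       'amzn-s3-demo-bucket', 'my/key', 'ExampleUploadId'
    #     )
    #     upload.abort(expected_bucket_owner: '111122223333')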
    # @return [Types::AbortMultipartUploadOutput]
    def abort(options = {})
      options = options.merge(
        bucket: @bucket_name,
        key: @object_key,
        upload_id: @id
      )
      resp = Aws::Plugins::UserAgent.feature('resource') do
        @client.abort_multipart_upload(options)
      end
      resp.data
    end
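    # A hedged sketch (hypothetical part data) of completing an upload from
    # the ETags returned by each UploadPart call; `etags` is assumed to have
    # been collected by the caller:
    #
    #   parts = etags.each_with_index.map do |etag, i|
    #     { etag: etag, part_number: i + 1 }
    #   end
    #   upload.complete(multipart_upload: { parts: parts })
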
    # @example Request syntax with placeholder values
    #
    #   object = multipart_upload.complete({
    #     multipart_upload: {
    #       parts: [
    #         {
    #           etag: "ETag",
    #           checksum_crc32: "ChecksumCRC32",
    #           checksum_crc32c: "ChecksumCRC32C",
    #           checksum_sha1: "ChecksumSHA1",
    #           checksum_sha256: "ChecksumSHA256",
    #           part_number: 1,
    #         },
    #       ],
    #     },
    #     checksum_crc32: "ChecksumCRC32",
    #     checksum_crc32c: "ChecksumCRC32C",
    #     checksum_sha1: "ChecksumSHA1",
    #     checksum_sha256: "ChecksumSHA256",
    #     request_payer: "requester", # accepts requester
    #     expected_bucket_owner: "AccountId",
    #     sse_customer_algorithm: "SSECustomerAlgorithm",
    #     sse_customer_key: "SSECustomerKey",
    #     sse_customer_key_md5: "SSECustomerKeyMD5",
    #   })
    # @param [Hash] options ({})
    # @option options [Types::CompletedMultipartUpload] :multipart_upload
    #   The container for the multipart upload request information.
    # @option options [String] :checksum_crc32
    #   This header can be used as a data integrity check to verify that the
    #   data received is the same data that was originally sent. This header
    #   specifies the base64-encoded, 32-bit CRC32 checksum of the object. For
    #   more information, see [Checking object integrity][1] in the *Amazon S3
    #   User Guide*.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
    # @option options [String] :checksum_crc32c
    #   This header can be used as a data integrity check to verify that the
    #   data received is the same data that was originally sent. This header
    #   specifies the base64-encoded, 32-bit CRC32C checksum of the object.
    #   For more information, see [Checking object integrity][1] in the
    #   *Amazon S3 User Guide*.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
    # @option options [String] :checksum_sha1
    #   This header can be used as a data integrity check to verify that the
    #   data received is the same data that was originally sent. This header
    #   specifies the base64-encoded, 160-bit SHA-1 digest of the object. For
    #   more information, see [Checking object integrity][1] in the *Amazon S3
    #   User Guide*.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
    # @option options [String] :checksum_sha256
    #   This header can be used as a data integrity check to verify that the
    #   data received is the same data that was originally sent. This header
    #   specifies the base64-encoded, 256-bit SHA-256 digest of the object.
    #   For more information, see [Checking object integrity][1] in the
    #   *Amazon S3 User Guide*.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
    # @option options [String] :request_payer
    #   Confirms that the requester knows that they will be charged for the
    #   request. Bucket owners need not specify this parameter in their
    #   requests. If either the source or destination S3 bucket has Requester
    #   Pays enabled, the requester will pay for corresponding charges to copy
    #   the object. For information about downloading objects from Requester
    #   Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
    #   in the *Amazon S3 User Guide*.
    #
    #   This functionality is not supported for directory buckets.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
    # @option options [String] :expected_bucket_owner
    #   The account ID of the expected bucket owner. If the account ID that
    #   you provide does not match the actual owner of the bucket, the request
    #   fails with the HTTP status code `403 Forbidden` (access denied).
    # @option options [String] :sse_customer_algorithm
    #   The server-side encryption (SSE) algorithm used to encrypt the object.
    #   This parameter is required only when the object was created using a
    #   checksum algorithm or if your bucket policy requires the use of SSE-C.
    #   For more information, see [Protecting data using SSE-C keys][1] in the
    #   *Amazon S3 User Guide*.
    #
    #   This functionality is not supported for directory buckets.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key
    # @option options [String] :sse_customer_key
    #   The server-side encryption (SSE) customer managed key. This parameter
    #   is needed only when the object was created using a checksum algorithm.
    #   For more information, see [Protecting data using SSE-C keys][1] in the
    #   *Amazon S3 User Guide*.
    #
    #   This functionality is not supported for directory buckets.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
    # @option options [String] :sse_customer_key_md5
    #   The MD5 server-side encryption (SSE) customer managed key. This
    #   parameter is needed only when the object was created using a checksum
    #   algorithm. For more information, see [Protecting data using SSE-C
    #   keys][1] in the *Amazon S3 User Guide*.
    #
    #   This functionality is not supported for directory buckets.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
    # @return [Object]
    def complete(options = {})
      options = options.merge(
        bucket: @bucket_name,
        key: @object_key,
        upload_id: @id
      )
      Aws::Plugins::UserAgent.feature('resource') do
        @client.complete_multipart_upload(options)
      end
      Object.new(
        bucket_name: @bucket_name,
        key: @object_key,
        client: @client
      )
    end

    # @!group Associations

    # @return [Object]
    def object
      Object.new(
        bucket_name: @bucket_name,
        key: @object_key,
        client: @client
      )
    end

    # @param [String] part_number
    # @return [MultipartUploadPart]
    def part(part_number)
      MultipartUploadPart.new(
        bucket_name: @bucket_name,
        object_key: @object_key,
        multipart_upload_id: @id,
        part_number: part_number,
        client: @client
      )
    end
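    # A hedged sketch (hypothetical body) of uploading a single part through
    # the #part association; part numbers start at 1 and the returned ETag is
    # needed later for #complete:
    #
    #   part = upload.part(1)
    #   resp = part.upload(body: StringIO.new('hello'))
    #   resp.etag
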
    # @example Request syntax with placeholder values
    #
    #   parts = multipart_upload.parts({
    #     request_payer: "requester", # accepts requester
    #     expected_bucket_owner: "AccountId",
    #     sse_customer_algorithm: "SSECustomerAlgorithm",
    #     sse_customer_key: "SSECustomerKey",
    #     sse_customer_key_md5: "SSECustomerKeyMD5",
    #   })
    # @param [Hash] options ({})
    # @option options [String] :request_payer
    #   Confirms that the requester knows that they will be charged for the
    #   request. Bucket owners need not specify this parameter in their
    #   requests. If either the source or destination S3 bucket has Requester
    #   Pays enabled, the requester will pay for corresponding charges to copy
    #   the object. For information about downloading objects from Requester
    #   Pays buckets, see [Downloading Objects in Requester Pays Buckets][1]
    #   in the *Amazon S3 User Guide*.
    #
    #   This functionality is not supported for directory buckets.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
    # @option options [String] :expected_bucket_owner
    #   The account ID of the expected bucket owner. If the account ID that
    #   you provide does not match the actual owner of the bucket, the request
    #   fails with the HTTP status code `403 Forbidden` (access denied).
    # @option options [String] :sse_customer_algorithm
    #   The server-side encryption (SSE) algorithm used to encrypt the object.
    #   This parameter is needed only when the object was created using a
    #   checksum algorithm. For more information, see [Protecting data using
    #   SSE-C keys][1] in the *Amazon S3 User Guide*.
    #
    #   This functionality is not supported for directory buckets.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
    # @option options [String] :sse_customer_key
    #   The server-side encryption (SSE) customer managed key. This parameter
    #   is needed only when the object was created using a checksum algorithm.
    #   For more information, see [Protecting data using SSE-C keys][1] in the
    #   *Amazon S3 User Guide*.
    #
    #   This functionality is not supported for directory buckets.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
    # @option options [String] :sse_customer_key_md5
    #   The MD5 server-side encryption (SSE) customer managed key. This
    #   parameter is needed only when the object was created using a checksum
    #   algorithm. For more information, see [Protecting data using SSE-C
    #   keys][1] in the *Amazon S3 User Guide*.
    #
    #   This functionality is not supported for directory buckets.
    #
    #   [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
    # @return [MultipartUploadPart::Collection]
    def parts(options = {})
      batches = Enumerator.new do |y|
        options = options.merge(
          bucket: @bucket_name,
          key: @object_key,
          upload_id: @id
        )
        resp = Aws::Plugins::UserAgent.feature('resource') do
          @client.list_parts(options)
        end
        resp.each_page do |page|
          batch = []
          page.data.parts.each do |p|
            batch << MultipartUploadPart.new(
              bucket_name: options[:bucket],
              object_key: options[:key],
              multipart_upload_id: options[:upload_id],
              part_number: p.part_number,
              data: p,
              client: @client
            )
          end
          y.yield(batch)
        end
      end
      MultipartUploadPart::Collection.new(batches)
    end

    # @deprecated
    # @api private
    def identifiers
      {
        bucket_name: @bucket_name,
        object_key: @object_key,
        id: @id
      }
    end
    deprecated(:identifiers)

    private

    def extract_bucket_name(args, options)
      value = args[0] || options.delete(:bucket_name)
      case value
      when String then value
      when nil then raise ArgumentError, "missing required option :bucket_name"
      else
        msg = "expected :bucket_name to be a String, got #{value.class}"
        raise ArgumentError, msg
      end
    end

    def extract_object_key(args, options)
      value = args[1] || options.delete(:object_key)
      case value
      when String then value
      when nil then raise ArgumentError, "missing required option :object_key"
      else
        msg = "expected :object_key to be a String, got #{value.class}"
        raise ArgumentError, msg
      end
    end

    def extract_id(args, options)
      value = args[2] || options.delete(:id)
      case value
      when String then value
      when nil then raise ArgumentError, "missing required option :id"
      else
        msg = "expected :id to be a String, got #{value.class}"
        raise ArgumentError, msg
      end
    end

    class Collection < Aws::Resources::Collection; end
  end
end
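# A hedged sketch of enumerating the parts of an in-progress upload; the
# collection pages through list_parts transparently:
#
#   upload.parts.each do |part|
#     puts "part #{part.part_number}: #{part.size} bytes"
#   end
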
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/endpoint_parameters.rb0000644000004100000410000001475014563445240022654 0ustar www-datawww-data
# frozen_string_literal: true

# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE

module Aws::S3
  # Endpoint parameters used to influence endpoints per request.
  #
  # @!attribute bucket
  #   The S3 bucket used to send the request. This is an optional parameter that will be set automatically for operations that are scoped to an S3 bucket.
  #
  #   @return [String]
  #
  # @!attribute region
  #   The AWS region used to dispatch the request.
  #
  #   @return [String]
  #
  # @!attribute use_fips
  #   When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.
  #
  #   @return [Boolean]
  #
  # @!attribute use_dual_stack
  #   When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.
  #
  #   @return [Boolean]
  #
  # @!attribute endpoint
  #   Override the endpoint used to send this request
  #
  #   @return [String]
  #
  # @!attribute force_path_style
  #   When true, force a path-style endpoint to be used where the bucket name is part of the path.
  #
  #   @return [Boolean]
  #
  # @!attribute accelerate
  #   When true, use S3 Accelerate. NOTE: Not all regions support S3 accelerate.
  #
  #   @return [Boolean]
  #
  # @!attribute use_global_endpoint
  #   Whether the global endpoint should be used, rather than the regional endpoint for us-east-1.
  #
  #   @return [Boolean]
  #
  # @!attribute use_object_lambda_endpoint
  #   Internal parameter to use object lambda endpoint for an operation (eg: WriteGetObjectResponse)
  #
  #   @return [Boolean]
  #
  # @!attribute key
  #   The S3 Key used to send the request. This is an optional parameter that will be set automatically for operations that are scoped to an S3 Key.
  #
  #   @return [String]
  #
  # @!attribute prefix
  #   The S3 Prefix used to send the request. This is an optional parameter that will be set automatically for operations that are scoped to an S3 Prefix.
  #
  #   @return [String]
  #
  # @!attribute disable_access_points
  #   Internal parameter to disable Access Point Buckets
  #
  #   @return [Boolean]
  #
  # @!attribute disable_multi_region_access_points
  #   Whether multi-region access points (MRAP) should be disabled.
  #
  #   @return [Boolean]
  #
  # @!attribute use_arn_region
  #   When an Access Point ARN is provided and this flag is enabled, the SDK MUST use the ARN's region when constructing the endpoint instead of the client's configured region.
  #
  #   @return [Boolean]
  #
  # @!attribute use_s3_express_control_endpoint
  #   Internal parameter to indicate whether S3Express operation should use control plane, (ex. CreateBucket)
  #
  #   @return [Boolean]
  #
  # @!attribute disable_s3_express_session_auth
  #   Parameter to indicate whether S3Express session auth should be disabled
  #
  #   @return [Boolean]
  #
  EndpointParameters = Struct.new(
    :bucket,
    :region,
    :use_fips,
    :use_dual_stack,
    :endpoint,
    :force_path_style,
    :accelerate,
    :use_global_endpoint,
    :use_object_lambda_endpoint,
    :key,
    :prefix,
    :disable_access_points,
    :disable_multi_region_access_points,
    :use_arn_region,
    :use_s3_express_control_endpoint,
    :disable_s3_express_session_auth,
  ) do
    include Aws::Structure

    # @api private
    class << self
      PARAM_MAP = {
        'Bucket' => :bucket,
        'Region' => :region,
        'UseFIPS' => :use_fips,
        'UseDualStack' => :use_dual_stack,
        'Endpoint' => :endpoint,
        'ForcePathStyle' => :force_path_style,
        'Accelerate' => :accelerate,
        'UseGlobalEndpoint' => :use_global_endpoint,
        'UseObjectLambdaEndpoint' => :use_object_lambda_endpoint,
        'Key' => :key,
        'Prefix' => :prefix,
        'DisableAccessPoints' => :disable_access_points,
        'DisableMultiRegionAccessPoints' => :disable_multi_region_access_points,
        'UseArnRegion' => :use_arn_region,
        'UseS3ExpressControlEndpoint' => :use_s3_express_control_endpoint,
        'DisableS3ExpressSessionAuth' => :disable_s3_express_session_auth,
      }.freeze
    end

    def initialize(options = {})
      self[:bucket] = options[:bucket]
      self[:region] = options[:region]
      self[:use_fips] = options[:use_fips]
      self[:use_fips] = false if self[:use_fips].nil?
      if self[:use_fips].nil?
        raise ArgumentError, "Missing required EndpointParameter: :use_fips"
      end
      self[:use_dual_stack] = options[:use_dual_stack]
      self[:use_dual_stack] = false if self[:use_dual_stack].nil?
      if self[:use_dual_stack].nil?
        raise ArgumentError, "Missing required EndpointParameter: :use_dual_stack"
      end
      self[:endpoint] = options[:endpoint]
      self[:force_path_style] = options[:force_path_style]
      self[:force_path_style] = false if self[:force_path_style].nil?
      if self[:force_path_style].nil?
        raise ArgumentError, "Missing required EndpointParameter: :force_path_style"
      end
      self[:accelerate] = options[:accelerate]
      self[:accelerate] = false if self[:accelerate].nil?
      if self[:accelerate].nil?
        raise ArgumentError, "Missing required EndpointParameter: :accelerate"
      end
      self[:use_global_endpoint] = options[:use_global_endpoint]
      self[:use_global_endpoint] = false if self[:use_global_endpoint].nil?
      if self[:use_global_endpoint].nil?
        raise ArgumentError, "Missing required EndpointParameter: :use_global_endpoint"
      end
      self[:use_object_lambda_endpoint] = options[:use_object_lambda_endpoint]
      self[:key] = options[:key]
      self[:prefix] = options[:prefix]
      self[:disable_access_points] = options[:disable_access_points]
      self[:disable_multi_region_access_points] = options[:disable_multi_region_access_points]
      self[:disable_multi_region_access_points] = false if self[:disable_multi_region_access_points].nil?
      if self[:disable_multi_region_access_points].nil?
        raise ArgumentError, "Missing required EndpointParameter: :disable_multi_region_access_points"
      end
      self[:use_arn_region] = options[:use_arn_region]
      self[:use_s3_express_control_endpoint] = options[:use_s3_express_control_endpoint]
      self[:disable_s3_express_session_auth] = options[:disable_s3_express_session_auth]
    end
  end
end
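# A hedged sketch (illustrative values) of how defaults are applied when
# constructing endpoint parameters directly; unset booleans fall back to
# false rather than raising:
#
#   params = Aws::S3::EndpointParameters.new(
#     region: 'us-east-1', bucket: 'amzn-s3-demo-bucket'
#   )
#   params.use_fips         # => false
#   params.force_path_style # => false
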
aws-sdk-s3-1.143.0/lib/aws-sdk-s3/bucket_website.rb0000644000004100000410000002514314563445240021606 0ustar www-datawww-data
# frozen_string_literal: true

# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE

module Aws::S3
  class BucketWebsite
    extend Aws::Deprecations

    # @overload def initialize(bucket_name, options = {})
    #   @param [String] bucket_name
    #   @option options [Client] :client
    # @overload def initialize(options = {})
    #   @option options [required, String] :bucket_name
    #   @option options [Client] :client
    def initialize(*args)
      options = Hash === args.last ? args.pop.dup : {}
      @bucket_name = extract_bucket_name(args, options)
      @data = options.delete(:data)
      @client = options.delete(:client) || Client.new(options)
      @waiter_block_warned = false
    end

    # @!group Read-Only Attributes

    # @return [String]
    def bucket_name
      @bucket_name
    end

    # Specifies the redirect behavior of all requests to a website endpoint
    # of an Amazon S3 bucket.
    # @return [Types::RedirectAllRequestsTo]
    def redirect_all_requests_to
      data[:redirect_all_requests_to]
    end

    # The name of the index document for the website (for example
    # `index.html`).
    # @return [Types::IndexDocument]
    def index_document
      data[:index_document]
    end

    # The object key name of the website error document to use for 4XX class
    # errors.
    # @return [Types::ErrorDocument]
    def error_document
      data[:error_document]
    end

    # Rules that define when a redirect is applied and the redirect
    # behavior.
    # @return [Array<Types::RoutingRule>]
    def routing_rules
      data[:routing_rules]
    end

    # @!endgroup

    # @return [Client]
    def client
      @client
    end

    # Loads, or reloads {#data} for the current {BucketWebsite}.
    # Returns `self` making it possible to chain methods.
    #
    #     bucket_website.reload.data
    #
    # @return [self]
    def load
      resp = Aws::Plugins::UserAgent.feature('resource') do
        @client.get_bucket_website(bucket: @bucket_name)
      end
      @data = resp.data
      self
    end
    alias :reload :load

    # @return [Types::GetBucketWebsiteOutput]
    #   Returns the data for this {BucketWebsite}. Calls
    #   {Client#get_bucket_website} if {#data_loaded?} is `false`.
    def data
      load unless @data
      @data
    end

    # @return [Boolean]
    #   Returns `true` if this resource is loaded. Accessing attributes or
    #   {#data} on an unloaded resource will trigger a call to {#load}.
    def data_loaded?
      !!@data
    end
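    # A hedged sketch (hypothetical bucket, assuming configured credentials
    # and region) of reading a website configuration; #data calls
    # get_bucket_website lazily on first access:
    #
    #   site = Aws::S3::BucketWebsite.new('amzn-s3-demo-bucket')
    #   site.index_document.suffix # => e.g. "index.html"
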
    # @deprecated Use [Aws::S3::Client] #wait_until instead
    #
    # Waiter polls an API operation until a resource enters a desired
    # state.
    #
    # @note The waiting operation is performed on a copy. The original resource
    #   remains unchanged.
    #
    # ## Basic Usage
    #
    # The waiter polls until it is successful, until it fails by entering a
    # terminal state, or until a maximum number of attempts have been made.
    #
    #     # polls in a loop until condition is true
    #     resource.wait_until(options) {|resource| condition}
    #
    # ## Example
    #
    #     instance.wait_until(max_attempts:10, delay:5) do |instance|
    #       instance.state.name == 'running'
    #     end
    #
    # ## Configuration
    #
    # You can configure the maximum number of polling attempts, and the
    # delay (in seconds) between each polling attempt. The waiting condition is
    # set by passing a block to {#wait_until}:
    #
    #     # poll for ~25 seconds
    #     resource.wait_until(max_attempts:5,delay:5) {|resource|...}
    #
    # ## Callbacks
    #
    # You can be notified before each polling attempt and before each
    # delay. If you throw `:success` or `:failure` from these callbacks,
    # it will terminate the waiter.
    #
    #     started_at = Time.now
    #     # poll for 1 hour, instead of a number of attempts
    #     proc = Proc.new do |attempts, response|
    #       throw :failure if Time.now - started_at > 3600
    #     end
    #
    #     # disable max attempts
    #     instance.wait_until(before_wait:proc, max_attempts:nil) {...}
    #
    # ## Handling Errors
    #
    # When a waiter is successful, it returns the Resource. When a waiter
    # fails, it raises an error.
    #
    #     begin
    #       resource.wait_until(...)
    #     rescue Aws::Waiters::Errors::WaiterFailed
    #       # resource did not enter the desired state in time
    #     end
    #
    # @yieldparam [Resource] resource to be used in the waiting condition.
    #
    # @raise [Aws::Waiters::Errors::FailureStateError] Raised when the waiter
    #   terminates because the waiter has entered a state that it will not
    #   transition out of, preventing success.
    #
    # @raise [Aws::Waiters::Errors::TooManyAttemptsError] Raised when the
    #   configured maximum number of attempts have been made and the waiter
    #   is not yet successful.
    #
    # @raise [Aws::Waiters::Errors::UnexpectedError] Raised when an error is
    #   encountered while polling for a resource that is not expected.
    #
    # @raise [NotImplementedError] Raised when the resource does not
    #   support waiting, i.e. when {#reload} is not implemented.
    #
    # @option options [Integer] :max_attempts (10) Maximum number of
    #   attempts
    # @option options [Integer] :delay (10) Delay between each
    #   attempt in seconds
    # @option options [Proc] :before_attempt (nil) Callback
    #   invoked before each attempt
    # @option options [Proc] :before_wait (nil) Callback
    #   invoked before each wait
    # @return [Resource] if the waiter was successful
    def wait_until(options = {}, &block)
      self_copy = self.dup
      attempts = 0
      options[:max_attempts] = 10 unless options.key?(:max_attempts)
      options[:delay] ||= 10
      options[:poller] = Proc.new do
        attempts += 1
        if block.call(self_copy)
          [:success, self_copy]
        else
          self_copy.reload unless attempts == options[:max_attempts]
          :retry
        end
      end
      Aws::Plugins::UserAgent.feature('resource') do
        Aws::Waiters::Waiter.new(options).wait({})
      end
    end

    # @!group Actions

    # @example Request syntax with placeholder values
    #
    #   bucket_website.delete({
    #     expected_bucket_owner: "AccountId",
    #   })
    # @param [Hash] options ({})
    # @option options [String] :expected_bucket_owner
    #   The account ID of the expected bucket owner. If the account ID that
    #   you provide does not match the actual owner of the bucket, the request
    #   fails with the HTTP status code `403 Forbidden` (access denied).
    # @return [EmptyStructure]
    def delete(options = {})
      options = options.merge(bucket: @bucket_name)
      resp = Aws::Plugins::UserAgent.feature('resource') do
        @client.delete_bucket_website(options)
      end
      resp.data
    end

    # @example Request syntax with placeholder values
    #
    #   bucket_website.put({
    #     content_md5: "ContentMD5",
    #     checksum_algorithm: "CRC32", # accepts CRC32, CRC32C, SHA1, SHA256
    #     website_configuration: { # required
    #       error_document: {
    #         key: "ObjectKey", # required
    #       },
    #       index_document: {
    #         suffix: "Suffix", # required
    #       },
    #       redirect_all_requests_to: {
    #         host_name: "HostName", # required
    #         protocol: "http", # accepts http, https
    #       },
    #       routing_rules: [
    #         {
    #           condition: {
    #             http_error_code_returned_equals: "HttpErrorCodeReturnedEquals",
    #             key_prefix_equals: "KeyPrefixEquals",
    #           },
    #           redirect: { # required
    #             host_name: "HostName",
    #             http_redirect_code: "HttpRedirectCode",
    #             protocol: "http", # accepts http, https
    #             replace_key_prefix_with: "ReplaceKeyPrefixWith",
    #             replace_key_with: "ReplaceKeyWith",
    #           },
    #         },
    #       ],
    #     },
    #     expected_bucket_owner: "AccountId",
    #   })
    # @param [Hash] options ({})
    # @option options [String] :content_md5
    #   The base64-encoded 128-bit MD5 digest of the data.
You must use this # header as a message integrity check to verify that the request body # was not corrupted in transit. For more information, see [RFC 1864][1]. # # For requests made using the Amazon Web Services Command Line Interface # (CLI) or Amazon Web Services SDKs, this field is calculated # automatically. # # # # [1]: http://www.ietf.org/rfc/rfc1864.txt # @option options [String] :checksum_algorithm # Indicates the algorithm used to create the checksum for the object # when you use the SDK. This header will not provide any additional # functionality if you don't use the SDK. When you send this header, # there must be a corresponding `x-amz-checksum` or `x-amz-trailer` # header sent. Otherwise, Amazon S3 fails the request with the HTTP # status code `400 Bad Request`. For more information, see [Checking # object integrity][1] in the *Amazon S3 User Guide*. # # If you provide an individual checksum, Amazon S3 ignores any provided # `ChecksumAlgorithm` parameter. # # # # [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html # @option options [required, Types::WebsiteConfiguration] :website_configuration # Container for the request. # @option options [String] :expected_bucket_owner # The account ID of the expected bucket owner. If the account ID that # you provide does not match the actual owner of the bucket, the request # fails with the HTTP status code `403 Forbidden` (access denied). # @return [EmptyStructure] def put(options = {}) options = options.merge(bucket: @bucket_name) resp = Aws::Plugins::UserAgent.feature('resource') do @client.put_bucket_website(options) end resp.data end # @!group Associations # @return [Bucket] def bucket Bucket.new( name: @bucket_name, client: @client ) end # @deprecated # @api private def identifiers { bucket_name: @bucket_name } end deprecated(:identifiers) private def extract_bucket_name(args, options) value = args[0] || options.delete(:bucket_name) case value when String then value when nil then raise ArgumentError, "missing required option :bucket_name" else msg = "expected :bucket_name to be a String, got #{value.class}" raise ArgumentError, msg end end class Collection < Aws::Resources::Collection; end end end aws-sdk-s3-1.143.0/lib/aws-sdk-s3/express_credentials_provider.rb0000644000004100000410000000201114563445240024554 0ustar www-datawww-data# frozen_string_literal: true module Aws module S3 # Returns Credentials class for S3 Express. Accepts CreateSession # params as options. See {Client#create_session} for details. class ExpressCredentialsProvider # @param [Hash] options # @option options [Client] :client The S3 client used to create the session. # @option options [String] :session_mode (see: {Client#create_session}) # @option options [Callable] :before_refresh Proc called before # credentials are refreshed. def initialize(options = {}) @client = options.delete(:client) @options = options @cache = EXPRESS_CREDENTIALS_CACHE end def express_credentials_for(bucket) @cache[bucket] || new_credentials_for(bucket) end attr_accessor :client private def new_credentials_for(bucket) @cache[bucket] = ExpressCredentials.new( bucket: bucket, client: @client, **@options ) end end end end aws-sdk-s3-1.143.0/LICENSE.txt0000644000004100000410000002613614563445240015446 0ustar www-datawww-data Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. 
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
aws-sdk-s3-1.143.0/VERSION0000644000004100000410000000001014563445240014652 0ustar www-datawww-data1.143.0 aws-sdk-s3-1.143.0/sig/0000755000004100000410000000000014563445240014375 5ustar www-datawww-dataaws-sdk-s3-1.143.0/sig/waiters.rbs0000644000004100000410000000647714563445240016601 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 module Waiters class BucketExists def initialize: (?client: Client, ?max_attempts: Integer, ?delay: Integer, ?before_attempt: Proc, ?before_wait: Proc) -> void | (?Hash[Symbol, untyped]) -> void def wait: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> Client::_HeadBucketResponseSuccess | (Hash[Symbol, untyped]) -> Client::_HeadBucketResponseSuccess end class BucketNotExists def initialize: (?client: Client, ?max_attempts: Integer, ?delay: Integer, ?before_attempt: Proc, ?before_wait: Proc) -> void | (?Hash[Symbol, untyped]) -> void def wait: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> Client::_HeadBucketResponseSuccess | (Hash[Symbol, untyped]) -> Client::_HeadBucketResponseSuccess end class ObjectExists def initialize: (?client: Client, ?max_attempts: Integer, ?delay: Integer, ?before_attempt: Proc, ?before_wait: Proc) -> void | (?Hash[Symbol, untyped]) -> void def wait: ( bucket: ::String, ?if_match: ::String, ?if_modified_since: ::Time, ?if_none_match: ::String, ?if_unmodified_since: ::Time, key: ::String, ?range: ::String, ?version_id: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?part_number: ::Integer, ?expected_bucket_owner: ::String, ?checksum_mode: ("ENABLED") ) -> Client::_HeadObjectResponseSuccess | (Hash[Symbol, untyped]) -> Client::_HeadObjectResponseSuccess end class ObjectNotExists def initialize: (?client: Client, ?max_attempts: Integer, ?delay: Integer, ?before_attempt: Proc, ?before_wait: Proc) -> void | (?Hash[Symbol, untyped]) -> void def wait: ( bucket: ::String, ?if_match: ::String, ?if_modified_since: ::Time, ?if_none_match: ::String, ?if_unmodified_since: ::Time, key: ::String, ?range: ::String, ?version_id: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?part_number: ::Integer, ?expected_bucket_owner: ::String, ?checksum_mode: ("ENABLED") ) -> Client::_HeadObjectResponseSuccess | (Hash[Symbol, untyped]) -> Client::_HeadObjectResponseSuccess end end end end aws-sdk-s3-1.143.0/sig/bucket_website.rbs0000644000004100000410000000760014563445240020107 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketWebsite.html class BucketWebsite # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketWebsite.html#initialize-instance_method def initialize: (String bucket_name, Hash[Symbol, untyped] options) -> void | (bucket_name: String, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketWebsite.html#bucket_name-instance_method def bucket_name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketWebsite.html#redirect_all_requests_to-instance_method def redirect_all_requests_to: () -> Types::RedirectAllRequestsTo # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketWebsite.html#index_document-instance_method def index_document: () -> Types::IndexDocument # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketWebsite.html#error_document-instance_method def error_document: () -> Types::ErrorDocument # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketWebsite.html#routing_rules-instance_method def routing_rules: () -> ::Array[Types::RoutingRule] def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketWebsite.html#load-instance_method def load: () -> self alias reload load # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketWebsite.html#data-instance_method def data: () -> Types::GetBucketWebsiteOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketWebsite.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketWebsite.html#delete-instance_method def delete: ( ?expected_bucket_owner: ::String ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketWebsite.html#put-instance_method def put: ( ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), website_configuration: { error_document: { key: ::String }?, index_document: { suffix: ::String }?, redirect_all_requests_to: { host_name: ::String, protocol: ("http" | "https")? }?, routing_rules: Array[ { condition: { http_error_code_returned_equals: ::String?, key_prefix_equals: ::String? }?, redirect: { host_name: ::String?, http_redirect_code: ::String?, protocol: ("http" | "https")?, replace_key_prefix_with: ::String?, replace_key_with: ::String? } }, ]? }, ?expected_bucket_owner: ::String ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketWebsite.html#bucket-instance_method def bucket: () -> Bucket class Collection < ::Aws::Resources::Collection[BucketWebsite] end end end end aws-sdk-s3-1.143.0/sig/bucket_lifecycle.rbs0000644000004100000410000000744614563445240020414 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycle.html class BucketLifecycle # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycle.html#initialize-instance_method def initialize: (String bucket_name, Hash[Symbol, untyped] options) -> void | (bucket_name: String, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycle.html#bucket_name-instance_method def bucket_name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycle.html#rules-instance_method def rules: () -> ::Array[Types::Rule] def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycle.html#load-instance_method def load: () -> self alias reload load # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycle.html#data-instance_method def data: () -> Types::GetBucketLifecycleOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycle.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycle.html#delete-instance_method def delete: ( ?expected_bucket_owner: ::String ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycle.html#put-instance_method def put: ( ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?lifecycle_configuration: { rules: Array[ { expiration: { date: ::Time?, days: ::Integer?, expired_object_delete_marker: bool? }?, id: ::String?, prefix: ::String, status: ("Enabled" | "Disabled"), transition: { date: ::Time?, days: ::Integer?, storage_class: ("GLACIER" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "DEEP_ARCHIVE" | "GLACIER_IR")? }?, noncurrent_version_transition: { noncurrent_days: ::Integer?, storage_class: ("GLACIER" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "DEEP_ARCHIVE" | "GLACIER_IR")?, newer_noncurrent_versions: ::Integer? }?, noncurrent_version_expiration: { noncurrent_days: ::Integer?, newer_noncurrent_versions: ::Integer? }?, abort_incomplete_multipart_upload: { days_after_initiation: ::Integer? }? }, ] }, ?expected_bucket_owner: ::String ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycle.html#bucket-instance_method def bucket: () -> Bucket class Collection < ::Aws::Resources::Collection[BucketLifecycle] end end end end aws-sdk-s3-1.143.0/sig/object_summary.rbs0000644000004100000410000004610714563445240020140 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html class ObjectSummary # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#initialize-instance_method def initialize: (String bucket_name, String key, Hash[Symbol, untyped] options) -> void | (bucket_name: String, key: String, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#bucket_name-instance_method def bucket_name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#key-instance_method def key: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#last_modified-instance_method def last_modified: () -> ::Time # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#etag-instance_method def etag: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#checksum_algorithm-instance_method def checksum_algorithm: () -> ::Array[("CRC32" | "CRC32C" | "SHA1" | "SHA256")] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#size-instance_method def size: () -> ::Integer # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#storage_class-instance_method def storage_class: () -> ("STANDARD" | "REDUCED_REDUNDANCY" | "GLACIER" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE") # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#owner-instance_method def owner: () -> Types::Owner # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#restore_status-instance_method def restore_status: () -> Types::RestoreStatus def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#data-instance_method def data: () -> Types::Object # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#exists?-instance_method def exists?: (?max_attempts: Integer, ?delay: Numeric, ?before_attempt: (^(Integer attempts) -> void), ?before_wait: (^(Integer attempts, untyped response) -> void)) -> bool | (?Hash[Symbol, untyped]) -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#wait_until_exists-instance_method def wait_until_exists: (?max_attempts: Integer, ?delay: Numeric, ?before_attempt: (^(Integer attempts) -> void), ?before_wait: (^(Integer attempts, untyped response) -> void)) ?{ (untyped waiter) -> void } -> ObjectSummary | (?Hash[Symbol, untyped]) ?{ (untyped waiter) -> void } -> ObjectSummary # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#wait_until_not_exists-instance_method def wait_until_not_exists: (?max_attempts: Integer, ?delay: Numeric, ?before_attempt: (^(Integer attempts) -> void), ?before_wait: (^(Integer attempts, untyped response) -> void)) ?{ (untyped waiter) -> void } -> ObjectSummary | (?Hash[Symbol, untyped]) ?{ (untyped waiter) -> void } -> ObjectSummary # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#copy_from-instance_method def copy_from: ( ?acl: ("private" | "public-read" | 
"public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control"), ?cache_control: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?content_disposition: ::String, ?content_encoding: ::String, ?content_language: ::String, ?content_type: ::String, copy_source: ::String, ?copy_source_if_match: ::String, ?copy_source_if_modified_since: ::Time, ?copy_source_if_none_match: ::String, ?copy_source_if_unmodified_since: ::Time, ?expires: ::Time, ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write_acp: ::String, ?metadata: Hash[::String, ::String], ?metadata_directive: ("COPY" | "REPLACE"), ?tagging_directive: ("COPY" | "REPLACE"), ?server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse"), ?storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE"), ?website_redirect_location: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?ssekms_key_id: ::String, ?ssekms_encryption_context: ::String, ?bucket_key_enabled: bool, ?copy_source_sse_customer_algorithm: ::String, ?copy_source_sse_customer_key: ::String, ?copy_source_sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?tagging: ::String, ?object_lock_mode: ("GOVERNANCE" | "COMPLIANCE"), ?object_lock_retain_until_date: ::Time, ?object_lock_legal_hold_status: ("ON" | "OFF"), ?expected_bucket_owner: ::String, ?expected_source_bucket_owner: ::String ) -> Types::CopyObjectOutput | (?Hash[Symbol, untyped]) -> Types::CopyObjectOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#delete-instance_method def delete: ( ?mfa: ::String, ?version_id: ::String, ?request_payer: ("requester"), ?bypass_governance_retention: bool, ?expected_bucket_owner: ::String ) -> Types::DeleteObjectOutput | (?Hash[Symbol, untyped]) -> Types::DeleteObjectOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#get-instance_method def get: ( ?if_match: ::String, ?if_modified_since: ::Time, ?if_none_match: ::String, ?if_unmodified_since: ::Time, ?range: ::String, ?response_cache_control: ::String, ?response_content_disposition: ::String, ?response_content_encoding: ::String, ?response_content_language: ::String, ?response_content_type: ::String, ?response_expires: ::Time, ?version_id: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?part_number: ::Integer, ?expected_bucket_owner: ::String, ?checksum_mode: ("ENABLED") ) -> Types::GetObjectOutput | (?Hash[Symbol, untyped]) -> Types::GetObjectOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#initiate_multipart_upload-instance_method def initiate_multipart_upload: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control"), ?cache_control: ::String, ?content_disposition: ::String, ?content_encoding: ::String, ?content_language: ::String, ?content_type: ::String, ?expires: ::Time, ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write_acp: ::String, ?metadata: Hash[::String, ::String], ?server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse"), ?storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | 
"ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE"), ?website_redirect_location: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?ssekms_key_id: ::String, ?ssekms_encryption_context: ::String, ?bucket_key_enabled: bool, ?request_payer: ("requester"), ?tagging: ::String, ?object_lock_mode: ("GOVERNANCE" | "COMPLIANCE"), ?object_lock_retain_until_date: ::Time, ?object_lock_legal_hold_status: ("ON" | "OFF"), ?expected_bucket_owner: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") ) -> MultipartUpload | (?Hash[Symbol, untyped]) -> MultipartUpload # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#put-instance_method def put: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control"), ?body: ::String | ::StringIO | ::File, ?cache_control: ::String, ?content_disposition: ::String, ?content_encoding: ::String, ?content_language: ::String, ?content_length: ::Integer, ?content_md5: ::String, ?content_type: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?checksum_crc32: ::String, ?checksum_crc32c: ::String, ?checksum_sha1: ::String, ?checksum_sha256: ::String, ?expires: ::Time, ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write_acp: ::String, ?metadata: Hash[::String, ::String], ?server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse"), ?storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE"), ?website_redirect_location: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?ssekms_key_id: ::String, ?ssekms_encryption_context: ::String, ?bucket_key_enabled: bool, ?request_payer: ("requester"), ?tagging: ::String, ?object_lock_mode: ("GOVERNANCE" | "COMPLIANCE"), ?object_lock_retain_until_date: ::Time, ?object_lock_legal_hold_status: ("ON" | "OFF"), ?expected_bucket_owner: ::String ) -> Types::PutObjectOutput | (?Hash[Symbol, untyped]) -> Types::PutObjectOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#restore_object-instance_method def restore_object: ( ?version_id: ::String, ?restore_request: { days: ::Integer?, glacier_job_parameters: { tier: ("Standard" | "Bulk" | "Expedited") }?, type: ("SELECT")?, tier: ("Standard" | "Bulk" | "Expedited")?, description: ::String?, select_parameters: { input_serialization: { csv: { file_header_info: ("USE" | "IGNORE" | "NONE")?, comments: ::String?, quote_escape_character: ::String?, record_delimiter: ::String?, field_delimiter: ::String?, quote_character: ::String?, allow_quoted_record_delimiter: bool? }?, compression_type: ("NONE" | "GZIP" | "BZIP2")?, json: { type: ("DOCUMENT" | "LINES")? }?, parquet: { }? }, expression_type: ("SQL"), expression: ::String, output_serialization: { csv: { quote_fields: ("ALWAYS" | "ASNEEDED")?, quote_escape_character: ::String?, record_delimiter: ::String?, field_delimiter: ::String?, quote_character: ::String? }?, json: { record_delimiter: ::String? }? } }?, output_location: { s3: { bucket_name: ::String, prefix: ::String, encryption: { encryption_type: ("AES256" | "aws:kms" | "aws:kms:dsse"), kms_key_id: ::String?, kms_context: ::String? 
}?, canned_acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control")?, access_control_list: Array[ { grantee: { display_name: ::String?, email_address: ::String?, id: ::String?, type: ("CanonicalUser" | "AmazonCustomerByEmail" | "Group"), uri: ::String? }?, permission: ("FULL_CONTROL" | "WRITE" | "WRITE_ACP" | "READ" | "READ_ACP")? }, ]?, tagging: { tag_set: Array[ { key: ::String, value: ::String }, ] }?, user_metadata: Array[ { name: ::String?, value: ::String? }, ]?, storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")? }? }? }, ?request_payer: ("requester"), ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?expected_bucket_owner: ::String ) -> Types::RestoreObjectOutput | (?Hash[Symbol, untyped]) -> Types::RestoreObjectOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#acl-instance_method def acl: () -> ObjectAcl # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#bucket-instance_method def bucket: () -> Bucket # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#multipart_upload-instance_method def multipart_upload: (String id) -> MultipartUpload # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#object-instance_method def object: () -> Object # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectSummary.html#version-instance_method def version: (String id) -> ObjectVersion class Collection < ::Aws::Resources::Collection[ObjectSummary] def batch_delete!: ( ?mfa: ::String, ?request_payer: ("requester"), ?bypass_governance_retention: bool, ?expected_bucket_owner: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") ) -> void | (?Hash[Symbol, untyped]) -> void end end end end aws-sdk-s3-1.143.0/sig/bucket_lifecycle_configuration.rbs0000644000004100000410000001156614563445240023341 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
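#
# [Editor's example -- not generated content] A minimal, hypothetical usage
# sketch for the BucketLifecycleConfiguration resource typed below; the
# bucket name and region are placeholders:
#
#   require "aws-sdk-s3"
#
#   client = Aws::S3::Client.new(region: "us-east-1")
#   lifecycle = Aws::S3::BucketLifecycleConfiguration.new("example-bucket", client: client)
#
#   # `rules` loads the configuration on first access and returns
#   # Array[Types::LifecycleRule], matching the signature below.
#   lifecycle.rules.each { |rule| puts "#{rule.id}: #{rule.status}" }
#
#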
# See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE

module Aws
  module S3
    # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycleConfiguration.html
    class BucketLifecycleConfiguration
      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycleConfiguration.html#initialize-instance_method
      def initialize: (String bucket_name, Hash[Symbol, untyped] options) -> void
                    | (bucket_name: String, ?client: Client) -> void
                    | (Hash[Symbol, untyped] args) -> void

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycleConfiguration.html#bucket_name-instance_method
      def bucket_name: () -> String

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycleConfiguration.html#rules-instance_method
      def rules: () -> ::Array[Types::LifecycleRule]

      def client: () -> Client

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycleConfiguration.html#load-instance_method
      def load: () -> self
      alias reload load

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycleConfiguration.html#data-instance_method
      def data: () -> Types::GetBucketLifecycleConfigurationOutput

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycleConfiguration.html#data_loaded?-instance_method
      def data_loaded?: () -> bool

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycleConfiguration.html#delete-instance_method
      def delete: (
                    ?expected_bucket_owner: ::String
                  ) -> ::Aws::EmptyStructure
                | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycleConfiguration.html#put-instance_method
      def put: (
                 ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"),
                 ?lifecycle_configuration: {
                   rules: Array[
                     {
                       expiration: {
                         date: ::Time?,
                         days: ::Integer?,
                         expired_object_delete_marker: bool?
                       }?,
                       id: ::String?,
                       prefix: ::String?,
                       filter: {
                         prefix: ::String?,
                         tag: {
                           key: ::String,
                           value: ::String
                         }?,
                         object_size_greater_than: ::Integer?,
                         object_size_less_than: ::Integer?,
                         and: {
                           prefix: ::String?,
                           tags: Array[
                             {
                               key: ::String,
                               value: ::String
                             },
                           ]?,
                           object_size_greater_than: ::Integer?,
                           object_size_less_than: ::Integer?
                         }?
                       }?,
                       status: ("Enabled" | "Disabled"),
                       transitions: Array[
                         {
                           date: ::Time?,
                           days: ::Integer?,
                           storage_class: ("GLACIER" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "DEEP_ARCHIVE" | "GLACIER_IR")?
                         },
                       ]?,
                       noncurrent_version_transitions: Array[
                         {
                           noncurrent_days: ::Integer?,
                           storage_class: ("GLACIER" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "DEEP_ARCHIVE" | "GLACIER_IR")?,
                           newer_noncurrent_versions: ::Integer?
                         },
                       ]?,
                       noncurrent_version_expiration: {
                         noncurrent_days: ::Integer?,
                         newer_noncurrent_versions: ::Integer?
                       }?,
                       abort_incomplete_multipart_upload: {
                         days_after_initiation: ::Integer?
                       }?
                     },
                   ]
                 },
                 ?expected_bucket_owner: ::String
               ) -> ::Aws::EmptyStructure
             | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycleConfiguration.html#bucket-instance_method
      def bucket: () -> Bucket

      class Collection < ::Aws::Resources::Collection[BucketLifecycleConfiguration]
      end
    end
  end
end
aws-sdk-s3-1.143.0/sig/multipart_upload.rbs0000644000004100000410000001212714563445240020475 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE
#
# This file is generated.
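#
# [Editor's example -- not generated content] A hypothetical sketch of driving
# the MultipartUpload resource typed below; all identifiers are placeholders
# and error handling is omitted:
#
#   client = Aws::S3::Client.new(region: "us-east-1")
#   upload = Aws::S3::MultipartUpload.new("example-bucket", "example-key", "example-upload-id", client: client)
#
#   # `parts` pages through ListParts results and `abort` issues
#   # AbortMultipartUpload, as reflected in the signatures below.
#   upload.parts.each { |part| puts part.part_number }
#   upload.abort
#
#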
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html class MultipartUpload # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#initialize-instance_method def initialize: (String bucket_name, String object_key, String id, Hash[Symbol, untyped] options) -> void | (bucket_name: String, object_key: String, id: String, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#bucket_name-instance_method def bucket_name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#object_key-instance_method def object_key: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#id-instance_method def id: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#upload_id-instance_method def upload_id: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#key-instance_method def key: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#initiated-instance_method def initiated: () -> ::Time # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#storage_class-instance_method def storage_class: () -> ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE") # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#owner-instance_method def owner: () -> Types::Owner # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#initiator-instance_method def initiator: () -> Types::Initiator # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#checksum_algorithm-instance_method def checksum_algorithm: () -> ("CRC32" | "CRC32C" | "SHA1" | "SHA256") def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#data-instance_method def data: () -> Types::MultipartUpload # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#abort-instance_method def abort: ( ?request_payer: ("requester"), ?expected_bucket_owner: ::String ) -> Types::AbortMultipartUploadOutput | (?Hash[Symbol, untyped]) -> Types::AbortMultipartUploadOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#complete-instance_method def complete: ( ?multipart_upload: { parts: Array[ { etag: ::String?, checksum_crc32: ::String?, checksum_crc32c: ::String?, checksum_sha1: ::String?, checksum_sha256: ::String?, part_number: ::Integer? }, ]? 
}, ?checksum_crc32: ::String, ?checksum_crc32c: ::String, ?checksum_sha1: ::String, ?checksum_sha256: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String ) -> Object | (?Hash[Symbol, untyped]) -> Object # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#object-instance_method def object: () -> Object # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#part-instance_method def part: (String part_number) -> MultipartUploadPart # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUpload.html#parts-instance_method def parts: ( ?request_payer: ("requester"), ?expected_bucket_owner: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String ) -> MultipartUploadPart::Collection | (?Hash[Symbol, untyped]) -> MultipartUploadPart::Collection class Collection < ::Aws::Resources::Collection[MultipartUpload] end end end end aws-sdk-s3-1.143.0/sig/client.rbs0000644000004100000410000046200214563445240016367 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 class Client < ::Seahorse::Client::Base include ::Aws::ClientStubs # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#initialize-instance_method def self.new: ( ?credentials: untyped, ?region: String, ?access_key_id: String, ?active_endpoint_cache: bool, ?adaptive_retry_wait_to_fill: bool, ?client_side_monitoring: bool, ?client_side_monitoring_client_id: String, ?client_side_monitoring_host: String, ?client_side_monitoring_port: Integer, ?client_side_monitoring_publisher: untyped, ?compute_checksums: bool, ?convert_params: bool, ?correct_clock_skew: bool, ?defaults_mode: String, ?disable_host_prefix_injection: bool, ?disable_request_compression: bool, ?disable_s3_express_session_auth: bool, ?endpoint: String, ?endpoint_cache_max_entries: Integer, ?endpoint_cache_max_threads: Integer, ?endpoint_cache_poll_interval: Integer, ?endpoint_discovery: bool, ?event_stream_handler: Proc, ?express_credentials_provider: untyped, ?follow_redirects: bool, ?force_path_style: bool, ?ignore_configured_endpoint_urls: bool, ?input_event_stream_handler: Proc, ?log_formatter: untyped, ?log_level: Symbol, ?logger: untyped, ?max_attempts: Integer, ?output_event_stream_handler: Proc, ?profile: String, ?request_min_compression_size_bytes: Integer, ?require_https_for_sse_cpk: bool, ?retry_backoff: Proc, ?retry_base_delay: Float, ?retry_jitter: (:none | :equal | :full | ^(Integer) -> Integer), ?retry_limit: Integer, ?retry_max_delay: Integer, ?retry_mode: ("legacy" | "standard" | "adaptive"), ?s3_disable_multiregion_access_points: bool, ?s3_us_east_1_regional_endpoint: String, ?s3_use_arn_region: bool, ?sdk_ua_app_id: String, ?secret_access_key: String, ?session_token: String, ?stub_responses: untyped, ?token_provider: untyped, ?use_accelerate_endpoint: bool, ?use_dualstack_endpoint: bool, ?use_fips_endpoint: bool, ?validate_params: bool, ?endpoint_provider: untyped, ?http_proxy: String, ?http_open_timeout: (Float | Integer), ?http_read_timeout: (Float | Integer), ?http_idle_timeout: (Float | Integer), ?http_continue_timeout: (Float | Integer), ?ssl_timeout: (Float | Integer | nil), ?http_wire_trace: bool, ?ssl_verify_peer: bool, 
?ssl_ca_bundle: String, ?ssl_ca_directory: String, ?ssl_ca_store: String, ?on_chunk_received: Proc, ?on_chunk_sent: Proc, ?raise_response_errors: bool ) -> instance | (?Hash[Symbol, untyped]) -> instance interface _AbortMultipartUploadResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::AbortMultipartUploadOutput] def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#abort_multipart_upload-instance_method def abort_multipart_upload: ( bucket: ::String, key: ::String, upload_id: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String ) -> _AbortMultipartUploadResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _AbortMultipartUploadResponseSuccess interface _CompleteMultipartUploadResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::CompleteMultipartUploadOutput] def location: () -> ::String def bucket: () -> ::String def key: () -> ::String def expiration: () -> ::String def etag: () -> ::String def checksum_crc32: () -> ::String def checksum_crc32c: () -> ::String def checksum_sha1: () -> ::String def checksum_sha256: () -> ::String def server_side_encryption: () -> ("AES256" | "aws:kms" | "aws:kms:dsse") def version_id: () -> ::String def ssekms_key_id: () -> ::String def bucket_key_enabled: () -> bool def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#complete_multipart_upload-instance_method def complete_multipart_upload: ( bucket: ::String, key: ::String, ?multipart_upload: { parts: Array[ { etag: ::String?, checksum_crc32: ::String?, checksum_crc32c: ::String?, checksum_sha1: ::String?, checksum_sha256: ::String?, part_number: ::Integer? }, ]? }, upload_id: ::String, ?checksum_crc32: ::String, ?checksum_crc32c: ::String, ?checksum_sha1: ::String, ?checksum_sha256: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String ) -> _CompleteMultipartUploadResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CompleteMultipartUploadResponseSuccess interface _CopyObjectResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::CopyObjectOutput] def copy_object_result: () -> Types::CopyObjectResult def expiration: () -> ::String def copy_source_version_id: () -> ::String def version_id: () -> ::String def server_side_encryption: () -> ("AES256" | "aws:kms" | "aws:kms:dsse") def sse_customer_algorithm: () -> ::String def sse_customer_key_md5: () -> ::String def ssekms_key_id: () -> ::String def ssekms_encryption_context: () -> ::String def bucket_key_enabled: () -> bool def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#copy_object-instance_method def copy_object: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control"), bucket: ::String, ?cache_control: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?content_disposition: ::String, ?content_encoding: ::String, ?content_language: ::String, ?content_type: ::String, copy_source: ::String, ?copy_source_if_match: ::String, ?copy_source_if_modified_since: ::Time, ?copy_source_if_none_match: ::String, ?copy_source_if_unmodified_since: ::Time, ?expires: ::Time, ?grant_full_control: ::String, ?grant_read: ::String, 
?grant_read_acp: ::String, ?grant_write_acp: ::String, key: ::String, ?metadata: Hash[::String, ::String], ?metadata_directive: ("COPY" | "REPLACE"), ?tagging_directive: ("COPY" | "REPLACE"), ?server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse"), ?storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE"), ?website_redirect_location: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?ssekms_key_id: ::String, ?ssekms_encryption_context: ::String, ?bucket_key_enabled: bool, ?copy_source_sse_customer_algorithm: ::String, ?copy_source_sse_customer_key: ::String, ?copy_source_sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?tagging: ::String, ?object_lock_mode: ("GOVERNANCE" | "COMPLIANCE"), ?object_lock_retain_until_date: ::Time, ?object_lock_legal_hold_status: ("ON" | "OFF"), ?expected_bucket_owner: ::String, ?expected_source_bucket_owner: ::String ) -> _CopyObjectResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CopyObjectResponseSuccess interface _CreateBucketResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::CreateBucketOutput] def location: () -> ::String end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#create_bucket-instance_method def create_bucket: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read"), bucket: ::String, ?create_bucket_configuration: { location_constraint: ("af-south-1" | "ap-east-1" | "ap-northeast-1" | "ap-northeast-2" | "ap-northeast-3" | "ap-south-1" | "ap-south-2" | "ap-southeast-1" | "ap-southeast-2" | "ap-southeast-3" | "ca-central-1" | "cn-north-1" | "cn-northwest-1" | "EU" | "eu-central-1" | "eu-north-1" | "eu-south-1" | "eu-south-2" | "eu-west-1" | "eu-west-2" | "eu-west-3" | "me-south-1" | "sa-east-1" | "us-east-2" | "us-gov-east-1" | "us-gov-west-1" | "us-west-1" | "us-west-2")?, location: { type: ("AvailabilityZone")?, name: ::String? }?, bucket: { data_redundancy: ("SingleAvailabilityZone")?, type: ("Directory")? }? 
}, ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write: ::String, ?grant_write_acp: ::String, ?object_lock_enabled_for_bucket: bool, ?object_ownership: ("BucketOwnerPreferred" | "ObjectWriter" | "BucketOwnerEnforced") ) -> _CreateBucketResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateBucketResponseSuccess interface _CreateMultipartUploadResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::CreateMultipartUploadOutput] def abort_date: () -> ::Time def abort_rule_id: () -> ::String def bucket: () -> ::String def key: () -> ::String def upload_id: () -> ::String def server_side_encryption: () -> ("AES256" | "aws:kms" | "aws:kms:dsse") def sse_customer_algorithm: () -> ::String def sse_customer_key_md5: () -> ::String def ssekms_key_id: () -> ::String def ssekms_encryption_context: () -> ::String def bucket_key_enabled: () -> bool def request_charged: () -> ("requester") def checksum_algorithm: () -> ("CRC32" | "CRC32C" | "SHA1" | "SHA256") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#create_multipart_upload-instance_method def create_multipart_upload: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control"), bucket: ::String, ?cache_control: ::String, ?content_disposition: ::String, ?content_encoding: ::String, ?content_language: ::String, ?content_type: ::String, ?expires: ::Time, ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write_acp: ::String, key: ::String, ?metadata: Hash[::String, ::String], ?server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse"), ?storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE"), ?website_redirect_location: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?ssekms_key_id: ::String, ?ssekms_encryption_context: ::String, ?bucket_key_enabled: bool, ?request_payer: ("requester"), ?tagging: ::String, ?object_lock_mode: ("GOVERNANCE" | "COMPLIANCE"), ?object_lock_retain_until_date: ::Time, ?object_lock_legal_hold_status: ("ON" | "OFF"), ?expected_bucket_owner: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") ) -> _CreateMultipartUploadResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateMultipartUploadResponseSuccess interface _CreateSessionResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::CreateSessionOutput] def credentials: () -> Types::SessionCredentials end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#create_session-instance_method def create_session: ( ?session_mode: ("ReadOnly" | "ReadWrite"), bucket: ::String ) -> _CreateSessionResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateSessionResponseSuccess # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_bucket-instance_method def delete_bucket: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # 
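#
# [Editor's example -- not generated content] A hypothetical end-to-end sketch
# exercising several of the Client operations typed in this file; bucket and
# key names are placeholders:
#
#   client = Aws::S3::Client.new(region: "us-east-1")
#   client.create_bucket(bucket: "example-bucket")
#   client.put_object(bucket: "example-bucket", key: "hello.txt", body: "hello")
#   resp = client.get_object(bucket: "example-bucket", key: "hello.txt")
#   puts resp.body.read                  # GetObjectOutput#body is an IO
#   client.delete_object(bucket: "example-bucket", key: "hello.txt")
#   client.delete_bucket(bucket: "example-bucket")
#
#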
https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_bucket_analytics_configuration-instance_method def delete_bucket_analytics_configuration: ( bucket: ::String, id: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_bucket_cors-instance_method def delete_bucket_cors: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_bucket_encryption-instance_method def delete_bucket_encryption: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_bucket_intelligent_tiering_configuration-instance_method def delete_bucket_intelligent_tiering_configuration: ( bucket: ::String, id: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_bucket_inventory_configuration-instance_method def delete_bucket_inventory_configuration: ( bucket: ::String, id: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_bucket_lifecycle-instance_method def delete_bucket_lifecycle: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_bucket_metrics_configuration-instance_method def delete_bucket_metrics_configuration: ( bucket: ::String, id: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_bucket_ownership_controls-instance_method def delete_bucket_ownership_controls: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_bucket_policy-instance_method def delete_bucket_policy: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> 
::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_bucket_replication-instance_method def delete_bucket_replication: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_bucket_tagging-instance_method def delete_bucket_tagging: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_bucket_website-instance_method def delete_bucket_website: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] interface _DeleteObjectResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::DeleteObjectOutput] def delete_marker: () -> bool def version_id: () -> ::String def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_object-instance_method def delete_object: ( bucket: ::String, key: ::String, ?mfa: ::String, ?version_id: ::String, ?request_payer: ("requester"), ?bypass_governance_retention: bool, ?expected_bucket_owner: ::String ) -> _DeleteObjectResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DeleteObjectResponseSuccess interface _DeleteObjectTaggingResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::DeleteObjectTaggingOutput] def version_id: () -> ::String end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_object_tagging-instance_method def delete_object_tagging: ( bucket: ::String, key: ::String, ?version_id: ::String, ?expected_bucket_owner: ::String ) -> _DeleteObjectTaggingResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DeleteObjectTaggingResponseSuccess interface _DeleteObjectsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::DeleteObjectsOutput] def deleted: () -> ::Array[Types::DeletedObject] def request_charged: () -> ("requester") def errors: () -> ::Array[Types::Error] end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_objects-instance_method def delete_objects: ( bucket: ::String, delete: { objects: Array[ { key: ::String, version_id: ::String? }, ], quiet: bool? 
}, ?mfa: ::String, ?request_payer: ("requester"), ?bypass_governance_retention: bool, ?expected_bucket_owner: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") ) -> _DeleteObjectsResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DeleteObjectsResponseSuccess # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#delete_public_access_block-instance_method def delete_public_access_block: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] interface _GetBucketAccelerateConfigurationResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketAccelerateConfigurationOutput] def status: () -> ("Enabled" | "Suspended") def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_accelerate_configuration-instance_method def get_bucket_accelerate_configuration: ( bucket: ::String, ?expected_bucket_owner: ::String, ?request_payer: ("requester") ) -> _GetBucketAccelerateConfigurationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketAccelerateConfigurationResponseSuccess interface _GetBucketAclResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketAclOutput] def owner: () -> Types::Owner def grants: () -> ::Array[Types::Grant] end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_acl-instance_method def get_bucket_acl: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketAclResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketAclResponseSuccess interface _GetBucketAnalyticsConfigurationResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketAnalyticsConfigurationOutput] def analytics_configuration: () -> Types::AnalyticsConfiguration end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_analytics_configuration-instance_method def get_bucket_analytics_configuration: ( bucket: ::String, id: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketAnalyticsConfigurationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketAnalyticsConfigurationResponseSuccess interface _GetBucketCorsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketCorsOutput] def cors_rules: () -> ::Array[Types::CORSRule] end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_cors-instance_method def get_bucket_cors: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketCorsResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketCorsResponseSuccess interface _GetBucketEncryptionResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketEncryptionOutput] def server_side_encryption_configuration: () -> Types::ServerSideEncryptionConfiguration end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_encryption-instance_method def get_bucket_encryption: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketEncryptionResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketEncryptionResponseSuccess interface _GetBucketIntelligentTieringConfigurationResponseSuccess 
include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketIntelligentTieringConfigurationOutput] def intelligent_tiering_configuration: () -> Types::IntelligentTieringConfiguration end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_intelligent_tiering_configuration-instance_method def get_bucket_intelligent_tiering_configuration: ( bucket: ::String, id: ::String ) -> _GetBucketIntelligentTieringConfigurationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketIntelligentTieringConfigurationResponseSuccess interface _GetBucketInventoryConfigurationResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketInventoryConfigurationOutput] def inventory_configuration: () -> Types::InventoryConfiguration end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_inventory_configuration-instance_method def get_bucket_inventory_configuration: ( bucket: ::String, id: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketInventoryConfigurationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketInventoryConfigurationResponseSuccess interface _GetBucketLifecycleResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketLifecycleOutput] def rules: () -> ::Array[Types::Rule] end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_lifecycle-instance_method def get_bucket_lifecycle: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketLifecycleResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketLifecycleResponseSuccess interface _GetBucketLifecycleConfigurationResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketLifecycleConfigurationOutput] def rules: () -> ::Array[Types::LifecycleRule] end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_lifecycle_configuration-instance_method def get_bucket_lifecycle_configuration: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketLifecycleConfigurationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketLifecycleConfigurationResponseSuccess interface _GetBucketLocationResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketLocationOutput] def location_constraint: () -> ("af-south-1" | "ap-east-1" | "ap-northeast-1" | "ap-northeast-2" | "ap-northeast-3" | "ap-south-1" | "ap-south-2" | "ap-southeast-1" | "ap-southeast-2" | "ap-southeast-3" | "ca-central-1" | "cn-north-1" | "cn-northwest-1" | "EU" | "eu-central-1" | "eu-north-1" | "eu-south-1" | "eu-south-2" | "eu-west-1" | "eu-west-2" | "eu-west-3" | "me-south-1" | "sa-east-1" | "us-east-2" | "us-gov-east-1" | "us-gov-west-1" | "us-west-1" | "us-west-2") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_location-instance_method def get_bucket_location: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketLocationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketLocationResponseSuccess interface _GetBucketLoggingResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketLoggingOutput] def logging_enabled: () -> Types::LoggingEnabled end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_logging-instance_method def get_bucket_logging: ( bucket: ::String, ?expected_bucket_owner: ::String ) 
-> _GetBucketLoggingResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketLoggingResponseSuccess interface _GetBucketMetricsConfigurationResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketMetricsConfigurationOutput] def metrics_configuration: () -> Types::MetricsConfiguration end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_metrics_configuration-instance_method def get_bucket_metrics_configuration: ( bucket: ::String, id: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketMetricsConfigurationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketMetricsConfigurationResponseSuccess interface _GetBucketNotificationResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::NotificationConfigurationDeprecated] def topic_configuration: () -> Types::TopicConfigurationDeprecated def queue_configuration: () -> Types::QueueConfigurationDeprecated def cloud_function_configuration: () -> Types::CloudFunctionConfiguration end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_notification-instance_method def get_bucket_notification: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketNotificationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketNotificationResponseSuccess interface _GetBucketNotificationConfigurationResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::NotificationConfiguration] def topic_configurations: () -> ::Array[Types::TopicConfiguration] def queue_configurations: () -> ::Array[Types::QueueConfiguration] def lambda_function_configurations: () -> ::Array[Types::LambdaFunctionConfiguration] def event_bridge_configuration: () -> Types::EventBridgeConfiguration end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_notification_configuration-instance_method def get_bucket_notification_configuration: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketNotificationConfigurationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketNotificationConfigurationResponseSuccess interface _GetBucketOwnershipControlsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketOwnershipControlsOutput] def ownership_controls: () -> Types::OwnershipControls end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_ownership_controls-instance_method def get_bucket_ownership_controls: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketOwnershipControlsResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketOwnershipControlsResponseSuccess interface _GetBucketPolicyResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketPolicyOutput] def policy: () -> ::IO end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_policy-instance_method def get_bucket_policy: ( bucket: ::String, ?expected_bucket_owner: ::String ) ?{ (*untyped) -> void } -> _GetBucketPolicyResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) ?{ (*untyped) -> void } -> _GetBucketPolicyResponseSuccess interface _GetBucketPolicyStatusResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketPolicyStatusOutput] def policy_status: () -> Types::PolicyStatus end # 
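#
# [Editor's example -- not generated content] A hypothetical sketch for the
# bucket-policy getters typed above: GetBucketPolicyOutput#policy is typed as
# ::IO, so the JSON policy document is read from the response body rather than
# returned directly:
#
#   resp = client.get_bucket_policy(bucket: "example-bucket")
#   policy_json = resp.policy.read
#   puts policy_json
#
#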
https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_policy_status-instance_method def get_bucket_policy_status: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketPolicyStatusResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketPolicyStatusResponseSuccess interface _GetBucketReplicationResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketReplicationOutput] def replication_configuration: () -> Types::ReplicationConfiguration end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_replication-instance_method def get_bucket_replication: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketReplicationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketReplicationResponseSuccess interface _GetBucketRequestPaymentResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketRequestPaymentOutput] def payer: () -> ("Requester" | "BucketOwner") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_request_payment-instance_method def get_bucket_request_payment: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketRequestPaymentResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketRequestPaymentResponseSuccess interface _GetBucketTaggingResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketTaggingOutput] def tag_set: () -> ::Array[Types::Tag] end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_tagging-instance_method def get_bucket_tagging: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketTaggingResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketTaggingResponseSuccess interface _GetBucketVersioningResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketVersioningOutput] def status: () -> ("Enabled" | "Suspended") def mfa_delete: () -> ("Enabled" | "Disabled") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_versioning-instance_method def get_bucket_versioning: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketVersioningResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketVersioningResponseSuccess interface _GetBucketWebsiteResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetBucketWebsiteOutput] def redirect_all_requests_to: () -> Types::RedirectAllRequestsTo def index_document: () -> Types::IndexDocument def error_document: () -> Types::ErrorDocument def routing_rules: () -> ::Array[Types::RoutingRule] end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_bucket_website-instance_method def get_bucket_website: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetBucketWebsiteResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetBucketWebsiteResponseSuccess interface _GetObjectResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetObjectOutput] def body: () -> ::IO def delete_marker: () -> bool def accept_ranges: () -> ::String def expiration: () -> ::String def restore: () -> ::String def last_modified: () -> ::Time def content_length: () -> ::Integer def etag: () -> ::String def checksum_crc32: () -> ::String def checksum_crc32c: () -> ::String def checksum_sha1: () -> ::String def 
checksum_sha256: () -> ::String def missing_meta: () -> ::Integer def version_id: () -> ::String def cache_control: () -> ::String def content_disposition: () -> ::String def content_encoding: () -> ::String def content_language: () -> ::String def content_range: () -> ::String def content_type: () -> ::String def expires: () -> ::Time def expires_string: () -> ::String def website_redirect_location: () -> ::String def server_side_encryption: () -> ("AES256" | "aws:kms" | "aws:kms:dsse") def metadata: () -> ::Hash[::String, ::String] def sse_customer_algorithm: () -> ::String def sse_customer_key_md5: () -> ::String def ssekms_key_id: () -> ::String def bucket_key_enabled: () -> bool def storage_class: () -> ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE") def request_charged: () -> ("requester") def replication_status: () -> ("COMPLETE" | "PENDING" | "FAILED" | "REPLICA" | "COMPLETED") def parts_count: () -> ::Integer def tag_count: () -> ::Integer def object_lock_mode: () -> ("GOVERNANCE" | "COMPLIANCE") def object_lock_retain_until_date: () -> ::Time def object_lock_legal_hold_status: () -> ("ON" | "OFF") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_object-instance_method def get_object: ( bucket: ::String, ?if_match: ::String, ?if_modified_since: ::Time, ?if_none_match: ::String, ?if_unmodified_since: ::Time, key: ::String, ?range: ::String, ?response_cache_control: ::String, ?response_content_disposition: ::String, ?response_content_encoding: ::String, ?response_content_language: ::String, ?response_content_type: ::String, ?response_expires: ::Time, ?version_id: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?part_number: ::Integer, ?expected_bucket_owner: ::String, ?checksum_mode: ("ENABLED") ) ?{ (*untyped) -> void } -> _GetObjectResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) ?{ (*untyped) -> void } -> _GetObjectResponseSuccess interface _GetObjectAclResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetObjectAclOutput] def owner: () -> Types::Owner def grants: () -> ::Array[Types::Grant] def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_object_acl-instance_method def get_object_acl: ( bucket: ::String, key: ::String, ?version_id: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String ) -> _GetObjectAclResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetObjectAclResponseSuccess interface _GetObjectAttributesResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetObjectAttributesOutput] def delete_marker: () -> bool def last_modified: () -> ::Time def version_id: () -> ::String def request_charged: () -> ("requester") def etag: () -> ::String def checksum: () -> Types::Checksum def object_parts: () -> Types::GetObjectAttributesParts def storage_class: () -> ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE") def object_size: () -> ::Integer end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_object_attributes-instance_method def get_object_attributes: ( bucket: ::String, key: ::String, ?version_id: 
::String, ?max_parts: ::Integer, ?part_number_marker: ::Integer, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String, object_attributes: Array[("ETag" | "Checksum" | "ObjectParts" | "StorageClass" | "ObjectSize")] ) -> _GetObjectAttributesResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetObjectAttributesResponseSuccess interface _GetObjectLegalHoldResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetObjectLegalHoldOutput] def legal_hold: () -> Types::ObjectLockLegalHold end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_object_legal_hold-instance_method def get_object_legal_hold: ( bucket: ::String, key: ::String, ?version_id: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String ) -> _GetObjectLegalHoldResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetObjectLegalHoldResponseSuccess interface _GetObjectLockConfigurationResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetObjectLockConfigurationOutput] def object_lock_configuration: () -> Types::ObjectLockConfiguration end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_object_lock_configuration-instance_method def get_object_lock_configuration: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetObjectLockConfigurationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetObjectLockConfigurationResponseSuccess interface _GetObjectRetentionResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetObjectRetentionOutput] def retention: () -> Types::ObjectLockRetention end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_object_retention-instance_method def get_object_retention: ( bucket: ::String, key: ::String, ?version_id: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String ) -> _GetObjectRetentionResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetObjectRetentionResponseSuccess interface _GetObjectTaggingResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetObjectTaggingOutput] def version_id: () -> ::String def tag_set: () -> ::Array[Types::Tag] end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_object_tagging-instance_method def get_object_tagging: ( bucket: ::String, key: ::String, ?version_id: ::String, ?expected_bucket_owner: ::String, ?request_payer: ("requester") ) -> _GetObjectTaggingResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetObjectTaggingResponseSuccess interface _GetObjectTorrentResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetObjectTorrentOutput] def body: () -> ::IO def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_object_torrent-instance_method def get_object_torrent: ( bucket: ::String, key: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String ) ?{ (*untyped) -> void } -> _GetObjectTorrentResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) ?{ (*untyped) -> void } -> _GetObjectTorrentResponseSuccess interface _GetPublicAccessBlockResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetPublicAccessBlockOutput] def public_access_block_configuration: () -> 
Types::PublicAccessBlockConfiguration end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#get_public_access_block-instance_method def get_public_access_block: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _GetPublicAccessBlockResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetPublicAccessBlockResponseSuccess interface _HeadBucketResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::HeadBucketOutput] def bucket_location_type: () -> ("AvailabilityZone") def bucket_location_name: () -> ::String def bucket_region: () -> ::String def access_point_alias: () -> bool end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#head_bucket-instance_method def head_bucket: ( bucket: ::String, ?expected_bucket_owner: ::String ) -> _HeadBucketResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _HeadBucketResponseSuccess interface _HeadObjectResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::HeadObjectOutput] def delete_marker: () -> bool def accept_ranges: () -> ::String def expiration: () -> ::String def restore: () -> ::String def archive_status: () -> ("ARCHIVE_ACCESS" | "DEEP_ARCHIVE_ACCESS") def last_modified: () -> ::Time def content_length: () -> ::Integer def checksum_crc32: () -> ::String def checksum_crc32c: () -> ::String def checksum_sha1: () -> ::String def checksum_sha256: () -> ::String def etag: () -> ::String def missing_meta: () -> ::Integer def version_id: () -> ::String def cache_control: () -> ::String def content_disposition: () -> ::String def content_encoding: () -> ::String def content_language: () -> ::String def content_type: () -> ::String def expires: () -> ::Time def expires_string: () -> ::String def website_redirect_location: () -> ::String def server_side_encryption: () -> ("AES256" | "aws:kms" | "aws:kms:dsse") def metadata: () -> ::Hash[::String, ::String] def sse_customer_algorithm: () -> ::String def sse_customer_key_md5: () -> ::String def ssekms_key_id: () -> ::String def bucket_key_enabled: () -> bool def storage_class: () -> ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE") def request_charged: () -> ("requester") def replication_status: () -> ("COMPLETE" | "PENDING" | "FAILED" | "REPLICA" | "COMPLETED") def parts_count: () -> ::Integer def object_lock_mode: () -> ("GOVERNANCE" | "COMPLIANCE") def object_lock_retain_until_date: () -> ::Time def object_lock_legal_hold_status: () -> ("ON" | "OFF") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#head_object-instance_method def head_object: ( bucket: ::String, ?if_match: ::String, ?if_modified_since: ::Time, ?if_none_match: ::String, ?if_unmodified_since: ::Time, key: ::String, ?range: ::String, ?version_id: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?part_number: ::Integer, ?expected_bucket_owner: ::String, ?checksum_mode: ("ENABLED") ) -> _HeadObjectResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _HeadObjectResponseSuccess interface _ListBucketAnalyticsConfigurationsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListBucketAnalyticsConfigurationsOutput] def is_truncated: () -> bool def continuation_token: () -> ::String def next_continuation_token: () -> ::String def 
analytics_configuration_list: () -> ::Array[Types::AnalyticsConfiguration] end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#list_bucket_analytics_configurations-instance_method def list_bucket_analytics_configurations: ( bucket: ::String, ?continuation_token: ::String, ?expected_bucket_owner: ::String ) -> _ListBucketAnalyticsConfigurationsResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListBucketAnalyticsConfigurationsResponseSuccess interface _ListBucketIntelligentTieringConfigurationsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListBucketIntelligentTieringConfigurationsOutput] def is_truncated: () -> bool def continuation_token: () -> ::String def next_continuation_token: () -> ::String def intelligent_tiering_configuration_list: () -> ::Array[Types::IntelligentTieringConfiguration] end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#list_bucket_intelligent_tiering_configurations-instance_method def list_bucket_intelligent_tiering_configurations: ( bucket: ::String, ?continuation_token: ::String ) -> _ListBucketIntelligentTieringConfigurationsResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListBucketIntelligentTieringConfigurationsResponseSuccess interface _ListBucketInventoryConfigurationsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListBucketInventoryConfigurationsOutput] def continuation_token: () -> ::String def inventory_configuration_list: () -> ::Array[Types::InventoryConfiguration] def is_truncated: () -> bool def next_continuation_token: () -> ::String end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#list_bucket_inventory_configurations-instance_method def list_bucket_inventory_configurations: ( bucket: ::String, ?continuation_token: ::String, ?expected_bucket_owner: ::String ) -> _ListBucketInventoryConfigurationsResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListBucketInventoryConfigurationsResponseSuccess interface _ListBucketMetricsConfigurationsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListBucketMetricsConfigurationsOutput] def is_truncated: () -> bool def continuation_token: () -> ::String def next_continuation_token: () -> ::String def metrics_configuration_list: () -> ::Array[Types::MetricsConfiguration] end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#list_bucket_metrics_configurations-instance_method def list_bucket_metrics_configurations: ( bucket: ::String, ?continuation_token: ::String, ?expected_bucket_owner: ::String ) -> _ListBucketMetricsConfigurationsResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListBucketMetricsConfigurationsResponseSuccess interface _ListBucketsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListBucketsOutput] def buckets: () -> ::Array[Types::Bucket] def owner: () -> Types::Owner end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#list_buckets-instance_method def list_buckets: () -> _ListBucketsResponseSuccess | (?Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListBucketsResponseSuccess interface _ListDirectoryBucketsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListDirectoryBucketsOutput] def buckets: () -> ::Array[Types::Bucket] def continuation_token: () -> ::String end # 
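#
# [Editor's example -- not generated content] A hypothetical sketch for the
# list_* operations typed in this region; the bucket name is a placeholder:
#
#   client.list_buckets.buckets.each { |b| puts b.name }
#
#   # list_objects_v2 responses are pageable; the SDK's response enumerator
#   # yields one page per ListObjectsV2 call.
#   client.list_objects_v2(bucket: "example-bucket").each do |page|
#     page.contents.each { |obj| puts obj.key }
#   end
#
#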
https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#list_directory_buckets-instance_method def list_directory_buckets: ( ?continuation_token: ::String, ?max_directory_buckets: ::Integer ) -> _ListDirectoryBucketsResponseSuccess | (?Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListDirectoryBucketsResponseSuccess interface _ListMultipartUploadsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListMultipartUploadsOutput] def bucket: () -> ::String def key_marker: () -> ::String def upload_id_marker: () -> ::String def next_key_marker: () -> ::String def prefix: () -> ::String def delimiter: () -> ::String def next_upload_id_marker: () -> ::String def max_uploads: () -> ::Integer def is_truncated: () -> bool def uploads: () -> ::Array[Types::MultipartUpload] def common_prefixes: () -> ::Array[Types::CommonPrefix] def encoding_type: () -> ("url") def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#list_multipart_uploads-instance_method def list_multipart_uploads: ( bucket: ::String, ?delimiter: ::String, ?encoding_type: ("url"), ?key_marker: ::String, ?max_uploads: ::Integer, ?prefix: ::String, ?upload_id_marker: ::String, ?expected_bucket_owner: ::String, ?request_payer: ("requester") ) -> _ListMultipartUploadsResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListMultipartUploadsResponseSuccess interface _ListObjectVersionsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListObjectVersionsOutput] def is_truncated: () -> bool def key_marker: () -> ::String def version_id_marker: () -> ::String def next_key_marker: () -> ::String def next_version_id_marker: () -> ::String def versions: () -> ::Array[Types::ObjectVersion] def delete_markers: () -> ::Array[Types::DeleteMarkerEntry] def name: () -> ::String def prefix: () -> ::String def delimiter: () -> ::String def max_keys: () -> ::Integer def common_prefixes: () -> ::Array[Types::CommonPrefix] def encoding_type: () -> ("url") def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#list_object_versions-instance_method def list_object_versions: ( bucket: ::String, ?delimiter: ::String, ?encoding_type: ("url"), ?key_marker: ::String, ?max_keys: ::Integer, ?prefix: ::String, ?version_id_marker: ::String, ?expected_bucket_owner: ::String, ?request_payer: ("requester"), ?optional_object_attributes: Array[("RestoreStatus")] ) -> _ListObjectVersionsResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListObjectVersionsResponseSuccess interface _ListObjectsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListObjectsOutput] def is_truncated: () -> bool def marker: () -> ::String def next_marker: () -> ::String def contents: () -> ::Array[Types::Object] def name: () -> ::String def prefix: () -> ::String def delimiter: () -> ::String def max_keys: () -> ::Integer def common_prefixes: () -> ::Array[Types::CommonPrefix] def encoding_type: () -> ("url") def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#list_objects-instance_method def list_objects: ( bucket: ::String, ?delimiter: ::String, ?encoding_type: ("url"), ?marker: ::String, ?max_keys: ::Integer, ?prefix: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String, ?optional_object_attributes: Array[("RestoreStatus")] ) -> _ListObjectsResponseSuccess 
| (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListObjectsResponseSuccess interface _ListObjectsV2ResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListObjectsV2Output] def is_truncated: () -> bool def contents: () -> ::Array[Types::Object] def name: () -> ::String def prefix: () -> ::String def delimiter: () -> ::String def max_keys: () -> ::Integer def common_prefixes: () -> ::Array[Types::CommonPrefix] def encoding_type: () -> ("url") def key_count: () -> ::Integer def continuation_token: () -> ::String def next_continuation_token: () -> ::String def start_after: () -> ::String def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#list_objects_v2-instance_method def list_objects_v2: ( bucket: ::String, ?delimiter: ::String, ?encoding_type: ("url"), ?max_keys: ::Integer, ?prefix: ::String, ?continuation_token: ::String, ?fetch_owner: bool, ?start_after: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String, ?optional_object_attributes: Array[("RestoreStatus")] ) -> _ListObjectsV2ResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListObjectsV2ResponseSuccess interface _ListPartsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListPartsOutput] def abort_date: () -> ::Time def abort_rule_id: () -> ::String def bucket: () -> ::String def key: () -> ::String def upload_id: () -> ::String def part_number_marker: () -> ::Integer def next_part_number_marker: () -> ::Integer def max_parts: () -> ::Integer def is_truncated: () -> bool def parts: () -> ::Array[Types::Part] def initiator: () -> Types::Initiator def owner: () -> Types::Owner def storage_class: () -> ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE") def request_charged: () -> ("requester") def checksum_algorithm: () -> ("CRC32" | "CRC32C" | "SHA1" | "SHA256") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#list_parts-instance_method def list_parts: ( bucket: ::String, key: ::String, ?max_parts: ::Integer, ?part_number_marker: ::Integer, upload_id: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String ) -> _ListPartsResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListPartsResponseSuccess # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_accelerate_configuration-instance_method def put_bucket_accelerate_configuration: ( bucket: ::String, accelerate_configuration: { status: ("Enabled" | "Suspended")? }, ?expected_bucket_owner: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_acl-instance_method def put_bucket_acl: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read"), ?access_control_policy: { grants: Array[ { grantee: { display_name: ::String?, email_address: ::String?, id: ::String?, type: ("CanonicalUser" | "AmazonCustomerByEmail" | "Group"), uri: ::String? 
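# A minimal Ruby sketch of list_objects_v2 pagination, following the is_truncated /
# next_continuation_token readers declared above. Bucket and prefix are hypothetical;
# the SDK can also auto-paginate for you via resp.each_page.
#
#   resp = client.list_objects_v2(bucket: "example-bucket", prefix: "logs/")
#   loop do
#     resp.contents.each { |o| puts "#{o.key} (#{o.size} bytes)" }
#     break unless resp.is_truncated
#     resp = client.list_objects_v2(bucket: "example-bucket", prefix: "logs/",
#                                   continuation_token: resp.next_continuation_token)
#   end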
}?, permission: ("FULL_CONTROL" | "WRITE" | "WRITE_ACP" | "READ" | "READ_ACP")? }, ]?, owner: { display_name: ::String?, id: ::String? }? }, bucket: ::String, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write: ::String, ?grant_write_acp: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_analytics_configuration-instance_method def put_bucket_analytics_configuration: ( bucket: ::String, id: ::String, analytics_configuration: { id: ::String, filter: { prefix: ::String?, tag: { key: ::String, value: ::String }?, and: { prefix: ::String?, tags: Array[ { key: ::String, value: ::String }, ]? }? }?, storage_class_analysis: { data_export: { output_schema_version: ("V_1"), destination: { s3_bucket_destination: { format: ("CSV"), bucket_account_id: ::String?, bucket: ::String, prefix: ::String? } } }? } }, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_cors-instance_method def put_bucket_cors: ( bucket: ::String, cors_configuration: { cors_rules: Array[ { id: ::String?, allowed_headers: Array[::String]?, allowed_methods: Array[::String], allowed_origins: Array[::String], expose_headers: Array[::String]?, max_age_seconds: ::Integer? }, ] }, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_encryption-instance_method def put_bucket_encryption: ( bucket: ::String, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), server_side_encryption_configuration: { rules: Array[ { apply_server_side_encryption_by_default: { sse_algorithm: ("AES256" | "aws:kms" | "aws:kms:dsse"), kms_master_key_id: ::String? }?, bucket_key_enabled: bool? }, ] }, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_intelligent_tiering_configuration-instance_method def put_bucket_intelligent_tiering_configuration: ( bucket: ::String, id: ::String, intelligent_tiering_configuration: { id: ::String, filter: { prefix: ::String?, tag: { key: ::String, value: ::String }?, and: { prefix: ::String?, tags: Array[ { key: ::String, value: ::String }, ]? }? 
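# A minimal Ruby sketch of put_bucket_encryption using the rule shape above: default
# SSE-KMS with a bucket key. The bucket name is hypothetical; omitting
# kms_master_key_id falls back to the AWS-managed key for the account.
#
#   client.put_bucket_encryption(
#     bucket: "example-bucket",
#     server_side_encryption_configuration: {
#       rules: [{
#         apply_server_side_encryption_by_default: { sse_algorithm: "aws:kms" },
#         bucket_key_enabled: true
#       }]
#     }
#   )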
}?, status: ("Enabled" | "Disabled"), tierings: Array[ { days: ::Integer, access_tier: ("ARCHIVE_ACCESS" | "DEEP_ARCHIVE_ACCESS") }, ] } ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_inventory_configuration-instance_method def put_bucket_inventory_configuration: ( bucket: ::String, id: ::String, inventory_configuration: { destination: { s3_bucket_destination: { account_id: ::String?, bucket: ::String, format: ("CSV" | "ORC" | "Parquet"), prefix: ::String?, encryption: { sses3: { }?, ssekms: { key_id: ::String }? }? } }, is_enabled: bool, filter: { prefix: ::String }?, id: ::String, included_object_versions: ("All" | "Current"), optional_fields: Array[("Size" | "LastModifiedDate" | "StorageClass" | "ETag" | "IsMultipartUploaded" | "ReplicationStatus" | "EncryptionStatus" | "ObjectLockRetainUntilDate" | "ObjectLockMode" | "ObjectLockLegalHoldStatus" | "IntelligentTieringAccessTier" | "BucketKeyStatus" | "ChecksumAlgorithm" | "ObjectAccessControlList" | "ObjectOwner")]?, schedule: { frequency: ("Daily" | "Weekly") } }, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_lifecycle-instance_method def put_bucket_lifecycle: ( bucket: ::String, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?lifecycle_configuration: { rules: Array[ { expiration: { date: ::Time?, days: ::Integer?, expired_object_delete_marker: bool? }?, id: ::String?, prefix: ::String, status: ("Enabled" | "Disabled"), transition: { date: ::Time?, days: ::Integer?, storage_class: ("GLACIER" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "DEEP_ARCHIVE" | "GLACIER_IR")? }?, noncurrent_version_transition: { noncurrent_days: ::Integer?, storage_class: ("GLACIER" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "DEEP_ARCHIVE" | "GLACIER_IR")?, newer_noncurrent_versions: ::Integer? }?, noncurrent_version_expiration: { noncurrent_days: ::Integer?, newer_noncurrent_versions: ::Integer? }?, abort_incomplete_multipart_upload: { days_after_initiation: ::Integer? }? }, ] }, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_lifecycle_configuration-instance_method def put_bucket_lifecycle_configuration: ( bucket: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?lifecycle_configuration: { rules: Array[ { expiration: { date: ::Time?, days: ::Integer?, expired_object_delete_marker: bool? }?, id: ::String?, prefix: ::String?, filter: { prefix: ::String?, tag: { key: ::String, value: ::String }?, object_size_greater_than: ::Integer?, object_size_less_than: ::Integer?, and: { prefix: ::String?, tags: Array[ { key: ::String, value: ::String }, ]?, object_size_greater_than: ::Integer?, object_size_less_than: ::Integer? }? 
}?, status: ("Enabled" | "Disabled"), transitions: Array[ { date: ::Time?, days: ::Integer?, storage_class: ("GLACIER" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "DEEP_ARCHIVE" | "GLACIER_IR")? }, ]?, noncurrent_version_transitions: Array[ { noncurrent_days: ::Integer?, storage_class: ("GLACIER" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "DEEP_ARCHIVE" | "GLACIER_IR")?, newer_noncurrent_versions: ::Integer? }, ]?, noncurrent_version_expiration: { noncurrent_days: ::Integer?, newer_noncurrent_versions: ::Integer? }?, abort_incomplete_multipart_upload: { days_after_initiation: ::Integer? }? }, ] }, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_logging-instance_method def put_bucket_logging: ( bucket: ::String, bucket_logging_status: { logging_enabled: { target_bucket: ::String, target_grants: Array[ { grantee: { display_name: ::String?, email_address: ::String?, id: ::String?, type: ("CanonicalUser" | "AmazonCustomerByEmail" | "Group"), uri: ::String? }?, permission: ("FULL_CONTROL" | "READ" | "WRITE")? }, ]?, target_prefix: ::String, target_object_key_format: { simple_prefix: { }?, partitioned_prefix: { partition_date_source: ("EventTime" | "DeliveryTime")? }? }? }? }, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_metrics_configuration-instance_method def put_bucket_metrics_configuration: ( bucket: ::String, id: ::String, metrics_configuration: { id: ::String, filter: { prefix: ::String?, tag: { key: ::String, value: ::String }?, access_point_arn: ::String?, and: { prefix: ::String?, tags: Array[ { key: ::String, value: ::String }, ]?, access_point_arn: ::String? }? }? 
}, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_notification-instance_method def put_bucket_notification: ( bucket: ::String, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), notification_configuration: { topic_configuration: { id: ::String?, events: Array[("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")]?, event: ("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")?, topic: ::String? 
}?, queue_configuration: { id: ::String?, event: ("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")?, events: Array[("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")]?, queue: ::String? }?, cloud_function_configuration: { id: ::String?, event: ("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")?, events: Array[("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")]?, cloud_function: 
::String?, invocation_role: ::String? }? }, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_notification_configuration-instance_method def put_bucket_notification_configuration: ( bucket: ::String, notification_configuration: { topic_configurations: Array[ { id: ::String?, topic_arn: ::String, events: Array[("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")], filter: { key: { filter_rules: Array[ { name: ("prefix" | "suffix")?, value: ::String? }, ]? }? }? }, ]?, queue_configurations: Array[ { id: ::String?, queue_arn: ::String, events: Array[("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")], filter: { key: { filter_rules: Array[ { name: ("prefix" | "suffix")?, value: ::String? }, ]? }? }? 
}, ]?, lambda_function_configurations: Array[ { id: ::String?, lambda_function_arn: ::String, events: Array[("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")], filter: { key: { filter_rules: Array[ { name: ("prefix" | "suffix")?, value: ::String? }, ]? }? }? }, ]?, event_bridge_configuration: { }? }, ?expected_bucket_owner: ::String, ?skip_destination_validation: bool ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_ownership_controls-instance_method def put_bucket_ownership_controls: ( bucket: ::String, ?content_md5: ::String, ?expected_bucket_owner: ::String, ownership_controls: { rules: Array[ { object_ownership: ("BucketOwnerPreferred" | "ObjectWriter" | "BucketOwnerEnforced") }, ] } ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_policy-instance_method def put_bucket_policy: ( bucket: ::String, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?confirm_remove_self_bucket_access: bool, policy: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_replication-instance_method def put_bucket_replication: ( bucket: ::String, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), replication_configuration: { role: ::String, rules: Array[ { id: ::String?, priority: ::Integer?, prefix: ::String?, filter: { prefix: ::String?, tag: { key: ::String, value: ::String }?, and: { prefix: ::String?, tags: Array[ { key: ::String, value: ::String }, ]? }? }?, status: ("Enabled" | "Disabled"), source_selection_criteria: { sse_kms_encrypted_objects: { status: ("Enabled" | "Disabled") }?, replica_modifications: { status: ("Enabled" | "Disabled") }? 
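# A minimal Ruby sketch of put_bucket_notification_configuration matching the queue
# shape above. The bucket and SQS queue ARN are hypothetical; the queue's access
# policy must separately allow S3 to send messages.
#
#   client.put_bucket_notification_configuration(
#     bucket: "example-bucket",
#     notification_configuration: {
#       queue_configurations: [{
#         queue_arn: "arn:aws:sqs:us-east-1:123456789012:example-queue",
#         events: ["s3:ObjectCreated:*"],
#         filter: { key: { filter_rules: [{ name: "prefix", value: "uploads/" }] } }
#       }]
#     }
#   )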
}?, existing_object_replication: { status: ("Enabled" | "Disabled") }?, destination: { bucket: ::String, account: ::String?, storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")?, access_control_translation: { owner: ("Destination") }?, encryption_configuration: { replica_kms_key_id: ::String? }?, replication_time: { status: ("Enabled" | "Disabled"), time: { minutes: ::Integer? } }?, metrics: { status: ("Enabled" | "Disabled"), event_threshold: { minutes: ::Integer? }? }? }, delete_marker_replication: { status: ("Enabled" | "Disabled")? }? }, ] }, ?token: ::String, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_request_payment-instance_method def put_bucket_request_payment: ( bucket: ::String, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), request_payment_configuration: { payer: ("Requester" | "BucketOwner") }, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_tagging-instance_method def put_bucket_tagging: ( bucket: ::String, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), tagging: { tag_set: Array[ { key: ::String, value: ::String }, ] }, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_versioning-instance_method def put_bucket_versioning: ( bucket: ::String, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?mfa: ::String, versioning_configuration: { mfa_delete: ("Enabled" | "Disabled")?, status: ("Enabled" | "Suspended")? }, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_bucket_website-instance_method def put_bucket_website: ( bucket: ::String, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), website_configuration: { error_document: { key: ::String }?, index_document: { suffix: ::String }?, redirect_all_requests_to: { host_name: ::String, protocol: ("http" | "https")? }?, routing_rules: Array[ { condition: { http_error_code_returned_equals: ::String?, key_prefix_equals: ::String? }?, redirect: { host_name: ::String?, http_redirect_code: ::String?, protocol: ("http" | "https")?, replace_key_prefix_with: ::String?, replace_key_with: ::String? } }, ]? 
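# A minimal Ruby sketch of put_bucket_versioning per the signature above; the bucket
# name is hypothetical. Versioning can later be suspended but never fully removed.
#
#   client.put_bucket_versioning(
#     bucket: "example-bucket",
#     versioning_configuration: { status: "Enabled" }
#   )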
}, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] interface _PutObjectResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::PutObjectOutput] def expiration: () -> ::String def etag: () -> ::String def checksum_crc32: () -> ::String def checksum_crc32c: () -> ::String def checksum_sha1: () -> ::String def checksum_sha256: () -> ::String def server_side_encryption: () -> ("AES256" | "aws:kms" | "aws:kms:dsse") def version_id: () -> ::String def sse_customer_algorithm: () -> ::String def sse_customer_key_md5: () -> ::String def ssekms_key_id: () -> ::String def ssekms_encryption_context: () -> ::String def bucket_key_enabled: () -> bool def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_object-instance_method def put_object: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control"), ?body: ::String | ::StringIO | ::File, bucket: ::String, ?cache_control: ::String, ?content_disposition: ::String, ?content_encoding: ::String, ?content_language: ::String, ?content_length: ::Integer, ?content_md5: ::String, ?content_type: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?checksum_crc32: ::String, ?checksum_crc32c: ::String, ?checksum_sha1: ::String, ?checksum_sha256: ::String, ?expires: ::Time, ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write_acp: ::String, key: ::String, ?metadata: Hash[::String, ::String], ?server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse"), ?storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE"), ?website_redirect_location: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?ssekms_key_id: ::String, ?ssekms_encryption_context: ::String, ?bucket_key_enabled: bool, ?request_payer: ("requester"), ?tagging: ::String, ?object_lock_mode: ("GOVERNANCE" | "COMPLIANCE"), ?object_lock_retain_until_date: ::Time, ?object_lock_legal_hold_status: ("ON" | "OFF"), ?expected_bucket_owner: ::String ) -> _PutObjectResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _PutObjectResponseSuccess interface _PutObjectAclResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::PutObjectAclOutput] def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_object_acl-instance_method def put_object_acl: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control"), ?access_control_policy: { grants: Array[ { grantee: { display_name: ::String?, email_address: ::String?, id: ::String?, type: ("CanonicalUser" | "AmazonCustomerByEmail" | "Group"), uri: ::String? }?, permission: ("FULL_CONTROL" | "WRITE" | "WRITE_ACP" | "READ" | "READ_ACP")? }, ]?, owner: { display_name: ::String?, id: ::String? }? 
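# A minimal Ruby sketch of put_object matching _PutObjectResponseSuccess above.
# Bucket, key, and body are hypothetical; body may also be a File or StringIO.
#
#   resp = client.put_object(
#     bucket: "example-bucket",
#     key: "greetings.txt",
#     body: "hello world",
#     content_type: "text/plain",
#     server_side_encryption: "AES256"
#   )
#   puts resp.etag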
}, bucket: ::String, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write: ::String, ?grant_write_acp: ::String, key: ::String, ?request_payer: ("requester"), ?version_id: ::String, ?expected_bucket_owner: ::String ) -> _PutObjectAclResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _PutObjectAclResponseSuccess interface _PutObjectLegalHoldResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::PutObjectLegalHoldOutput] def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_object_legal_hold-instance_method def put_object_legal_hold: ( bucket: ::String, key: ::String, ?legal_hold: { status: ("ON" | "OFF")? }, ?request_payer: ("requester"), ?version_id: ::String, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?expected_bucket_owner: ::String ) -> _PutObjectLegalHoldResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _PutObjectLegalHoldResponseSuccess interface _PutObjectLockConfigurationResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::PutObjectLockConfigurationOutput] def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_object_lock_configuration-instance_method def put_object_lock_configuration: ( bucket: ::String, ?object_lock_configuration: { object_lock_enabled: ("Enabled")?, rule: { default_retention: { mode: ("GOVERNANCE" | "COMPLIANCE")?, days: ::Integer?, years: ::Integer? }? }? }, ?request_payer: ("requester"), ?token: ::String, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?expected_bucket_owner: ::String ) -> _PutObjectLockConfigurationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _PutObjectLockConfigurationResponseSuccess interface _PutObjectRetentionResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::PutObjectRetentionOutput] def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_object_retention-instance_method def put_object_retention: ( bucket: ::String, key: ::String, ?retention: { mode: ("GOVERNANCE" | "COMPLIANCE")?, retain_until_date: ::Time? 
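# A minimal Ruby sketch of put_object_lock_configuration per the shape above. The
# bucket is hypothetical and must have been created with Object Lock enabled.
#
#   client.put_object_lock_configuration(
#     bucket: "example-bucket",
#     object_lock_configuration: {
#       object_lock_enabled: "Enabled",
#       rule: { default_retention: { mode: "GOVERNANCE", days: 30 } }
#     }
#   )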
}, ?request_payer: ("requester"), ?version_id: ::String, ?bypass_governance_retention: bool, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?expected_bucket_owner: ::String ) -> _PutObjectRetentionResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _PutObjectRetentionResponseSuccess interface _PutObjectTaggingResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::PutObjectTaggingOutput] def version_id: () -> ::String end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_object_tagging-instance_method def put_object_tagging: ( bucket: ::String, key: ::String, ?version_id: ::String, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), tagging: { tag_set: Array[ { key: ::String, value: ::String }, ] }, ?expected_bucket_owner: ::String, ?request_payer: ("requester") ) -> _PutObjectTaggingResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _PutObjectTaggingResponseSuccess # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#put_public_access_block-instance_method def put_public_access_block: ( bucket: ::String, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), public_access_block_configuration: { block_public_acls: bool?, ignore_public_acls: bool?, block_public_policy: bool?, restrict_public_buckets: bool? }, ?expected_bucket_owner: ::String ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] interface _RestoreObjectResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::RestoreObjectOutput] def request_charged: () -> ("requester") def restore_output_path: () -> ::String end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#restore_object-instance_method def restore_object: ( bucket: ::String, key: ::String, ?version_id: ::String, ?restore_request: { days: ::Integer?, glacier_job_parameters: { tier: ("Standard" | "Bulk" | "Expedited") }?, type: ("SELECT")?, tier: ("Standard" | "Bulk" | "Expedited")?, description: ::String?, select_parameters: { input_serialization: { csv: { file_header_info: ("USE" | "IGNORE" | "NONE")?, comments: ::String?, quote_escape_character: ::String?, record_delimiter: ::String?, field_delimiter: ::String?, quote_character: ::String?, allow_quoted_record_delimiter: bool? }?, compression_type: ("NONE" | "GZIP" | "BZIP2")?, json: { type: ("DOCUMENT" | "LINES")? }?, parquet: { }? }, expression_type: ("SQL"), expression: ::String, output_serialization: { csv: { quote_fields: ("ALWAYS" | "ASNEEDED")?, quote_escape_character: ::String?, record_delimiter: ::String?, field_delimiter: ::String?, quote_character: ::String? }?, json: { record_delimiter: ::String? }? } }?, output_location: { s3: { bucket_name: ::String, prefix: ::String, encryption: { encryption_type: ("AES256" | "aws:kms" | "aws:kms:dsse"), kms_key_id: ::String?, kms_context: ::String? }?, canned_acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control")?, access_control_list: Array[ { grantee: { display_name: ::String?, email_address: ::String?, id: ::String?, type: ("CanonicalUser" | "AmazonCustomerByEmail" | "Group"), uri: ::String? }?, permission: ("FULL_CONTROL" | "WRITE" | "WRITE_ACP" | "READ" | "READ_ACP")? 
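# A minimal Ruby sketch of put_public_access_block per the signature above; the
# bucket name is hypothetical. The call replaces any existing configuration.
#
#   client.put_public_access_block(
#     bucket: "example-bucket",
#     public_access_block_configuration: {
#       block_public_acls: true,
#       ignore_public_acls: true,
#       block_public_policy: true,
#       restrict_public_buckets: true
#     }
#   )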
}, ]?, tagging: { tag_set: Array[ { key: ::String, value: ::String }, ] }?, user_metadata: Array[ { name: ::String?, value: ::String? }, ]?, storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")? }? }? }, ?request_payer: ("requester"), ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?expected_bucket_owner: ::String ) -> _RestoreObjectResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _RestoreObjectResponseSuccess interface _SelectObjectContentResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::SelectObjectContentOutput] def payload: () -> Types::SelectObjectContentEventStream end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#select_object_content-instance_method def select_object_content: ( bucket: ::String, key: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, expression: ::String, expression_type: ("SQL"), ?request_progress: { enabled: bool? }, input_serialization: { csv: { file_header_info: ("USE" | "IGNORE" | "NONE")?, comments: ::String?, quote_escape_character: ::String?, record_delimiter: ::String?, field_delimiter: ::String?, quote_character: ::String?, allow_quoted_record_delimiter: bool? }?, compression_type: ("NONE" | "GZIP" | "BZIP2")?, json: { type: ("DOCUMENT" | "LINES")? }?, parquet: { }? }, output_serialization: { csv: { quote_fields: ("ALWAYS" | "ASNEEDED")?, quote_escape_character: ::String?, record_delimiter: ::String?, field_delimiter: ::String?, quote_character: ::String? }?, json: { record_delimiter: ::String? }? }, ?scan_range: { start: ::Integer?, end: ::Integer? 
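# A minimal Ruby sketch of restore_object requesting a temporary restore of an
# archived object, per the restore_request shape above. Bucket and key are
# hypothetical.
#
#   client.restore_object(
#     bucket: "example-bucket",
#     key: "archive/2020.tar",
#     restore_request: { days: 2, glacier_job_parameters: { tier: "Bulk" } }
#   )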
}, ?expected_bucket_owner: ::String ) ?{ (*untyped) -> void } -> _SelectObjectContentResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) ?{ (*untyped) -> void } -> _SelectObjectContentResponseSuccess interface _UploadPartResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::UploadPartOutput] def server_side_encryption: () -> ("AES256" | "aws:kms" | "aws:kms:dsse") def etag: () -> ::String def checksum_crc32: () -> ::String def checksum_crc32c: () -> ::String def checksum_sha1: () -> ::String def checksum_sha256: () -> ::String def sse_customer_algorithm: () -> ::String def sse_customer_key_md5: () -> ::String def ssekms_key_id: () -> ::String def bucket_key_enabled: () -> bool def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#upload_part-instance_method def upload_part: ( ?body: ::String | ::StringIO | ::File, bucket: ::String, ?content_length: ::Integer, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?checksum_crc32: ::String, ?checksum_crc32c: ::String, ?checksum_sha1: ::String, ?checksum_sha256: ::String, key: ::String, part_number: ::Integer, upload_id: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String ) -> _UploadPartResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _UploadPartResponseSuccess interface _UploadPartCopyResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::UploadPartCopyOutput] def copy_source_version_id: () -> ::String def copy_part_result: () -> Types::CopyPartResult def server_side_encryption: () -> ("AES256" | "aws:kms" | "aws:kms:dsse") def sse_customer_algorithm: () -> ::String def sse_customer_key_md5: () -> ::String def ssekms_key_id: () -> ::String def bucket_key_enabled: () -> bool def request_charged: () -> ("requester") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#upload_part_copy-instance_method def upload_part_copy: ( bucket: ::String, copy_source: ::String, ?copy_source_if_match: ::String, ?copy_source_if_modified_since: ::Time, ?copy_source_if_none_match: ::String, ?copy_source_if_unmodified_since: ::Time, ?copy_source_range: ::String, key: ::String, part_number: ::Integer, upload_id: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?copy_source_sse_customer_algorithm: ::String, ?copy_source_sse_customer_key: ::String, ?copy_source_sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String, ?expected_source_bucket_owner: ::String ) -> _UploadPartCopyResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _UploadPartCopyResponseSuccess # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#write_get_object_response-instance_method def write_get_object_response: ( request_route: ::String, request_token: ::String, ?body: ::String | ::StringIO | ::File, ?status_code: ::Integer, ?error_code: ::String, ?error_message: ::String, ?accept_ranges: ::String, ?cache_control: ::String, ?content_disposition: ::String, ?content_encoding: ::String, ?content_language: ::String, ?content_length: ::Integer, ?content_range: ::String, ?content_type: ::String, ?checksum_crc32: ::String, ?checksum_crc32c: ::String, ?checksum_sha1: ::String, ?checksum_sha256: ::String, ?delete_marker: bool, ?etag: ::String, 
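# A hedged Ruby sketch of select_object_content using the streaming block form typed
# above. Bucket, key, and query are hypothetical; the event stream yields records,
# stats, progress, cont, and end events.
#
#   client.select_object_content(
#     bucket: "example-bucket",
#     key: "data.csv",
#     expression: "SELECT s.name FROM S3Object s",
#     expression_type: "SQL",
#     input_serialization: { csv: { file_header_info: "USE" } },
#     output_serialization: { csv: {} }
#   ) do |stream|
#     stream.on_records_event { |event| print event.payload.read }
#   end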
?expires: ::Time, ?expiration: ::String, ?last_modified: ::Time, ?missing_meta: ::Integer, ?metadata: Hash[::String, ::String], ?object_lock_mode: ("GOVERNANCE" | "COMPLIANCE"), ?object_lock_legal_hold_status: ("ON" | "OFF"), ?object_lock_retain_until_date: ::Time, ?parts_count: ::Integer, ?replication_status: ("COMPLETE" | "PENDING" | "FAILED" | "REPLICA" | "COMPLETED"), ?request_charged: ("requester"), ?restore: ::String, ?server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse"), ?sse_customer_algorithm: ::String, ?ssekms_key_id: ::String, ?sse_customer_key_md5: ::String, ?storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE"), ?tag_count: ::Integer, ?version_id: ::String, ?bucket_key_enabled: bool ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#wait_until-instance_method def wait_until: (:bucket_exists waiter_name, bucket: ::String, ?expected_bucket_owner: ::String ) -> Client::_HeadBucketResponseSuccess | (:bucket_exists waiter_name, Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> Client::_HeadBucketResponseSuccess | (:bucket_not_exists waiter_name, bucket: ::String, ?expected_bucket_owner: ::String ) -> Client::_HeadBucketResponseSuccess | (:bucket_not_exists waiter_name, Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> Client::_HeadBucketResponseSuccess | (:object_exists waiter_name, bucket: ::String, ?if_match: ::String, ?if_modified_since: ::Time, ?if_none_match: ::String, ?if_unmodified_since: ::Time, key: ::String, ?range: ::String, ?version_id: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?part_number: ::Integer, ?expected_bucket_owner: ::String, ?checksum_mode: ("ENABLED") ) -> Client::_HeadObjectResponseSuccess | (:object_exists waiter_name, Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> Client::_HeadObjectResponseSuccess | (:object_not_exists waiter_name, bucket: ::String, ?if_match: ::String, ?if_modified_since: ::Time, ?if_none_match: ::String, ?if_unmodified_since: ::Time, key: ::String, ?range: ::String, ?version_id: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?part_number: ::Integer, ?expected_bucket_owner: ::String, ?checksum_mode: ("ENABLED") ) -> Client::_HeadObjectResponseSuccess | (:object_not_exists waiter_name, Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> Client::_HeadObjectResponseSuccess end end end aws-sdk-s3-1.143.0/sig/bucket_request_payment.rbs0000644000004100000410000000437014563445240021673 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
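# A minimal Ruby sketch of the wait_until polling interface declared above. Bucket
# and key are hypothetical; on timeout the waiter raises
# Aws::Waiters::Errors::WaiterFailed rather than returning.
#
#   client.wait_until(:object_exists, bucket: "example-bucket", key: "greetings.txt")
#   client.wait_until(:bucket_exists, bucket: "example-bucket")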
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketRequestPayment.html class BucketRequestPayment # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketRequestPayment.html#initialize-instance_method def initialize: (String bucket_name, Hash[Symbol, untyped] options) -> void | (bucket_name: String, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketRequestPayment.html#bucket_name-instance_method def bucket_name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketRequestPayment.html#payer-instance_method def payer: () -> ("Requester" | "BucketOwner") def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketRequestPayment.html#load-instance_method def load: () -> self alias reload load # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketRequestPayment.html#data-instance_method def data: () -> Types::GetBucketRequestPaymentOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketRequestPayment.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketRequestPayment.html#put-instance_method def put: ( ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), request_payment_configuration: { payer: ("Requester" | "BucketOwner") }, ?expected_bucket_owner: ::String ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketRequestPayment.html#bucket-instance_method def bucket: () -> Bucket class Collection < ::Aws::Resources::Collection[BucketRequestPayment] end end end end aws-sdk-s3-1.143.0/sig/object.rbs0000644000004100000410000005767714563445240016401 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
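# A minimal Ruby sketch of the BucketRequestPayment resource typed above. The bucket
# name is hypothetical; put replaces the payer setting, and payer reads loaded data.
#
#   payment = Aws::S3::BucketRequestPayment.new("example-bucket")
#   payment.put(request_payment_configuration: { payer: "Requester" })
#   puts payment.payer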
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html class Object # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#initialize-instance_method def initialize: (String bucket_name, String key, Hash[Symbol, untyped] options) -> void | (bucket_name: String, key: String, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#bucket_name-instance_method def bucket_name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#key-instance_method def key: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#delete_marker-instance_method def delete_marker: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#accept_ranges-instance_method def accept_ranges: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#expiration-instance_method def expiration: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#restore-instance_method def restore: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#archive_status-instance_method def archive_status: () -> ("ARCHIVE_ACCESS" | "DEEP_ARCHIVE_ACCESS") # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#last_modified-instance_method def last_modified: () -> ::Time # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#content_length-instance_method def content_length: () -> ::Integer # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#checksum_crc32-instance_method def checksum_crc32: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#checksum_crc32c-instance_method def checksum_crc32c: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#checksum_sha1-instance_method def checksum_sha1: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#checksum_sha256-instance_method def checksum_sha256: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#etag-instance_method def etag: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#missing_meta-instance_method def missing_meta: () -> ::Integer # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#version_id-instance_method def version_id: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#cache_control-instance_method def cache_control: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#content_disposition-instance_method def content_disposition: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#content_encoding-instance_method def content_encoding: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#content_language-instance_method def content_language: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#content_type-instance_method def content_type: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#expires-instance_method def expires: () -> ::Time # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#expires_string-instance_method def 
expires_string: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#website_redirect_location-instance_method def website_redirect_location: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#server_side_encryption-instance_method def server_side_encryption: () -> ("AES256" | "aws:kms" | "aws:kms:dsse") # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#metadata-instance_method def metadata: () -> ::Hash[::String, ::String] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#sse_customer_algorithm-instance_method def sse_customer_algorithm: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#sse_customer_key_md5-instance_method def sse_customer_key_md5: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#ssekms_key_id-instance_method def ssekms_key_id: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#bucket_key_enabled-instance_method def bucket_key_enabled: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#storage_class-instance_method def storage_class: () -> ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE") # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#request_charged-instance_method def request_charged: () -> ("requester") # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#replication_status-instance_method def replication_status: () -> ("COMPLETE" | "PENDING" | "FAILED" | "REPLICA" | "COMPLETED") # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#parts_count-instance_method def parts_count: () -> ::Integer # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#object_lock_mode-instance_method def object_lock_mode: () -> ("GOVERNANCE" | "COMPLIANCE") # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#object_lock_retain_until_date-instance_method def object_lock_retain_until_date: () -> ::Time # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#object_lock_legal_hold_status-instance_method def object_lock_legal_hold_status: () -> ("ON" | "OFF") def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#load-instance_method def load: () -> self alias reload load # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#data-instance_method def data: () -> Types::HeadObjectOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#exists?-instance_method def exists?: (?max_attempts: Integer, ?delay: Numeric, ?before_attempt: (^(Integer attempts) -> void), ?before_wait: (^(Integer attempts, untyped response) -> void)) -> bool | (?Hash[Symbol, untyped]) -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#wait_until_exists-instance_method def wait_until_exists: (?max_attempts: Integer, ?delay: Numeric, ?before_attempt: (^(Integer attempts) -> void), ?before_wait: (^(Integer attempts, untyped response) -> void)) ?{ (untyped waiter) -> void } -> Object | (?Hash[Symbol, untyped]) ?{ (untyped waiter) -> void } -> Object # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#wait_until_not_exists-instance_method def 
wait_until_not_exists: (?max_attempts: Integer, ?delay: Numeric, ?before_attempt: (^(Integer attempts) -> void), ?before_wait: (^(Integer attempts, untyped response) -> void)) ?{ (untyped waiter) -> void } -> Object | (?Hash[Symbol, untyped]) ?{ (untyped waiter) -> void } -> Object # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#copy_from-instance_method def copy_from: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control"), ?cache_control: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?content_disposition: ::String, ?content_encoding: ::String, ?content_language: ::String, ?content_type: ::String, copy_source: ::String, ?copy_source_if_match: ::String, ?copy_source_if_modified_since: ::Time, ?copy_source_if_none_match: ::String, ?copy_source_if_unmodified_since: ::Time, ?expires: ::Time, ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write_acp: ::String, ?metadata: Hash[::String, ::String], ?metadata_directive: ("COPY" | "REPLACE"), ?tagging_directive: ("COPY" | "REPLACE"), ?server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse"), ?storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE"), ?website_redirect_location: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?ssekms_key_id: ::String, ?ssekms_encryption_context: ::String, ?bucket_key_enabled: bool, ?copy_source_sse_customer_algorithm: ::String, ?copy_source_sse_customer_key: ::String, ?copy_source_sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?tagging: ::String, ?object_lock_mode: ("GOVERNANCE" | "COMPLIANCE"), ?object_lock_retain_until_date: ::Time, ?object_lock_legal_hold_status: ("ON" | "OFF"), ?expected_bucket_owner: ::String, ?expected_source_bucket_owner: ::String ) -> Types::CopyObjectOutput | (?Hash[Symbol, untyped]) -> Types::CopyObjectOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#delete-instance_method def delete: ( ?mfa: ::String, ?version_id: ::String, ?request_payer: ("requester"), ?bypass_governance_retention: bool, ?expected_bucket_owner: ::String ) -> Types::DeleteObjectOutput | (?Hash[Symbol, untyped]) -> Types::DeleteObjectOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#get-instance_method def get: ( ?if_match: ::String, ?if_modified_since: ::Time, ?if_none_match: ::String, ?if_unmodified_since: ::Time, ?range: ::String, ?response_cache_control: ::String, ?response_content_disposition: ::String, ?response_content_encoding: ::String, ?response_content_language: ::String, ?response_content_type: ::String, ?response_expires: ::Time, ?version_id: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?part_number: ::Integer, ?expected_bucket_owner: ::String, ?checksum_mode: ("ENABLED") ) -> Types::GetObjectOutput | (?Hash[Symbol, untyped]) -> Types::GetObjectOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#initiate_multipart_upload-instance_method def initiate_multipart_upload: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control"), ?cache_control: ::String, 
?content_disposition: ::String, ?content_encoding: ::String, ?content_language: ::String, ?content_type: ::String, ?expires: ::Time, ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write_acp: ::String, ?metadata: Hash[::String, ::String], ?server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse"), ?storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE"), ?website_redirect_location: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?ssekms_key_id: ::String, ?ssekms_encryption_context: ::String, ?bucket_key_enabled: bool, ?request_payer: ("requester"), ?tagging: ::String, ?object_lock_mode: ("GOVERNANCE" | "COMPLIANCE"), ?object_lock_retain_until_date: ::Time, ?object_lock_legal_hold_status: ("ON" | "OFF"), ?expected_bucket_owner: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") ) -> MultipartUpload | (?Hash[Symbol, untyped]) -> MultipartUpload # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#put-instance_method def put: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control"), ?body: ::String | ::StringIO | ::File, ?cache_control: ::String, ?content_disposition: ::String, ?content_encoding: ::String, ?content_language: ::String, ?content_length: ::Integer, ?content_md5: ::String, ?content_type: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?checksum_crc32: ::String, ?checksum_crc32c: ::String, ?checksum_sha1: ::String, ?checksum_sha256: ::String, ?expires: ::Time, ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write_acp: ::String, ?metadata: Hash[::String, ::String], ?server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse"), ?storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE"), ?website_redirect_location: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?ssekms_key_id: ::String, ?ssekms_encryption_context: ::String, ?bucket_key_enabled: bool, ?request_payer: ("requester"), ?tagging: ::String, ?object_lock_mode: ("GOVERNANCE" | "COMPLIANCE"), ?object_lock_retain_until_date: ::Time, ?object_lock_legal_hold_status: ("ON" | "OFF"), ?expected_bucket_owner: ::String ) -> Types::PutObjectOutput | (?Hash[Symbol, untyped]) -> Types::PutObjectOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#restore_object-instance_method def restore_object: ( ?version_id: ::String, ?restore_request: { days: ::Integer?, glacier_job_parameters: { tier: ("Standard" | "Bulk" | "Expedited") }?, type: ("SELECT")?, tier: ("Standard" | "Bulk" | "Expedited")?, description: ::String?, select_parameters: { input_serialization: { csv: { file_header_info: ("USE" | "IGNORE" | "NONE")?, comments: ::String?, quote_escape_character: ::String?, record_delimiter: ::String?, field_delimiter: ::String?, quote_character: ::String?, allow_quoted_record_delimiter: bool? }?, compression_type: ("NONE" | "GZIP" | "BZIP2")?, json: { type: ("DOCUMENT" | "LINES")? }?, parquet: { }? 
}, expression_type: ("SQL"), expression: ::String, output_serialization: { csv: { quote_fields: ("ALWAYS" | "ASNEEDED")?, quote_escape_character: ::String?, record_delimiter: ::String?, field_delimiter: ::String?, quote_character: ::String? }?, json: { record_delimiter: ::String? }? } }?, output_location: { s3: { bucket_name: ::String, prefix: ::String, encryption: { encryption_type: ("AES256" | "aws:kms" | "aws:kms:dsse"), kms_key_id: ::String?, kms_context: ::String? }?, canned_acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control")?, access_control_list: Array[ { grantee: { display_name: ::String?, email_address: ::String?, id: ::String?, type: ("CanonicalUser" | "AmazonCustomerByEmail" | "Group"), uri: ::String? }?, permission: ("FULL_CONTROL" | "WRITE" | "WRITE_ACP" | "READ" | "READ_ACP")? }, ]?, tagging: { tag_set: Array[ { key: ::String, value: ::String }, ] }?, user_metadata: Array[ { name: ::String?, value: ::String? }, ]?, storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")? }? }? }, ?request_payer: ("requester"), ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?expected_bucket_owner: ::String ) -> Types::RestoreObjectOutput | (?Hash[Symbol, untyped]) -> Types::RestoreObjectOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#head-instance_method def head: ( ?if_match: ::String, ?if_modified_since: ::Time, ?if_none_match: ::String, ?if_unmodified_since: ::Time, ?range: ::String, ?version_id: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?part_number: ::Integer, ?expected_bucket_owner: ::String, ?checksum_mode: ("ENABLED") ) -> Types::HeadObjectOutput | (?Hash[Symbol, untyped]) -> Types::HeadObjectOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#acl-instance_method def acl: () -> ObjectAcl # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#bucket-instance_method def bucket: () -> Bucket # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#multipart_upload-instance_method def multipart_upload: (String id) -> MultipartUpload # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Object.html#version-instance_method def version: (String id) -> ObjectVersion class Collection < ::Aws::Resources::Collection[Object] def batch_delete!: ( ?mfa: ::String, ?request_payer: ("requester"), ?bypass_governance_retention: bool, ?expected_bucket_owner: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") ) -> void | (?Hash[Symbol, untyped]) -> void end end end end aws-sdk-s3-1.143.0/sig/bucket_versioning.rbs0000644000004100000410000000653414563445240020635 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketVersioning.html class BucketVersioning # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketVersioning.html#initialize-instance_method def initialize: (String bucket_name, Hash[Symbol, untyped] options) -> void | (bucket_name: String, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketVersioning.html#bucket_name-instance_method def bucket_name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketVersioning.html#status-instance_method def status: () -> ("Enabled" | "Suspended") # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketVersioning.html#mfa_delete-instance_method def mfa_delete: () -> ("Enabled" | "Disabled") def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketVersioning.html#load-instance_method def load: () -> self alias reload load # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketVersioning.html#data-instance_method def data: () -> Types::GetBucketVersioningOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketVersioning.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketVersioning.html#enable-instance_method def enable: ( ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?mfa: ::String, ?expected_bucket_owner: ::String ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketVersioning.html#put-instance_method def put: ( ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?mfa: ::String, versioning_configuration: { mfa_delete: ("Enabled" | "Disabled")?, status: ("Enabled" | "Suspended")? }, ?expected_bucket_owner: ::String ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketVersioning.html#suspend-instance_method def suspend: ( ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?mfa: ::String, ?expected_bucket_owner: ::String ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketVersioning.html#bucket-instance_method def bucket: () -> Bucket class Collection < ::Aws::Resources::Collection[BucketVersioning] end end end end aws-sdk-s3-1.143.0/sig/object_version.rbs0000644000004100000410000001353614563445240020130 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
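# Illustrative usage (hand-written, not generated): a sketch of the
# Aws::S3::BucketVersioning signatures above; the bucket name is a
# hypothetical placeholder.
#
#   require 'aws-sdk-s3'
#
#   versioning = Aws::S3::BucketVersioning.new('example-bucket')
#   versioning.enable                 # PutBucketVersioning with status Enabled
#   puts versioning.reload.status     # => "Enabled"
#   versioning.suspend                # flips the status to Suspended
#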
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html class ObjectVersion # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#initialize-instance_method def initialize: (String bucket_name, String object_key, String id, Hash[Symbol, untyped] options) -> void | (bucket_name: String, object_key: String, id: String, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#bucket_name-instance_method def bucket_name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#object_key-instance_method def object_key: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#id-instance_method def id: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#etag-instance_method def etag: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#checksum_algorithm-instance_method def checksum_algorithm: () -> ::Array[("CRC32" | "CRC32C" | "SHA1" | "SHA256")] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#size-instance_method def size: () -> ::Integer # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#storage_class-instance_method def storage_class: () -> ("STANDARD") # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#key-instance_method def key: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#version_id-instance_method def version_id: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#is_latest-instance_method def is_latest: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#last_modified-instance_method def last_modified: () -> ::Time # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#owner-instance_method def owner: () -> Types::Owner # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#restore_status-instance_method def restore_status: () -> Types::RestoreStatus def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#data-instance_method def data: () -> Types::ObjectVersion # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#delete-instance_method def delete: ( ?mfa: ::String, ?request_payer: ("requester"), ?bypass_governance_retention: bool, ?expected_bucket_owner: ::String ) -> Types::DeleteObjectOutput | (?Hash[Symbol, untyped]) -> Types::DeleteObjectOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#get-instance_method def get: ( ?if_match: ::String, ?if_modified_since: ::Time, ?if_none_match: ::String, ?if_unmodified_since: ::Time, ?range: ::String, ?response_cache_control: ::String, ?response_content_disposition: ::String, ?response_content_encoding: ::String, ?response_content_language: ::String, ?response_content_type: ::String, ?response_expires: ::Time, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?part_number: 
::Integer, ?expected_bucket_owner: ::String, ?checksum_mode: ("ENABLED") ) -> Types::GetObjectOutput | (?Hash[Symbol, untyped]) -> Types::GetObjectOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#head-instance_method def head: ( ?if_match: ::String, ?if_modified_since: ::Time, ?if_none_match: ::String, ?if_unmodified_since: ::Time, ?range: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?part_number: ::Integer, ?expected_bucket_owner: ::String, ?checksum_mode: ("ENABLED") ) -> Types::HeadObjectOutput | (?Hash[Symbol, untyped]) -> Types::HeadObjectOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectVersion.html#object-instance_method def object: () -> Object class Collection < ::Aws::Resources::Collection[ObjectVersion] def batch_delete!: ( ?mfa: ::String, ?request_payer: ("requester"), ?bypass_governance_retention: bool, ?expected_bucket_owner: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") ) -> void | (?Hash[Symbol, untyped]) -> void end end end end aws-sdk-s3-1.143.0/sig/resource.rbs0000644000004100000410000001465414563445240016746 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Resource.html class Resource # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Resource.html#initialize-instance_method def initialize: ( ?client: Client, ?credentials: untyped, ?region: String, ?access_key_id: String, ?active_endpoint_cache: bool, ?adaptive_retry_wait_to_fill: bool, ?client_side_monitoring: bool, ?client_side_monitoring_client_id: String, ?client_side_monitoring_host: String, ?client_side_monitoring_port: Integer, ?client_side_monitoring_publisher: untyped, ?compute_checksums: bool, ?convert_params: bool, ?correct_clock_skew: bool, ?defaults_mode: String, ?disable_host_prefix_injection: bool, ?disable_request_compression: bool, ?disable_s3_express_session_auth: bool, ?endpoint: String, ?endpoint_cache_max_entries: Integer, ?endpoint_cache_max_threads: Integer, ?endpoint_cache_poll_interval: Integer, ?endpoint_discovery: bool, ?event_stream_handler: Proc, ?express_credentials_provider: untyped, ?follow_redirects: bool, ?force_path_style: bool, ?ignore_configured_endpoint_urls: bool, ?input_event_stream_handler: Proc, ?log_formatter: untyped, ?log_level: Symbol, ?logger: untyped, ?max_attempts: Integer, ?output_event_stream_handler: Proc, ?profile: String, ?request_min_compression_size_bytes: Integer, ?require_https_for_sse_cpk: bool, ?retry_backoff: Proc, ?retry_base_delay: Float, ?retry_jitter: (:none | :equal | :full | ^(Integer) -> Integer), ?retry_limit: Integer, ?retry_max_delay: Integer, ?retry_mode: ("legacy" | "standard" | "adaptive"), ?s3_disable_multiregion_access_points: bool, ?s3_us_east_1_regional_endpoint: String, ?s3_use_arn_region: bool, ?sdk_ua_app_id: String, ?secret_access_key: String, ?session_token: String, ?stub_responses: untyped, ?token_provider: untyped, ?use_accelerate_endpoint: bool, ?use_dualstack_endpoint: bool, ?use_fips_endpoint: bool, ?validate_params: bool, ?endpoint_provider: untyped, ?http_proxy: String, ?http_open_timeout: (Float | Integer), ?http_read_timeout: (Float | Integer), ?http_idle_timeout: (Float | Integer), 
?http_continue_timeout: (Float | Integer), ?ssl_timeout: (Float | Integer | nil), ?http_wire_trace: bool, ?ssl_verify_peer: bool, ?ssl_ca_bundle: String, ?ssl_ca_directory: String, ?ssl_ca_store: String, ?on_chunk_received: Proc, ?on_chunk_sent: Proc, ?raise_response_errors: bool ) -> void | (?Hash[Symbol, untyped]) -> void def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Resource.html#create_bucket-instance_method def create_bucket: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read"), bucket: ::String, ?create_bucket_configuration: { location_constraint: ("af-south-1" | "ap-east-1" | "ap-northeast-1" | "ap-northeast-2" | "ap-northeast-3" | "ap-south-1" | "ap-south-2" | "ap-southeast-1" | "ap-southeast-2" | "ap-southeast-3" | "ca-central-1" | "cn-north-1" | "cn-northwest-1" | "EU" | "eu-central-1" | "eu-north-1" | "eu-south-1" | "eu-south-2" | "eu-west-1" | "eu-west-2" | "eu-west-3" | "me-south-1" | "sa-east-1" | "us-east-2" | "us-gov-east-1" | "us-gov-west-1" | "us-west-1" | "us-west-2")?, location: { type: ("AvailabilityZone")?, name: ::String? }?, bucket: { data_redundancy: ("SingleAvailabilityZone")?, type: ("Directory")? }? }, ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write: ::String, ?grant_write_acp: ::String, ?object_lock_enabled_for_bucket: bool, ?object_ownership: ("BucketOwnerPreferred" | "ObjectWriter" | "BucketOwnerEnforced") ) -> Bucket | (?Hash[Symbol, untyped]) -> Bucket # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Resource.html#bucket-instance_method def bucket: (String name) -> Bucket # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Resource.html#buckets-instance_method def buckets: () -> Bucket::Collection | (?Hash[Symbol, untyped]) -> Bucket::Collection end end end aws-sdk-s3-1.143.0/sig/bucket_notification.rbs0000644000004100000410000001612614563445240021136 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
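# Illustrative usage (hand-written, not generated): a sketch of the
# Aws::S3::Resource entry point typed above. The region and bucket names are
# hypothetical placeholders.
#
#   require 'aws-sdk-s3'
#
#   s3 = Aws::S3::Resource.new(region: 'us-east-1')
#   bucket = s3.create_bucket(bucket: 'example-new-bucket')
#   s3.buckets.each { |b| puts b.name }
#   puts s3.bucket('example-new-bucket').exists?  # => true
#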
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketNotification.html class BucketNotification # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketNotification.html#initialize-instance_method def initialize: (String bucket_name, Hash[Symbol, untyped] options) -> void | (bucket_name: String, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketNotification.html#bucket_name-instance_method def bucket_name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketNotification.html#topic_configurations-instance_method def topic_configurations: () -> ::Array[Types::TopicConfiguration] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketNotification.html#queue_configurations-instance_method def queue_configurations: () -> ::Array[Types::QueueConfiguration] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketNotification.html#lambda_function_configurations-instance_method def lambda_function_configurations: () -> ::Array[Types::LambdaFunctionConfiguration] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketNotification.html#event_bridge_configuration-instance_method def event_bridge_configuration: () -> Types::EventBridgeConfiguration def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketNotification.html#load-instance_method def load: () -> self alias reload load # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketNotification.html#data-instance_method def data: () -> Types::NotificationConfiguration # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketNotification.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketNotification.html#put-instance_method def put: ( notification_configuration: { topic_configurations: Array[ { id: ::String?, topic_arn: ::String, events: Array[("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")], filter: { key: { filter_rules: Array[ { name: ("prefix" | "suffix")?, value: ::String? }, ]? }? }? 
}, ]?, queue_configurations: Array[ { id: ::String?, queue_arn: ::String, events: Array[("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")], filter: { key: { filter_rules: Array[ { name: ("prefix" | "suffix")?, value: ::String? }, ]? }? }? }, ]?, lambda_function_configurations: Array[ { id: ::String?, lambda_function_arn: ::String, events: Array[("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")], filter: { key: { filter_rules: Array[ { name: ("prefix" | "suffix")?, value: ::String? }, ]? }? }? }, ]?, event_bridge_configuration: { }? }, ?expected_bucket_owner: ::String, ?skip_destination_validation: bool ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketNotification.html#bucket-instance_method def bucket: () -> Bucket class Collection < ::Aws::Resources::Collection[BucketNotification] end end end end aws-sdk-s3-1.143.0/sig/bucket.rbs0000644000004100000410000002665614563445240016401 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
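# Illustrative usage (hand-written, not generated): a sketch of the
# Aws::S3::BucketNotification#put shape typed above. The bucket name and SQS
# queue ARN are hypothetical placeholders.
#
#   require 'aws-sdk-s3'
#
#   notification = Aws::S3::BucketNotification.new('example-bucket')
#   notification.put(
#     notification_configuration: {
#       queue_configurations: [
#         { queue_arn: 'arn:aws:sqs:us-east-1:111122223333:example-queue',
#           events: ['s3:ObjectCreated:*'] }
#       ]
#     }
#   )
#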
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html class Bucket # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#initialize-instance_method def initialize: (String name, Hash[Symbol, untyped] options) -> void | (name: String, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#name-instance_method def name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#creation_date-instance_method def creation_date: () -> ::Time def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#data-instance_method def data: () -> Types::Bucket # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#exists?-instance_method def exists?: (?max_attempts: Integer, ?delay: Numeric, ?before_attempt: (^(Integer attempts) -> void), ?before_wait: (^(Integer attempts, untyped response) -> void)) -> bool | (?Hash[Symbol, untyped]) -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#wait_until_exists-instance_method def wait_until_exists: (?max_attempts: Integer, ?delay: Numeric, ?before_attempt: (^(Integer attempts) -> void), ?before_wait: (^(Integer attempts, untyped response) -> void)) ?{ (untyped waiter) -> void } -> Bucket | (?Hash[Symbol, untyped]) ?{ (untyped waiter) -> void } -> Bucket # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#wait_until_not_exists-instance_method def wait_until_not_exists: (?max_attempts: Integer, ?delay: Numeric, ?before_attempt: (^(Integer attempts) -> void), ?before_wait: (^(Integer attempts, untyped response) -> void)) ?{ (untyped waiter) -> void } -> Bucket | (?Hash[Symbol, untyped]) ?{ (untyped waiter) -> void } -> Bucket # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#create-instance_method def create: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read"), ?create_bucket_configuration: { location_constraint: ("af-south-1" | "ap-east-1" | "ap-northeast-1" | "ap-northeast-2" | "ap-northeast-3" | "ap-south-1" | "ap-south-2" | "ap-southeast-1" | "ap-southeast-2" | "ap-southeast-3" | "ca-central-1" | "cn-north-1" | "cn-northwest-1" | "EU" | "eu-central-1" | "eu-north-1" | "eu-south-1" | "eu-south-2" | "eu-west-1" | "eu-west-2" | "eu-west-3" | "me-south-1" | "sa-east-1" | "us-east-2" | "us-gov-east-1" | "us-gov-west-1" | "us-west-1" | "us-west-2")?, location: { type: ("AvailabilityZone")?, name: ::String? }?, bucket: { data_redundancy: ("SingleAvailabilityZone")?, type: ("Directory")? }? 
}, ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write: ::String, ?grant_write_acp: ::String, ?object_lock_enabled_for_bucket: bool, ?object_ownership: ("BucketOwnerPreferred" | "ObjectWriter" | "BucketOwnerEnforced") ) -> Types::CreateBucketOutput | (?Hash[Symbol, untyped]) -> Types::CreateBucketOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#delete-instance_method def delete: ( ?expected_bucket_owner: ::String ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#delete_objects-instance_method def delete_objects: ( delete: { objects: Array[ { key: ::String, version_id: ::String? }, ], quiet: bool? }, ?mfa: ::String, ?request_payer: ("requester"), ?bypass_governance_retention: bool, ?expected_bucket_owner: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") ) -> Types::DeleteObjectsOutput | (?Hash[Symbol, untyped]) -> Types::DeleteObjectsOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#put_object-instance_method def put_object: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control"), ?body: ::String | ::StringIO | ::File, ?cache_control: ::String, ?content_disposition: ::String, ?content_encoding: ::String, ?content_language: ::String, ?content_length: ::Integer, ?content_md5: ::String, ?content_type: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?checksum_crc32: ::String, ?checksum_crc32c: ::String, ?checksum_sha1: ::String, ?checksum_sha256: ::String, ?expires: ::Time, ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write_acp: ::String, key: ::String, ?metadata: Hash[::String, ::String], ?server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse"), ?storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE"), ?website_redirect_location: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?ssekms_key_id: ::String, ?ssekms_encryption_context: ::String, ?bucket_key_enabled: bool, ?request_payer: ("requester"), ?tagging: ::String, ?object_lock_mode: ("GOVERNANCE" | "COMPLIANCE"), ?object_lock_retain_until_date: ::Time, ?object_lock_legal_hold_status: ("ON" | "OFF"), ?expected_bucket_owner: ::String ) -> Object | (?Hash[Symbol, untyped]) -> Object # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#acl-instance_method def acl: () -> BucketAcl # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#cors-instance_method def cors: () -> BucketCors # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#lifecycle-instance_method def lifecycle: () -> BucketLifecycle # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#lifecycle_configuration-instance_method def lifecycle_configuration: () -> BucketLifecycleConfiguration # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#logging-instance_method def logging: () -> BucketLogging # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#multipart_uploads-instance_method def multipart_uploads: ( ?delimiter: ::String, ?encoding_type: ("url"), ?key_marker: ::String, ?prefix: ::String, ?upload_id_marker: ::String, 
?expected_bucket_owner: ::String, ?request_payer: ("requester") ) -> MultipartUpload::Collection | (?Hash[Symbol, untyped]) -> MultipartUpload::Collection # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#notification-instance_method def notification: () -> BucketNotification # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#object-instance_method def object: (String key) -> Object # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#object_versions-instance_method def object_versions: ( ?delimiter: ::String, ?encoding_type: ("url"), ?key_marker: ::String, ?prefix: ::String, ?version_id_marker: ::String, ?expected_bucket_owner: ::String, ?request_payer: ("requester"), ?optional_object_attributes: Array[("RestoreStatus")] ) -> ObjectVersion::Collection | (?Hash[Symbol, untyped]) -> ObjectVersion::Collection # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#objects-instance_method def objects: ( ?delimiter: ::String, ?encoding_type: ("url"), ?prefix: ::String, ?fetch_owner: bool, ?start_after: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String, ?optional_object_attributes: Array[("RestoreStatus")] ) -> ObjectSummary::Collection | (?Hash[Symbol, untyped]) -> ObjectSummary::Collection # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#policy-instance_method def policy: () -> BucketPolicy # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#request_payment-instance_method def request_payment: () -> BucketRequestPayment # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#tagging-instance_method def tagging: () -> BucketTagging # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#versioning-instance_method def versioning: () -> BucketVersioning # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Bucket.html#website-instance_method def website: () -> BucketWebsite class Collection < ::Aws::Resources::Collection[Bucket] end end end end aws-sdk-s3-1.143.0/sig/bucket_acl.rbs0000644000004100000410000000621114563445240017201 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
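# Illustrative usage (hand-written, not generated): a sketch of the
# Aws::S3::Bucket signatures above; the bucket name and object keys are
# hypothetical placeholders.
#
#   require 'aws-sdk-s3'
#
#   bucket = Aws::S3::Bucket.new('example-bucket')
#   bucket.put_object(key: 'logs/2024-01-26.txt', body: 'first line')
#   bucket.objects(prefix: 'logs/').each { |summary| puts summary.key }
#   bucket.delete_objects(delete: { objects: [{ key: 'logs/2024-01-26.txt' }] })
#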
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketAcl.html class BucketAcl # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketAcl.html#initialize-instance_method def initialize: (String bucket_name, Hash[Symbol, untyped] options) -> void | (bucket_name: String, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketAcl.html#bucket_name-instance_method def bucket_name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketAcl.html#owner-instance_method def owner: () -> Types::Owner # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketAcl.html#grants-instance_method def grants: () -> ::Array[Types::Grant] def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketAcl.html#load-instance_method def load: () -> self alias reload load # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketAcl.html#data-instance_method def data: () -> Types::GetBucketAclOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketAcl.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketAcl.html#put-instance_method def put: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read"), ?access_control_policy: { grants: Array[ { grantee: { display_name: ::String?, email_address: ::String?, id: ::String?, type: ("CanonicalUser" | "AmazonCustomerByEmail" | "Group"), uri: ::String? }?, permission: ("FULL_CONTROL" | "WRITE" | "WRITE_ACP" | "READ" | "READ_ACP")? }, ]?, owner: { display_name: ::String?, id: ::String? }? }, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write: ::String, ?grant_write_acp: ::String, ?expected_bucket_owner: ::String ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketAcl.html#bucket-instance_method def bucket: () -> Bucket class Collection < ::Aws::Resources::Collection[BucketAcl] end end end end aws-sdk-s3-1.143.0/sig/object_acl.rbs0000644000004100000410000000720114563445240017172 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
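# Illustrative usage (hand-written, not generated): a sketch of the
# Aws::S3::BucketAcl signatures above; the bucket name is a hypothetical
# placeholder.
#
#   require 'aws-sdk-s3'
#
#   acl = Aws::S3::BucketAcl.new('example-bucket')
#   acl.put(acl: 'private')  # canned-ACL variant of the put signature
#   acl.reload.grants.each { |g| puts "#{g.grantee.type}: #{g.permission}" }
#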
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectAcl.html class ObjectAcl # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectAcl.html#initialize-instance_method def initialize: (String bucket_name, String object_key, Hash[Symbol, untyped] options) -> void | (bucket_name: String, object_key: String, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectAcl.html#bucket_name-instance_method def bucket_name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectAcl.html#object_key-instance_method def object_key: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectAcl.html#owner-instance_method def owner: () -> Types::Owner # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectAcl.html#grants-instance_method def grants: () -> ::Array[Types::Grant] # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectAcl.html#request_charged-instance_method def request_charged: () -> ("requester") def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectAcl.html#load-instance_method def load: () -> self alias reload load # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectAcl.html#data-instance_method def data: () -> Types::GetObjectAclOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectAcl.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectAcl.html#put-instance_method def put: ( ?acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control"), ?access_control_policy: { grants: Array[ { grantee: { display_name: ::String?, email_address: ::String?, id: ::String?, type: ("CanonicalUser" | "AmazonCustomerByEmail" | "Group"), uri: ::String? }?, permission: ("FULL_CONTROL" | "WRITE" | "WRITE_ACP" | "READ" | "READ_ACP")? }, ]?, owner: { display_name: ::String?, id: ::String? }? }, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?grant_full_control: ::String, ?grant_read: ::String, ?grant_read_acp: ::String, ?grant_write: ::String, ?grant_write_acp: ::String, ?request_payer: ("requester"), ?version_id: ::String, ?expected_bucket_owner: ::String ) -> Types::PutObjectAclOutput | (?Hash[Symbol, untyped]) -> Types::PutObjectAclOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/ObjectAcl.html#object-instance_method def object: () -> Object class Collection < ::Aws::Resources::Collection[ObjectAcl] end end end end aws-sdk-s3-1.143.0/sig/bucket_logging.rbs0000644000004100000410000000610214563445240020067 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
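# Illustrative usage (hand-written, not generated): a sketch of the
# Aws::S3::ObjectAcl signatures above; the bucket name and key are
# hypothetical placeholders.
#
#   require 'aws-sdk-s3'
#
#   object_acl = Aws::S3::ObjectAcl.new('example-bucket', 'greeting.txt')
#   object_acl.put(acl: 'bucket-owner-full-control')
#   puts object_acl.reload.grants.map { |g| g.permission }.inspect
#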
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLogging.html class BucketLogging # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLogging.html#initialize-instance_method def initialize: (String bucket_name, Hash[Symbol, untyped] options) -> void | (bucket_name: String, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLogging.html#bucket_name-instance_method def bucket_name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLogging.html#logging_enabled-instance_method def logging_enabled: () -> Types::LoggingEnabled def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLogging.html#load-instance_method def load: () -> self alias reload load # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLogging.html#data-instance_method def data: () -> Types::GetBucketLoggingOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLogging.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLogging.html#put-instance_method def put: ( bucket_logging_status: { logging_enabled: { target_bucket: ::String, target_grants: Array[ { grantee: { display_name: ::String?, email_address: ::String?, id: ::String?, type: ("CanonicalUser" | "AmazonCustomerByEmail" | "Group"), uri: ::String? }?, permission: ("FULL_CONTROL" | "READ" | "WRITE")? }, ]?, target_prefix: ::String, target_object_key_format: { simple_prefix: { }?, partitioned_prefix: { partition_date_source: ("EventTime" | "DeliveryTime")? }? }? }? }, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?expected_bucket_owner: ::String ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLogging.html#bucket-instance_method def bucket: () -> Bucket class Collection < ::Aws::Resources::Collection[BucketLogging] end end end end aws-sdk-s3-1.143.0/sig/bucket_cors.rbs0000644000004100000410000000541314563445240017413 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
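# Illustrative usage (hand-written, not generated): a sketch of the
# Aws::S3::BucketLogging#put shape typed above. Both bucket names are
# hypothetical placeholders, and the target bucket must already permit
# S3 server access log delivery.
#
#   require 'aws-sdk-s3'
#
#   logging = Aws::S3::BucketLogging.new('example-bucket')
#   logging.put(
#     bucket_logging_status: {
#       logging_enabled: {
#         target_bucket: 'example-log-bucket',
#         target_prefix: 'access-logs/'
#       }
#     }
#   )
#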
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketCors.html class BucketCors # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketCors.html#initialize-instance_method def initialize: (String bucket_name, Hash[Symbol, untyped] options) -> void | (bucket_name: String, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketCors.html#bucket_name-instance_method def bucket_name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketCors.html#cors_rules-instance_method def cors_rules: () -> ::Array[Types::CORSRule] def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketCors.html#load-instance_method def load: () -> self alias reload load # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketCors.html#data-instance_method def data: () -> Types::GetBucketCorsOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketCors.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketCors.html#delete-instance_method def delete: ( ?expected_bucket_owner: ::String ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketCors.html#put-instance_method def put: ( cors_configuration: { cors_rules: Array[ { id: ::String?, allowed_headers: Array[::String]?, allowed_methods: Array[::String], allowed_origins: Array[::String], expose_headers: Array[::String]?, max_age_seconds: ::Integer? }, ] }, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), ?expected_bucket_owner: ::String ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketCors.html#bucket-instance_method def bucket: () -> Bucket class Collection < ::Aws::Resources::Collection[BucketCors] end end end end aws-sdk-s3-1.143.0/sig/bucket_tagging.rbs0000644000004100000410000000505014563445240020062 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
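# Illustrative usage (hand-written, not generated): a sketch of the
# Aws::S3::BucketCors signatures above; the bucket name and origin are
# hypothetical placeholders.
#
#   require 'aws-sdk-s3'
#
#   cors = Aws::S3::BucketCors.new('example-bucket')
#   cors.put(
#     cors_configuration: {
#       cors_rules: [
#         { allowed_methods: ['GET', 'PUT'],
#           allowed_origins: ['https://example.com'],
#           allowed_headers: ['*'],
#           max_age_seconds: 3600 }
#       ]
#     }
#   )
#   cors.cors_rules.each { |rule| puts rule.allowed_origins.inspect }
#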
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketTagging.html class BucketTagging # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketTagging.html#initialize-instance_method def initialize: (String bucket_name, Hash[Symbol, untyped] options) -> void | (bucket_name: String, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketTagging.html#bucket_name-instance_method def bucket_name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketTagging.html#tag_set-instance_method def tag_set: () -> ::Array[Types::Tag] def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketTagging.html#load-instance_method def load: () -> self alias reload load # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketTagging.html#data-instance_method def data: () -> Types::GetBucketTaggingOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketTagging.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketTagging.html#delete-instance_method def delete: ( ?expected_bucket_owner: ::String ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketTagging.html#put-instance_method def put: ( ?content_md5: ::String, ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"), tagging: { tag_set: Array[ { key: ::String, value: ::String }, ] }, ?expected_bucket_owner: ::String ) -> ::Aws::EmptyStructure | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketTagging.html#bucket-instance_method def bucket: () -> Bucket class Collection < ::Aws::Resources::Collection[BucketTagging] end end end end aws-sdk-s3-1.143.0/sig/errors.rbs0000644000004100000410000000173714563445240016431 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 module Errors class ServiceError < ::Aws::Errors::ServiceError end class BucketAlreadyExists < ::Aws::Errors::ServiceError end class BucketAlreadyOwnedByYou < ::Aws::Errors::ServiceError end class InvalidObjectState < ::Aws::Errors::ServiceError def storage_class: () -> ::String def access_tier: () -> ::String end class NoSuchBucket < ::Aws::Errors::ServiceError end class NoSuchKey < ::Aws::Errors::ServiceError end class NoSuchUpload < ::Aws::Errors::ServiceError end class ObjectAlreadyInActiveTierError < ::Aws::Errors::ServiceError end class ObjectNotInActiveTierError < ::Aws::Errors::ServiceError end end end end aws-sdk-s3-1.143.0/sig/multipart_upload_part.rbs0000644000004100000410000001214614563445240021524 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. 
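# Illustrative usage (hand-written, not generated): a sketch combining the
# Aws::S3::BucketTagging signatures and the Aws::S3::Errors classes above;
# the bucket name and tag values are hypothetical placeholders.
#
#   require 'aws-sdk-s3'
#
#   tagging = Aws::S3::BucketTagging.new('example-bucket')
#   begin
#     tagging.put(tagging: { tag_set: [{ key: 'env', value: 'prod' }] })
#   rescue Aws::S3::Errors::NoSuchBucket => e
#     warn "bucket does not exist: #{e.message}"
#   end
#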
See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws module S3 # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html class MultipartUploadPart # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#initialize-instance_method def initialize: (String bucket_name, String object_key, String multipart_upload_id, Integer part_number, Hash[Symbol, untyped] options) -> void | (bucket_name: String, object_key: String, multipart_upload_id: String, part_number: Integer, ?client: Client) -> void | (Hash[Symbol, untyped] args) -> void # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#bucket_name-instance_method def bucket_name: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#object_key-instance_method def object_key: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#multipart_upload_id-instance_method def multipart_upload_id: () -> String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#part_number-instance_method def part_number: () -> Integer # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#last_modified-instance_method def last_modified: () -> ::Time # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#etag-instance_method def etag: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#size-instance_method def size: () -> ::Integer # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#checksum_crc32-instance_method def checksum_crc32: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#checksum_crc32c-instance_method def checksum_crc32c: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#checksum_sha1-instance_method def checksum_sha1: () -> ::String # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#checksum_sha256-instance_method def checksum_sha256: () -> ::String def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#data-instance_method def data: () -> Types::Part # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#data_loaded?-instance_method def data_loaded?: () -> bool # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#copy_from-instance_method def copy_from: ( copy_source: ::String, ?copy_source_if_match: ::String, ?copy_source_if_modified_since: ::Time, ?copy_source_if_none_match: ::String, ?copy_source_if_unmodified_since: ::Time, ?copy_source_range: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?copy_source_sse_customer_algorithm: ::String, ?copy_source_sse_customer_key: ::String, ?copy_source_sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String, ?expected_source_bucket_owner: ::String ) -> Types::UploadPartCopyOutput | (?Hash[Symbol, untyped]) -> Types::UploadPartCopyOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#upload-instance_method def upload: ( ?body: ::String | ::StringIO | ::File, ?content_length: ::Integer, ?content_md5: ::String, ?checksum_algorithm: ("CRC32" 
| "CRC32C" | "SHA1" | "SHA256"), ?checksum_crc32: ::String, ?checksum_crc32c: ::String, ?checksum_sha1: ::String, ?checksum_sha256: ::String, ?sse_customer_algorithm: ::String, ?sse_customer_key: ::String, ?sse_customer_key_md5: ::String, ?request_payer: ("requester"), ?expected_bucket_owner: ::String ) -> Types::UploadPartOutput | (?Hash[Symbol, untyped]) -> Types::UploadPartOutput # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/MultipartUploadPart.html#multipart_upload-instance_method def multipart_upload: () -> MultipartUpload class Collection < ::Aws::Resources::Collection[MultipartUploadPart] end end end end aws-sdk-s3-1.143.0/sig/types.rbs0000644000004100000410000027544614563445240016273 0ustar www-datawww-data# WARNING ABOUT GENERATED CODE # # This file is generated. See the contributing guide for more information: # https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md # # WARNING ABOUT GENERATED CODE module Aws::S3 module Types class AbortIncompleteMultipartUpload attr_accessor days_after_initiation: ::Integer SENSITIVE: [] end class AbortMultipartUploadOutput attr_accessor request_charged: ("requester") SENSITIVE: [] end class AbortMultipartUploadRequest attr_accessor bucket: ::String attr_accessor key: ::String attr_accessor upload_id: ::String attr_accessor request_payer: ("requester") attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class AccelerateConfiguration attr_accessor status: ("Enabled" | "Suspended") SENSITIVE: [] end class AccessControlPolicy attr_accessor grants: ::Array[Types::Grant] attr_accessor owner: Types::Owner SENSITIVE: [] end class AccessControlTranslation attr_accessor owner: ("Destination") SENSITIVE: [] end class AnalyticsAndOperator attr_accessor prefix: ::String attr_accessor tags: ::Array[Types::Tag] SENSITIVE: [] end class AnalyticsConfiguration attr_accessor id: ::String attr_accessor filter: Types::AnalyticsFilter attr_accessor storage_class_analysis: Types::StorageClassAnalysis SENSITIVE: [] end class AnalyticsExportDestination attr_accessor s3_bucket_destination: Types::AnalyticsS3BucketDestination SENSITIVE: [] end class AnalyticsFilter attr_accessor prefix: ::String attr_accessor tag: Types::Tag attr_accessor and: Types::AnalyticsAndOperator SENSITIVE: [] end class AnalyticsS3BucketDestination attr_accessor format: ("CSV") attr_accessor bucket_account_id: ::String attr_accessor bucket: ::String attr_accessor prefix: ::String SENSITIVE: [] end class Bucket attr_accessor name: ::String attr_accessor creation_date: ::Time SENSITIVE: [] end class BucketAlreadyExists < Aws::EmptyStructure end class BucketAlreadyOwnedByYou < Aws::EmptyStructure end class BucketInfo attr_accessor data_redundancy: ("SingleAvailabilityZone") attr_accessor type: ("Directory") SENSITIVE: [] end class BucketLifecycleConfiguration attr_accessor rules: ::Array[Types::LifecycleRule] SENSITIVE: [] end class BucketLoggingStatus attr_accessor logging_enabled: Types::LoggingEnabled SENSITIVE: [] end class CORSConfiguration attr_accessor cors_rules: ::Array[Types::CORSRule] SENSITIVE: [] end class CORSRule attr_accessor id: ::String attr_accessor allowed_headers: ::Array[::String] attr_accessor allowed_methods: ::Array[::String] attr_accessor allowed_origins: ::Array[::String] attr_accessor expose_headers: ::Array[::String] attr_accessor max_age_seconds: ::Integer SENSITIVE: [] end class CSVInput attr_accessor file_header_info: ("USE" | "IGNORE" | "NONE") attr_accessor comments: ::String attr_accessor quote_escape_character: ::String 
attr_accessor record_delimiter: ::String attr_accessor field_delimiter: ::String attr_accessor quote_character: ::String attr_accessor allow_quoted_record_delimiter: bool SENSITIVE: [] end class CSVOutput attr_accessor quote_fields: ("ALWAYS" | "ASNEEDED") attr_accessor quote_escape_character: ::String attr_accessor record_delimiter: ::String attr_accessor field_delimiter: ::String attr_accessor quote_character: ::String SENSITIVE: [] end class Checksum attr_accessor checksum_crc32: ::String attr_accessor checksum_crc32c: ::String attr_accessor checksum_sha1: ::String attr_accessor checksum_sha256: ::String SENSITIVE: [] end class CloudFunctionConfiguration attr_accessor id: ::String attr_accessor event: ("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete") attr_accessor events: ::Array[("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")] attr_accessor cloud_function: ::String attr_accessor invocation_role: ::String SENSITIVE: [] end class CommonPrefix attr_accessor prefix: ::String SENSITIVE: [] end class CompleteMultipartUploadOutput attr_accessor location: ::String attr_accessor bucket: ::String attr_accessor key: ::String attr_accessor expiration: ::String attr_accessor etag: ::String attr_accessor checksum_crc32: ::String attr_accessor checksum_crc32c: ::String attr_accessor checksum_sha1: ::String attr_accessor checksum_sha256: ::String attr_accessor server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse") attr_accessor version_id: ::String attr_accessor ssekms_key_id: ::String attr_accessor bucket_key_enabled: bool attr_accessor request_charged: ("requester") SENSITIVE: [:ssekms_key_id] end class CompleteMultipartUploadRequest attr_accessor bucket: ::String attr_accessor key: ::String attr_accessor multipart_upload: Types::CompletedMultipartUpload attr_accessor upload_id: ::String attr_accessor checksum_crc32: ::String attr_accessor checksum_crc32c: ::String attr_accessor checksum_sha1: ::String attr_accessor checksum_sha256: 
    class CompletedMultipartUpload
      attr_accessor parts: ::Array[Types::CompletedPart]
      SENSITIVE: []
    end
    class CompletedPart
      attr_accessor etag: ::String
      attr_accessor checksum_crc32: ::String
      attr_accessor checksum_crc32c: ::String
      attr_accessor checksum_sha1: ::String
      attr_accessor checksum_sha256: ::String
      attr_accessor part_number: ::Integer
      SENSITIVE: []
    end
    class Condition
      attr_accessor http_error_code_returned_equals: ::String
      attr_accessor key_prefix_equals: ::String
      SENSITIVE: []
    end
    class ContinuationEvent
      attr_accessor event_type: untyped
      SENSITIVE: []
    end
    class CopyObjectOutput
      attr_accessor copy_object_result: Types::CopyObjectResult
      attr_accessor expiration: ::String
      attr_accessor copy_source_version_id: ::String
      attr_accessor version_id: ::String
      attr_accessor server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse")
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor sse_customer_key_md5: ::String
      attr_accessor ssekms_key_id: ::String
      attr_accessor ssekms_encryption_context: ::String
      attr_accessor bucket_key_enabled: bool
      attr_accessor request_charged: ("requester")
      SENSITIVE: [:ssekms_key_id, :ssekms_encryption_context]
    end
    class CopyObjectRequest
      attr_accessor acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control")
      attr_accessor bucket: ::String
      attr_accessor cache_control: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      attr_accessor content_disposition: ::String
      attr_accessor content_encoding: ::String
      attr_accessor content_language: ::String
      attr_accessor content_type: ::String
      attr_accessor copy_source: ::String
      attr_accessor copy_source_if_match: ::String
      attr_accessor copy_source_if_modified_since: ::Time
      attr_accessor copy_source_if_none_match: ::String
      attr_accessor copy_source_if_unmodified_since: ::Time
      attr_accessor expires: ::Time
      attr_accessor grant_full_control: ::String
      attr_accessor grant_read: ::String
      attr_accessor grant_read_acp: ::String
      attr_accessor grant_write_acp: ::String
      attr_accessor key: ::String
      attr_accessor metadata: ::Hash[::String, ::String]
      attr_accessor metadata_directive: ("COPY" | "REPLACE")
      attr_accessor tagging_directive: ("COPY" | "REPLACE")
      attr_accessor server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse")
      attr_accessor storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")
      attr_accessor website_redirect_location: ::String
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor sse_customer_key: ::String
      attr_accessor sse_customer_key_md5: ::String
      attr_accessor ssekms_key_id: ::String
      attr_accessor ssekms_encryption_context: ::String
      attr_accessor bucket_key_enabled: bool
      attr_accessor copy_source_sse_customer_algorithm: ::String
      attr_accessor copy_source_sse_customer_key: ::String
      attr_accessor copy_source_sse_customer_key_md5: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor tagging: ::String
      attr_accessor object_lock_mode: ("GOVERNANCE" | "COMPLIANCE")
      attr_accessor object_lock_retain_until_date: ::Time
      attr_accessor object_lock_legal_hold_status: ("ON" | "OFF")
      attr_accessor expected_bucket_owner: ::String
      attr_accessor expected_source_bucket_owner: ::String
      SENSITIVE: [:sse_customer_key, :ssekms_key_id, :ssekms_encryption_context, :copy_source_sse_customer_key]
    end
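    # Editor's note: CopyObjectRequest in use (hypothetical names);
    # `copy_source` is "source-bucket/source-key", optionally with
    # "?versionId=..." appended.
    #
    #   s3.copy_object(bucket: "dest-bucket", key: "dest-key",
    #                  copy_source: "src-bucket/src-key",
    #                  metadata_directive: "COPY")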
    class CopyObjectResult
      attr_accessor etag: ::String
      attr_accessor last_modified: ::Time
      attr_accessor checksum_crc32: ::String
      attr_accessor checksum_crc32c: ::String
      attr_accessor checksum_sha1: ::String
      attr_accessor checksum_sha256: ::String
      SENSITIVE: []
    end
    class CopyPartResult
      attr_accessor etag: ::String
      attr_accessor last_modified: ::Time
      attr_accessor checksum_crc32: ::String
      attr_accessor checksum_crc32c: ::String
      attr_accessor checksum_sha1: ::String
      attr_accessor checksum_sha256: ::String
      SENSITIVE: []
    end
    class CreateBucketConfiguration
      attr_accessor location_constraint: ("af-south-1" | "ap-east-1" | "ap-northeast-1" | "ap-northeast-2" | "ap-northeast-3" | "ap-south-1" | "ap-south-2" | "ap-southeast-1" | "ap-southeast-2" | "ap-southeast-3" | "ca-central-1" | "cn-north-1" | "cn-northwest-1" | "EU" | "eu-central-1" | "eu-north-1" | "eu-south-1" | "eu-south-2" | "eu-west-1" | "eu-west-2" | "eu-west-3" | "me-south-1" | "sa-east-1" | "us-east-2" | "us-gov-east-1" | "us-gov-west-1" | "us-west-1" | "us-west-2")
      attr_accessor location: Types::LocationInfo
      attr_accessor bucket: Types::BucketInfo
      SENSITIVE: []
    end
    class CreateBucketOutput
      attr_accessor location: ::String
      SENSITIVE: []
    end
    class CreateBucketRequest
      attr_accessor acl: ("private" | "public-read" | "public-read-write" | "authenticated-read")
      attr_accessor bucket: ::String
      attr_accessor create_bucket_configuration: Types::CreateBucketConfiguration
      attr_accessor grant_full_control: ::String
      attr_accessor grant_read: ::String
      attr_accessor grant_read_acp: ::String
      attr_accessor grant_write: ::String
      attr_accessor grant_write_acp: ::String
      attr_accessor object_lock_enabled_for_bucket: bool
      attr_accessor object_ownership: ("BucketOwnerPreferred" | "ObjectWriter" | "BucketOwnerEnforced")
      SENSITIVE: []
    end
    class CreateMultipartUploadOutput
      attr_accessor abort_date: ::Time
      attr_accessor abort_rule_id: ::String
      attr_accessor bucket: ::String
      attr_accessor key: ::String
      attr_accessor upload_id: ::String
      attr_accessor server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse")
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor sse_customer_key_md5: ::String
      attr_accessor ssekms_key_id: ::String
      attr_accessor ssekms_encryption_context: ::String
      attr_accessor bucket_key_enabled: bool
      attr_accessor request_charged: ("requester")
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      SENSITIVE: [:ssekms_key_id, :ssekms_encryption_context]
    end
    class CreateMultipartUploadRequest
      attr_accessor acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control")
      attr_accessor bucket: ::String
      attr_accessor cache_control: ::String
      attr_accessor content_disposition: ::String
      attr_accessor content_encoding: ::String
      attr_accessor content_language: ::String
      attr_accessor content_type: ::String
      attr_accessor expires: ::Time
      attr_accessor grant_full_control: ::String
      attr_accessor grant_read: ::String
      attr_accessor grant_read_acp: ::String
      attr_accessor grant_write_acp: ::String
      attr_accessor key: ::String
      attr_accessor metadata: ::Hash[::String, ::String]
      attr_accessor server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse")
      attr_accessor storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")
      attr_accessor website_redirect_location: ::String
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor sse_customer_key: ::String
      attr_accessor sse_customer_key_md5: ::String
      attr_accessor ssekms_key_id: ::String
      attr_accessor ssekms_encryption_context: ::String
      attr_accessor bucket_key_enabled: bool
      attr_accessor request_payer: ("requester")
      attr_accessor tagging: ::String
      attr_accessor object_lock_mode: ("GOVERNANCE" | "COMPLIANCE")
      attr_accessor object_lock_retain_until_date: ::Time
      attr_accessor object_lock_legal_hold_status: ("ON" | "OFF")
      attr_accessor expected_bucket_owner: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      SENSITIVE: [:sse_customer_key, :ssekms_key_id, :ssekms_encryption_context]
    end
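    # Editor's note: CreateBucketConfiguration sketch (hypothetical bucket
    # name); outside us-east-1 a location_constraint must be supplied.
    #
    #   s3.create_bucket(
    #     bucket: "example-bucket",
    #     create_bucket_configuration: { location_constraint: "eu-west-1" }
    #   )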
"INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE") attr_accessor website_redirect_location: ::String attr_accessor sse_customer_algorithm: ::String attr_accessor sse_customer_key: ::String attr_accessor sse_customer_key_md5: ::String attr_accessor ssekms_key_id: ::String attr_accessor ssekms_encryption_context: ::String attr_accessor bucket_key_enabled: bool attr_accessor request_payer: ("requester") attr_accessor tagging: ::String attr_accessor object_lock_mode: ("GOVERNANCE" | "COMPLIANCE") attr_accessor object_lock_retain_until_date: ::Time attr_accessor object_lock_legal_hold_status: ("ON" | "OFF") attr_accessor expected_bucket_owner: ::String attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") SENSITIVE: [:sse_customer_key, :ssekms_key_id, :ssekms_encryption_context] end class CreateSessionOutput attr_accessor credentials: Types::SessionCredentials SENSITIVE: [] end class CreateSessionRequest attr_accessor session_mode: ("ReadOnly" | "ReadWrite") attr_accessor bucket: ::String SENSITIVE: [] end class DefaultRetention attr_accessor mode: ("GOVERNANCE" | "COMPLIANCE") attr_accessor days: ::Integer attr_accessor years: ::Integer SENSITIVE: [] end class Delete attr_accessor objects: ::Array[Types::ObjectIdentifier] attr_accessor quiet: bool SENSITIVE: [] end class DeleteBucketAnalyticsConfigurationRequest attr_accessor bucket: ::String attr_accessor id: ::String attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class DeleteBucketCorsRequest attr_accessor bucket: ::String attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class DeleteBucketEncryptionRequest attr_accessor bucket: ::String attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class DeleteBucketIntelligentTieringConfigurationRequest attr_accessor bucket: ::String attr_accessor id: ::String SENSITIVE: [] end class DeleteBucketInventoryConfigurationRequest attr_accessor bucket: ::String attr_accessor id: ::String attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class DeleteBucketLifecycleRequest attr_accessor bucket: ::String attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class DeleteBucketMetricsConfigurationRequest attr_accessor bucket: ::String attr_accessor id: ::String attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class DeleteBucketOwnershipControlsRequest attr_accessor bucket: ::String attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class DeleteBucketPolicyRequest attr_accessor bucket: ::String attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class DeleteBucketReplicationRequest attr_accessor bucket: ::String attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class DeleteBucketRequest attr_accessor bucket: ::String attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class DeleteBucketTaggingRequest attr_accessor bucket: ::String attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class DeleteBucketWebsiteRequest attr_accessor bucket: ::String attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class DeleteMarkerEntry attr_accessor owner: Types::Owner attr_accessor key: ::String attr_accessor version_id: ::String attr_accessor is_latest: bool attr_accessor last_modified: ::Time SENSITIVE: [] end class DeleteMarkerReplication attr_accessor status: ("Enabled" | "Disabled") SENSITIVE: [] end class DeleteObjectOutput attr_accessor delete_marker: bool attr_accessor version_id: 
    class DeleteObjectRequest
      attr_accessor bucket: ::String
      attr_accessor key: ::String
      attr_accessor mfa: ::String
      attr_accessor version_id: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor bypass_governance_retention: bool
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class DeleteObjectTaggingOutput
      attr_accessor version_id: ::String
      SENSITIVE: []
    end
    class DeleteObjectTaggingRequest
      attr_accessor bucket: ::String
      attr_accessor key: ::String
      attr_accessor version_id: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class DeleteObjectsOutput
      attr_accessor deleted: ::Array[Types::DeletedObject]
      attr_accessor request_charged: ("requester")
      attr_accessor errors: ::Array[Types::Error]
      SENSITIVE: []
    end
    class DeleteObjectsRequest
      attr_accessor bucket: ::String
      attr_accessor delete: Types::Delete
      attr_accessor mfa: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor bypass_governance_retention: bool
      attr_accessor expected_bucket_owner: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      SENSITIVE: []
    end
    class DeletePublicAccessBlockRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class DeletedObject
      attr_accessor key: ::String
      attr_accessor version_id: ::String
      attr_accessor delete_marker: bool
      attr_accessor delete_marker_version_id: ::String
      SENSITIVE: []
    end
    class Destination
      attr_accessor bucket: ::String
      attr_accessor account: ::String
      attr_accessor storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")
      attr_accessor access_control_translation: Types::AccessControlTranslation
      attr_accessor encryption_configuration: Types::EncryptionConfiguration
      attr_accessor replication_time: Types::ReplicationTime
      attr_accessor metrics: Types::Metrics
      SENSITIVE: []
    end
    class Encryption
      attr_accessor encryption_type: ("AES256" | "aws:kms" | "aws:kms:dsse")
      attr_accessor kms_key_id: ::String
      attr_accessor kms_context: ::String
      SENSITIVE: [:kms_key_id]
    end
    class EncryptionConfiguration
      attr_accessor replica_kms_key_id: ::String
      SENSITIVE: []
    end
    class EndEvent
      attr_accessor event_type: untyped
      SENSITIVE: []
    end
    class Error
      attr_accessor key: ::String
      attr_accessor version_id: ::String
      attr_accessor code: ::String
      attr_accessor message: ::String
      SENSITIVE: []
    end
    class ErrorDocument
      attr_accessor key: ::String
      SENSITIVE: []
    end
    class EventBridgeConfiguration < Aws::EmptyStructure
    end
    class ExistingObjectReplication
      attr_accessor status: ("Enabled" | "Disabled")
      SENSITIVE: []
    end
    class FilterRule
      attr_accessor name: ("prefix" | "suffix")
      attr_accessor value: ::String
      SENSITIVE: []
    end
    class GetBucketAccelerateConfigurationOutput
      attr_accessor status: ("Enabled" | "Suspended")
      attr_accessor request_charged: ("requester")
      SENSITIVE: []
    end
    class GetBucketAccelerateConfigurationRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      attr_accessor request_payer: ("requester")
      SENSITIVE: []
    end
    class GetBucketAclOutput
      attr_accessor owner: Types::Owner
      attr_accessor grants: ::Array[Types::Grant]
      SENSITIVE: []
    end
    class GetBucketAclRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketAnalyticsConfigurationOutput
      attr_accessor analytics_configuration: Types::AnalyticsConfiguration
      SENSITIVE: []
    end
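    # Editor's note: batch deletion via Types::Delete (hypothetical keys);
    # `quiet: true` suppresses per-key entries in DeleteObjectsOutput#deleted.
    #
    #   s3.delete_objects(
    #     bucket: "example-bucket",
    #     delete: { objects: [{ key: "a.txt" }, { key: "b.txt" }], quiet: true }
    #   )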
    class GetBucketAnalyticsConfigurationRequest
      attr_accessor bucket: ::String
      attr_accessor id: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketCorsOutput
      attr_accessor cors_rules: ::Array[Types::CORSRule]
      SENSITIVE: []
    end
    class GetBucketCorsRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketEncryptionOutput
      attr_accessor server_side_encryption_configuration: Types::ServerSideEncryptionConfiguration
      SENSITIVE: []
    end
    class GetBucketEncryptionRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketIntelligentTieringConfigurationOutput
      attr_accessor intelligent_tiering_configuration: Types::IntelligentTieringConfiguration
      SENSITIVE: []
    end
    class GetBucketIntelligentTieringConfigurationRequest
      attr_accessor bucket: ::String
      attr_accessor id: ::String
      SENSITIVE: []
    end
    class GetBucketInventoryConfigurationOutput
      attr_accessor inventory_configuration: Types::InventoryConfiguration
      SENSITIVE: []
    end
    class GetBucketInventoryConfigurationRequest
      attr_accessor bucket: ::String
      attr_accessor id: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketLifecycleConfigurationOutput
      attr_accessor rules: ::Array[Types::LifecycleRule]
      SENSITIVE: []
    end
    class GetBucketLifecycleConfigurationRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketLifecycleOutput
      attr_accessor rules: ::Array[Types::Rule]
      SENSITIVE: []
    end
    class GetBucketLifecycleRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketLocationOutput
      attr_accessor location_constraint: ("af-south-1" | "ap-east-1" | "ap-northeast-1" | "ap-northeast-2" | "ap-northeast-3" | "ap-south-1" | "ap-south-2" | "ap-southeast-1" | "ap-southeast-2" | "ap-southeast-3" | "ca-central-1" | "cn-north-1" | "cn-northwest-1" | "EU" | "eu-central-1" | "eu-north-1" | "eu-south-1" | "eu-south-2" | "eu-west-1" | "eu-west-2" | "eu-west-3" | "me-south-1" | "sa-east-1" | "us-east-2" | "us-gov-east-1" | "us-gov-west-1" | "us-west-1" | "us-west-2")
      SENSITIVE: []
    end
    class GetBucketLocationRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketLoggingOutput
      attr_accessor logging_enabled: Types::LoggingEnabled
      SENSITIVE: []
    end
    class GetBucketLoggingRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketMetricsConfigurationOutput
      attr_accessor metrics_configuration: Types::MetricsConfiguration
      SENSITIVE: []
    end
    class GetBucketMetricsConfigurationRequest
      attr_accessor bucket: ::String
      attr_accessor id: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketNotificationConfigurationRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketOwnershipControlsOutput
      attr_accessor ownership_controls: Types::OwnershipControls
      SENSITIVE: []
    end
    class GetBucketOwnershipControlsRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketPolicyOutput
      attr_accessor policy: ::IO
      SENSITIVE: []
    end
    class GetBucketPolicyRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketPolicyStatusOutput
      attr_accessor policy_status: Types::PolicyStatus
      SENSITIVE: []
    end
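    # Editor's note: GetBucketPolicyOutput#policy is typed ::IO, so the JSON
    # document has to be read from the response body (hypothetical bucket):
    #
    #   policy_json = s3.get_bucket_policy(bucket: "example-bucket").policy.read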
    class GetBucketPolicyStatusRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketReplicationOutput
      attr_accessor replication_configuration: Types::ReplicationConfiguration
      SENSITIVE: []
    end
    class GetBucketReplicationRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketRequestPaymentOutput
      attr_accessor payer: ("Requester" | "BucketOwner")
      SENSITIVE: []
    end
    class GetBucketRequestPaymentRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketTaggingOutput
      attr_accessor tag_set: ::Array[Types::Tag]
      SENSITIVE: []
    end
    class GetBucketTaggingRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketVersioningOutput
      attr_accessor status: ("Enabled" | "Suspended")
      attr_accessor mfa_delete: ("Enabled" | "Disabled")
      SENSITIVE: []
    end
    class GetBucketVersioningRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetBucketWebsiteOutput
      attr_accessor redirect_all_requests_to: Types::RedirectAllRequestsTo
      attr_accessor index_document: Types::IndexDocument
      attr_accessor error_document: Types::ErrorDocument
      attr_accessor routing_rules: ::Array[Types::RoutingRule]
      SENSITIVE: []
    end
    class GetBucketWebsiteRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetObjectAclOutput
      attr_accessor owner: Types::Owner
      attr_accessor grants: ::Array[Types::Grant]
      attr_accessor request_charged: ("requester")
      SENSITIVE: []
    end
    class GetObjectAclRequest
      attr_accessor bucket: ::String
      attr_accessor key: ::String
      attr_accessor version_id: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetObjectAttributesOutput
      attr_accessor delete_marker: bool
      attr_accessor last_modified: ::Time
      attr_accessor version_id: ::String
      attr_accessor request_charged: ("requester")
      attr_accessor etag: ::String
      attr_accessor checksum: Types::Checksum
      attr_accessor object_parts: Types::GetObjectAttributesParts
      attr_accessor storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")
      attr_accessor object_size: ::Integer
      SENSITIVE: []
    end
    class GetObjectAttributesParts
      attr_accessor total_parts_count: ::Integer
      attr_accessor part_number_marker: ::Integer
      attr_accessor next_part_number_marker: ::Integer
      attr_accessor max_parts: ::Integer
      attr_accessor is_truncated: bool
      attr_accessor parts: ::Array[Types::ObjectPart]
      SENSITIVE: []
    end
    class GetObjectAttributesRequest
      attr_accessor bucket: ::String
      attr_accessor key: ::String
      attr_accessor version_id: ::String
      attr_accessor max_parts: ::Integer
      attr_accessor part_number_marker: ::Integer
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor sse_customer_key: ::String
      attr_accessor sse_customer_key_md5: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor expected_bucket_owner: ::String
      attr_accessor object_attributes: ::Array[("ETag" | "Checksum" | "ObjectParts" | "StorageClass" | "ObjectSize")]
      SENSITIVE: [:sse_customer_key]
    end
    class GetObjectLegalHoldOutput
      attr_accessor legal_hold: Types::ObjectLockLegalHold
      SENSITIVE: []
    end
    class GetObjectLegalHoldRequest
      attr_accessor bucket: ::String
      attr_accessor key: ::String
      attr_accessor version_id: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
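    # Editor's note: GetObjectAttributesRequest sketch; `object_attributes`
    # selects which of the output fields above are populated
    # (hypothetical names):
    #
    #   resp = s3.get_object_attributes(
    #     bucket: "example-bucket", key: "big-object",
    #     object_attributes: ["ETag", "Checksum", "ObjectSize"]
    #   )
    #   resp.object_size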
    class GetObjectLockConfigurationOutput
      attr_accessor object_lock_configuration: Types::ObjectLockConfiguration
      SENSITIVE: []
    end
    class GetObjectLockConfigurationRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetObjectOutput
      attr_accessor body: ::IO
      attr_accessor delete_marker: bool
      attr_accessor accept_ranges: ::String
      attr_accessor expiration: ::String
      attr_accessor restore: ::String
      attr_accessor last_modified: ::Time
      attr_accessor content_length: ::Integer
      attr_accessor etag: ::String
      attr_accessor checksum_crc32: ::String
      attr_accessor checksum_crc32c: ::String
      attr_accessor checksum_sha1: ::String
      attr_accessor checksum_sha256: ::String
      attr_accessor missing_meta: ::Integer
      attr_accessor version_id: ::String
      attr_accessor cache_control: ::String
      attr_accessor content_disposition: ::String
      attr_accessor content_encoding: ::String
      attr_accessor content_language: ::String
      attr_accessor content_range: ::String
      attr_accessor content_type: ::String
      attr_accessor expires: ::Time
      attr_accessor expires_string: ::String
      attr_accessor website_redirect_location: ::String
      attr_accessor server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse")
      attr_accessor metadata: ::Hash[::String, ::String]
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor sse_customer_key_md5: ::String
      attr_accessor ssekms_key_id: ::String
      attr_accessor bucket_key_enabled: bool
      attr_accessor storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")
      attr_accessor request_charged: ("requester")
      attr_accessor replication_status: ("COMPLETE" | "PENDING" | "FAILED" | "REPLICA" | "COMPLETED")
      attr_accessor parts_count: ::Integer
      attr_accessor tag_count: ::Integer
      attr_accessor object_lock_mode: ("GOVERNANCE" | "COMPLIANCE")
      attr_accessor object_lock_retain_until_date: ::Time
      attr_accessor object_lock_legal_hold_status: ("ON" | "OFF")
      SENSITIVE: [:ssekms_key_id]
    end
    class GetObjectRequest
      attr_accessor bucket: ::String
      attr_accessor if_match: ::String
      attr_accessor if_modified_since: ::Time
      attr_accessor if_none_match: ::String
      attr_accessor if_unmodified_since: ::Time
      attr_accessor key: ::String
      attr_accessor range: ::String
      attr_accessor response_cache_control: ::String
      attr_accessor response_content_disposition: ::String
      attr_accessor response_content_encoding: ::String
      attr_accessor response_content_language: ::String
      attr_accessor response_content_type: ::String
      attr_accessor response_expires: ::Time
      attr_accessor version_id: ::String
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor sse_customer_key: ::String
      attr_accessor sse_customer_key_md5: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor part_number: ::Integer
      attr_accessor expected_bucket_owner: ::String
      attr_accessor checksum_mode: ("ENABLED")
      SENSITIVE: [:sse_customer_key]
    end
    class GetObjectRetentionOutput
      attr_accessor retention: Types::ObjectLockRetention
      SENSITIVE: []
    end
    class GetObjectRetentionRequest
      attr_accessor bucket: ::String
      attr_accessor key: ::String
      attr_accessor version_id: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetObjectTaggingOutput
      attr_accessor version_id: ::String
      attr_accessor tag_set: ::Array[Types::Tag]
      SENSITIVE: []
    end
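    # Editor's note: GetObjectOutput#body is typed ::IO; a ranged read sketch
    # (hypothetical names):
    #
    #   resp = s3.get_object(bucket: "example-bucket", key: "big-object",
    #                        range: "bytes=0-1023")
    #   first_kb = resp.body.read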
    class GetObjectTaggingRequest
      attr_accessor bucket: ::String
      attr_accessor key: ::String
      attr_accessor version_id: ::String
      attr_accessor expected_bucket_owner: ::String
      attr_accessor request_payer: ("requester")
      SENSITIVE: []
    end
    class GetObjectTorrentOutput
      attr_accessor body: ::IO
      attr_accessor request_charged: ("requester")
      SENSITIVE: []
    end
    class GetObjectTorrentRequest
      attr_accessor bucket: ::String
      attr_accessor key: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GetPublicAccessBlockOutput
      attr_accessor public_access_block_configuration: Types::PublicAccessBlockConfiguration
      SENSITIVE: []
    end
    class GetPublicAccessBlockRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class GlacierJobParameters
      attr_accessor tier: ("Standard" | "Bulk" | "Expedited")
      SENSITIVE: []
    end
    class Grant
      attr_accessor grantee: Types::Grantee
      attr_accessor permission: ("FULL_CONTROL" | "WRITE" | "WRITE_ACP" | "READ" | "READ_ACP")
      SENSITIVE: []
    end
    class Grantee
      attr_accessor display_name: ::String
      attr_accessor email_address: ::String
      attr_accessor id: ::String
      attr_accessor type: ("CanonicalUser" | "AmazonCustomerByEmail" | "Group")
      attr_accessor uri: ::String
      SENSITIVE: []
    end
    class HeadBucketOutput
      attr_accessor bucket_location_type: ("AvailabilityZone")
      attr_accessor bucket_location_name: ::String
      attr_accessor bucket_region: ::String
      attr_accessor access_point_alias: bool
      SENSITIVE: []
    end
    class HeadBucketRequest
      attr_accessor bucket: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class HeadObjectOutput
      attr_accessor delete_marker: bool
      attr_accessor accept_ranges: ::String
      attr_accessor expiration: ::String
      attr_accessor restore: ::String
      attr_accessor archive_status: ("ARCHIVE_ACCESS" | "DEEP_ARCHIVE_ACCESS")
      attr_accessor last_modified: ::Time
      attr_accessor content_length: ::Integer
      attr_accessor checksum_crc32: ::String
      attr_accessor checksum_crc32c: ::String
      attr_accessor checksum_sha1: ::String
      attr_accessor checksum_sha256: ::String
      attr_accessor etag: ::String
      attr_accessor missing_meta: ::Integer
      attr_accessor version_id: ::String
      attr_accessor cache_control: ::String
      attr_accessor content_disposition: ::String
      attr_accessor content_encoding: ::String
      attr_accessor content_language: ::String
      attr_accessor content_type: ::String
      attr_accessor expires: ::Time
      attr_accessor expires_string: ::String
      attr_accessor website_redirect_location: ::String
      attr_accessor server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse")
      attr_accessor metadata: ::Hash[::String, ::String]
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor sse_customer_key_md5: ::String
      attr_accessor ssekms_key_id: ::String
      attr_accessor bucket_key_enabled: bool
      attr_accessor storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")
      attr_accessor request_charged: ("requester")
      attr_accessor replication_status: ("COMPLETE" | "PENDING" | "FAILED" | "REPLICA" | "COMPLETED")
      attr_accessor parts_count: ::Integer
      attr_accessor object_lock_mode: ("GOVERNANCE" | "COMPLIANCE")
      attr_accessor object_lock_retain_until_date: ::Time
      attr_accessor object_lock_legal_hold_status: ("ON" | "OFF")
      SENSITIVE: [:ssekms_key_id]
    end
    class HeadObjectRequest
      attr_accessor bucket: ::String
      attr_accessor if_match: ::String
      attr_accessor if_modified_since: ::Time
      attr_accessor if_none_match: ::String
      attr_accessor if_unmodified_since: ::Time
      attr_accessor key: ::String
      attr_accessor range: ::String
      attr_accessor version_id: ::String
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor sse_customer_key: ::String
      attr_accessor sse_customer_key_md5: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor part_number: ::Integer
      attr_accessor expected_bucket_owner: ::String
      attr_accessor checksum_mode: ("ENABLED")
      SENSITIVE: [:sse_customer_key]
    end
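    # Editor's note: HeadObjectRequest as an existence check (hypothetical
    # names). For HEAD requests there is no error body to map, so a missing
    # key is raised as Aws::S3::Errors::NotFound rather than NoSuchKey.
    #
    #   begin
    #     s3.head_object(bucket: "example-bucket", key: "maybe-missing")
    #   rescue Aws::S3::Errors::NotFound
    #     # object does not exist
    #   end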
    class IndexDocument
      attr_accessor suffix: ::String
      SENSITIVE: []
    end
    class Initiator
      attr_accessor id: ::String
      attr_accessor display_name: ::String
      SENSITIVE: []
    end
    class InputSerialization
      attr_accessor csv: Types::CSVInput
      attr_accessor compression_type: ("NONE" | "GZIP" | "BZIP2")
      attr_accessor json: Types::JSONInput
      attr_accessor parquet: Types::ParquetInput
      SENSITIVE: []
    end
    class IntelligentTieringAndOperator
      attr_accessor prefix: ::String
      attr_accessor tags: ::Array[Types::Tag]
      SENSITIVE: []
    end
    class IntelligentTieringConfiguration
      attr_accessor id: ::String
      attr_accessor filter: Types::IntelligentTieringFilter
      attr_accessor status: ("Enabled" | "Disabled")
      attr_accessor tierings: ::Array[Types::Tiering]
      SENSITIVE: []
    end
    class IntelligentTieringFilter
      attr_accessor prefix: ::String
      attr_accessor tag: Types::Tag
      attr_accessor and: Types::IntelligentTieringAndOperator
      SENSITIVE: []
    end
    class InvalidObjectState
      attr_accessor storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")
      attr_accessor access_tier: ("ARCHIVE_ACCESS" | "DEEP_ARCHIVE_ACCESS")
      SENSITIVE: []
    end
    class InventoryConfiguration
      attr_accessor destination: Types::InventoryDestination
      attr_accessor is_enabled: bool
      attr_accessor filter: Types::InventoryFilter
      attr_accessor id: ::String
      attr_accessor included_object_versions: ("All" | "Current")
      attr_accessor optional_fields: ::Array[("Size" | "LastModifiedDate" | "StorageClass" | "ETag" | "IsMultipartUploaded" | "ReplicationStatus" | "EncryptionStatus" | "ObjectLockRetainUntilDate" | "ObjectLockMode" | "ObjectLockLegalHoldStatus" | "IntelligentTieringAccessTier" | "BucketKeyStatus" | "ChecksumAlgorithm" | "ObjectAccessControlList" | "ObjectOwner")]
      attr_accessor schedule: Types::InventorySchedule
      SENSITIVE: []
    end
    class InventoryDestination
      attr_accessor s3_bucket_destination: Types::InventoryS3BucketDestination
      SENSITIVE: []
    end
    class InventoryEncryption
      attr_accessor sses3: Types::SSES3
      attr_accessor ssekms: Types::SSEKMS
      SENSITIVE: []
    end
    class InventoryFilter
      attr_accessor prefix: ::String
      SENSITIVE: []
    end
    class InventoryS3BucketDestination
      attr_accessor account_id: ::String
      attr_accessor bucket: ::String
      attr_accessor format: ("CSV" | "ORC" | "Parquet")
      attr_accessor prefix: ::String
      attr_accessor encryption: Types::InventoryEncryption
      SENSITIVE: []
    end
    class InventorySchedule
      attr_accessor frequency: ("Daily" | "Weekly")
      SENSITIVE: []
    end
    class JSONInput
      attr_accessor type: ("DOCUMENT" | "LINES")
      SENSITIVE: []
    end
    class JSONOutput
      attr_accessor record_delimiter: ::String
      SENSITIVE: []
    end
    class LambdaFunctionConfiguration
      attr_accessor id: ::String
      attr_accessor lambda_function_arn: ::String
      attr_accessor events: ::Array[("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")]
      attr_accessor filter: Types::NotificationConfigurationFilter
      SENSITIVE: []
    end
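    # Editor's note: the event unions above feed bucket notifications; a
    # sketch wiring one Lambda target (hypothetical bucket and function ARN):
    #
    #   s3.put_bucket_notification_configuration(
    #     bucket: "example-bucket",
    #     notification_configuration: {
    #       lambda_function_configurations: [{
    #         lambda_function_arn: "arn:aws:lambda:us-east-1:123456789012:function:example",
    #         events: ["s3:ObjectCreated:*"]
    #       }]
    #     }
    #   )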
| "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")] attr_accessor filter: Types::NotificationConfigurationFilter SENSITIVE: [] end class LifecycleConfiguration attr_accessor rules: ::Array[Types::Rule] SENSITIVE: [] end class LifecycleExpiration attr_accessor date: ::Time attr_accessor days: ::Integer attr_accessor expired_object_delete_marker: bool SENSITIVE: [] end class LifecycleRule attr_accessor expiration: Types::LifecycleExpiration attr_accessor id: ::String attr_accessor prefix: ::String attr_accessor filter: Types::LifecycleRuleFilter attr_accessor status: ("Enabled" | "Disabled") attr_accessor transitions: ::Array[Types::Transition] attr_accessor noncurrent_version_transitions: ::Array[Types::NoncurrentVersionTransition] attr_accessor noncurrent_version_expiration: Types::NoncurrentVersionExpiration attr_accessor abort_incomplete_multipart_upload: Types::AbortIncompleteMultipartUpload SENSITIVE: [] end class LifecycleRuleAndOperator attr_accessor prefix: ::String attr_accessor tags: ::Array[Types::Tag] attr_accessor object_size_greater_than: ::Integer attr_accessor object_size_less_than: ::Integer SENSITIVE: [] end class LifecycleRuleFilter attr_accessor prefix: ::String attr_accessor tag: Types::Tag attr_accessor object_size_greater_than: ::Integer attr_accessor object_size_less_than: ::Integer attr_accessor and: Types::LifecycleRuleAndOperator SENSITIVE: [] end class ListBucketAnalyticsConfigurationsOutput attr_accessor is_truncated: bool attr_accessor continuation_token: ::String attr_accessor next_continuation_token: ::String attr_accessor analytics_configuration_list: ::Array[Types::AnalyticsConfiguration] SENSITIVE: [] end class ListBucketAnalyticsConfigurationsRequest attr_accessor bucket: ::String attr_accessor continuation_token: ::String attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class ListBucketIntelligentTieringConfigurationsOutput attr_accessor is_truncated: bool attr_accessor continuation_token: ::String attr_accessor next_continuation_token: ::String attr_accessor intelligent_tiering_configuration_list: ::Array[Types::IntelligentTieringConfiguration] SENSITIVE: [] end class ListBucketIntelligentTieringConfigurationsRequest attr_accessor bucket: ::String attr_accessor continuation_token: ::String SENSITIVE: [] end class ListBucketInventoryConfigurationsOutput attr_accessor continuation_token: ::String attr_accessor inventory_configuration_list: ::Array[Types::InventoryConfiguration] attr_accessor is_truncated: bool attr_accessor next_continuation_token: ::String SENSITIVE: [] end class ListBucketInventoryConfigurationsRequest attr_accessor bucket: ::String attr_accessor continuation_token: ::String attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class ListBucketMetricsConfigurationsOutput attr_accessor is_truncated: bool attr_accessor continuation_token: 
    class ListBucketMetricsConfigurationsRequest
      attr_accessor bucket: ::String
      attr_accessor continuation_token: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class ListBucketsOutput
      attr_accessor buckets: ::Array[Types::Bucket]
      attr_accessor owner: Types::Owner
      SENSITIVE: []
    end
    class ListDirectoryBucketsOutput
      attr_accessor buckets: ::Array[Types::Bucket]
      attr_accessor continuation_token: ::String
      SENSITIVE: []
    end
    class ListDirectoryBucketsRequest
      attr_accessor continuation_token: ::String
      attr_accessor max_directory_buckets: ::Integer
      SENSITIVE: []
    end
    class ListMultipartUploadsOutput
      attr_accessor bucket: ::String
      attr_accessor key_marker: ::String
      attr_accessor upload_id_marker: ::String
      attr_accessor next_key_marker: ::String
      attr_accessor prefix: ::String
      attr_accessor delimiter: ::String
      attr_accessor next_upload_id_marker: ::String
      attr_accessor max_uploads: ::Integer
      attr_accessor is_truncated: bool
      attr_accessor uploads: ::Array[Types::MultipartUpload]
      attr_accessor common_prefixes: ::Array[Types::CommonPrefix]
      attr_accessor encoding_type: ("url")
      attr_accessor request_charged: ("requester")
      SENSITIVE: []
    end
    class ListMultipartUploadsRequest
      attr_accessor bucket: ::String
      attr_accessor delimiter: ::String
      attr_accessor encoding_type: ("url")
      attr_accessor key_marker: ::String
      attr_accessor max_uploads: ::Integer
      attr_accessor prefix: ::String
      attr_accessor upload_id_marker: ::String
      attr_accessor expected_bucket_owner: ::String
      attr_accessor request_payer: ("requester")
      SENSITIVE: []
    end
    class ListObjectVersionsOutput
      attr_accessor is_truncated: bool
      attr_accessor key_marker: ::String
      attr_accessor version_id_marker: ::String
      attr_accessor next_key_marker: ::String
      attr_accessor next_version_id_marker: ::String
      attr_accessor versions: ::Array[Types::ObjectVersion]
      attr_accessor delete_markers: ::Array[Types::DeleteMarkerEntry]
      attr_accessor name: ::String
      attr_accessor prefix: ::String
      attr_accessor delimiter: ::String
      attr_accessor max_keys: ::Integer
      attr_accessor common_prefixes: ::Array[Types::CommonPrefix]
      attr_accessor encoding_type: ("url")
      attr_accessor request_charged: ("requester")
      SENSITIVE: []
    end
    class ListObjectVersionsRequest
      attr_accessor bucket: ::String
      attr_accessor delimiter: ::String
      attr_accessor encoding_type: ("url")
      attr_accessor key_marker: ::String
      attr_accessor max_keys: ::Integer
      attr_accessor prefix: ::String
      attr_accessor version_id_marker: ::String
      attr_accessor expected_bucket_owner: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor optional_object_attributes: ::Array[("RestoreStatus")]
      SENSITIVE: []
    end
    class ListObjectsOutput
      attr_accessor is_truncated: bool
      attr_accessor marker: ::String
      attr_accessor next_marker: ::String
      attr_accessor contents: ::Array[Types::Object]
      attr_accessor name: ::String
      attr_accessor prefix: ::String
      attr_accessor delimiter: ::String
      attr_accessor max_keys: ::Integer
      attr_accessor common_prefixes: ::Array[Types::CommonPrefix]
      attr_accessor encoding_type: ("url")
      attr_accessor request_charged: ("requester")
      SENSITIVE: []
    end
    class ListObjectsRequest
      attr_accessor bucket: ::String
      attr_accessor delimiter: ::String
      attr_accessor encoding_type: ("url")
      attr_accessor marker: ::String
      attr_accessor max_keys: ::Integer
      attr_accessor prefix: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor expected_bucket_owner: ::String
      attr_accessor optional_object_attributes: ::Array[("RestoreStatus")]
      SENSITIVE: []
    end
    class ListObjectsV2Output
      attr_accessor is_truncated: bool
      attr_accessor contents: ::Array[Types::Object]
      attr_accessor name: ::String
      attr_accessor prefix: ::String
      attr_accessor delimiter: ::String
      attr_accessor max_keys: ::Integer
      attr_accessor common_prefixes: ::Array[Types::CommonPrefix]
      attr_accessor encoding_type: ("url")
      attr_accessor key_count: ::Integer
      attr_accessor continuation_token: ::String
      attr_accessor next_continuation_token: ::String
      attr_accessor start_after: ::String
      attr_accessor request_charged: ("requester")
      SENSITIVE: []
    end
    class ListObjectsV2Request
      attr_accessor bucket: ::String
      attr_accessor delimiter: ::String
      attr_accessor encoding_type: ("url")
      attr_accessor max_keys: ::Integer
      attr_accessor prefix: ::String
      attr_accessor continuation_token: ::String
      attr_accessor fetch_owner: bool
      attr_accessor start_after: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor expected_bucket_owner: ::String
      attr_accessor optional_object_attributes: ::Array[("RestoreStatus")]
      SENSITIVE: []
    end
    class ListPartsOutput
      attr_accessor abort_date: ::Time
      attr_accessor abort_rule_id: ::String
      attr_accessor bucket: ::String
      attr_accessor key: ::String
      attr_accessor upload_id: ::String
      attr_accessor part_number_marker: ::Integer
      attr_accessor next_part_number_marker: ::Integer
      attr_accessor max_parts: ::Integer
      attr_accessor is_truncated: bool
      attr_accessor parts: ::Array[Types::Part]
      attr_accessor initiator: Types::Initiator
      attr_accessor owner: Types::Owner
      attr_accessor storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")
      attr_accessor request_charged: ("requester")
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      SENSITIVE: []
    end
    class ListPartsRequest
      attr_accessor bucket: ::String
      attr_accessor key: ::String
      attr_accessor max_parts: ::Integer
      attr_accessor part_number_marker: ::Integer
      attr_accessor upload_id: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor expected_bucket_owner: ::String
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor sse_customer_key: ::String
      attr_accessor sse_customer_key_md5: ::String
      SENSITIVE: [:sse_customer_key]
    end
    class LocationInfo
      attr_accessor type: ("AvailabilityZone")
      attr_accessor name: ::String
      SENSITIVE: []
    end
    class LoggingEnabled
      attr_accessor target_bucket: ::String
      attr_accessor target_grants: ::Array[Types::TargetGrant]
      attr_accessor target_prefix: ::String
      attr_accessor target_object_key_format: Types::TargetObjectKeyFormat
      SENSITIVE: []
    end
    class MetadataEntry
      attr_accessor name: ::String
      attr_accessor value: ::String
      SENSITIVE: []
    end
    class Metrics
      attr_accessor status: ("Enabled" | "Disabled")
      attr_accessor event_threshold: Types::ReplicationTimeValue
      SENSITIVE: []
    end
    class MetricsAndOperator
      attr_accessor prefix: ::String
      attr_accessor tags: ::Array[Types::Tag]
      attr_accessor access_point_arn: ::String
      SENSITIVE: []
    end
    class MetricsConfiguration
      attr_accessor id: ::String
      attr_accessor filter: Types::MetricsFilter
      SENSITIVE: []
    end
    class MetricsFilter
      attr_accessor prefix: ::String
      attr_accessor tag: Types::Tag
      attr_accessor access_point_arn: ::String
      attr_accessor and: Types::MetricsAndOperator
      SENSITIVE: []
    end
    class MultipartUpload
      attr_accessor upload_id: ::String
      attr_accessor key: ::String
      attr_accessor initiated: ::Time
      attr_accessor storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")
      attr_accessor owner: Types::Owner
      attr_accessor initiator: Types::Initiator
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      SENSITIVE: []
    end
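    # Editor's note: ListObjectsV2 paging through continuation_token /
    # next_continuation_token (hypothetical bucket; the SDK's built-in
    # response pagination can drive the same loop for you):
    #
    #   token = nil
    #   loop do
    #     resp = s3.list_objects_v2(bucket: "example-bucket", prefix: "logs/",
    #                               continuation_token: token)
    #     resp.contents.each { |obj| puts obj.key }
    #     break unless resp.is_truncated
    #     token = resp.next_continuation_token
    #   end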
    class NoSuchBucket < Aws::EmptyStructure
    end
    class NoSuchKey < Aws::EmptyStructure
    end
    class NoSuchUpload < Aws::EmptyStructure
    end
    class NoncurrentVersionExpiration
      attr_accessor noncurrent_days: ::Integer
      attr_accessor newer_noncurrent_versions: ::Integer
      SENSITIVE: []
    end
    class NoncurrentVersionTransition
      attr_accessor noncurrent_days: ::Integer
      attr_accessor storage_class: ("GLACIER" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "DEEP_ARCHIVE" | "GLACIER_IR")
      attr_accessor newer_noncurrent_versions: ::Integer
      SENSITIVE: []
    end
    class NotificationConfiguration
      attr_accessor topic_configurations: ::Array[Types::TopicConfiguration]
      attr_accessor queue_configurations: ::Array[Types::QueueConfiguration]
      attr_accessor lambda_function_configurations: ::Array[Types::LambdaFunctionConfiguration]
      attr_accessor event_bridge_configuration: Types::EventBridgeConfiguration
      SENSITIVE: []
    end
    class NotificationConfigurationDeprecated
      attr_accessor topic_configuration: Types::TopicConfigurationDeprecated
      attr_accessor queue_configuration: Types::QueueConfigurationDeprecated
      attr_accessor cloud_function_configuration: Types::CloudFunctionConfiguration
      SENSITIVE: []
    end
    class NotificationConfigurationFilter
      attr_accessor key: Types::S3KeyFilter
      SENSITIVE: []
    end
    class Object
      attr_accessor key: ::String
      attr_accessor last_modified: ::Time
      attr_accessor etag: ::String
      attr_accessor checksum_algorithm: ::Array[("CRC32" | "CRC32C" | "SHA1" | "SHA256")]
      attr_accessor size: ::Integer
      attr_accessor storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "GLACIER" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")
      attr_accessor owner: Types::Owner
      attr_accessor restore_status: Types::RestoreStatus
      SENSITIVE: []
    end
    class ObjectAlreadyInActiveTierError < Aws::EmptyStructure
    end
    class ObjectIdentifier
      attr_accessor key: ::String
      attr_accessor version_id: ::String
      SENSITIVE: []
    end
    class ObjectLockConfiguration
      attr_accessor object_lock_enabled: ("Enabled")
      attr_accessor rule: Types::ObjectLockRule
      SENSITIVE: []
    end
    class ObjectLockLegalHold
      attr_accessor status: ("ON" | "OFF")
      SENSITIVE: []
    end
    class ObjectLockRetention
      attr_accessor mode: ("GOVERNANCE" | "COMPLIANCE")
      attr_accessor retain_until_date: ::Time
      SENSITIVE: []
    end
    class ObjectLockRule
      attr_accessor default_retention: Types::DefaultRetention
      SENSITIVE: []
    end
    class ObjectNotInActiveTierError < Aws::EmptyStructure
    end
    class ObjectPart
      attr_accessor part_number: ::Integer
      attr_accessor size: ::Integer
      attr_accessor checksum_crc32: ::String
      attr_accessor checksum_crc32c: ::String
      attr_accessor checksum_sha1: ::String
      attr_accessor checksum_sha256: ::String
      SENSITIVE: []
    end
    class ObjectVersion
      attr_accessor etag: ::String
      attr_accessor checksum_algorithm: ::Array[("CRC32" | "CRC32C" | "SHA1" | "SHA256")]
      attr_accessor size: ::Integer
      attr_accessor storage_class: ("STANDARD")
      attr_accessor key: ::String
      attr_accessor version_id: ::String
      attr_accessor is_latest: bool
      attr_accessor last_modified: ::Time
      attr_accessor owner: Types::Owner
      attr_accessor restore_status: Types::RestoreStatus
      SENSITIVE: []
    end
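    # Editor's note: ObjectLockRetention in use (hypothetical names);
    # GOVERNANCE-mode locks can later be lifted by callers permitted to use
    # bypass_governance_retention.
    #
    #   s3.put_object_retention(
    #     bucket: "example-bucket", key: "record.csv",
    #     retention: { mode: "GOVERNANCE", retain_until_date: Time.now + 86_400 }
    #   )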
    class OutputLocation
      attr_accessor s3: Types::S3Location
      SENSITIVE: []
    end
    class OutputSerialization
      attr_accessor csv: Types::CSVOutput
      attr_accessor json: Types::JSONOutput
      SENSITIVE: []
    end
    class Owner
      attr_accessor display_name: ::String
      attr_accessor id: ::String
      SENSITIVE: []
    end
    class OwnershipControls
      attr_accessor rules: ::Array[Types::OwnershipControlsRule]
      SENSITIVE: []
    end
    class OwnershipControlsRule
      attr_accessor object_ownership: ("BucketOwnerPreferred" | "ObjectWriter" | "BucketOwnerEnforced")
      SENSITIVE: []
    end
    class ParquetInput < Aws::EmptyStructure
    end
    class Part
      attr_accessor part_number: ::Integer
      attr_accessor last_modified: ::Time
      attr_accessor etag: ::String
      attr_accessor size: ::Integer
      attr_accessor checksum_crc32: ::String
      attr_accessor checksum_crc32c: ::String
      attr_accessor checksum_sha1: ::String
      attr_accessor checksum_sha256: ::String
      SENSITIVE: []
    end
    class PartitionedPrefix
      attr_accessor partition_date_source: ("EventTime" | "DeliveryTime")
      SENSITIVE: []
    end
    class PolicyStatus
      attr_accessor is_public: bool
      SENSITIVE: []
    end
    class Progress
      attr_accessor bytes_scanned: ::Integer
      attr_accessor bytes_processed: ::Integer
      attr_accessor bytes_returned: ::Integer
      SENSITIVE: []
    end
    class ProgressEvent
      attr_accessor details: Types::Progress
      attr_accessor event_type: untyped
      SENSITIVE: []
    end
    class PublicAccessBlockConfiguration
      attr_accessor block_public_acls: bool
      attr_accessor ignore_public_acls: bool
      attr_accessor block_public_policy: bool
      attr_accessor restrict_public_buckets: bool
      SENSITIVE: []
    end
    class PutBucketAccelerateConfigurationRequest
      attr_accessor bucket: ::String
      attr_accessor accelerate_configuration: Types::AccelerateConfiguration
      attr_accessor expected_bucket_owner: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      SENSITIVE: []
    end
    class PutBucketAclRequest
      attr_accessor acl: ("private" | "public-read" | "public-read-write" | "authenticated-read")
      attr_accessor access_control_policy: Types::AccessControlPolicy
      attr_accessor bucket: ::String
      attr_accessor content_md5: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      attr_accessor grant_full_control: ::String
      attr_accessor grant_read: ::String
      attr_accessor grant_read_acp: ::String
      attr_accessor grant_write: ::String
      attr_accessor grant_write_acp: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class PutBucketAnalyticsConfigurationRequest
      attr_accessor bucket: ::String
      attr_accessor id: ::String
      attr_accessor analytics_configuration: Types::AnalyticsConfiguration
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class PutBucketCorsRequest
      attr_accessor bucket: ::String
      attr_accessor cors_configuration: Types::CORSConfiguration
      attr_accessor content_md5: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class PutBucketEncryptionRequest
      attr_accessor bucket: ::String
      attr_accessor content_md5: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      attr_accessor server_side_encryption_configuration: Types::ServerSideEncryptionConfiguration
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class PutBucketIntelligentTieringConfigurationRequest
      attr_accessor bucket: ::String
      attr_accessor id: ::String
      attr_accessor intelligent_tiering_configuration: Types::IntelligentTieringConfiguration
      SENSITIVE: []
    end
    class PutBucketInventoryConfigurationRequest
      attr_accessor bucket: ::String
      attr_accessor id: ::String
      attr_accessor inventory_configuration: Types::InventoryConfiguration
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
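    # Editor's note: PublicAccessBlockConfiguration sketch blocking all public
    # access (hypothetical bucket):
    #
    #   s3.put_public_access_block(
    #     bucket: "example-bucket",
    #     public_access_block_configuration: {
    #       block_public_acls: true, ignore_public_acls: true,
    #       block_public_policy: true, restrict_public_buckets: true
    #     }
    #   )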
    class PutBucketLifecycleConfigurationRequest
      attr_accessor bucket: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      attr_accessor lifecycle_configuration: Types::BucketLifecycleConfiguration
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class PutBucketLifecycleRequest
      attr_accessor bucket: ::String
      attr_accessor content_md5: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      attr_accessor lifecycle_configuration: Types::LifecycleConfiguration
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class PutBucketLoggingRequest
      attr_accessor bucket: ::String
      attr_accessor bucket_logging_status: Types::BucketLoggingStatus
      attr_accessor content_md5: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class PutBucketMetricsConfigurationRequest
      attr_accessor bucket: ::String
      attr_accessor id: ::String
      attr_accessor metrics_configuration: Types::MetricsConfiguration
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class PutBucketNotificationConfigurationRequest
      attr_accessor bucket: ::String
      attr_accessor notification_configuration: Types::NotificationConfiguration
      attr_accessor expected_bucket_owner: ::String
      attr_accessor skip_destination_validation: bool
      SENSITIVE: []
    end
    class PutBucketNotificationRequest
      attr_accessor bucket: ::String
      attr_accessor content_md5: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      attr_accessor notification_configuration: Types::NotificationConfigurationDeprecated
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class PutBucketOwnershipControlsRequest
      attr_accessor bucket: ::String
      attr_accessor content_md5: ::String
      attr_accessor expected_bucket_owner: ::String
      attr_accessor ownership_controls: Types::OwnershipControls
      SENSITIVE: []
    end
    class PutBucketPolicyRequest
      attr_accessor bucket: ::String
      attr_accessor content_md5: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      attr_accessor confirm_remove_self_bucket_access: bool
      attr_accessor policy: ::IO
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class PutBucketReplicationRequest
      attr_accessor bucket: ::String
      attr_accessor content_md5: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      attr_accessor replication_configuration: Types::ReplicationConfiguration
      attr_accessor token: ::String
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class PutBucketRequestPaymentRequest
      attr_accessor bucket: ::String
      attr_accessor content_md5: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      attr_accessor request_payment_configuration: Types::RequestPaymentConfiguration
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class PutBucketTaggingRequest
      attr_accessor bucket: ::String
      attr_accessor content_md5: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      attr_accessor tagging: Types::Tagging
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
    class PutBucketVersioningRequest
      attr_accessor bucket: ::String
      attr_accessor content_md5: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      attr_accessor mfa: ::String
      attr_accessor versioning_configuration: Types::VersioningConfiguration
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end
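    # Editor's note: VersioningConfiguration sketch (hypothetical bucket;
    # `mfa` is only needed when toggling MFA delete):
    #
    #   s3.put_bucket_versioning(
    #     bucket: "example-bucket",
    #     versioning_configuration: { status: "Enabled" }
    #   )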
"SHA1" | "SHA256") attr_accessor mfa: ::String attr_accessor versioning_configuration: Types::VersioningConfiguration attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class PutBucketWebsiteRequest attr_accessor bucket: ::String attr_accessor content_md5: ::String attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") attr_accessor website_configuration: Types::WebsiteConfiguration attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class PutObjectAclOutput attr_accessor request_charged: ("requester") SENSITIVE: [] end class PutObjectAclRequest attr_accessor acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control") attr_accessor access_control_policy: Types::AccessControlPolicy attr_accessor bucket: ::String attr_accessor content_md5: ::String attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") attr_accessor grant_full_control: ::String attr_accessor grant_read: ::String attr_accessor grant_read_acp: ::String attr_accessor grant_write: ::String attr_accessor grant_write_acp: ::String attr_accessor key: ::String attr_accessor request_payer: ("requester") attr_accessor version_id: ::String attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class PutObjectLegalHoldOutput attr_accessor request_charged: ("requester") SENSITIVE: [] end class PutObjectLegalHoldRequest attr_accessor bucket: ::String attr_accessor key: ::String attr_accessor legal_hold: Types::ObjectLockLegalHold attr_accessor request_payer: ("requester") attr_accessor version_id: ::String attr_accessor content_md5: ::String attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class PutObjectLockConfigurationOutput attr_accessor request_charged: ("requester") SENSITIVE: [] end class PutObjectLockConfigurationRequest attr_accessor bucket: ::String attr_accessor object_lock_configuration: Types::ObjectLockConfiguration attr_accessor request_payer: ("requester") attr_accessor token: ::String attr_accessor content_md5: ::String attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class PutObjectOutput attr_accessor expiration: ::String attr_accessor etag: ::String attr_accessor checksum_crc32: ::String attr_accessor checksum_crc32c: ::String attr_accessor checksum_sha1: ::String attr_accessor checksum_sha256: ::String attr_accessor server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse") attr_accessor version_id: ::String attr_accessor sse_customer_algorithm: ::String attr_accessor sse_customer_key_md5: ::String attr_accessor ssekms_key_id: ::String attr_accessor ssekms_encryption_context: ::String attr_accessor bucket_key_enabled: bool attr_accessor request_charged: ("requester") SENSITIVE: [:ssekms_key_id, :ssekms_encryption_context] end class PutObjectRequest attr_accessor acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control") attr_accessor body: ::IO attr_accessor bucket: ::String attr_accessor cache_control: ::String attr_accessor content_disposition: ::String attr_accessor content_encoding: ::String attr_accessor content_language: ::String attr_accessor content_length: ::Integer attr_accessor content_md5: ::String attr_accessor content_type: ::String attr_accessor checksum_algorithm: ("CRC32" | 
"CRC32C" | "SHA1" | "SHA256") attr_accessor checksum_crc32: ::String attr_accessor checksum_crc32c: ::String attr_accessor checksum_sha1: ::String attr_accessor checksum_sha256: ::String attr_accessor expires: ::Time attr_accessor grant_full_control: ::String attr_accessor grant_read: ::String attr_accessor grant_read_acp: ::String attr_accessor grant_write_acp: ::String attr_accessor key: ::String attr_accessor metadata: ::Hash[::String, ::String] attr_accessor server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse") attr_accessor storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE") attr_accessor website_redirect_location: ::String attr_accessor sse_customer_algorithm: ::String attr_accessor sse_customer_key: ::String attr_accessor sse_customer_key_md5: ::String attr_accessor ssekms_key_id: ::String attr_accessor ssekms_encryption_context: ::String attr_accessor bucket_key_enabled: bool attr_accessor request_payer: ("requester") attr_accessor tagging: ::String attr_accessor object_lock_mode: ("GOVERNANCE" | "COMPLIANCE") attr_accessor object_lock_retain_until_date: ::Time attr_accessor object_lock_legal_hold_status: ("ON" | "OFF") attr_accessor expected_bucket_owner: ::String SENSITIVE: [:sse_customer_key, :ssekms_key_id, :ssekms_encryption_context] end class PutObjectRetentionOutput attr_accessor request_charged: ("requester") SENSITIVE: [] end class PutObjectRetentionRequest attr_accessor bucket: ::String attr_accessor key: ::String attr_accessor retention: Types::ObjectLockRetention attr_accessor request_payer: ("requester") attr_accessor version_id: ::String attr_accessor bypass_governance_retention: bool attr_accessor content_md5: ::String attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class PutObjectTaggingOutput attr_accessor version_id: ::String SENSITIVE: [] end class PutObjectTaggingRequest attr_accessor bucket: ::String attr_accessor key: ::String attr_accessor version_id: ::String attr_accessor content_md5: ::String attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") attr_accessor tagging: Types::Tagging attr_accessor expected_bucket_owner: ::String attr_accessor request_payer: ("requester") SENSITIVE: [] end class PutPublicAccessBlockRequest attr_accessor bucket: ::String attr_accessor content_md5: ::String attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256") attr_accessor public_access_block_configuration: Types::PublicAccessBlockConfiguration attr_accessor expected_bucket_owner: ::String SENSITIVE: [] end class QueueConfiguration attr_accessor id: ::String attr_accessor queue_arn: ::String attr_accessor events: ::Array[("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | 
"s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")] attr_accessor filter: Types::NotificationConfigurationFilter SENSITIVE: [] end class QueueConfigurationDeprecated attr_accessor id: ::String attr_accessor event: ("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete") attr_accessor events: ::Array[("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")] attr_accessor queue: ::String SENSITIVE: [] end class RecordsEvent attr_accessor payload: ::IO attr_accessor event_type: untyped SENSITIVE: [] end class Redirect attr_accessor host_name: ::String attr_accessor http_redirect_code: ::String attr_accessor protocol: ("http" | "https") attr_accessor replace_key_prefix_with: ::String attr_accessor replace_key_with: ::String SENSITIVE: [] end class RedirectAllRequestsTo attr_accessor host_name: ::String attr_accessor protocol: ("http" | "https") SENSITIVE: [] end class ReplicaModifications attr_accessor status: ("Enabled" | "Disabled") SENSITIVE: [] end class ReplicationConfiguration attr_accessor role: ::String attr_accessor rules: ::Array[Types::ReplicationRule] SENSITIVE: [] end class ReplicationRule attr_accessor id: ::String attr_accessor priority: ::Integer attr_accessor prefix: ::String attr_accessor filter: Types::ReplicationRuleFilter attr_accessor status: ("Enabled" | "Disabled") attr_accessor source_selection_criteria: Types::SourceSelectionCriteria attr_accessor existing_object_replication: Types::ExistingObjectReplication attr_accessor destination: Types::Destination attr_accessor delete_marker_replication: Types::DeleteMarkerReplication SENSITIVE: [] end class ReplicationRuleAndOperator attr_accessor prefix: ::String attr_accessor tags: ::Array[Types::Tag] SENSITIVE: [] end class ReplicationRuleFilter attr_accessor prefix: ::String attr_accessor tag: Types::Tag attr_accessor and: Types::ReplicationRuleAndOperator SENSITIVE: [] end class 
    class ReplicationTime
      attr_accessor status: ("Enabled" | "Disabled")
      attr_accessor time: Types::ReplicationTimeValue
      SENSITIVE: []
    end

    class ReplicationTimeValue
      attr_accessor minutes: ::Integer
      SENSITIVE: []
    end

    class RequestPaymentConfiguration
      attr_accessor payer: ("Requester" | "BucketOwner")
      SENSITIVE: []
    end

    class RequestProgress
      attr_accessor enabled: bool
      SENSITIVE: []
    end

    class RestoreObjectOutput
      attr_accessor request_charged: ("requester")
      attr_accessor restore_output_path: ::String
      SENSITIVE: []
    end

    class RestoreObjectRequest
      attr_accessor bucket: ::String
      attr_accessor key: ::String
      attr_accessor version_id: ::String
      attr_accessor restore_request: Types::RestoreRequest
      attr_accessor request_payer: ("requester")
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: []
    end

    class RestoreRequest
      attr_accessor days: ::Integer
      attr_accessor glacier_job_parameters: Types::GlacierJobParameters
      attr_accessor type: ("SELECT")
      attr_accessor tier: ("Standard" | "Bulk" | "Expedited")
      attr_accessor description: ::String
      attr_accessor select_parameters: Types::SelectParameters
      attr_accessor output_location: Types::OutputLocation
      SENSITIVE: []
    end

    class RestoreStatus
      attr_accessor is_restore_in_progress: bool
      attr_accessor restore_expiry_date: ::Time
      SENSITIVE: []
    end

    class RoutingRule
      attr_accessor condition: Types::Condition
      attr_accessor redirect: Types::Redirect
      SENSITIVE: []
    end

    class Rule
      attr_accessor expiration: Types::LifecycleExpiration
      attr_accessor id: ::String
      attr_accessor prefix: ::String
      attr_accessor status: ("Enabled" | "Disabled")
      attr_accessor transition: Types::Transition
      attr_accessor noncurrent_version_transition: Types::NoncurrentVersionTransition
      attr_accessor noncurrent_version_expiration: Types::NoncurrentVersionExpiration
      attr_accessor abort_incomplete_multipart_upload: Types::AbortIncompleteMultipartUpload
      SENSITIVE: []
    end

    class S3KeyFilter
      attr_accessor filter_rules: ::Array[Types::FilterRule]
      SENSITIVE: []
    end

    class S3Location
      attr_accessor bucket_name: ::String
      attr_accessor prefix: ::String
      attr_accessor encryption: Types::Encryption
      attr_accessor canned_acl: ("private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control")
      attr_accessor access_control_list: ::Array[Types::Grant]
      attr_accessor tagging: Types::Tagging
      attr_accessor user_metadata: ::Array[Types::MetadataEntry]
      attr_accessor storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")
      SENSITIVE: []
    end

    class SSEKMS
      attr_accessor key_id: ::String
      SENSITIVE: [:key_id]
    end

    class SSES3 < Aws::EmptyStructure
    end

    class ScanRange
      attr_accessor start: ::Integer
      attr_accessor end: ::Integer
      SENSITIVE: []
    end

    class SelectObjectContentOutput
      attr_accessor payload: Types::SelectObjectContentEventStream
      SENSITIVE: []
    end

    class SelectObjectContentRequest
      attr_accessor bucket: ::String
      attr_accessor key: ::String
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor sse_customer_key: ::String
      attr_accessor sse_customer_key_md5: ::String
      attr_accessor expression: ::String
      attr_accessor expression_type: ("SQL")
      attr_accessor request_progress: Types::RequestProgress
      attr_accessor input_serialization: Types::InputSerialization
      attr_accessor output_serialization: Types::OutputSerialization
      attr_accessor scan_range: Types::ScanRange
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: [:sse_customer_key]
    end
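    # Illustrative sketch (editor-added, not generated code): a
    # SelectObjectContentRequest and its event-stream output. Assumes `s3` is
    # a configured Aws::S3::Client and the object is a CSV with a header row;
    # bucket, key, and the query are placeholders. Records arrive through the
    # RecordsEvent payload and byte counts through StatsEvent.
    #
    #   s3.select_object_content(
    #     bucket: "example-bucket",
    #     key: "reports/2024-01.csv",
    #     expression: "SELECT s.name FROM S3Object s",
    #     expression_type: "SQL",
    #     input_serialization: { csv: { file_header_info: "USE" } },
    #     output_serialization: { csv: {} }
    #   ) do |stream|
    #     stream.on_records_event { |event| print event.payload.read }
    #     stream.on_stats_event   { |event| warn "scanned #{event.details.bytes_scanned} bytes" }
    #   end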
    class SelectParameters
      attr_accessor input_serialization: Types::InputSerialization
      attr_accessor expression_type: ("SQL")
      attr_accessor expression: ::String
      attr_accessor output_serialization: Types::OutputSerialization
      SENSITIVE: []
    end

    class ServerSideEncryptionByDefault
      attr_accessor sse_algorithm: ("AES256" | "aws:kms" | "aws:kms:dsse")
      attr_accessor kms_master_key_id: ::String
      SENSITIVE: [:kms_master_key_id]
    end

    class ServerSideEncryptionConfiguration
      attr_accessor rules: ::Array[Types::ServerSideEncryptionRule]
      SENSITIVE: []
    end

    class ServerSideEncryptionRule
      attr_accessor apply_server_side_encryption_by_default: Types::ServerSideEncryptionByDefault
      attr_accessor bucket_key_enabled: bool
      SENSITIVE: []
    end

    class SessionCredentials
      attr_accessor access_key_id: ::String
      attr_accessor secret_access_key: ::String
      attr_accessor session_token: ::String
      attr_accessor expiration: ::Time
      SENSITIVE: [:secret_access_key, :session_token]
    end

    class SimplePrefix < Aws::EmptyStructure
    end

    class SourceSelectionCriteria
      attr_accessor sse_kms_encrypted_objects: Types::SseKmsEncryptedObjects
      attr_accessor replica_modifications: Types::ReplicaModifications
      SENSITIVE: []
    end

    class SseKmsEncryptedObjects
      attr_accessor status: ("Enabled" | "Disabled")
      SENSITIVE: []
    end

    class Stats
      attr_accessor bytes_scanned: ::Integer
      attr_accessor bytes_processed: ::Integer
      attr_accessor bytes_returned: ::Integer
      SENSITIVE: []
    end

    class StatsEvent
      attr_accessor details: Types::Stats
      attr_accessor event_type: untyped
      SENSITIVE: []
    end

    class StorageClassAnalysis
      attr_accessor data_export: Types::StorageClassAnalysisDataExport
      SENSITIVE: []
    end

    class StorageClassAnalysisDataExport
      attr_accessor output_schema_version: ("V_1")
      attr_accessor destination: Types::AnalyticsExportDestination
      SENSITIVE: []
    end

    class Tag
      attr_accessor key: ::String
      attr_accessor value: ::String
      SENSITIVE: []
    end

    class Tagging
      attr_accessor tag_set: ::Array[Types::Tag]
      SENSITIVE: []
    end

    class TargetGrant
      attr_accessor grantee: Types::Grantee
      attr_accessor permission: ("FULL_CONTROL" | "READ" | "WRITE")
      SENSITIVE: []
    end

    class TargetObjectKeyFormat
      attr_accessor simple_prefix: Types::SimplePrefix
      attr_accessor partitioned_prefix: Types::PartitionedPrefix
      SENSITIVE: []
    end

    class Tiering
      attr_accessor days: ::Integer
      attr_accessor access_tier: ("ARCHIVE_ACCESS" | "DEEP_ARCHIVE_ACCESS")
      SENSITIVE: []
    end

    class TopicConfiguration
      attr_accessor id: ::String
      attr_accessor topic_arn: ::String
      attr_accessor events: ::Array[("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")]
      attr_accessor filter: Types::NotificationConfigurationFilter
      SENSITIVE: []
    end
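    # Illustrative sketch (editor-added, not generated code): a
    # TopicConfiguration wired into a bucket notification. Assumes `s3` is a
    # configured Aws::S3::Client; the SNS topic ARN and bucket are
    # placeholders, and the events list must use values from the enum above.
    #
    #   s3.put_bucket_notification_configuration(
    #     bucket: "example-bucket",
    #     notification_configuration: {
    #       topic_configurations: [{
    #         topic_arn: "arn:aws:sns:us-east-1:111122223333:example-topic",
    #         events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"],
    #         filter: { key: { filter_rules: [{ name: "prefix", value: "images/" }] } }
    #       }]
    #     }
    #   )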
    class TopicConfigurationDeprecated
      attr_accessor id: ::String
      attr_accessor events: ::Array[("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")]
      attr_accessor event: ("s3:ReducedRedundancyLostObject" | "s3:ObjectCreated:*" | "s3:ObjectCreated:Put" | "s3:ObjectCreated:Post" | "s3:ObjectCreated:Copy" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectRemoved:*" | "s3:ObjectRemoved:Delete" | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Post" | "s3:ObjectRestore:Completed" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" | "s3:Replication:OperationNotTracked" | "s3:Replication:OperationMissedThreshold" | "s3:Replication:OperationReplicatedAfterThreshold" | "s3:ObjectRestore:Delete" | "s3:LifecycleTransition" | "s3:IntelligentTiering" | "s3:ObjectAcl:Put" | "s3:LifecycleExpiration:*" | "s3:LifecycleExpiration:Delete" | "s3:LifecycleExpiration:DeleteMarkerCreated" | "s3:ObjectTagging:*" | "s3:ObjectTagging:Put" | "s3:ObjectTagging:Delete")
      attr_accessor topic: ::String
      SENSITIVE: []
    end

    class Transition
      attr_accessor date: ::Time
      attr_accessor days: ::Integer
      attr_accessor storage_class: ("GLACIER" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "DEEP_ARCHIVE" | "GLACIER_IR")
      SENSITIVE: []
    end

    class UploadPartCopyOutput
      attr_accessor copy_source_version_id: ::String
      attr_accessor copy_part_result: Types::CopyPartResult
      attr_accessor server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse")
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor sse_customer_key_md5: ::String
      attr_accessor ssekms_key_id: ::String
      attr_accessor bucket_key_enabled: bool
      attr_accessor request_charged: ("requester")
      SENSITIVE: [:ssekms_key_id]
    end

    class UploadPartCopyRequest
      attr_accessor bucket: ::String
      attr_accessor copy_source: ::String
      attr_accessor copy_source_if_match: ::String
      attr_accessor copy_source_if_modified_since: ::Time
      attr_accessor copy_source_if_none_match: ::String
      attr_accessor copy_source_if_unmodified_since: ::Time
      attr_accessor copy_source_range: ::String
      attr_accessor key: ::String
      attr_accessor part_number: ::Integer
      attr_accessor upload_id: ::String
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor sse_customer_key: ::String
      attr_accessor sse_customer_key_md5: ::String
      attr_accessor copy_source_sse_customer_algorithm: ::String
      attr_accessor copy_source_sse_customer_key: ::String
      attr_accessor copy_source_sse_customer_key_md5: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor expected_bucket_owner: ::String
      attr_accessor expected_source_bucket_owner: ::String
      SENSITIVE: [:sse_customer_key, :copy_source_sse_customer_key]
    end

    class UploadPartOutput
      attr_accessor server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse")
      attr_accessor etag: ::String
      attr_accessor checksum_crc32: ::String
      attr_accessor checksum_crc32c: ::String
      attr_accessor checksum_sha1: ::String
      attr_accessor checksum_sha256: ::String
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor sse_customer_key_md5: ::String
      attr_accessor ssekms_key_id: ::String
      attr_accessor bucket_key_enabled: bool
      attr_accessor request_charged: ("requester")
      SENSITIVE: [:ssekms_key_id]
    end
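    # Illustrative sketch (editor-added, not generated code): an
    # UploadPartCopyRequest copying a 5 MiB range from an existing object into
    # part 1 of an open multipart upload. Assumes `s3` is a configured
    # Aws::S3::Client and `upload_id` came from create_multipart_upload;
    # names are placeholders.
    #
    #   part = s3.upload_part_copy(
    #     bucket: "example-bucket",
    #     key: "big-object-copy",
    #     upload_id: upload_id,
    #     part_number: 1,
    #     copy_source: "example-bucket/big-object",
    #     copy_source_range: "bytes=0-5242879"
    #   )
    #   part.copy_part_result.etag #=> ETag to pass to complete_multipart_upload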
    class UploadPartRequest
      attr_accessor body: ::IO
      attr_accessor bucket: ::String
      attr_accessor content_length: ::Integer
      attr_accessor content_md5: ::String
      attr_accessor checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256")
      attr_accessor checksum_crc32: ::String
      attr_accessor checksum_crc32c: ::String
      attr_accessor checksum_sha1: ::String
      attr_accessor checksum_sha256: ::String
      attr_accessor key: ::String
      attr_accessor part_number: ::Integer
      attr_accessor upload_id: ::String
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor sse_customer_key: ::String
      attr_accessor sse_customer_key_md5: ::String
      attr_accessor request_payer: ("requester")
      attr_accessor expected_bucket_owner: ::String
      SENSITIVE: [:sse_customer_key]
    end

    class VersioningConfiguration
      attr_accessor mfa_delete: ("Enabled" | "Disabled")
      attr_accessor status: ("Enabled" | "Suspended")
      SENSITIVE: []
    end

    class WebsiteConfiguration
      attr_accessor error_document: Types::ErrorDocument
      attr_accessor index_document: Types::IndexDocument
      attr_accessor redirect_all_requests_to: Types::RedirectAllRequestsTo
      attr_accessor routing_rules: ::Array[Types::RoutingRule]
      SENSITIVE: []
    end

    class WriteGetObjectResponseRequest
      attr_accessor request_route: ::String
      attr_accessor request_token: ::String
      attr_accessor body: ::IO
      attr_accessor status_code: ::Integer
      attr_accessor error_code: ::String
      attr_accessor error_message: ::String
      attr_accessor accept_ranges: ::String
      attr_accessor cache_control: ::String
      attr_accessor content_disposition: ::String
      attr_accessor content_encoding: ::String
      attr_accessor content_language: ::String
      attr_accessor content_length: ::Integer
      attr_accessor content_range: ::String
      attr_accessor content_type: ::String
      attr_accessor checksum_crc32: ::String
      attr_accessor checksum_crc32c: ::String
      attr_accessor checksum_sha1: ::String
      attr_accessor checksum_sha256: ::String
      attr_accessor delete_marker: bool
      attr_accessor etag: ::String
      attr_accessor expires: ::Time
      attr_accessor expiration: ::String
      attr_accessor last_modified: ::Time
      attr_accessor missing_meta: ::Integer
      attr_accessor metadata: ::Hash[::String, ::String]
      attr_accessor object_lock_mode: ("GOVERNANCE" | "COMPLIANCE")
      attr_accessor object_lock_legal_hold_status: ("ON" | "OFF")
      attr_accessor object_lock_retain_until_date: ::Time
      attr_accessor parts_count: ::Integer
      attr_accessor replication_status: ("COMPLETE" | "PENDING" | "FAILED" | "REPLICA" | "COMPLETED")
      attr_accessor request_charged: ("requester")
      attr_accessor restore: ::String
      attr_accessor server_side_encryption: ("AES256" | "aws:kms" | "aws:kms:dsse")
      attr_accessor sse_customer_algorithm: ::String
      attr_accessor ssekms_key_id: ::String
      attr_accessor sse_customer_key_md5: ::String
      attr_accessor storage_class: ("STANDARD" | "REDUCED_REDUNDANCY" | "STANDARD_IA" | "ONEZONE_IA" | "INTELLIGENT_TIERING" | "GLACIER" | "DEEP_ARCHIVE" | "OUTPOSTS" | "GLACIER_IR" | "SNOW" | "EXPRESS_ONEZONE")
      attr_accessor tag_count: ::Integer
      attr_accessor version_id: ::String
      attr_accessor bucket_key_enabled: bool
      SENSITIVE: [:ssekms_key_id]
    end

    class SelectObjectContentEventStream < Enumerator[untyped, untyped]
      def event_types: () -> [:records, :stats, :progress, :cont, :end]
    end
  end
end
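# Illustrative sketch (editor-added, not generated code): the
# UploadPartRequest shape above in a minimal multipart round trip. Assumes
# `s3` is a configured Aws::S3::Client and `chunk_io` is an IO of part data;
# bucket and key are placeholders. Each upload_part returns an etag that
# complete_multipart_upload needs.
#
#   mpu = s3.create_multipart_upload(bucket: "example-bucket", key: "big-object")
#   part = s3.upload_part(
#     bucket: "example-bucket", key: "big-object",
#     upload_id: mpu.upload_id, part_number: 1, body: chunk_io
#   )
#   s3.complete_multipart_upload(
#     bucket: "example-bucket", key: "big-object",
#     upload_id: mpu.upload_id,
#     multipart_upload: { parts: [{ etag: part.etag, part_number: 1 }] }
#   )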
aws-sdk-s3-1.143.0/sig/bucket_policy.rbs0000644000004100000410000000460414563445240017745 0ustar www-datawww-data
# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE

module Aws
  module S3
    # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketPolicy.html
    class BucketPolicy
      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketPolicy.html#initialize-instance_method
      def initialize: (String bucket_name, Hash[Symbol, untyped] options) -> void
                    | (bucket_name: String, ?client: Client) -> void
                    | (Hash[Symbol, untyped] args) -> void

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketPolicy.html#bucket_name-instance_method
      def bucket_name: () -> String

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketPolicy.html#policy-instance_method
      def policy: () -> ::IO

      def client: () -> Client

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketPolicy.html#load-instance_method
      def load: () -> self
      alias reload load

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketPolicy.html#data-instance_method
      def data: () -> Types::GetBucketPolicyOutput

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketPolicy.html#data_loaded?-instance_method
      def data_loaded?: () -> bool

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketPolicy.html#delete-instance_method
      def delete: (
                    ?expected_bucket_owner: ::String
                  ) -> ::Aws::EmptyStructure
                | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketPolicy.html#put-instance_method
      def put: (
                 ?content_md5: ::String,
                 ?checksum_algorithm: ("CRC32" | "CRC32C" | "SHA1" | "SHA256"),
                 ?confirm_remove_self_bucket_access: bool,
                 policy: ::String,
                 ?expected_bucket_owner: ::String
               ) -> ::Aws::EmptyStructure
             | (?Hash[Symbol, untyped]) -> ::Aws::EmptyStructure

      # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketPolicy.html#bucket-instance_method
      def bucket: () -> Bucket

      class Collection < ::Aws::Resources::Collection[BucketPolicy]
      end
    end
  end
end
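# Illustrative sketch (editor-added, not generated code): driving the
# BucketPolicy resource typed above. Assumes `s3` is a configured
# Aws::S3::Client; the bucket name and policy document are placeholders.
# Per the signatures, #put takes a JSON string and #data.policy returns an IO.
#
#   require "json"
#   policy = Aws::S3::BucketPolicy.new("example-bucket", client: s3)
#   policy.put(policy: {
#     "Version"   => "2012-10-17",
#     "Statement" => [{
#       "Effect"    => "Deny",
#       "Principal" => "*",
#       "Action"    => "s3:*",
#       "Resource"  => "arn:aws:s3:::example-bucket/*",
#       "Condition" => { "Bool" => { "aws:SecureTransport" => "false" } }
#     }]
#   }.to_json)
#   policy.data.policy.read #=> the stored policy document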
aws-sdk-s3-1.143.0/CHANGELOG.md0000644000004100000410000007350114563445240015432 0ustar www-datawww-data
Unreleased Changes
------------------

1.143.0 (2024-01-26)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.142.0 (2023-12-22)
------------------

* Feature - Added additional examples for some operations.

1.141.0 (2023-11-28)
------------------

* Feature - Adds support for S3 Express One Zone.
* Feature - Support S3 Express authentication and endpoints. Express session auth can be disabled with the `disable_s3_express_session_auth` Client option, the `AWS_S3_DISABLE_EXPRESS_SESSION_AUTH` environment variable, and the `s3_disable_express_session_auth` shared config option. A custom `express_credentials_provider` can be configured onto the Client.

1.140.0 (2023-11-27)
------------------

* Feature - Adds new params, Key and Prefix, to S3 API operations supporting S3 Access Grants. Note: these updates will not change any of the existing S3 API functionality.
* Issue - Fix thread interruptions in multipart `download_file`, `file_uploader` and `stream_uploader` (#2944).

1.139.0 (2023-11-22)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.138.0 (2023-11-21)
------------------

* Feature - Add support for automatic date based partitioning in S3 Server Access Logs.

1.137.0 (2023-11-17)
------------------

* Feature - Removes all default 0 values for numbers and false values for booleans.

1.136.0 (2023-09-26)
------------------

* Feature - This release adds a new field COMPLETED to the ReplicationStatus Enum. You can now use this field to validate the replication status of S3 objects using the AWS SDK.

1.135.0 (2023-09-20)
------------------

* Feature - Fix an issue where the SDK can fail to unmarshall a response due to NumberFormatException.

1.134.0 (2023-08-24)
------------------

* Feature - Updates to endpoint ruleset tests to address Smithy validation issues.

1.133.0 (2023-08-22)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
* Feature - Add support for `progress_callback` in `Object#download_file` and improve multi-threaded performance (#2901).

1.132.1 (2023-08-09)
------------------

* Issue - Add support for disabling checksum validation in `Object#download_file` (#2893).

1.132.0 (2023-07-24)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
* Feature - Add support for verifying checksums in FileDownloader.

1.131.0 (2023-07-20)
------------------

* Feature - Improve performance of S3 clients by simplifying and optimizing endpoint resolution.

1.130.0 (2023-07-13)
------------------

* Feature - S3 Inventory now supports Object Access Control List and Object Owner as available object metadata fields in inventory reports.
* Feature - Allow Object multipart copy API to work when requiring a checksum algorithm.
* Feature - Allow Object multipart copy API to optionally copy parts as they exist on the source object if it has parts, instead of generating new part ranges, when specifying `use_source_parts: true`.

1.129.0 (2023-07-11)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.128.0 (2023-07-06)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.127.0 (2023-06-28)
------------------

* Feature - The S3 ListObjects, ListObjectsV2, and ListObjectVersions APIs now support a new optional header, x-amz-optional-object-attributes. If the header contains RestoreStatus as a value, S3 will include the Glacier restore status (isRestoreInProgress and RestoreExpiryDate) in the List response.
* Feature - Select minimum expiration time for presigned urls between the expiration time option and the credential expiration time.

1.126.0 (2023-06-16)
------------------

* Feature - This release adds SDK support for request-payer request header and request-charged response header in the "GetBucketAccelerateConfiguration", "ListMultipartUploads", "ListObjects", "ListObjectsV2" and "ListObjectVersions" S3 APIs.

1.125.0 (2023-06-15)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.124.0 (2023-06-13)
------------------

* Feature - Integrate double encryption feature to SDKs.

1.123.2 (2023-06-12)
------------------

* Issue - Fix issue when decrypting noncurrent versions of objects when using client side encryption (#2866).
1.123.1 (2023-06-02)
------------------

* Issue - Fix multipart `download_file` so that it does not download bytes out of range (#2859).

1.123.0 (2023-05-31)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.122.0 (2023-05-04)
------------------

* Feature - Documentation updates for Amazon S3.

1.121.0 (2023-04-19)
------------------

* Feature - Provides support for "Snow" Storage class.

1.120.1 (2023-04-05)
------------------

* Issue - Skip `#check_for_cached_region` if custom endpoint provided.

1.120.0 (2023-03-31)
------------------

* Feature - Documentation updates for Amazon S3.

1.119.2 (2023-03-22)
------------------

* Issue - Provide `endpoint` and `bucket` attributes on `Aws::S3::Errors::PermanentRedirect` error objects.

1.119.1 (2023-02-13)
------------------

* Issue - Ensure object metadata is not lost on multipart copy (#2821).

1.119.0 (2023-01-26)
------------------

* Feature - Allow FIPS to be used with path-style URLs.

1.118.0 (2023-01-18)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
* Issue - Replace runtime endpoint resolution approach with generated ruby code.

1.117.2 (2022-11-30)
------------------

* Issue - Return error messages from failures in threads in `MultipartStreamUploader` (#2793).

1.117.1 (2022-10-26)
------------------

* Issue - Fix custom endpoint and port regression with `presigned_url` (#2776).

1.117.0 (2022-10-25)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
* Issue - Apply checksums to MultipartStreamUploader (#2769).

1.116.0 (2022-10-21)
------------------

* Feature - S3 on Outposts launches support for automatic bucket-style alias. You can use the automatic access point alias instead of an access point ARN for any object-level operation in an Outposts bucket.

1.115.0 (2022-10-19)
------------------

* Feature - Updates internal logic for constructing API endpoints. We have added rule-based endpoints and internal model parameters.

1.114.0 (2022-05-03)
------------------

* Feature - Documentation only update for doc bug fixes for the S3 API docs.

1.113.2 (2022-04-26)
------------------

* Issue - Fix an issue where `ExpiredToken` errors were retried as if the request was from another region.

1.113.1 (2022-04-25)
------------------

* Issue - Rewind the underlying file on a streaming retry that is not a truncated body (#2692).

1.113.0 (2022-02-24)
------------------

* Feature - This release adds support for new integrity checking capabilities in Amazon S3. You can choose from four supported checksum algorithms for data integrity checking on your upload and download requests. In addition, AWS SDK can automatically calculate a checksum as it streams data into S3.

1.112.0 (2022-02-03)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.111.3 (2022-01-24)
------------------

* Issue - Fix starts_with fields on `PresignedPost` (#2636).

1.111.2 (2022-01-20)
------------------

* Issue - Minor cleanups.

1.111.1 (2022-01-06)
------------------

* Issue - Don't fail small files in `upload_file` when `:thread_count` is set. (#2628)

1.111.0 (2022-01-04)
------------------

* Feature - Minor doc-based updates based on feedback bugs received.

1.110.0 (2021-12-21)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
1.109.0 (2021-11-30)
------------------

* Feature - Introduce Amazon S3 Glacier Instant Retrieval storage class and a new setting in S3 Object Ownership to disable ACLs for a bucket and the objects in it.

1.108.0 (2021-11-29)
------------------

* Feature - Amazon S3 Event Notifications adds Amazon EventBridge as a destination and supports additional event types. The PutBucketNotificationConfiguration API can now skip validation of Amazon SQS, Amazon SNS and AWS Lambda destinations.

1.107.0 (2021-11-23)
------------------

* Feature - Introduce two new Filters to S3 Lifecycle configurations - ObjectSizeGreaterThan and ObjectSizeLessThan. Introduce a new way to trigger actions on noncurrent versions by providing the number of newer noncurrent versions along with noncurrent days.

1.106.0 (2021-11-17)
------------------

* Feature - Add `presigned_request` method to `Aws::S3::Object`.

1.105.1 (2021-11-05)
------------------

* Issue - Raise error when `use_fips_endpoint` is used with `use_accelerate_endpoint`.

1.105.0 (2021-11-04)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.104.0 (2021-10-18)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.103.0 (2021-09-16)
------------------

* Feature - Add support for access point arn filtering in S3 CW Request Metrics.

1.102.0 (2021-09-02)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.101.0 (2021-09-01)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.100.0 (2021-08-27)
------------------

* Feature - Documentation updates for Amazon S3.

1.99.0 (2021-08-16)
------------------

* Feature - Documentation updates for Amazon S3.

1.98.0 (2021-07-30)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.97.0 (2021-07-28)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.96.2 (2021-07-20)
------------------

* Issue - Fix file downloading edge case for 1 byte multipart ranges (#2561).

1.96.1 (2021-06-10)
------------------

* Issue - Fix GetBucketLocation location_constraint XML parsing (#2536).

1.96.0 (2021-06-03)
------------------

* Feature - S3 Inventory now supports Bucket Key Status.

1.95.1 (2021-05-24)
------------------

* Issue - Raise an error when FIPS is in the ARN's region for Access Point and Object Lambda.

1.95.0 (2021-05-21)
------------------

* Feature - Documentation updates for Amazon S3.

1.94.1 (2021-05-05)
------------------

* Issue - Expose presigned request status to the request handler stack (#2513).

1.94.0 (2021-04-27)
------------------

* Feature - Allow the S3 Presigner to sign non-HTTP verb operations (upload_part, multipart_upload_abort, etc.) (#2511).

1.93.1 (2021-04-12)
------------------

* Issue - Fix FIPS and global endpoint behavior for S3 ARNs.
* Issue - Increases `multipart_threshold` default from 15 megabytes to 100 megabytes.
1.93.0 (2021-03-24)
------------------

* Feature - Documentation updates for Amazon S3.

1.92.0 (2021-03-18)
------------------

* Feature - S3 Object Lambda is a new S3 feature that enables users to apply their own custom code to process the output of a standard S3 GET request by automatically invoking a Lambda function with a GET request.
* Feature - Support S3 Object Lambda ARNs in the `bucket:` parameter.

1.91.0 (2021-03-10)
------------------

* Feature - Adding ID element to the CORSRule schema.

1.90.0 (2021-03-08)
------------------

* Feature - Amazon S3 Documentation updates.

1.89.0 (2021-02-26)
------------------

* Feature - Add RequestPayer to GetObjectTagging and PutObjectTagging.

1.88.2 (2021-02-25)
------------------

* Issue - Support https in `Object#public_url` for `virtual_host`. (#1389)
* Issue - Fix an issue with the IAD regional endpoint plugin removing `us-east-1` from custom endpoints.

1.88.1 (2021-02-12)
------------------

* Issue - Fixed an issue with some plugins expecting `#size` to exist on a request body for streaming IO.

1.88.0 (2021-02-02)
------------------

* Feature - Support PrivateLink using the client `:endpoint` option. This patch has a minor behavioral change: a client constructed using `:use_dualstack_endpoint` or `:use_accelerate_endpoint` and `:endpoint` will now raise an `ArgumentError`.
* Issue - Fix a bug where bucket region detection did not work correctly with ARNs.

1.87.0 (2020-12-21)
------------------

* Feature - Format GetObject's Expires header to be an http-date instead of iso8601.

1.86.2 (2020-12-14)
------------------

* Issue - Use `URI::DEFAULT_PARSER.escape` (an alias for `URI.escape`) in the legacy signer because Ruby 3 removes WEBrick from stdlib.

1.86.1 (2020-12-11)
------------------

* Issue - Bump minimum KMS dependency. (#2449)

1.86.0 (2020-12-01)
------------------

* Feature - S3 adds support for multiple-destination replication, option to sync replica modifications; S3 Bucket Keys to reduce cost of S3 SSE with AWS KMS.

1.85.0 (2020-11-20)
------------------

* Feature - Add new documentation regarding automatically generated Content-MD5 headers when using the SDK or CLI.

1.84.1 (2020-11-10)
------------------

* Issue - Fix presigned urls for Outpost ARNs.

1.84.0 (2020-11-09)
------------------

* Feature - S3 Intelligent-Tiering adds support for Archive and Deep Archive Access tiers; S3 Replication adds replication metrics and failure notifications, brings feature parity for delete marker replication.

1.83.2 (2020-11-06)
------------------

* Issue - Fix bug with clients not resolving the correct endpoint in `us-east-1` using access point ARNs.

1.83.1 (2020-10-19)
------------------

* Issue - Fix `multipart_threshold` documentation.

1.83.0 (2020-10-02)
------------------

* Feature - Amazon S3 Object Ownership is a new S3 feature that enables bucket owners to automatically assume ownership of objects that are uploaded to their buckets by other AWS Accounts.

1.82.0 (2020-09-30)
------------------

* Feature - Amazon S3 on Outposts expands object storage to on-premises AWS Outposts environments, enabling you to store and retrieve objects using S3 APIs and features.
* Feature - Support Outpost Access Point ARNs.

1.81.1 (2020-09-25)
------------------

* Issue - Ignore `amz-sdk-request` header (used for standard and adaptive retries) in the pre-signer. (#2411)

1.81.0 (2020-09-15)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
1.80.0 (2020-09-10)
------------------

* Feature - Bucket owner verification feature added. This feature introduces the x-amz-expected-bucket-owner and x-amz-source-expected-bucket-owner headers.

1.79.1 (2020-08-26)
------------------

* Issue - Fix `Aws::S3::PresignedPost` using the `use_accelerate_endpoint` option with Resource clients. (#2103)

1.79.0 (2020-08-25)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.78.0 (2020-08-11)
------------------

* Feature - Add support for in-region CopyObject and UploadPartCopy through S3 Access Points.

1.77.0 (2020-08-10)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
* Issue - Fix issue with JRuby and bump minimum version of core.

1.76.0 (2020-08-07)
------------------

* Feature - Updates Amazon S3 API reference documentation.
* Feature - Updates to the Amazon S3 Encryption Client. This change includes fixes for issues that were reported by Sophie Schmieg from the Google ISE team, and for issues that were discovered by AWS Cryptography.

1.75.0 (2020-07-21)
------------------

* Feature - Add progress_callback to `Object#upload` to support reporting of upload progress. (#648)

1.74.0 (2020-07-08)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
* Feature - Allow the `use_accelerate_endpoint` option to be used with `Aws::S3::PresignedPost`. (#2103)

1.73.0 (2020-07-02)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.72.0 (2020-06-26)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.71.1 (2020-06-25)
------------------

* Issue - Fix uninitialized constant `Aws::S3::Plugins::RetryableBlockIO::Forwardable` (#2348)

1.71.0 (2020-06-25)
------------------

* Issue - This version has been yanked. (#2349).
* Feature - Retry incomplete, streaming responses to `get_object` using the range parameter to avoid re-downloading already processed data (#2326).
* Issue - Reduce memory usage of `IOEncryptor` and `IODecryptor`.

1.70.0 (2020-06-23)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.69.1 (2020-06-22)
------------------

* Issue - Add support for user provided encryption context to `EncryptionV2::Client`.

1.69.0 (2020-06-18)
------------------

* Feature - Add a new version of the S3 Client Side Encryption Client: `EncryptionV2::Client` which supports more modern encryption algorithms.

1.68.1 (2020-06-11)
------------------

* Issue - Republish previous version with correct dependency on `aws-sdk-core`.

1.68.0 (2020-06-10)
------------------

* Issue - This version has been yanked. (#2327).
* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
* Feature - Change `:compute_checksums` option to compute checksums only for optional operations when set to true, and no operations when set to false. Operations that require checksums are now modeled with `httpChecksumRequired` and computed automatically in aws-sdk-core.

1.67.1 (2020-06-01)
------------------

* Issue - Add support for Object.exists? and Waiters for the encryption client.

1.67.0 (2020-05-28)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
1.66.0 (2020-05-21)
------------------

* Feature - Deprecates unusable input members bound to Content-MD5 header. Updates example and documentation.

1.65.0 (2020-05-18)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
* Feature - Allow S3 presigner to presign non-object operations such as `list_objects`.

1.64.0 (2020-05-07)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.63.1 (2020-05-04)
------------------

* Issue - Handle copy_object, complete_multipart_upload, and upload_part_copy http responses with 200 OK and incomplete bodies as errors.

1.63.0 (2020-04-22)
------------------

* Feature - Add `presigned_request` method to the `Presigner` class. This method returns a URL and headers necessary rather than hoisting them onto the query string.
* Feature - Force HTTPS when using `virtual_host: true` on the `Presigner` class.

1.62.0 (2020-04-20)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.61.2 (2020-04-03)
------------------

* Issue - Add `put_bucket_lifecycle_configuration` and `put_bucket_replication` as required operations used in the MD5 plugin.

1.61.1 (2020-03-10)
------------------

* Issue - Fix raising in `Object#upload_stream` block not triggering the `Aws::S3::MultipartStreamUploader#abort_upload`.

1.61.0 (2020-03-09)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
* Issue - Don't update endpoint on region mismatch errors when using a custom endpoint.

1.60.2 (2020-02-07)
------------------

* Issue - Allow `Aws::S3::Encrypted::Client` to be used with a Resource client.

1.60.1 (2019-12-19)
------------------

* Issue - Allow downcased option for S3 us-east-1 regionalization.

1.60.0 (2019-12-18)
------------------

* Feature - Updates Amazon S3 endpoints allowing you to configure your client to opt-in to using S3 with the us-east-1 regional endpoint, instead of global.

1.59.1 (2019-12-17)
------------------

* Issue - Added validation in the s3 presigner to check for 0 or negative expire_in times.

1.59.0 (2019-12-05)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
* Issue - Fixed an issue with Access Point ARNs not resigning correctly.
* Issue - Fixed S3 gemspec to require a minimum core version to support S3 Access Point ARNs. (GitHub PR #2184)

1.58.0 (2019-12-03)
------------------

* Feature - Amazon S3 Access Points is a new S3 feature that simplifies managing data access at scale for shared data sets on Amazon S3. Access Points provide a customizable way to access the objects in a bucket, with a unique hostname and access policy that enforces the specific permissions and network controls for any request made through the access point. This represents a new way of provisioning access to shared data sets.

1.57.0 (2019-11-20)
------------------

* Feature - This release introduces support for Amazon S3 Replication Time Control, a new feature of S3 Replication that provides a predictable replication time backed by a Service Level Agreement. S3 Replication Time Control helps customers meet compliance or business requirements for data replication, and provides visibility into the replication process with new Amazon CloudWatch Metrics.
1.56.0 (2019-11-18)
------------------

* Feature - Added support for S3 Replication for existing objects. This release allows customers who have requested and been granted access to replicate existing S3 objects across buckets.
* Issue - Fix issue where `Aws::Errors::MissingRegionError` was not thrown for S3 or S3Control clients.

1.55.0 (2019-11-15)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.54.0 (2019-11-13)
------------------

* Feature - Support `:s3_us_east_1_regional_endpoint` with `regional` to enable IAD regional endpoint for S3.

1.53.0 (2019-10-31)
------------------

* Feature - S3 Inventory now supports a new field 'IntelligentTieringAccessTier' that reports the access tier (frequent or infrequent) of objects stored in Intelligent-Tiering storage class.

1.52.0 (2019-10-28)
------------------

* Feature - Adding support in SelectObjectContent for scanning a portion of an object specified by a scan range.

1.51.0 (2019-10-23)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.50.0 (2019-10-17)
------------------

* Feature - Add support to yield the response in #upload_file if a block is given.

1.49.0 (2019-10-10)
------------------

* Feature - Support `#delete_object` and `#head_object` for encryption client.

1.48.0 (2019-08-30)
------------------

* Feature - Added a `:whitelist_headers` option to S3 presigner.

1.47.0 (2019-08-28)
------------------

* Feature - Added a `:time` option to S3 presigner.

1.46.0 (2019-07-25)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.45.0 (2019-07-03)
------------------

* Feature - Add S3 x-amz-server-side-encryption-context support.

1.44.0 (2019-07-01)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.43.0 (2019-06-17)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.42.0 (2019-06-04)
------------------

* Feature - Documentation updates for S3.

1.41.0 (2019-05-29)
------------------

* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.

1.40.0 (2019-05-21)
------------------

* Feature - API update.

1.39.0 (2019-05-16)
------------------

* Feature - API update.

1.38.0 (2019-05-15)
------------------

* Feature - API update.

1.37.0 (2019-05-14)
------------------

* Feature - API update.

1.36.1 (2019-04-19)
------------------

* Issue - Reduce memory usage of `Aws::S3::Object#upload_stream` when `StringIO` is used.

1.36.0 (2019-03-27)
------------------

* Feature - API update.

1.35.0 (2019-03-22)
------------------

* Feature - API update.

1.34.0 (2019-03-21)
------------------

* Feature - API update.

1.33.0 (2019-03-18)
------------------

* Feature - API update.

1.32.0 (2019-03-14)
------------------

* Feature - API update.

1.31.0 (2019-03-08)
------------------

* Feature - API update.

1.30.1 (2019-01-11)
------------------

* Issue - Plugin updates to support client-side monitoring.

1.30.0 (2018-12-04)
------------------

* Feature - API update.

1.29.0 (2018-11-30)
------------------

* Feature - API update.

1.28.0 (2018-11-29)
------------------

* Feature - API update.
* Issue - Update operations need the Content-MD5 header.

1.27.0 (2018-11-27)
------------------

* Feature - API update.

1.26.0 (2018-11-26)
------------------

* Feature - API update.
1.25.0 (2018-11-20)
------------------

* Feature - API update.

1.24.1 (2018-11-16)
------------------

* Issue - Update version dependency on `aws-sdk-core` to support endpoint discovery.

1.24.0 (2018-11-15)
------------------

* Feature - API update.

1.23.1 (2018-10-30)
------------------

* Issue - Support multipart upload empty stream (GitHub Issue #1880)
* Issue - Aws::S3::Encryption::IOAuthDecrypter - Fixes issue where the body tag being split across packets could cause GCM decryption to fail intermittently.

1.23.0 (2018-10-24)
------------------

* Feature - API update.

1.22.0 (2018-10-23)
------------------

* Feature - API update.

1.21.0 (2018-10-04)
------------------

* Feature - API update.

1.20.0 (2018-09-19)
------------------

* Feature - API update.

1.19.0 (2018-09-06)
------------------

* Feature - Adds code paths and plugins for future SDK instrumentation and telemetry.

1.18.0 (2018-09-05)
------------------

* Feature - API update.

1.17.1 (2018-08-29)
------------------

* Issue - Update example for bucket#url (Github Issue#1868)
* Issue - Support opt-out counting #presigned_url as #api_requests (Github Issue#1866)

1.17.0 (2018-07-11)
------------------

* Feature - API update.

1.16.1 (2018-07-10)
------------------

* Issue - Avoids region redirects for FIPS endpoints.

1.16.0 (2018-06-28)
------------------

* Feature - Supports `:version_id` for resource `#download_file` helper.
* Issue - Reduce memory allocation in checksum and signature generation.
* Issue - Ensure file handlers are closed when an exception is raised in `Aws::S3::FileUploader`.

1.15.0 (2018-06-26)
------------------

* Feature - API update.

1.14.0 (2018-06-13)
------------------

* Feature - Adds support for `Aws::S3::Object#upload_stream`, allowing streaming uploads outside of a File-based interface.

1.13.0 (2018-05-22)
------------------

* Feature - API update.
* Issue - Update EventEmitter to Aws::EventEmitter

1.12.0 (2018-05-18)
------------------

* Feature - API update.

1.11.0 (2018-05-17)
------------------

* Feature - Support S3 `SelectObjectContent` API.

1.10.0 (2018-05-07)
------------------

* Feature - API update.

1.9.1 (2018-04-19)
------------------

* Issue - S3 accelerate endpoint doesn't work with 'expect' header.

1.9.0 (2018-04-04)
------------------

* Feature - API update.

1.8.2 (2018-02-23)
------------------

* Issue - Add support for AES/CBC/PKCS7Padding to encryption client.

1.8.1 (2018-02-16)
------------------

* Issue - Enhance S3 Multipart Downloader performance #1709
* Issue - Fix Ruby 2.5 warnings.

1.8.0 (2017-11-29)
------------------

* Feature - API update.

1.7.0 (2017-11-17)
------------------

* Feature - API update.
* Issue - Fix S3 unit test with latest endpoint.

1.6.0 (2017-11-07)
------------------

* Feature - API update.
* Issue - Update S3 unit test with latest endpoint.

1.5.0 (2017-10-06)
------------------

* Feature - API update.
* Issue - Update OJ Json parser error code.
* Issue - Fix typo.

1.4.0 (2017-09-14)
------------------

* Feature - API update.

1.3.0 (2017-09-13)
------------------

* Feature - API update.

1.2.0 (2017-09-07)
------------------

* Feature - API update.

1.1.0 (2017-09-01)
------------------

* Feature - API update.
* Issue - Add object streaming behavior smoke test.
* Issue - Update `aws-sdk-s3` gemspec metadata.

1.0.0 (2017-08-29)
------------------

1.0.0.rc15 (2017-08-15)
------------------

* Feature - API update.
* Issue - Aws::S3 - Fix Multipart Downloader bug issue #1566, now file batches exist in a newly created tmp directory under destination directory.
1.0.0.rc14 (2017-08-01)
------------------

* Feature - API update.

1.0.0.rc13 (2017-07-25)
------------------

* Feature - API update.

1.0.0.rc12 (2017-07-13)
------------------

* Feature - API update.

1.0.0.rc11 (2017-07-06)
------------------

* Feature - API update.

1.0.0.rc10 (2017-06-29)
------------------

* Feature - API update.

1.0.0.rc9 (2017-06-26)
------------------

* Feature - API update.

1.0.0.rc8 (2017-05-23)
------------------

* Feature - API update.

1.0.0.rc7 (2017-05-09)
------------------

* Issue - Correct dependency on `aws-sdk-kms` gem.

1.0.0.rc6 (2017-05-09)
------------------

* Feature - API update.

1.0.0.rc5 (2017-05-05)
------------------

* Feature - Aws::S3 - Added Multipart Download Helper feature to support different `:mode` ("auto", "single_request", "get_range") in downloading large objects with `#download_file` in multipart when possible.

1.0.0.rc4 (2017-04-21)
------------------

* Feature - API update.

1.0.0.rc3 (2017-03-09)
------------------

* Issue - Correct dependency on `aws-sdk-kms` gem.

1.0.0.rc2 (2016-12-09)
------------------

* Feature - API update.

1.0.0.rc1 (2016-12-05)
------------------

* Feature - Initial preview release of the `aws-sdk-s3` gem.