lib/aws/s3/s3_object.rb in aws-sdk-1.8.5 vs lib/aws/s3/s3_object.rb in aws-sdk-1.9.0
- old
+ new
@@ -18,222 +18,222 @@
class S3
# Represents an object in S3. Objects live in a bucket and have
# unique keys.
#
- # = Getting Objects
+ # # Getting Objects
#
# You can get an object by its key.
#
- # s3 = AWS::S3.new
- # obj = s3.buckets['my-bucket'].objects['key'] # no request made
+ # s3 = AWS::S3.new
+ # obj = s3.buckets['my-bucket'].objects['key'] # no request made
#
  # You can also get objects by enumerating the objects in a bucket.
#
- # bucket.objects.each do |obj|
- # puts obj.key
- # end
+ # bucket.objects.each do |obj|
+ # puts obj.key
+ # end
#
# See {ObjectCollection} for more information on finding objects.
#
- # = Creating Objects
+ # # Creating Objects
#
# You create an object by writing to it. The following two
# expressions are equivalent.
#
- # obj = bucket.objects.create('key', 'data')
- # obj = bucket.objects['key'].write('data')
+ # obj = bucket.objects.create('key', 'data')
+ # obj = bucket.objects['key'].write('data')
#
- # = Writing Objects
+ # # Writing Objects
#
# To upload data to S3, you simply need to call {#write} on an object.
#
- # obj.write('Hello World!')
- # obj.read
- # #=> 'Hello World!'
+ # obj.write('Hello World!')
+ # obj.read
+ # #=> 'Hello World!'
#
- # == Uploading Files
+ # ## Uploading Files
#
# You can upload a file to S3 in a variety of ways. Given a path
# to a file (as a string) you can do any of the following:
#
- # # specify the data as a path to a file
- # obj.write(Pathname.new(path_to_file))
+ # # specify the data as a path to a file
+ # obj.write(Pathname.new(path_to_file))
#
- # # also works this way
- # obj.write(:file => path_to_file)
+ # # also works this way
+ # obj.write(:file => path_to_file)
#
- # # Also accepts an open file object
- # file = File.open(path_to_file, 'r')
- # obj.write(file)
+ # # Also accepts an open file object
+ # file = File.open(path_to_file, 'r')
+ # obj.write(file)
#
# All three examples above produce the same result. The file
# will be streamed to S3 in chunks. It will not be loaded
# entirely into memory.
#
- # == Streaming Uploads
+ # ## Streaming Uploads
#
# When you call {#write} with any IO-like object (must respond to
# #read and #eof?), it will be streamed to S3 in chunks.
#
# While it is possible to determine the size of many IO objects, you may
# have to specify the :content_length of your IO object.
  # If the exact size cannot be known, you may provide an
- # +:estimated_content_length+. Depending on the size (actual or
+ # `:estimated_content_length`. Depending on the size (actual or
# estimated) of your data, it will be uploaded in a single request or
# in multiple requests via {#multipart_upload}.
#
# You may also stream uploads to S3 using a block:
#
- # obj.write do |buffer, bytes|
- # # writing fewer than the requested number of bytes to the buffer
- # # will cause write to stop yielding to the block
- # end
+ # obj.write do |buffer, bytes|
+ # # writing fewer than the requested number of bytes to the buffer
+ # # will cause write to stop yielding to the block
+ # end
#
- # = Reading Objects
+ # # Reading Objects
#
# You can read an object directly using {#read}. Be warned, this will
# load the entire object into memory and is not recommended for large
# objects.
#
- # obj.write('abc')
- # puts obj.read
- # #=> abc
+ # obj.write('abc')
+ # puts obj.read
+ # #=> abc
#
- # == Streaming Downloads
+ # ## Streaming Downloads
#
# If you want to stream an object from S3, you can pass a block
# to {#read}.
#
- # File.open('output', 'w') do |file|
- # large_object.read do |chunk|
- # file.write(chunk)
+ # File.open('output', 'w') do |file|
+ # large_object.read do |chunk|
+ # file.write(chunk)
+ # end
# end
- # end
#
- # = Encryption
+ # # Encryption
#
# Amazon S3 can encrypt objects for you service-side. You can also
# use client-side encryption.
#
- # == Server Side Encryption
+ # ## Server Side Encryption
#
# Amazon S3 provides server side encryption for an additional cost.
# You can specify to use server side encryption when writing an object.
#
- # obj.write('data', :server_side_encryption => :aes256)
+ # obj.write('data', :server_side_encryption => :aes256)
#
# You can also make this the default behavior.
#
- # AWS.config(:s3_server_side_encryption => :aes256)
+ # AWS.config(:s3_server_side_encryption => :aes256)
#
- # s3 = AWS::S3.new
- # s3.buckets['name'].objects['key'].write('abc') # will be encrypted
+ # s3 = AWS::S3.new
+ # s3.buckets['name'].objects['key'].write('abc') # will be encrypted
#
- # == Client Side Encryption
+ # ## Client Side Encryption
#
# Client side encryption utilizes envelope encryption, so that your keys are
  # never sent to S3. You can use a symmetric key or an asymmetric
# key pair.
#
- # === Symmetric Key Encryption
+ # ### Symmetric Key Encryption
#
# An AES key is used for symmetric encryption. The key can be 128, 192,
# and 256 bit sizes. Start by generating key or read a previously
# generated key.
#
- # # generate a new random key
- # my_key = OpenSSL::Cipher.new("AES-256-ECB").random_key
+ # # generate a new random key
+ # my_key = OpenSSL::Cipher.new("AES-256-ECB").random_key
#
- # # read an existing key from disk
- # my_key = File.read("my_key.der")
+ # # read an existing key from disk
+ # my_key = File.read("my_key.der")
#
# Now you can encrypt locally and upload the encrypted data to S3.
# To do this, you need to provide your key.
#
- # obj = bucket.objects["my-text-object"]
+ # obj = bucket.objects["my-text-object"]
#
- # # encrypt then upload data
- # obj.write("MY TEXT", :encryption_key => my_key)
+ # # encrypt then upload data
+ # obj.write("MY TEXT", :encryption_key => my_key)
#
- # # try read the object without decrypting, oops
- # obj.read
- # #=> '.....'
+ # # try read the object without decrypting, oops
+ # obj.read
+ # #=> '.....'
#
# Lastly, you can download and decrypt by providing the same key.
#
- # obj.read(:encryption_key => my_key)
- # #=> "MY TEXT"
+ # obj.read(:encryption_key => my_key)
+ # #=> "MY TEXT"
#
- # === Asymmetric Key Pair
+ # ### Asymmetric Key Pair
#
# A RSA key pair is used for asymmetric encryption. The public key is used
# for encryption and the private key is used for decryption. Start
# by generating a key.
#
- # my_key = OpenSSL::PKey::RSA.new(1024)
+ # my_key = OpenSSL::PKey::RSA.new(1024)
#
# Provide your key to #write and the data will be encrypted before it
# is uploaded. Pass the same key to #read to decrypt the data
# when you download it.
#
- # obj = bucket.objects["my-text-object"]
+ # obj = bucket.objects["my-text-object"]
#
- # # encrypt and upload the data
- # obj.write("MY TEXT", :encryption_key => my_key)
+ # # encrypt and upload the data
+ # obj.write("MY TEXT", :encryption_key => my_key)
#
- # # download and decrypt the data
- # obj.read(:encryption_key => my_key)
- # #=> "MY TEXT"
+ # # download and decrypt the data
+ # obj.read(:encryption_key => my_key)
+ # #=> "MY TEXT"
#
- # === Configuring storage locations
+ # ### Configuring storage locations
#
# By default, encryption materials are stored in the object metadata.
# If you prefer, you can store the encryption materials in a separate
# object in S3. This object will have the same key + '.instruction'.
#
- # # new object, does not exist yet
- # obj = bucket.objects["my-text-object"]
+ # # new object, does not exist yet
+ # obj = bucket.objects["my-text-object"]
#
- # # no instruction file present
- # bucket.objects['my-text-object.instruction'].exists?
- # #=> false
+ # # no instruction file present
+ # bucket.objects['my-text-object.instruction'].exists?
+ # #=> false
#
- # # store the encryption materials in the instruction file
- # # instead of obj#metadata
- # obj.write("MY TEXT",
- # :encryption_key => MY_KEY,
- # :encryption_materials_location => :instruction_file)
+ # # store the encryption materials in the instruction file
+ # # instead of obj#metadata
+ # obj.write("MY TEXT",
+ # :encryption_key => MY_KEY,
+ # :encryption_materials_location => :instruction_file)
#
- # bucket.objects['my-text-object.instruction'].exists?
- # #=> true
+ # bucket.objects['my-text-object.instruction'].exists?
+ # #=> true
#
# If you store the encryption materials in an instruction file, you
# must tell #read this or it will fail to find your encryption materials.
#
- #   # reading an encrypted file whose materials are stored in an
- # # instruction file, and not metadata
- # obj.read(:encryption_key => MY_KEY,
- # :encryption_materials_location => :instruction_file)
+ #     # reading an encrypted file whose materials are stored in an
+ # # instruction file, and not metadata
+ # obj.read(:encryption_key => MY_KEY,
+ # :encryption_materials_location => :instruction_file)
#
- # === Configuring default behaviors
+ # ### Configuring default behaviors
#
# You can configure the default key such that it will automatically
# encrypt and decrypt for you. You can do this globally or for a
# single S3 interface
#
- # # all objects uploaded/downloaded with this s3 object will be
- # # encrypted/decrypted
- # s3 = AWS::S3.new(:s3_encryption_key => "MY_KEY")
+ # # all objects uploaded/downloaded with this s3 object will be
+ # # encrypted/decrypted
+ # s3 = AWS::S3.new(:s3_encryption_key => "MY_KEY")
#
- # # set the key to always encrypt/decrypt
- # AWS.config(:s3_encryption_key => "MY_KEY")
+ # # set the key to always encrypt/decrypt
+ # AWS.config(:s3_encryption_key => "MY_KEY")
#
# You can also configure the default storage location for the encryption
# materials.
#
- # AWS.config(:s3_encryption_materials_location => :instruction_file)
+ # AWS.config(:s3_encryption_materials_location => :instruction_file)
#
class S3Object
include Core::Model
include DataOptions
@@ -264,11 +264,11 @@
def == other
other.kind_of?(S3Object) and other.bucket == bucket and other.key == key
end
alias_method :eql?, :==
- # @return [Boolean] Returns +true+ if the object exists in S3.
+ # @return [Boolean] Returns `true` if the object exists in S3.
def exists?
head
rescue Errors::NoSuchKey => e
false
else
@@ -281,11 +281,11 @@
# * metadata (hash of user-supplied key-value pairs)
# * content_length (integer, number of bytes)
# * content_type (as sent to S3 when uploading the object)
# * etag (typically the object's MD5)
# * server_side_encryption (the algorithm used to encrypt the
- # object on the server side, e.g. +:aes256+)
+ # object on the server side, e.g. `:aes256`)
#
# @param [Hash] options
# @option options [String] :version_id Which version of this object
# to make a HEAD request against.
# @return A head object response with metadata,
@@ -335,11 +335,11 @@
def expiration_rule_id
head[:expiration_rule_id]
end
# @return [Symbol, nil] Returns the algorithm used to encrypt
- # the object on the server side, or +nil+ if SSE was not used
+ # the object on the server side, or `nil` if SSE was not used
# when storing the object.
def server_side_encryption
head[:server_side_encryption]
end
@@ -380,11 +380,11 @@
#
# @option [String] :version_id (nil) If present the specified version
# of this object will be deleted. Only works for buckets that have
# had versioning enabled.
#
- # @option [Boolean] :delete_instruction_file (false) Set this to +true+
+ # @option [Boolean] :delete_instruction_file (false) Set this to `true`
# if you use client-side encryption and the encryption materials
# were stored in a separate object in S3 (key.instruction).
#
# @option [String] :mfa The serial number and current token code of
# the Multi-Factor Authentication (MFA) device for the user. Format
@@ -405,19 +405,19 @@
nil
end
# Restores a temporary copy of an archived object from the
- # Glacier storage tier. After the specified +days+, Amazon
+ # Glacier storage tier. After the specified `days`, Amazon
# S3 deletes the temporary copy. Note that the object
# remains archived; Amazon S3 deletes only the restored copy.
#
# Restoring an object does not occur immediately. Use
# {#restore_in_progress?} to check the status of the operation.
#
# @option [Integer] :days (1) the number of days to keep the object
- # @return [Boolean] +true+ if a restore can be initiated.
+ # @return [Boolean] `true` if a restore can be initiated.
# @since 1.7.2
def restore options = {}
options[:days] ||= 1
client.restore_object(
@@ -437,74 +437,76 @@
end
# Returns a collection representing all the object versions
# for this object.
#
+ # @example
+ #
# bucket.versioning_enabled? # => true
# version = bucket.objects["mykey"].versions.latest
#
# @return [ObjectVersionCollection]
def versions
ObjectVersionCollection.new(self)
end
# Uploads data to the object in S3.
#
- # obj = s3.buckets['bucket-name'].objects['key']
+ # obj = s3.buckets['bucket-name'].objects['key']
#
- # # strings
- # obj.write("HELLO")
+ # # strings
+ # obj.write("HELLO")
#
- # # files (by path)
- # obj.write(Pathname.new('path/to/file.txt'))
+ # # files (by path)
+ # obj.write(Pathname.new('path/to/file.txt'))
#
- # # file objects
- # obj.write(File.open('path/to/file.txt', 'r'))
+ # # file objects
+ # obj.write(File.open('path/to/file.txt', 'r'))
#
- # # IO objects (must respond to #read and #eof?)
- # obj.write(io)
+ # # IO objects (must respond to #read and #eof?)
+ # obj.write(io)
#
- # === Multipart Uploads vs Single Uploads
+ # ### Multipart Uploads vs Single Uploads
#
# This method will intelligently choose between uploading the
  # file in a single request and using {#multipart_upload}.
# You can control this behavior by configuring the thresholds
# and you can disable the multipart feature as well.
#
- # # always send the file in a single request
- # obj.write(file, :single_request => true)
+ # # always send the file in a single request
+ # obj.write(file, :single_request => true)
#
- # # upload the file in parts if the total file size exceeds 100MB
- # obj.write(file, :multipart_threshold => 100 * 1024 * 1024)
+ # # upload the file in parts if the total file size exceeds 100MB
+ # obj.write(file, :multipart_threshold => 100 * 1024 * 1024)
#
# @overload write(data, options = {})
#
# @param [String,Pathname,File,IO] data The data to upload.
# This may be a:
# * String
# * Pathname
# * File
# * IO
- # * Any object that responds to +#read+ and +#eof?+.
+ # * Any object that responds to `#read` and `#eof?`.
#
# @param options [Hash] Additional upload options.
#
# @option options [Integer] :content_length If provided, this
# option must match the total number of bytes written to S3.
# This options is *required* when it is not possible to
- # automatically determine the size of +data+.
+ # automatically determine the size of `data`.
#
# @option options [Integer] :estimated_content_length When uploading
# data of unknown content length, you may specify this option to
# hint what mode of upload should take place. When
- # +:estimated_content_length+ exceeds the +:multipart_threshold+,
+ # `:estimated_content_length` exceeds the `:multipart_threshold`,
# then the data will be uploaded in parts, otherwise it will
# be read into memory and uploaded via {Client#put_object}.
#
- # @option options [Boolean] :single_request (false) When +true+,
+ # @option options [Boolean] :single_request (false) When `true`,
# this method will always upload the data in a single request
- # (via {Client#put_object}). When +false+, this method will
+ # (via {Client#put_object}). When `false`, this method will
# choose between {Client#put_object} and {#multipart_upload}.
#
# @option options [Integer] :multipart_threshold (16777216) Specifies
# the maximum size (in bytes) of a single-request upload. If the
# data exceeds this threshold, it will be uploaded via
@@ -517,22 +519,22 @@
# (except the final part). The default is 5MB and can be
# configured via AWS.config(:s3_multipart_min_part_size => ...).
#
# @option options [Hash] :metadata A hash of metadata to be
# included with the object. These will be sent to S3 as
- # headers prefixed with +x-amz-meta+. Each name, value pair
+ # headers prefixed with `x-amz-meta`. Each name, value pair
# must conform to US-ASCII.
#
# @option options [Symbol,String] :acl (:private) A canned access
# control policy. Valid values are:
#
- # * +:private+
- # * +:public_read+
- # * +:public_read_write+
- # * +:authenticated_read+
- # * +:bucket_owner_read+
- # * +:bucket_owner_full_control+
+ # * `:private`
+ # * `:public_read`
+ # * `:public_read_write`
+ # * `:authenticated_read`
+ # * `:bucket_owner_read`
+ # * `:bucket_owner_full_control`
#
# @option options [String] :grant_read
#
# @option options [String] :grant_write
#
@@ -540,11 +542,11 @@
#
# @option options [String] :grant_write_acp
#
# @option options [String] :grant_full_control
#
- # @option options [Boolean] :reduced_redundancy (false) When +true+,
+ # @option options [Boolean] :reduced_redundancy (false) When `true`,
# this object will be stored with Reduced Redundancy Storage.
#
# @option options :cache_control [String] Can be used to specify
# caching behavior. See
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
@@ -554,11 +556,11 @@
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec19.5.1
#
# @option options :content_encoding [String] Specifies what
# content encodings have been applied to the object and thus
# what decoding mechanisms must be applied to obtain the
- # media-type referenced by the +Content-Type+ header field.
+ # media-type referenced by the `Content-Type` header field.
# See
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
#
# @option options [String] :content_md5
# The base64 encoded content md5 of the data.
@@ -566,33 +568,33 @@
# @option options :content_type A standard MIME type
# describing the format of the object data.
#
# @option options [Symbol] :server_side_encryption (nil) If this
# option is set, the object will be stored using server side
- # encryption. The only valid value is +:aes256+, which
+ # encryption. The only valid value is `:aes256`, which
# specifies that the object should be stored using the AES
# encryption algorithm with 256 bit keys. By default, this
- # option uses the value of the +:s3_server_side_encryption+
+ # option uses the value of the `:s3_server_side_encryption`
# option in the current configuration; for more information,
# see {AWS.config}.
#
# @option options [OpenSSL::PKey::RSA, String] :encryption_key
# Set this to encrypt the data client-side using envelope
# encryption. The key must be an OpenSSL asymmetric key
# or a symmetric key string (16, 24 or 32 bytes in length).
#
# @option options [Symbol] :encryption_materials_location (:metadata)
- # Set this to +:instruction_file+ if you prefer to store the
+ # Set this to `:instruction_file` if you prefer to store the
# client-side encryption materials in a separate object in S3
# instead of in the object metadata.
#
# @option options [String] :expires The date and time at which the
# object is no longer cacheable.
#
# @return [S3Object, ObjectVersion] If the bucket has versioning
# enabled, this methods returns an {ObjectVersion}, otherwise
- # this method returns +self+.
+ # this method returns `self`.
#
def write *args, &block
options = compute_write_options(*args, &block)
@@ -613,58 +615,62 @@
# to have more control over how the failure of an individual
# part upload is handled. Otherwise, {#write} is much simpler
# to use.
#
# @example Uploading an object in two parts
+ #
# bucket.objects.myobject.multipart_upload do |upload|
# upload.add_part("a" * 5242880)
# upload.add_part("b" * 2097152)
# end
#
# @example Uploading parts out of order
+ #
# bucket.objects.myobject.multipart_upload do |upload|
# upload.add_part("b" * 2097152, :part_number => 2)
# upload.add_part("a" * 5242880, :part_number => 1)
# end
#
# @example Aborting an upload after parts have been added
+ #
# bucket.objects.myobject.multipart_upload do |upload|
# upload.add_part("b" * 2097152, :part_number => 2)
# upload.abort
# end
#
# @example Starting an upload and completing it later by ID
+ #
# upload = bucket.objects.myobject.multipart_upload
# upload.add_part("a" * 5242880)
# upload.add_part("b" * 2097152)
# id = upload.id
#
# # later or in a different process
# upload = bucket.objects.myobject.multipart_uploads[id]
# upload.complete(:remote_parts)
#
# @yieldparam [MultipartUpload] upload A handle to the upload.
- # {MultipartUpload#close} is called in an +ensure+ clause so
+ # {MultipartUpload#close} is called in an `ensure` clause so
# that the upload will always be either completed or
# aborted.
#
# @param [Hash] options Options for the upload.
#
# @option options [Hash] :metadata A hash of metadata to be
# included with the object. These will be sent to S3 as
- # headers prefixed with +x-amz-meta+. Each name, value pair
+ # headers prefixed with `x-amz-meta`. Each name, value pair
# must conform to US-ASCII.
#
# @option options [Symbol] :acl (private) A canned access
# control policy. Valid values are:
#
- # * +:private+
- # * +:public_read+
- # * +:public_read_write+
- # * +:authenticated_read+
- # * +:bucket_owner_read+
- # * +:bucket_owner_full_control+
+ # * `:private`
+ # * `:public_read`
+ # * `:public_read_write`
+ # * `:authenticated_read`
+ # * `:bucket_owner_read`
+ # * `:bucket_owner_full_control`
#
# @option options [Boolean] :reduced_redundancy (false) If true,
# Reduced Redundancy Storage will be enabled for the uploaded
# object.
#
@@ -677,23 +683,23 @@
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec19.5.1
#
# @option options :content_encoding [String] Specifies what
# content encodings have been applied to the object and thus
# what decoding mechanisms must be applied to obtain the
- # media-type referenced by the +Content-Type+ header field.
+ # media-type referenced by the `Content-Type` header field.
# See
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
#
# @option options :content_type A standard MIME type
# describing the format of the object data.
#
# @option options [Symbol] :server_side_encryption (nil) If this
# option is set, the object will be stored using server side
- # encryption. The only valid value is +:aes256+, which
+ # encryption. The only valid value is `:aes256`, which
# specifies that the object should be stored using the AES
# encryption algorithm with 256 bit keys. By default, this
- # option uses the value of the +:s3_server_side_encryption+
+ # option uses the value of the `:s3_server_side_encryption`
# option in the current configuration; for more information,
# see {AWS.config}.
#
# @return [S3Object, ObjectVersion] If the bucket has versioning
# enabled, returns the {ObjectVersion} representing the
@@ -734,27 +740,27 @@
#
# This works by copying the object to a new key and then
# deleting the old object. This function returns the
# new object once this is done.
#
- # bucket = s3.buckets['old-bucket']
- # old_obj = bucket.objects['old-key']
+ # bucket = s3.buckets['old-bucket']
+ # old_obj = bucket.objects['old-key']
#
- # # renaming an object returns a new object
- # new_obj = old_obj.move_to('new-key')
+ # # renaming an object returns a new object
+ # new_obj = old_obj.move_to('new-key')
#
- # old_obj.key #=> 'old-key'
- # old_obj.exists? #=> false
+ # old_obj.key #=> 'old-key'
+ # old_obj.exists? #=> false
#
- # new_obj.key #=> 'new-key'
- # new_obj.exists? #=> true
+ # new_obj.key #=> 'new-key'
+ # new_obj.exists? #=> true
#
# If you need to move an object to a different bucket, pass
- # +:bucket+ or +:bucket_name+.
+ # `:bucket` or `:bucket_name`.
#
- # obj = s3.buckets['old-bucket'].objects['old-key']
- # obj.move_to('new-key', :bucket_name => 'new_bucket')
+ # obj = s3.buckets['old-bucket'].objects['old-key']
+ # obj.move_to('new-key', :bucket_name => 'new_bucket')
#
  # If the copy succeeds but the delete fails, an error
# will be raised.
#
# @param [String] target The key to move this object to.
@@ -810,23 +816,23 @@
# read a specific version of the source object.
#
# @option options [Symbol] :acl (private) A canned access
# control policy. Valid values are:
#
- # * +:private+
- # * +:public_read+
- # * +:public_read_write+
- # * +:authenticated_read+
- # * +:bucket_owner_read+
- # * +:bucket_owner_full_control+
+ # * `:private`
+ # * `:public_read`
+ # * `:public_read_write`
+ # * `:authenticated_read`
+ # * `:bucket_owner_read`
+ # * `:bucket_owner_full_control`
#
# @option options [Symbol] :server_side_encryption (nil) If this
# option is set, the object will be stored using server side
- # encryption. The only valid value is +:aes256+, which
+ # encryption. The only valid value is `:aes256`, which
# specifies that the object should be stored using the AES
# encryption algorithm with 256 bit keys. By default, this
- # option uses the value of the +:s3_server_side_encryption+
+ # option uses the value of the `:s3_server_side_encryption`
# option in the current configuration; for more information,
# see {AWS.config}.
#
# @option options [Boolean] :client_side_encrypted (false) Set to true
# when the object being copied was client-side encrypted. This
@@ -922,27 +928,27 @@
# lower cost.
#
# @option options [Symbol] :acl (private) A canned access
# control policy. Valid values are:
#
- # * +:private+
- # * +:public_read+
- # * +:public_read_write+
- # * +:authenticated_read+
- # * +:bucket_owner_read+
- # * +:bucket_owner_full_control+
+ # * `:private`
+ # * `:public_read`
+ # * `:public_read_write`
+ # * `:authenticated_read`
+ # * `:bucket_owner_read`
+ # * `:bucket_owner_full_control`
#
# @option options [Symbol] :server_side_encryption (nil) If this
# option is set, the object will be stored using server side
- # encryption. The only valid value is +:aes256+, which
+ # encryption. The only valid value is `:aes256`, which
# specifies that the object should be stored using the AES
# encryption algorithm with 256 bit keys. By default, this
- # option uses the value of the +:s3_server_side_encryption+
+ # option uses the value of the `:s3_server_side_encryption`
# option in the current configuration; for more information,
# see {AWS.config}.
#
- # @option options [Boolean] :client_side_encrypted (false) When +true+,
+ # @option options [Boolean] :client_side_encrypted (false) When `true`,
# the client-side encryption materials will be copied. Without this
# option, the key and iv are not guaranteed to be transferred to
# the new object.
#
# @option options [String] :expires The date and time at which the
@@ -975,62 +981,62 @@
# Fetches the object data from S3. If you pass a block to this
# method, the data will be yielded to the block in chunks as it
# is read off the HTTP response.
#
- # === Read an object from S3 in chunks
+ # ### Read an object from S3 in chunks
#
# When downloading large objects it is recommended to pass a block
# to #read. Data will be yielded to the block as it is read off
# the HTTP response.
#
- # # read an object from S3 to a file
- # File.open('output.txt', 'w') do |file|
- # bucket.objects['key'].read do |chunk|
- # file.write(chunk)
+ # # read an object from S3 to a file
+ # File.open('output.txt', 'w') do |file|
+ # bucket.objects['key'].read do |chunk|
+ # file.write(chunk)
+ # end
# end
- # end
#
- # === Reading an object without a block
+ # ### Reading an object without a block
#
# When you omit the block argument to #read, then the entire
  # HTTP response is read and the object data is loaded into
# memory.
#
- # bucket.objects['key'].read
- # #=> 'object-contents-here'
+ # bucket.objects['key'].read
+ # #=> 'object-contents-here'
#
# @param [Hash] options
#
# @option options [String] :version_id Reads data from a
# specific version of this object.
#
# @option options [Time] :if_unmodified_since If specified, the
# method will raise
- # <tt>AWS::S3::Errors::PreconditionFailed</tt> unless the
+ # `AWS::S3::Errors::PreconditionFailed` unless the
# object has not been modified since the given time.
#
# @option options [Time] :if_modified_since If specified, the
- # method will raise <tt>AWS::S3::Errors::NotModified</tt> if
+ # method will raise `AWS::S3::Errors::NotModified` if
# the object has not been modified since the given time.
#
# @option options [String] :if_match If specified, the method
- # will raise <tt>AWS::S3::Errors::PreconditionFailed</tt>
+ # will raise `AWS::S3::Errors::PreconditionFailed`
# unless the object ETag matches the provided value.
#
# @option options [String] :if_none_match If specified, the
- # method will raise <tt>AWS::S3::Errors::NotModified</tt> if
+ # method will raise `AWS::S3::Errors::NotModified` if
# the object ETag matches the provided value.
#
# @option options [Range] :range A byte range to read data from
#
# @option options [OpenSSL::PKey::RSA, String] :encryption_key
# (nil) If this option is set, the object will be decrypted using
# envelope encryption. The valid values are OpenSSL asymmetric keys
- # +OpenSSL::Pkey::RSA+ or strings representing symmetric keys
- # of an AES-128/192/256-ECB cipher as a +String+.
- # This value defaults to the value in +s3_encryption_key+;
+ # `OpenSSL::Pkey::RSA` or strings representing symmetric keys
+ # of an AES-128/192/256-ECB cipher as a `String`.
+ # This value defaults to the value in `s3_encryption_key`;
# for more information, see {AWS.config}.
#
# Symmetric Keys:
#
# cipher = OpenSSL::Cipher.new('AES-256-ECB')
@@ -1038,14 +1044,14 @@
#
# Asymmetric keys can also be generated as so:
# key = OpenSSL::PKey::RSA.new(KEY_SIZE)
#
# @option options [Symbol] :encryption_materials_location (:metadata)
- # Set this to +:instruction_file+ if the encryption materials
+ # Set this to `:instruction_file` if the encryption materials
# are not stored in the object metadata
#
- # @note +:range+ option cannot be used with client-side encryption
+ # @note `:range` option cannot be used with client-side encryption
#
# @note All decryption reads incur at least an extra HEAD operation.
#
def read options = {}, &read_block
@@ -1072,20 +1078,20 @@
end
end
# Returns the object's access control list. This will be an
- # instance of AccessControlList, plus an additional +change+
+ # instance of AccessControlList, plus an additional `change`
# method:
#
- # object.acl.change do |acl|
- # # remove any grants to someone other than the bucket owner
- # owner_id = object.bucket.owner.id
- # acl.grants.reject! do |g|
- # g.grantee.canonical_user_id != owner_id
- # end
- # end
+ # object.acl.change do |acl|
+ # # remove any grants to someone other than the bucket owner
+ # owner_id = object.bucket.owner.id
+ # acl.grants.reject! do |g|
+ # g.grantee.canonical_user_id != owner_id
+ # end
+ # end
#
# Note that changing the ACL is not an atomic operation; it
# fetches the current ACL, yields it to the block, and then
# sets it again. Therefore, it's possible that you may
# overwrite a concurrent update to the ACL using this
@@ -1128,30 +1134,34 @@
# This URL can be used by a regular HTTP client to perform the
# desired operation without credentials and without changing
# the permissions of the object.
#
# @example Generate a url to read an object
+ #
# bucket.objects.myobject.url_for(:read)
#
# @example Generate a url to delete an object
+ #
# bucket.objects.myobject.url_for(:delete)
#
# @example Override response headers for reading an object
+ #
# object = bucket.objects.myobject
# url = object.url_for(:read,
# :response_content_type => "application/json")
#
# @example Generate a url that expires in 10 minutes
+ #
# bucket.objects.myobject.url_for(:read, :expires => 10*60)
#
# @param [Symbol, String] method The HTTP verb or object
# method for which the returned URL will be valid. Valid
# values:
#
- # * +:get+ or +:read+
- # * +:put+ or +:write+
- # * +:delete+
+ # * `:get` or `:read`
+ # * `:put` or `:write`
+ # * `:delete`
#
# @param [Hash] options Additional options for generating the URL.
#
# @option options :expires Sets the expiration time of the
# URL; after this time S3 will return an error if the URL is
@@ -1162,11 +1172,11 @@
#
# @option options [Boolean] :secure (true) Whether to generate a
# secure (HTTPS) URL or a plain HTTP url.
#
# @option options [String] :endpoint Sets the hostname of the
- # endpoint (overrides config.s3_endpoint).
+ # endpoint.
#
# @option options [Integer] :port Sets the port of the
# endpoint (overrides config.s3_port).
#
# @option options [Boolean] :force_path_style (false) Indicates
@@ -1252,11 +1262,11 @@
# @param [true,false] value If this is true, the object will be
# copied in place and stored with reduced redundancy at a
# lower cost. Otherwise, the object will be copied and stored
# with the standard storage class.
#
- # @return [true,false] The +value+ parameter.
+ # @return [true,false] The `value` parameter.
def reduced_redundancy= value
copy_from(key, :reduced_redundancy => value)
value
end
@@ -1285,11 +1295,11 @@
cipher.update(get_object(options)[:data]) + cipher.final
end
end
end
- # @return [Boolean] Returns +true+ if the :data option is large or
+ # @return [Boolean] Returns `true` if the :data option is large or
# guessed to be larger than a configured threshold.
def use_multipart? options
estimated_content_length(options) > multipart_threshold(options) and
!options[:single_request]
end
@@ -1377,11 +1387,11 @@
symbol.to_s.upcase
end
def request_for_signing(options)
- port = [443, 80].include?(config.s3_port) ?
+ port = [443, 80].include?(config.s3_port) ?
(options[:secure] ? 443 : 80) :
config.s3_port
req = Request.new
@@ -1672,16 +1682,16 @@
if options[:reduced_redundancy] == true
options[:storage_class] = 'REDUCED_REDUNDANCY'
end
end
- # @return [String] Encodes a +String+ in base 64 regardless of version of
+ # @return [String] Encodes a `String` in base 64 regardless of version of
# Ruby for http headers (removes newlines).
def encode64 input
Base64.encode64(input).split("\n") * ""
end
- # @return [String] Decodes a +String+ in base 64.
+ # @return [String] Decodes a `String` in base 64.
def decode64 input
Base64.decode64(input)
end
end
end