lib/s33r/client.rb in s33r-0.4.2 vs lib/s33r/client.rb in s33r-0.5
- old
+ new
@@ -1,545 +1,404 @@
-require 'net/https'
-require 'cgi'
-require 'erb'
-require 'yaml'
base = File.dirname(__FILE__)
+require File.join(base, 'networking')
require File.join(base, 's3_acl')
-require File.join(base, 's33r_exception')
+require File.join(base, 's3_logging')
+require File.join(base, 'utility')
module S33r
- include Net
-
- # The client performs operations over the network,
- # using the core to build request headers and content;
- # only client-specific headers are managed here: other headers
- # can be handled by the core.
- #--
- # TODO: use customisable thread pool for requests.
- # TODO: timeout on requests.
- #--
+ # Use this class to do operations on the Service, e.g.
+ # creating buckets, deleting buckets, listing all buckets,
+ # returning a single bucket.
class Client
+ include Networking
+ include S3ACL
+ include S3Logging
include S33r
-
- # S3 keys.
- attr_accessor :aws_access_key, :aws_secret_access_key
- # Size of data chunk to be sent per request when putting data.
- attr_accessor :chunk_size
+ # Options used to create this Client.
+ attr_reader :created_with_options
- # Headers which should be sent with every request by default (unless overridden).
- attr_accessor :client_headers
+ #-- These are used for creating URLs.
- # Whether client should use SSL.
+ # Use SSL for requests.
attr_accessor :use_ssl
- # Whether client dumps headers from requests.
- attr_accessor :dump_requests
+ # Default expiry for authenticated URLs.
+ attr_accessor :expires
- # Default log bucket location.
- attr_accessor :log_bucket
+ # Default canned ACL string to apply to all put requests.
+ attr_accessor :canned_acl
- # The options used to create the client (useful when spawning
- # NamedBucket instances from Client instances).
- attr_reader :options
-
- # Configure either an SSL-enabled or plain HTTP client.
- # (If using SSL, no verification of server certificate is performed.)
- #
- # +options+: hash of optional client config.:
- # * <tt>:use_ssl => false</tt>: only use plain HTTP for connections
- # * <tt>:dump_requests => true</tt>: dump each request's initial line and headers to STDOUT
- def initialize(aws_access_key, aws_secret_access_key, options={})
- @use_ssl = true
- @use_ssl = false if (false == options[:use_ssl])
- options[:use_ssl] = @use_ssl
-
- @dump_requests = (true == options[:dump_requests])
-
- # set default chunk size for streaming request body
- @chunk_size = DEFAULT_CHUNK_SIZE
-
- @log_bucket = options[:log_bucket]
-
- # Amazon S3 developer keys
- @aws_access_key = aws_access_key
- @aws_secret_access_key = aws_secret_access_key
-
- # headers sent with every request made by this client
- @client_headers = {}
-
- # keep a record of the options used to create this instance
- @options = options
- end
+ # Amazon keys.
+ attr_accessor :access, :secret
- # Get an HTTP client instance.
- #
- # NB this has been moved here so that client instances are
- # only instantiated when needed (so Client can be used
- # as an empty shell when list_buckets is called).
- def get_client
- if @use_ssl
- client = HTTP.new(HOST, PORT)
- # turn off SSL certificate verification
- client.verify_mode = OpenSSL::SSL::VERIFY_NONE
- client.use_ssl = true
- else
- client = HTTP.new(HOST, NON_SSL_PORT)
- client.use_ssl = false
- end
-
- client
+ # Get default options passed to every call to do_request.
+ def request_defaults
+ defaults = {}
+ defaults[:use_ssl] = @use_ssl
+ defaults[:expires] = @expires
+ defaults[:access] = @access
+ defaults[:secret] = @secret
+ defaults[:canned_acl] = @canned_acl
+ defaults
end
- # Initialise client from YAML configuration file
- # (see load_config method for details of acceptable format).
- def Client.init(config_file)
- aws_access_key, aws_secret_access_key, options = load_config(config_file)
- Client.new(aws_access_key, aws_secret_access_key, options)
+ # Get the settings for this client.
+ def settings
+ request_defaults.merge(:dump_requests => dump_requests,
+ :chunk_size => chunk_size, :persistent => persistent)
end
+
+ # Create a plain Client.
+ def initialize(options={})
+ set_options(options)
+ end
- # Load YAML config. file for a client. The config. file looks like this:
+ # Set options for the client.
#
- # :include: test/files/namedbucket_config.yml
- #
- # Note that the loader also runs the config. file through ERB, so you can
- # add dynamic blocks of ERB (Ruby) code into your files.
- #
- # The +options+ section contains other settings, either specific to the Client or
- # NamedBucket classes, or general application settings.
- # The +options+ section can be omitted, but settings for AWS keys are required.
- #
- # Returns an array <tt>[aws_access_key, aws_secret_access_key, options]</tt>, where +options+
- # is a hash.
- def Client.load_config(config_file)
- config = YAML::load(ERB.new(IO.read(config_file)).result)
- aws_access_key = config['aws_access_key']
- aws_secret_access_key = config['aws_secret_access_key']
+ # +options+ may include the following which alter how the Client interacts with S3; they also
+ # influence URLs you may generate from the Client:
+ # * <tt>:access => 'aws access key'</tt> (defaults to nil)
+ # * <tt>:secret => 'aws secret access key'</tt> (defaults to nil)
+ # * <tt>:use_ssl => false</tt>: to use plain HTTP for requests
+ # sent by this bucket (default=true). If a bucket has :use_ssl => true,
+ # any URLs you generate from it will be SSL URLs unless you explicitly
+ # disable this behaviour (see url for details).
+ # * <tt>:expires => <datetime specifier></tt>: set the default value to be passed as the :expires
+ # option when generating authenticated URLs. Should be parseable by S33r.parse_expiry.
+ # * <tt>:canned_acl => 'public-read'</tt>: set a default canned acl to apply to all put
+ # requests.
+ #
+ # These options change the behaviour of the HTTP client which actually sends the request:
+ # * <tt>:chunk_size => Integer</tt>: use a non-standard chunk size;
+ # default is to use S33r::DEFAULT_CHUNK_SIZE.
+ # * <tt>:persistent => true</tt>: use persistent HTTP connections
+ # (default=false).
+ # * <tt>:dump_requests => true</tt>: to dump all request headers before the request is sent.
+ def set_options(options={})
+ # General client options.
+ @access = options[:access]
+ @secret = options[:secret]
+ @use_ssl = true
+ @use_ssl = false if (false == options[:use_ssl])
+ @expires = options[:expires] || 'never'
+ @canned_acl = options[:canned_acl] || nil
- options = {}
- options = S33r.keys_to_symbols(config['options']) if config['options']
+ # Options specific to the mechanics of the HTTP request.
+ @dump_requests = options[:dump_requests] || false
+ @chunk_size = options[:chunk_size]
+ @persistent = options[:persistent] || false
- [aws_access_key, aws_secret_access_key, options]
+ @created_with_options = options
end
-
- # Send a request over the wire.
- #
- # This method streams +data+ if it responds to the +stat+ method
- # (as files do).
- #
- # Returns a Net::HTTPResponse instance.
- def do_request(method, path, data=nil, headers={})
- req = get_requester(method, path)
- req.chunk_size = @chunk_size
-
- # Add the S3 headers which are always required.
- headers = add_default_headers(headers)
-
- # Add any client-specific default headers.
- headers = add_client_headers(headers)
-
- # Generate the S3 authorization header.
- headers['Authorization'] = generate_auth_header_value(method, path, headers,
- @aws_access_key, @aws_secret_access_key)
-
- # Insert the headers into the request object.
- headers.each do |key, value|
- req[key] = value
- end
-
- # Add data to the request as a stream.
- if req.request_body_permitted?
- # For streaming files; NB Content-Length will be set by Net::HTTP
- # for character-based data: this section of code is only used
- # when reading directly from a file.
- if data.respond_to?(:stat)
- req.body_stream = data
- req['Content-Length'] = data.stat.size.to_s
- data = nil
- end
- else
- data = nil
- end
-
- if @dump_requests
- puts req.to_s
- end
-
- # Run the request.
- client = get_client
- client.start do
- response = client.request(req, data)
-
- # Check the response to see whether S3 is down;
- # raises an S3FallenOver error if S3 returns a 500-503 response code
- response.check_s3_availability
-
- response
- end
- end
-
- # Return an instance of an appropriate request class.
- def get_requester(method, path)
- raise S33rException::UnsupportedHTTPMethod, "The #{method} HTTP method is not supported" if !(METHOD_VERBS.include?(method))
- eval("HTTP::" + method[0,1].upcase + method[1..-1].downcase + ".new('#{path}')")
- end
-
+
# List all buckets.
#
- # Returns an array of NamedBucket instances; array will be empty if
+ # Returns an array of Bucket instances; array will be empty if
# the BucketListing parse fails for any reason (i.e. no <Bucket> elements
# occur in it).
- def list_buckets
- bucket_list_xml = do_get('/').body
+ #
+ # +options+ is passed through to get_bucket, making it possible to detach
+ # retrieved buckets from the Client instance, and to pass other options to
+ # the bucket.
+ def buckets(options={})
+ resp = do_get
+
+ bucket_list_xml = resp.body
doc = XML.get_xml_doc(S33r.remove_namespace(bucket_list_xml))
- named_buckets = []
+ buckets = {}
doc.find("//Bucket").to_a.each do |node|
bucket_name = node.xget('Name')
if bucket_name
- # The NamedBucket instances inherit the request dumping behaviour
+ # CreationDate is a string in format '2006-10-17T15:14:39.000Z'.
+ creation_date = Time.parse(node.xget('CreationDate'))
+ # The Bucket instances inherit the request dumping behaviour
# of this client.
- named_buckets << NamedBucket.new(@aws_access_key, @aws_secret_access_key,
- {:default_bucket => bucket_name, :dump_request => self.dump_requests})
+ buckets[bucket_name] = get_bucket(bucket_name, options)
end
end
- named_buckets
+ buckets
end
+ alias :list_buckets :buckets
- # List just bucket names.
- def list
- list_buckets.map {|bucket| bucket.name}
+ # Just get a sorted array of names of buckets.
+ def bucket_names
+ buckets.keys.sort
end
-
+
+ # Get a Client instance bound to a bucket.
+ #
+ # +options+:
+ # * <tt>:orphan => true</tt>: create the Client in isolation from
+ # the Service and don't inherit any of its instance settings.
+ #
+ # Other options are passed through to Bucket.new.
+ def get_bucket(bucket_name, options={})
+ orphan = options.delete(:orphan)
+ unless orphan
+ options.merge!(settings) { |key, old_val, new_val| old_val }
+ end
+ bucket = Bucket.new(bucket_name, options)
+ yield bucket if block_given?
+ bucket
+ end
+
+ # Create a new Bucket.
+ def create_bucket(name, options={})
+ options[:create] = true
+ get_bucket(name, options)
+ end
+
# List entries in a bucket.
#
- # +query_params+: hash of options on the bucket listing request, passed as querystring parameters to S3
+ # +options+: hash of options on the bucket listing request, passed as querystring parameters to S3
# (see http://docs.amazonwebservices.com/AmazonS3/2006-03-01/).
# * <tt>:prefix => 'some_string'</tt>: restrict results to keys beginning with 'some_string'
# * <tt>:marker => 'some_string'</tt>: restrict results to keys occurring lexicographically after 'some_string'
- # * <tt>:max_keys => 1000</tt>: return at most this number of keys (maximum possible value is 1000)
+ # * <tt>:max_keys => Integer</tt>: return at most this number of keys (maximum possible value is 1000)
# * <tt>:delimiter => 'some_string'</tt>: keys containing the same string between prefix and the delimiter
# are rolled up into a CommonPrefixes element inside the response
#
# NB if you pass a :marker, this takes up one of your :max_keys; so if you are fetching page
# two from a bucket, and you want 10 items, you need to set :max_keys to 11.
#
# To page through a bucket 10 keys at a time, you can do:
#
- # resp, listing = list_bucket('mybucket', :max_keys => 10)
- # resp, listing = list_bucket('mybucket', :max_keys => 11, :marker => listing.last_key)
- # resp, listing = list_bucket('mybucket', :max_keys => 11, :marker => listing.last_key)
+ # listing = list_bucket('mybucket', :max_keys => 10)
+ # listing = list_bucket('mybucket', :max_keys => 11, :marker => listing.last_key)
+ # listing = list_bucket('mybucket', :max_keys => 11, :marker => listing.last_key)
# etc.
#
# Note in the example code, +listing+ is a BucketListing instance; call its contents method
# to get a hash of the keys in the bucket, along with associated objects.
#
- # Returns [raw_response, BucketListing instance].
- def list_bucket(bucket_name, query_params={})
- if query_params[:max_keys]
- max_keys = query_params[:max_keys].to_i
- raise S33rException::BucketListingMaxKeysError, "max_keys option to list bucket cannot be > #{BUCKET_LIST_MAX_MAX_KEYS}" \
+ # Returns BucketListing instance.
+ #-- TODO: testing
+ def listing(options={})
+ querystring = options[:querystring] || {}
+
+ # Check :max_keys isn't higher than the maximum allowed by S3.
+ if options[:max_keys]
+ max_keys = options[:max_keys].to_i
if max_keys > BUCKET_LIST_MAX_MAX_KEYS
-
- # convert :max_keys querystring parameter to 'max-keys' parameter
- query_params['max-keys'] = query_params.delete(:max_keys)
+ raise S3Exception::BucketListingMaxKeysError, "max_keys option to list bucket cannot be > #{BUCKET_LIST_MAX_MAX_KEYS}"
+ end
+ querystring['max-keys'] = max_keys
end
- resp = do_get("/#{bucket_name}" + generate_querystring(query_params))
-
- [resp, BucketListing.new(resp.body)]
- end
-
- # Create a bucket.
- #
- # Returns true if response returned a 200 code; false otherwise.
- def create_bucket(bucket_name, headers={})
- resp = do_put("/#{bucket_name}", nil, headers)
- resp.ok?
- end
-
- # Delete a bucket.
- #
- # +options+ hash can contain the following:
- # * <tt>:force => true</tt>: delete all keys within the bucket then delete the bucket itself
- #-- TODO: maybe delete keys matching a partial path
- #-- TODO: if multiple pages of keys in buckets, need to get them by page.
- def delete_bucket(bucket_name, headers={}, options={})
- if true == options[:force]
- _, bucket_listing = list_bucket(bucket_name)
- bucket_listing.contents.each_value do |obj|
- delete_resource(bucket_name, obj.key)
- end
+ ['prefix', 'marker', 'delimiter'].each do |key|
+ key_sym = key.to_sym
+ querystring[key] = options[key_sym] if options[key_sym]
end
- do_delete("/#{bucket_name}", headers)
+ options[:querystring] = querystring
+
+ resp = do_get(options)
+
+ if resp.ok?
+ @listing = BucketListing.new(resp.body)
+ else
+ raise resp.s3_error
+ end
end
-
- # Check whether a bucket exists or not.
- def bucket_exists?(bucket_name)
- resource_exists?(bucket_name)
+ alias :objects :listing
+
+
+ # List content of a bucket.
+ def list_bucket(bucket_name, options={})
+ options[:bucket] = bucket_name
+ listing(options)
end
- # Create a NamedBucket instance.
+ # Delete a Bucket.
#
- # +options+ is a hash of extra options to use when creating
- # the NamedBucket instance (see NamedBucket.initialize);
- # specify :parent to use the same options used to create this Client
- # instance.
- def get_named_bucket(bucket_name, options={}, &block)
- options = @options if :parent == options
- options[:default_bucket] = bucket_name
- named_bucket = NamedBucket.new(@aws_access_key, @aws_secret_access_key, options)
- yield named_bucket if block_given?
- named_bucket
+ # +options+:
+ # * <tt>:force => true</tt>: To clear the content of the bucket first.
+ def delete_bucket(bucket_name, options={})
+ options[:bucket] = bucket_name
+ if options[:force]
+ listing(options).keys.each { |key| do_delete(options.merge(:key => key)) }
+ end
+ do_delete(options).ok?
end
- # Fetch a resource.
- #
- # Returns a plain response, not an S3Object: if you want an object back,
- # use get_object instead.
- def get_resource(bucket_name, resource_key, headers={})
- do_get("/#{bucket_name}/#{resource_key}", headers)
+ # Check whether a bucket exists on S3.
+ def bucket_exists?(name, options={})
+ options[:bucket] = name
+ do_head(options).ok?
end
- # Check whether a bucket contains a key.
+ # Put a "thing" onto S3.
#
- # Returns true if resource_key exists inside bucket_name.
- def resource_exists?(bucket_name, resource_key=nil)
- path = "/#{bucket_name}"
- path += "/#{resource_key}" unless resource_key.nil?
- do_head(path).ok?
+ # +thing+ may be a string, an S3Object, an S3ACL::Policy,
+ # a LoggingResource or a file handle.
+ #
+ # Anything you pass in +options+ will override any values
+ # inferred from the +thing+ (e.g. content type, key).
+ #
+ # +options+:
+ # * <tt>:key => 'some-key'</tt> (required unless thing is an S3Object).
+ # * <tt>:bucket => 'some-bucket'</tt>
+ # * <tt>:content_type => 'text/plain'</tt>
+ # * <tt>:render_as_attachment => Boolean</tt>
+ # * <tt>:file => true</tt>: thing is a filename, so load it as a file
+ # * <tt>:canned_acl => 'public'</tt>: one of S33r::CANNED_ACLS, to set a canned
+ # acl on a put.
+ #
+ #-- TODO: finish documentation for options
+ #-- TODO: implement canned_acl
+ #-- TODO: pass Policy as an option
+ def put(thing, options={}, headers={})
+ is_file = options[:file]
+
+ # thing is a file, so load it.
+ if is_file and thing.is_a?(String)
+ # Use the filename as the key unless it is set already.
+ options[:key] ||= thing
+
+ # Guess the content type unless it's been set.
+ unless options[:content_type]
+ mime_type = guess_mime_type(thing)
+ content_type = mime_type.simplified
+ options[:content_type] = content_type
+ end
+ elsif thing.is_a?(S3Object)
+ options[:key] ||= thing.key
+ data = thing.value
+ options[:content_type] ||= thing.content_type
+ options[:render_as_attachment] ||= thing.render_as_attachment
+ headers = metadata_headers(thing.meta)
+ elsif thing.is_a?(Policy) || thing.is_a?(LoggingResource)
+ data = thing.to_xml
+ options[:content_type] = 'text/xml'
+ else
+ data = thing
+ end
+
+ key = options[:key]
+
+ # Headers for content type etc.
+ headers.merge! content_headers(options[:content_type], key, options[:render_as_attachment])
+
+ if is_file
+ File.open(thing) do |data|
+ do_put(data, options, headers).ok?
+ end
+ else
+ do_put(data, options, headers).ok?
+ end
end
- # Fetch an object. Note that this actually pulls down the
- # object from S3 and instantiates the S3Object instance with it.
- def get_object(bucket_name, resource_key, headers={})
- response = get_resource(bucket_name, resource_key, headers)
- S3Object.from_response(resource_key, response)
+ # Put a file onto S3 (shortcut to put).
+ def put_file(filename, options={}, headers={})
+ options[:file] = true
+ put(filename, options, headers)
end
- # Fetch the ACL document for a resource.
- #
- # Raises an exception if there is a problem with the resource
- # (e.g. it doesn't exist).
- def get_acl(bucket_name, resource_key='')
- path = s3_acl_path(bucket_name, resource_key)
- response = do_get(path)
- if response.ok?
- S3ACL::ACLDoc.from_xml(response.body)
+ # Get an ACL.
+ def get_acl(options={})
+ options[:acl] = true
+ resp = do_get(options)
+ if resp.ok?
+ S3ACL::Policy.from_xml(resp.body)
else
- raise S33rException::MissingResource, "Tried to get an ACL from a non-existent resource [#{path}]"
+ nil
end
end
+ alias :acl :get_acl
- # Put the ACL document back to a resource.
- #
- # +acl_doc+ is an S33r::S3ACL::ACLDoc instance.
- #
- # Returns true if response had a 200 code, false otherwise.
- # If you get a 400 Bad Request back, it means a CanonicalUser
- # could not be identified from the email address.
- def set_acl(acl_doc, bucket_name, resource_key='')
- path = s3_acl_path(bucket_name, resource_key)
- response = do_put(path, acl_doc.to_xml)
- response.ok?
+ # Set an ACL.
+ def set_acl(policy, options={})
+ options[:acl] = true
+ put(policy, options)
end
+ alias :acl= :set_acl
- # Set up logging for a bucket and resource key.
- #
- # +logging_resource+ = a LoggingResource instance.
- # +bucket_name+ = a bucket to log.
- # +resource_key+ = a resource to log (if empty, logging
- # gets added to the bucket).
- def set_logging(logging_resource, bucket_name, resource_key='')
- path = s3_logging_path(bucket_name, resource_key)
- response = do_put(path, logging_resource.to_xml)
+ # Is a resource public?
+ def public?(options={})
+ get_acl(options).public_readable?
end
- # Make a resource public (i.e. grant READ permissions
- # to the AllUsers group type). NB separate method is used
- # on buckets, to make all of their content public too.
- #
- # Returns nil if resource does not exist.
- def make_public(bucket_name, resource_key='')
- acl = get_acl(bucket_name, resource_key)
- if !acl.nil? and acl.add_public_read_grants
- set_acl(acl, bucket_name, resource_key)
- end
+ # Make a resource public
+ def make_public
+ set_acl(get_acl().add_public_read_grant)
end
- #-- TODO
+ # Make a resource private
def make_private
+ set_acl(get_acl().remove_public_read_grant)
end
- # Make a bucket capable of being a target for access logging.
- #
- # Returns true if the bucket is now a possible log target;
- # false otherwise.
- #
- #-- TODO: tests
- def enable_log_target(bucket_name)
- acl = get_acl(bucket_name)
- if acl.add_log_target_grants
- set_acl(acl, bucket_name)
- end
- acl.log_targetable?
+ # Get a URL for a thing.
+ def url(options={})
+ options = request_defaults.merge(options)
+ s3_url(options)
end
- # Disable permissions for access logging into a bucket.
+ # Change the status of a bucket for logging.
#
- # Returns true if the bucket is no longer log targetable;
- # false if it remains a log target.
- #
- #-- TODO: tests
- def disable_log_target(bucket_name)
- acl = get_acl(bucket_name)
- acl.remove_log_target
- set_acl(acl, bucket_name)
- !acl.log_targetable?
+ # +state+ = :on to turn logging on (default),
+ # :off to turn logging off.
+ def change_log_target_status(bucket_name, state=:on)
+ logging_on = (:on == state)
+ bucket = get_bucket(bucket_name)
+ policy = bucket.acl
+ logging_on ? policy.add_log_target_grants : policy.remove_log_target_grants
+ bucket.acl = policy
+ logging_on == policy.log_targetable?
end
- # Enable logging for a resource (only buckets are supported presently).
+ # Get the logging status for a resource.
#
- # +log_prefix+ is the prefix for the logs.
- # +bucket_name+ is the bucket to log.
- # +log_bucket+ is the bucket to put logs into.
- #
- # options:
- # +:for_key => 'key'+ is the (optional) resource to log in the bucket
- # (NB this is not currently supported by S3).
- # +:log_prefix => 'prefix'+ is the (optional) log file prefix
- # (defaults to bucket_name + '-')
- #
- #-- TODO: tests
- def enable_logging(bucket_name, log_bucket=nil, options={})
- # Set to the default log_bucket if not set explicitly.
- log_bucket ||= @log_bucket
-
- resource_key = options[:for_key]
- resource_key ||= ''
-
- log_prefix = options[:prefix]
- log_prefix ||= bucket_name + '-'
-
- log_bucket_acl = get_acl(log_bucket)
- if !(log_bucket_acl.log_targetable?)
- raise BucketNotLogTargetable, "The bucket #{log_bucket} cannot be specified as a log target"
+ # +options+:
+ # * <tt>:for_bucket => 'bucket'</tt>: get the logging status for a bucket.
+ # (Alias for :bucket; if both supplied, :bucket takes preference.)
+ def logging(options={})
+ options[:logging] = true
+ options[:bucket] ||= options[:for_bucket]
+ resp = do_get(options)
+ if resp.ok?
+ LoggingResource.from_xml(resp.body)
+ else
+ nil
end
- logging_resource = LoggingResource.new(log_bucket, log_prefix)
- set_logging(logging_resource, bucket_name, resource_key)
end
- # Turn off logging of a resource.
- #-- TODO
- def disable_logging
- end
-
- # Get the logging status of a resource.
- #-- TODO
- def get_logging
- end
-
- # Put some generic resource onto S3.
+ # Enable logging for a bucket.
#
- # To stream with this method, +data+ should respond to the +stat+
- # method; examples of data types which respond to this include File instances.
- def put_resource(bucket_name, resource_key, data, headers={})
- raise S33r::S33rException::TryingToPutEmptyResource, "No data to put for key '#{resource_key}'" unless data
- resp = do_put("/#{bucket_name}/" + "#{CGI::escape(resource_key)}", data, headers)
- resp.ok?
- end
-
- # Put a string onto S3.
- def put_text(string, bucket_name, resource_key, headers={})
- headers["Content-Type"] = "text/plain"
- put_resource(bucket_name, resource_key, string, headers)
- end
-
- # Put a file onto S3.
+ # +target_bucket+ is the target for the logs.
+ # The bucket you want to log is passed in +options+.
#
- # If +resource_key+ is nil, the filename is used as the key instead.
- #
- # +headers+ sets some headers with the request; useful if you have an odd file type
- # not recognised by the mimetypes library, and want to explicitly set the Content-Type header.
- #
- # +options+ hash simplifies setting some headers with specific meaning to S3:
- # * <tt>:render_as_attachment => true</tt>: set the Content-Disposition for this file to "attachment" and set
- # the default filename for saving the file (when accessed by a web browser) to +filename+; this
- # turns the file into a download when opened in a browser, rather than trying to render it inline.
- #
- # Note that this method uses a handle to the file, so it can be streamed in chunks to S3.
- def put_file(filename, bucket_name, resource_key=nil, headers={}, options={})
- # default to the file path as the resource key if none explicitly set
- if resource_key.nil?
- resource_key = filename
- end
+ # +options+
+ # * <tt>:bucket => 'bucket'</tt>: bucket to log.
+ # * <tt>:for_bucket => 'bucket'</tt>: syntactic sugar; alias for <tt>:bucket</tt>.
+ # If :bucket and :for_bucket are provided, :bucket takes preference.
+ # * <tt>:prefix => 'some-prefix-'</tt>: specify a prefix for log files;
+ # otherwise 'log-<bucket name>-' is used
+ def logs_to(target_bucket, options={})
+ target_bucket_name = target_bucket.is_a?(Bucket) ? target_bucket.name : target_bucket
+ log_prefix = options[:prefix] || "log-#{bucket_name}-"
+ options[:bucket] ||= options[:for_bucket]
- # set Content-Disposition header
- if options[:render_as_attachment]
- headers['Content-Disposition'] = "attachment; filename=#{File.basename(filename)}"
+ target_bucket_acl = get_acl(:bucket => target_bucket_name)
+ unless target_bucket_acl.log_targetable?
+ raise BucketNotLogTargetable, "The bucket #{target_bucket_name} cannot be specified as a log target"
end
-
- # content type is explicitly set in the headers, so apply to request
- if headers[:content_type]
- # use the first MIME type corresponding to this content type string
- # (MIME::Types returns an array of possible MIME types)
- mime_type = MIME::Types[headers[:content_type]][0]
- else
- # we're not going to use this much, just for parsing the content type etc.
- mime_type = guess_mime_type(filename)
- end
- content_type = mime_type.simplified
- headers['Content-Type'] = content_type
- headers['Content-Transfer-Encoding'] = 'binary' if mime_type.binary?
-
- # Open the file, and pass the handle to the HTTP client so content
- # can be streamed.
- File.open(filename) do |data|
- # send the put request
- put_resource(bucket_name, resource_key, data, headers)
- end
+
+ logging_resource = LoggingResource.new(target_bucket_name, log_prefix)
+ options[:logging] = true
+ put(logging_resource, options)
end
- # Delete a resource from S3.
+ # Turn off logging for a bucket.
#
- # Note that S3 returns the same response code regardless
- # of whether the resource was successfully deleted, or didn't exist
- # in the first place.
- def delete_resource(bucket_name, resource_key, headers={})
- do_delete("/#{bucket_name}/#{resource_key}", headers)
+ # +options+:
+ # * <tt>:bucket => 'bucket'</tt>: bucket to turn logging off for.
+ def logs_off(options={})
+ options[:logging] = true
+ put(LoggingResource.new, options)
end
-
- # Add any default headers which should be sent with every request from the client.
- #
- # +headers+ is a hash of headers already set up. Any headers passed in here
- # override the defaults in +client_headers+.
- #
- # Returns +headers+ with the content of +client_headers+ merged in.
- def add_client_headers(headers)
- headers.merge!(client_headers) { |key, arg, default| arg }
- end
-
- def do_get(path='/', headers={})
- do_request('GET', path, nil, headers)
- end
-
- def do_head(path='/', headers={})
- do_request('HEAD', path, nil, headers)
- end
-
- def do_post(path='/', data=nil, headers={})
- do_request('POST', path, data, headers)
- end
-
- def do_put(path='/', data=nil, headers={})
- do_request('PUT', path, data, headers)
- end
-
- def do_delete(path, headers={})
- do_request('DELETE', path, nil, headers)
- end
-
end
-end
+end
\ No newline at end of file