lib/fluent/plugin/out_s3.rb in fluent-plugin-s3-1.1.8 vs lib/fluent/plugin/out_s3.rb in fluent-plugin-s3-1.1.9

- old
+ new

@@ -66,10 +66,12 @@
   config_param :s3_bucket, :string

   desc "S3 region name"
   config_param :s3_region, :string, default: ENV["AWS_REGION"] || "us-east-1"
   desc "Use 's3_region' instead"
   config_param :s3_endpoint, :string, default: nil
+  desc "If true, S3 Transfer Acceleration will be enabled for uploads. IMPORTANT: You must first enable this feature on your destination S3 bucket"
+  config_param :enable_transfer_acceleration, :bool, default: false
   desc "If false, the certificate of endpoint will not be verified"
   config_param :ssl_verify_peer, :bool, :default => true
   desc "The format of S3 object keys"
   config_param :s3_object_key_format, :string, default: "%{path}%{time_slice}_%{index}.%{file_extension}"
   desc "If true, the bucket name is always left in the request URI and never moved to the host as a sub-domain"
@@ -196,10 +198,11 @@
   def start
     options = setup_credentials
     options[:region] = @s3_region if @s3_region
     options[:endpoint] = @s3_endpoint if @s3_endpoint
+    options[:use_accelerate_endpoint] = @enable_transfer_acceleration
     options[:http_proxy] = @proxy_uri if @proxy_uri
     options[:force_path_style] = @force_path_style
     options[:compute_checksums] = @compute_checksums unless @compute_checksums.nil?
     options[:signature_version] = @signature_version unless @signature_version.nil?
     options[:ssl_verify_peer] = @ssl_verify_peer
@@ -249,11 +252,11 @@
     values_for_s3_object_key_post["%{uuid_flush}".freeze] = uuid_random if @uuid_flush_enabled

     s3path = @s3_object_key_format.gsub(%r(%{[^}]+})) do |matched_key|
       values_for_s3_object_key_pre.fetch(matched_key, matched_key)
     end
-    s3path = extract_placeholders(s3path, metadata)
+    s3path = extract_placeholders(s3path, chunk)
     s3path = s3path.gsub(%r(%{[^}]+}), values_for_s3_object_key_post)
     if (i > 0) && (s3path == previous_path)
       if @overwrite
         log.warn "#{s3path} already exists, but will overwrite"
         break
@@ -287,11 +290,11 @@
     values_for_s3_object_key_post["%{uuid_flush}".freeze] = uuid_random if @uuid_flush_enabled

     s3path = @s3_object_key_format.gsub(%r(%{[^}]+})) do |matched_key|
       values_for_s3_object_key_pre.fetch(matched_key, matched_key)
     end
-    s3path = extract_placeholders(s3path, metadata)
+    s3path = extract_placeholders(s3path, chunk)
     s3path = s3path.gsub(%r(%{[^}]+}), values_for_s3_object_key_post)
   end

   tmp = Tempfile.new("s3-")
   tmp.binmode
@@ -317,10 +320,10 @@
     put_options[:grant_write_acp] = @grant_write_acp if @grant_write_acp
     if @s3_metadata
       put_options[:metadata] = {}
       @s3_metadata.each do |k, v|
-        put_options[:metadata][k] = extract_placeholders(v, metadata).gsub(%r(%{[^}]+}), {"%{index}" => sprintf(@index_format, i - 1)})
+        put_options[:metadata][k] = extract_placeholders(v, chunk).gsub(%r(%{[^}]+}), {"%{index}" => sprintf(@index_format, i - 1)})
       end
     end

     @bucket.object(s3path).put(put_options)

     @values_for_s3_object_chunk.delete(chunk.unique_id)