lib/logstash/outputs/s3.rb in logstash-output-s3-4.3.5 vs lib/logstash/outputs/s3.rb in logstash-output-s3-4.3.6

- old
+ new

@@ -108,22 +108,23 @@
   config :additional_settings, :validate => :hash, :default => {}
 
   # Set the size of file in bytes; files that would exceed file_size on the bucket are split into two or more files.
   # If you have tags then it will generate a file of the given size for every tag.
-  ##NOTE: define size of file is the better thing, because generate a local temporary file on disk and then put it in bucket.
+  #
+  # NOTE: defining a file size is preferable, because the plugin generates a local temporary file on disk and then puts it in the bucket.
   config :size_file, :validate => :number, :default => 1024 * 1024 * 5
 
   # Set the time, in MINUTES, to close the current sub_time_section of bucket.
   # If you also define file_size you have a number of files related to the section and the current tag.
   # If it is set to 0 and rotation_strategy is 'time' or 'size_and_time' then the plugin raises a configuration error.
   config :time_file, :validate => :number, :default => 15
 
-  ## IMPORTANT: if you use multiple instance of s3, you should specify on one of them the "restore=> true" and on the others "restore => false".
-  ## This is hack for not destroy the new files after restoring the initial files.
-  ## If you do not specify "restore => true" when logstash crashes or is restarted, the files are not sent into the bucket,
-  ## for example if you have single Instance.
+  # If `restore => false` is specified and Logstash crashes, the unprocessed files are not sent into the bucket.
+  #
+  # NOTE: the `restore => true` default assumes multiple S3 outputs would each set a unique `temporary_directory => ...`;
+  # if they do not, then only a single S3 output is safe to recover (since left-over files are processed and deleted).
   config :restore, :validate => :boolean, :default => true
 
   # The S3 canned ACL to use when putting the file. Defaults to "private".
   config :canned_acl, :validate => ["private", "public-read", "public-read-write", "authenticated-read",
                                     "aws-exec-read", "bucket-owner-read", "bucket-owner-full-control", "log-delivery-write"],
          :default => "private"
@@ -145,10 +146,13 @@
   # Defaults to STANDARD.
   config :storage_class, :validate => ["STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA"], :default => "STANDARD"
 
   # Set the directory where logstash will store the tmp files before sending them to S3;
   # defaults to the current OS temporary directory, e.g. /tmp/logstash on Linux.
+  #
+  # NOTE: the reason we do not have a unique (isolated) temporary directory as a default, to support multiple plugin instances,
+  # is that we would have to rely on something static that does not change between restarts (e.g. a user-set `id => ...`).
   config :temporary_directory, :validate => :string, :default => File.join(Dir.tmpdir, "logstash")
 
   # Specify a prefix to the uploaded filename; this can simulate directories on S3. The prefix does not require a leading slash.
   # This option supports string interpolation; be warned, this can create a lot of temporary local files.
   config :prefix, :validate => :string, :default => ''
@@ -345,24 +349,24 @@
       # one thread has access to the resource.
       @file_repository.get_factory(prefix) do |factory|
         temp_file = factory.current
 
         if @rotation.rotate?(temp_file)
-          @logger.debug("Rotate file",
-                        :strategy => @rotation.class.name,
-                        :key => temp_file.key,
-                        :path => temp_file.path)
+          @logger.debug? && @logger.debug("Rotate file",
+                                          :key => temp_file.key,
+                                          :path => temp_file.path,
+                                          :strategy => @rotation.class.name)
 
           upload_file(temp_file)
           factory.rotate!
         end
       end
     end
   end
 
   def upload_file(temp_file)
-    @logger.debug("Queue for upload", :path => temp_file.path)
+    @logger.debug? && @logger.debug("Queue for upload", :path => temp_file.path)
 
     # if the queue is full the calling thread will be used to upload
     temp_file.close # make sure the content is on disk
     if temp_file.size > 0
       @uploader.upload_async(temp_file,
@@ -381,11 +385,11 @@
       SizeAndTimeRotationPolicy.new(size_file, time_file)
     end
   end
 
   def clean_temporary_file(file)
-    @logger.debug("Removing temporary file", :file => file.path)
+    @logger.debug? && @logger.debug("Removing temporary file", :path => file.path)
     file.delete!
   end
 
   # The upload process will use a separate uploader/threadpool with fewer resources allocated to it,
   # but it will use an unbounded queue for the work; it may take some time before all the older files get processed.
@@ -396,10 +400,10 @@
     Dir.glob(::File.join(@temporary_directory, "**/*"))
       .select { |file| ::File.file?(file) }
       .each do |file|
       temp_file = TemporaryFile.create_from_existing_file(file, temp_folder_path)
       if temp_file.size > 0
-        @logger.debug("Recovering from crash and uploading", :file => temp_file.path)
+        @logger.debug? && @logger.debug("Recovering from crash and uploading", :path => temp_file.path)
        @crash_uploader.upload_async(temp_file, :on_complete => method(:clean_temporary_file), :upload_options => upload_options)
       else
         clean_temporary_file(temp_file)
       end
     end
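
The reworded `restore` / `temporary_directory` notes boil down to: each s3 output needs its own scratch directory for crash recovery to stay safe, because recovery uploads and then deletes whatever it finds in that directory. A minimal pipeline sketch of the safe setup (the bucket names, region, prefixes and directory paths are illustrative, not taken from the plugin source):

    output {
      s3 {
        bucket              => "logs-bucket-a"        # hypothetical bucket
        region              => "us-east-1"
        prefix              => "app-a/"
        size_file           => 5242880                # rotate at 5 MB
        time_file           => 15                     # or after 15 minutes
        restore             => true                   # safe: directory below is unique
        temporary_directory => "/tmp/logstash/s3-a"   # unique per s3 output
      }
      s3 {
        bucket              => "logs-bucket-b"        # hypothetical bucket
        region              => "us-east-1"
        prefix              => "app-b/"
        restore             => true
        temporary_directory => "/tmp/logstash/s3-b"   # distinct from the first output
      }
    }

If both outputs shared the default /tmp/logstash, whichever instance ran restore_from_crash first would upload and delete the other instance's in-progress files, which is exactly the failure mode the new comment warns about.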
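The repeated change from `@logger.debug(...)` to `@logger.debug? && @logger.debug(...)` is a short-circuit guard: when debug logging is disabled, `&&` stops evaluation before the argument hash (and any calls used to build it) is constructed. A standalone Ruby sketch of the pattern; this Logger class is a stand-in with the same debug? / debug contract, not the plugin's actual logger:

    # Stand-in for the logger the plugin receives from Logstash.
    class Logger
      def initialize(debug_enabled)
        @debug_enabled = debug_enabled
      end

      def debug?
        @debug_enabled
      end

      def debug(message, data = {})
        $stderr.puts("DEBUG #{message} #{data.inspect}")
      end
    end

    logger = Logger.new(false)

    # Unguarded: the hash literal and temp_file_key call below would run
    # even though the message is ultimately discarded.
    #   logger.debug("Rotate file", :key => temp_file_key)

    # Guarded: `&&` short-circuits, so nothing on the right-hand side is
    # evaluated unless debug logging is actually enabled.
    logger.debug? && logger.debug("Rotate file", :key => "2023/uuid.log")

The savings are small per call, but these log sites sit on the per-event write and rotation paths, where avoiding needless hash allocation is worthwhile.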