lib/logstash/inputs/http_poller.rb in logstash-input-http_poller-2.1.0 vs lib/logstash/inputs/http_poller.rb in logstash-input-http_poller-3.0.0

- old
+ new

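The headline changes in this diff are the removal of the rufus-scheduler based `schedule` option (with `interval`, in seconds, becoming a required setting driven by `Stud.interval`) and the switch to the Logstash 5.x Event API (`event.set`). As a minimal sketch of how the new polling loop paces itself, assuming only the `stud` gem is installed; the 60-second value and the block body are illustrative stand-ins for the plugin's `run_once(queue)`:

# Hypothetical sketch, not part of the plugin: pacing work with Stud.interval
# the same way the 3.0.0 run method does below.
require "stud/interval"

interval = 60 # seconds, mirroring the now-required `interval` option

Stud.interval(interval) do
  # The plugin calls run_once(queue) here to fire its async HTTP requests.
  puts "polling at #{Time.now}"
end
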
@@ -2,11 +2,10 @@
 require "logstash/inputs/base"
 require "logstash/namespace"
 require "logstash/plugin_mixins/http_client"
 require "socket" # for Socket.gethostname
 require "manticore"
-require "rufus/scheduler"
 
 # This Logstash input plugin allows you to call an HTTP API, decode the output of it into event(s), and
 # send them on their merry way. The idea behind this plugins came from a need to read springboot
 # metrics endpoint, instead of configuring jmx to monitor my java application memory/gc/ etc.
 #
@@ -32,12 +31,11 @@
 #           password => "hunter2"
 #         }
 #       }
 #     }
 #     request_timeout => 60
-#     # Supports "cron", "every", "at" and "in" schedules by rufus scheduler
-#     schedule => { cron => "* * * * * UTC"}
+#     interval => 60
 #     codec => "json"
 #     # A hash of request metadata info (timing, response headers, etc.) will be sent here
 #     metadata_target => "http_poller_metadata"
 #   }
 # }
@@ -59,47 +57,32 @@
   # A Hash of urls in this format : `"name" => "url"`.
   # The name and the url will be passed in the outputed event
   config :urls, :validate => :hash, :required => true
 
   # How often (in seconds) the urls will be called
-  # DEPRECATED. Use 'schedule' option instead.
-  # If both interval and schedule options are specified, interval
-  # option takes higher precedence
-  config :interval, :validate => :number, :deprecated => true
+  config :interval, :validate => :number, :required => true
 
-  # Schedule of when to periodically poll from the urls
-  # Format: A hash with
-  #   + key: "cron" | "every" | "in" | "at"
-  #   + value: string
-  # Examples:
-  #   a) { "every" => "1h" }
-  #   b) { "cron" => "* * * * * UTC" }
-  # See: rufus/scheduler for details about different schedule options and value string format
-  config :schedule, :validate => :hash
-
   # Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event.
   config :target, :validate => :string
 
   # If you'd like to work with the request/response metadata.
   # Set this value to the name of the field you'd like to store a nested
   # hash of metadata.
   config :metadata_target, :validate => :string, :default => '@metadata'
 
   public
-  Schedule_types = %w(cron every at in)
   def register
     @host = Socket.gethostname.force_encoding(Encoding::UTF_8)
 
     @logger.info("Registering http_poller Input", :type => @type,
-                 :urls => @urls, :interval => @interval, :schedule => @schedule, :timeout => @timeout)
+                 :urls => @urls, :interval => @interval, :timeout => @timeout)
 
     setup_requests!
   end
 
   def stop
     Stud.stop!(@interval_thread) if @interval_thread
-    @scheduler.stop if @scheduler
   end
 
   private
   def setup_requests!
     @requests = Hash[@urls.map {|name, url| [name, normalize_request(url)] }]
@@ -148,50 +131,17 @@
     request
   end
 
   public
   def run(queue)
-    #interval or schedule must be provided. Must be exclusively either one. Not neither. Not both.
-    raise LogStash::ConfigurationError, "Invalid config. Neither interval nor schedule was specified." \
-      unless @interval || @schedule
-    raise LogStash::ConfigurationError, "Invalid config. Specify only interval or schedule. Not both." \
-      if @interval && @schedule
-
-    if @interval
-      setup_interval(queue)
-    elsif @schedule
-      setup_schedule(queue)
-    else
-      #should not reach here
-      raise LogStash::ConfigurationError, "Invalid config. Neither interval nor schedule was specified."
-    end
-  end
-
-  private
-  def setup_interval(queue)
     @interval_thread = Thread.current
     Stud.interval(@interval) do
       run_once(queue)
     end
   end
 
-  def setup_schedule(queue)
-    #schedule hash must contain exactly one of the allowed keys
-    msg_invalid_schedule = "Invalid config. schedule hash must contain " +
-      "exactly one of the following keys - cron, at, every or in"
-    raise Logstash::ConfigurationError, msg_invalid_schedule if @schedule.keys.length !=1
-    schedule_type = @schedule.keys.first
-    schedule_value = @schedule[schedule_type]
-    raise LogStash::ConfigurationError, msg_invalid_schedule unless Schedule_types.include?(schedule_type)
-
-    @scheduler = Rufus::Scheduler.new(:max_work_threads => 1)
-    #as of v3.0.9, :first_in => :now doesn't work. Use the following workaround instead
-    opts = schedule_type == "every" ? { :first_in => 0.01 } : {}
-    @scheduler.send(schedule_type, schedule_value, opts) { run_once(queue) }
-    @scheduler.join
-  end
-
+
   private
   def run_once(queue)
     @requests.each do |name, request|
       request_async(queue, name, request)
     end
@@ -242,17 +192,17 @@
 
     event.tag("_http_request_failure")
 
     # This is also in the metadata, but we send it anyone because we want this
     # persisted by default, whereas metadata isn't. People don't like mysterious errors
-    event["http_request_failure"] = {
+    event.set("http_request_failure", {
       "request" => structure_request(request),
       "name" => name,
      "error" => exception.to_s,
       "backtrace" => exception.backtrace,
       "runtime_seconds" => execution_time
-    }
+    })
 
     queue << event
   rescue StandardError, java.lang.Exception => e
     @logger.error? && @logger.error("Cannot read URL or send the error as an event!",
                                     :exception => e,
@@ -264,10 +214,10 @@
   end
   private
   def apply_metadata(event, name, request, response=nil, execution_time=nil)
     return unless @metadata_target
-    event[@metadata_target] = event_metadata(name, request, response, execution_time)
+    event.set(@metadata_target, event_metadata(name, request, response, execution_time))
   end
 
   private
   def event_metadata(name, request, response=nil, execution_time=nil)
     m = {
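
The `event[...]` to `event.set(...)` rewrites in the last two hunks follow the Logstash 5.x Event API, which replaced hash-style field access with explicit getters and setters. A minimal sketch of the difference, runnable only inside a Logstash/JRuby environment where logstash-core is on the load path; the field value here is illustrative:

require "logstash/event"

event = LogStash::Event.new

# 2.1.0-era hash-style assignment, replaced by get/set in the 5.x Event API:
#   event["http_request_failure"] = { "error" => "connect timeout" }

# 3.0.0 style: explicit setter and getter using field-reference syntax.
event.set("http_request_failure", { "error" => "connect timeout" })
event.get("[http_request_failure][error]") # => "connect timeout"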