lib/scalyr/common/client.rb in logstash-output-scalyr-0.1.7 vs lib/scalyr/common/client.rb in logstash-output-scalyr-0.1.8
- old
+ new
@@ -117,18 +117,14 @@
:total_compressed_request_bytes_sent => 0, # The total number of compressed bytes sent over the network
:total_response_bytes_received => 0, # The total number of bytes received.
:total_request_latency_secs => 0, # The total number of secs spent waiting for a response (so average latency
# can be calculated by dividing this number by @total_requests_sent).
# This includes connection establishment time.
- :total_connections_created => 0, # The total number of HTTP connections successfully created.
:total_serialization_duration_secs => 0, # The total duration (in seconds) it took to serialize (JSON dumps) all the request bodies.
# You can calculate avg serialization duration by dividing this value by total_requests_sent
:total_compression_duration_secs => 0, # The total duration (in seconds) it took to compress all the request bodies.
# You can calculate avg compression duration by dividing this value by total_requests_sent
- :total_flatten_values_duration_secs => 0, # The total duration (in seconds) it took to flatten nested record values.
- # In case flattening is disabled, this value will always be 0. Can infer average per-request value by dividing this
- # value by total_requests_sent
:compression_type => @compression_type,
:compression_level => @compression_level,
}
@http = Net::HTTP::Persistent.new
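The comments above describe deriving per-request averages from these running totals. A minimal sketch of that arithmetic, assuming the :total_requests_sent counter referenced in the comments (the average_stats helper name is illustrative, not part of the plugin):

# Hypothetical helper: turn the accumulated totals into per-request averages.
def average_stats(stats)
  sent = stats[:total_requests_sent].to_f
  return {} if sent.zero?
  {
    :avg_request_latency_secs        => stats[:total_request_latency_secs] / sent,
    :avg_serialization_duration_secs => stats[:total_serialization_duration_secs] / sent,
    :avg_compression_duration_secs   => stats[:total_compression_duration_secs] / sent
  }
end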
@@ -158,47 +154,43 @@
def get_new_latency_stats
return {
# Returns a fresh set of Quantile::Estimator instances used for per-request metrics.
:serialization_duration_secs => Quantile::Estimator.new, # The duration (in seconds) it took to serialize (JSON dumps) all the request bodies.
:compression_duration_secs => Quantile::Estimator.new, # The duration (in seconds) it took to compress all the request bodies.
- :flatten_values_duration_secs => Quantile::Estimator.new, # The duration (in seconds) it took to flatten nested record values.
- # In case flattening is disabled, this value will always be 0.
:request_latency_secs => Quantile::Estimator.new, # Secs spent waiting for a response. This includes connection establishment time.
:bytes_sent => Quantile::Estimator.new # The number of bytes sent over the network (roughly the batch size plus a small amount of overhead).
}
end
- # Get a clone of current statistics hash
+ # Get a clone of current statistics hash and calculate percentiles
def get_stats
current_stats = @stats.clone
current_stats[:request_latency_p50] = @latency_stats[:request_latency_secs].query(0.5)
current_stats[:request_latency_p90] = @latency_stats[:request_latency_secs].query(0.9)
current_stats[:request_latency_p99] = @latency_stats[:request_latency_secs].query(0.99)
current_stats[:serialization_duration_secs_p50] = @latency_stats[:serialization_duration_secs].query(0.5)
current_stats[:serialization_duration_secs_p90] = @latency_stats[:serialization_duration_secs].query(0.9)
current_stats[:serialization_duration_secs_p99] = @latency_stats[:serialization_duration_secs].query(0.99)
- current_stats[:flatten_values_duration_secs_p50] = @latency_stats[:flatten_values_duration_secs].query(0.5)
- current_stats[:flatten_values_duration_secs_p90] = @latency_stats[:flatten_values_duration_secs].query(0.9)
- current_stats[:flatten_values_duration_secs_p99] = @latency_stats[:flatten_values_duration_secs].query(0.99)
current_stats[:compression_duration_secs_p50] = @latency_stats[:compression_duration_secs].query(0.5)
current_stats[:compression_duration_secs_p90] = @latency_stats[:compression_duration_secs].query(0.9)
current_stats[:compression_duration_secs_p99] = @latency_stats[:compression_duration_secs].query(0.99)
current_stats[:bytes_sent_p50] = @latency_stats[:bytes_sent].query(0.5)
current_stats[:bytes_sent_p90] = @latency_stats[:bytes_sent].query(0.9)
current_stats[:bytes_sent_p99] = @latency_stats[:bytes_sent].query(0.99)
if @flush_quantile_estimates_on_status_send
+ @logger.debug "Recreating / resetting quantile estimator classes for plugin metrics"
@latency_stats = get_new_latency_stats
end
current_stats
end
# Upload data to Scalyr. Assumes that the body size complies with Scalyr limits
- def post_add_events(body, is_status, body_serialization_duration = 0, flatten_nested_values_duration = 0)
+ def post_add_events(body, is_status, body_serialization_duration = 0)
post, compression_duration = prepare_post_object @add_events_uri.path, body
fail_count = 1 # assume failure until the request succeeds
start_time = Time.now
uncompressed_bytes_sent = 0
compressed_bytes_sent = 0
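The per-request distributions above are tracked with Quantile::Estimator (presumably the quantile gem), using the same observe/query pattern visible in this file. A minimal standalone sketch of that pattern:

require 'quantile'

estimator = Quantile::Estimator.new
[0.12, 0.34, 0.05, 0.91].each { |secs| estimator.observe(secs) }  # record per-request latencies
median = estimator.query(0.5)    # p50
tail   = estimator.query(0.99)   # p99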
@@ -236,17 +228,15 @@
@stats[:total_requests_failed] += fail_count
@stats[:total_request_bytes_sent] += uncompressed_bytes_sent
@stats[:total_compressed_request_bytes_sent] += compressed_bytes_sent
@stats[:total_response_bytes_received] += bytes_received
@stats[:total_serialization_duration_secs] += body_serialization_duration
- @stats[:total_flatten_values_duration_secs] += flatten_nested_values_duration
@stats[:total_compression_duration_secs] += compression_duration
end_time = Time.now
@stats[:total_request_latency_secs] += (end_time - start_time)
@latency_stats[:request_latency_secs].observe(end_time - start_time)
@latency_stats[:serialization_duration_secs].observe(body_serialization_duration)
- @latency_stats[:flatten_values_duration_secs].observe(flatten_nested_values_duration)
@latency_stats[:compression_duration_secs].observe(compression_duration)
@latency_stats[:bytes_sent].observe(uncompressed_bytes_sent)
end
end
end
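With the flatten-values timing argument dropped in 0.1.8, a hypothetical call site for the new post_add_events signature could look like the sketch below; the events_batch payload and the client variable are illustrative assumptions, not taken from the plugin:

require 'json'

events_batch = [{ 'ts' => Time.now.to_i, 'message' => 'hello' }]  # illustrative events
serialization_start = Time.now
body = { 'events' => events_batch }.to_json                       # illustrative; real serialization lives in the output plugin
serialization_duration = Time.now - serialization_start
# client is assumed to be an instance of the client class defined in this file
client.post_add_events(body, false, serialization_duration)       # is_status = false for a normal event batch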
@@ -283,10 +273,10 @@
compression_duration = end_time - start_time
end
post = Net::HTTP::Post.new uri_path
post.add_field('Content-Type', 'application/json')
- version = 'output-logstash-scalyr 0.1.7'
+ version = 'output-logstash-scalyr 0.1.8'
post.add_field('User-Agent', version + ';' + RUBY_VERSION + ';' + RUBY_PLATFORM)
if not encoding.nil?
post.add_field('Content-Encoding', encoding)
post.body = compressed_body
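A minimal standalone sketch of the request object assembled here, assuming deflate compression and the /addEvents path purely for illustration (the real code takes the path from @add_events_uri and the codec from @compression_type):

require 'net/http'
require 'zlib'

body = '{"events":[]}'                                   # illustrative payload
compressed_body = Zlib::Deflate.deflate(body)            # deflate-compress the JSON body

post = Net::HTTP::Post.new('/addEvents')
post.add_field('Content-Type', 'application/json')
post.add_field('User-Agent', 'output-logstash-scalyr 0.1.8' + ';' + RUBY_VERSION + ';' + RUBY_PLATFORM)
post.add_field('Content-Encoding', 'deflate')
post.body = compressed_body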