lib/benchmark_driver/runner/once.rb in benchmark_driver-0.11.1 vs lib/benchmark_driver/runner/once.rb in benchmark_driver-0.12.0

- old
+ new

@@ -5,39 +5,37 @@
 require 'tempfile'
 require 'shellwords'
 
 # Run only once, for testing
 class BenchmarkDriver::Runner::Once
+  METRIC = BenchmarkDriver::Metric.new(name: 'Iteration per second', unit: 'i/s')
+
   # JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
   Job = Class.new(BenchmarkDriver::DefaultJob)
   # Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
-  JobParser = BenchmarkDriver::DefaultJobParser.for(Job)
+  JobParser = BenchmarkDriver::DefaultJobParser.for(klass: Job, metrics: [METRIC])
 
-  METRIC = BenchmarkDriver::Metric.new(name: 'Iteration per second', unit: 'i/s')
-
   # @param [BenchmarkDriver::Config::RunnerConfig] config
   # @param [BenchmarkDriver::Output] output
   def initialize(config:, output:)
     @config = config
     @output = output
   end
 
   # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
   # @param [Array<BenchmarkDriver::Default::Job>] jobs
   def run(jobs)
-    @output.metrics = [METRIC]
-
     jobs = jobs.map do |job|
       Job.new(job.to_h.merge(loop_count: 1)) # to show this on output
     end
 
     @output.with_benchmark do
       jobs.each do |job|
         @output.with_job(name: job.name) do
           job.runnable_execs(@config.executables).each do |exec|
             duration = run_benchmark(job, exec: exec) # no repeat support
-            @output.with_context(name: exec.name, executable: exec, duration: duration, loop_count: 1) do
-              @output.report(value: 1.0 / duration, metric: METRIC)
+            @output.with_context(name: exec.name, executable: exec) do
+              @output.report(values: { METRIC => 1.0 / duration }, duration: duration, loop_count: 1)
             end
           end
         end
       end
     end
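
What changed, in short: 0.12.0 declares the runner's metrics up front through `DefaultJobParser.for(klass:, metrics:)` instead of assigning `@output.metrics` at run time, and per-measurement data moves from `with_context` to `report`, which now receives a `{ metric => value }` hash together with `duration:` and `loop_count:`. The sketch below is a hypothetical, standalone receiver of those calls, not the gem's `BenchmarkDriver::Output` API; the method names and keyword arguments mirror what this diff shows the runner sending, while `Metric`, `SketchOutput`, and the printed format are illustrative stand-ins.

# Hypothetical stand-ins for illustration only; not benchmark_driver's real classes.
Metric = Struct.new(:name, :unit, keyword_init: true)

class SketchOutput
  def with_benchmark
    yield
  end

  def with_job(name:)
    puts name
    yield
  end

  # 0.12.0 shape: duration and loop_count no longer arrive here...
  def with_context(name:, executable:)
    @context_name = name
    yield
  end

  # ...they arrive on report, alongside a { metric => value } hash.
  def report(values:, duration:, loop_count:)
    values.each do |metric, value|
      puts format('  %s: %.2f %s (loop_count=%d, duration=%.6fs)',
                  @context_name, value, metric.unit, loop_count, duration)
    end
  end
end

# Mimic one measurement the Once runner would emit under the new convention.
metric = Metric.new(name: 'Iteration per second', unit: 'i/s')
output = SketchOutput.new
output.with_benchmark do
  output.with_job(name: 'example') do
    output.with_context(name: 'ruby', executable: nil) do
      output.report(values: { metric => 1.0 / 0.25 }, duration: 0.25, loop_count: 1)
    end
  end
end

Read this way, the release moves metric declaration to job-parse time and attaches the measurement details to each report call; treat that as an inference from this file alone rather than a statement about the whole gem.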