lib/benchmark_driver/runner/ips.rb in benchmark_driver-0.12.0 vs lib/benchmark_driver/runner/ips.rb in benchmark_driver-0.13.0
- old
+ new
@@ -14,13 +14,15 @@
# Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
JobParser = BenchmarkDriver::DefaultJobParser.for(klass: Job, metrics: [METRIC])
# @param [BenchmarkDriver::Config::RunnerConfig] config
# @param [BenchmarkDriver::Output] output
- def initialize(config:, output:)
+ # @param [Array<BenchmarkDriver::Context>] contexts
+ def initialize(config:, output:, contexts:)
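# Note: each context (as used below) carries a name, an executable, a gems
# list, and a prelude, replacing the bare Config::Executable objects that
# 0.12.0 pulled from @config.executables.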
@config = config
@output = output
+ @contexts = contexts
end
# This method is dynamically called by `BenchmarkDriver::JobRunner.run`
# @param [Array<BenchmarkDriver::Default::Job>] jobs
def run(jobs)
@@ -28,15 +30,15 @@
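# Warmup phase: briefly run each job to estimate how many iterations fit
# into @config.run_duration, then rebuild it with that loop_count fixed.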
@output.with_warmup do
jobs = jobs.map do |job|
next job if job.loop_count # skip warmup if loop_count is set
@output.with_job(name: job.name) do
- executable = job.runnable_execs(@config.executables).first
- duration, loop_count = run_warmup(job, exec: executable)
+ context = job.runnable_contexts(@contexts).first
+ duration, loop_count = run_warmup(job, context: context)
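# value_duration converts the raw measurement into the reported metric
# (iterations per second for this runner) and its duration.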
value, duration = value_duration(duration: duration, loop_count: loop_count)
- @output.with_context(name: executable.name, executable: executable) do
+ @output.with_context(name: context.name, executable: context.executable, gems: context.gems) do
@output.report(values: { metric => value }, duration: duration, loop_count: loop_count)
end
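# Rescale loop_count so the timed run lasts about run_duration: e.g. 1000
# warmup iterations measured in 0.5s with run_duration 3.0 give
# (1000 * 3.0 / 0.5).floor = 6000 iterations.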
loop_count = (loop_count.to_f * @config.run_duration / duration).floor
Job.new(job.to_h.merge(loop_count: loop_count))
@@ -46,16 +48,16 @@
end
@output.with_benchmark do
jobs.each do |job|
@output.with_job(name: job.name) do
- job.runnable_execs(@config.executables).each do |exec|
+ job.runnable_contexts(@contexts).each do |context|
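# Repeater re-runs the measurement block (per @config's repeat settings)
# and reduces the samples: larger_better keeps higher i/s as better, and
# rest_on_average appears to average the remaining fields such as duration.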
repeat_params = { config: @config, larger_better: true, rest_on_average: :average }
value, duration = BenchmarkDriver::Repeater.with_repeat(repeat_params) do
- run_benchmark(job, exec: exec)
+ run_benchmark(job, context: context)
end
- @output.with_context(name: exec.name, executable: exec) do
+ @output.with_context(name: context.name, executable: context.executable, gems: context.gems) do
@output.report(values: { metric => value }, duration: duration, loop_count: job.loop_count)
end
end
end
end
@@ -63,44 +65,44 @@
end
private
# @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is nil
- # @param [BenchmarkDriver::Config::Executable] exec
- def run_warmup(job, exec:)
+ # @param [BenchmarkDriver::Context] context
+ def run_warmup(job, context:)
warmup = WarmupScript.new(
- prelude: job.prelude,
+ prelude: "#{context.prelude}\n#{job.prelude}",
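# The context's prelude (presumably per-context setup such as requiring
# its gems) now runs ahead of the job's own prelude.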
script: job.script,
teardown: job.teardown,
loop_count: job.loop_count,
first_warmup_duration: @config.run_duration / 6.0, # 0.5s under the default run_duration of 3.0
second_warmup_duration: @config.run_duration / 3.0, # 1.0s under the default run_duration of 3.0
)
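# The rendered script runs under the context's executable and, judging by
# the eval below, writes a "[duration, loop_count]" literal into the
# result file for this process to read back.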
duration, loop_count = Tempfile.open(['benchmark_driver-', '.rb']) do |f|
with_script(warmup.render(result: f.path)) do |path|
- execute(*exec.command, path)
+ execute(*context.executable.command, path)
end
eval(f.read)
end
[duration, loop_count]
end
# @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is not nil
- # @param [BenchmarkDriver::Config::Executable] exec
+ # @param [BenchmarkDriver::Context] context
# @return [Array(Float, Float)] metric value and duration
- def run_benchmark(job, exec:)
+ def run_benchmark(job, context:)
benchmark = BenchmarkScript.new(
- prelude: job.prelude,
+ prelude: "#{context.prelude}\n#{job.prelude}",
script: job.script,
teardown: job.teardown,
loop_count: job.loop_count,
)
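# Unlike the warmup script, the benchmark script reports only the total
# elapsed time; Float(f.read) below parses that single duration.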
duration = Tempfile.open(['benchmark_driver-', '.rb']) do |f|
with_script(benchmark.render(result: f.path)) do |path|
- execute(*exec.command, path)
+ execute(*context.executable.command, path)
end
Float(f.read)
end
value_duration(