lib/benchmark_driver.rb in benchmark_driver-0.1.0 vs lib/benchmark_driver.rb in benchmark_driver-0.2.0
- old
+ new
@@ -1,64 +1,76 @@
require 'benchmark_driver/version'
require 'benchmark'
require 'tempfile'
class BenchmarkDriver
- # @param [Integer] duration - Benchmark duration in seconds
+ MEASURE_TYPES = %w[loop_count ips]
+ DEFAULT_LOOP_COUNT = 100_000
+ DEFAULT_IPS_DURATION = 1
+
+ # @param [String] measure_type - "loop_count"|"ips"
+ # @param [Integer] measure_num - Loop count for "loop_count", duration seconds for "ips"
# @param [Array<String>] execs - ["path1", "path2"] or `["ruby1::path1", "ruby2::path2"]`
- # @param [String] result_format
# @param [Boolean] verbose
- def initialize(duration: 1, execs: ['ruby'], result_format: 'time', verbose: false)
- @duration = duration
+ def initialize(measure_type: 'loop_count', measure_num: nil, execs: ['ruby'], verbose: false)
+ unless MEASURE_TYPES.include?(measure_type)
+ abort "unsupported measure type: #{measure_type.dump}"
+ end
+ @measure_type = measure_type
+ @measure_num = measure_num
@execs = execs.map do |exec|
name, path = exec.split('::', 2)
Executable.new(name, path || name)
end
- @result_format = result_format
@verbose = verbose
end
- # @param [Hash,Array<Hash>] hashes
- def run(hashes)
- hashes = [hashes] if hashes.is_a?(Hash)
- benchmarks = hashes.map do |hash|
- BenchmarkScript.new(Hash[hash.map { |k, v| [k.to_sym, v] }])
- end
- if benchmarks.empty?
- abort 'No benchmark is specified in YAML'
- end
+ # @param [Hash] root_hash
+ def run(root_hash)
+ root = BenchmarkRoot.new(Hash[root_hash.map { |k, v| [k.to_sym, v] }])
- results = benchmarks.map do |benchmark|
+ results = root.benchmarks.map do |benchmark|
metrics_by_exec = {}
iterations = calc_iterations(@execs.first, benchmark)
@execs.each do |exec|
- puts "Running #{benchmark.name.dump} with #{exec.name.dump} #{iterations} times..." if @verbose
+ if @verbose
+ puts "--- Running #{benchmark.name.dump} with #{exec.name.dump} #{iterations} times ---"
+ puts "#{benchmark.benchmark_script(iterations)}\n"
+ end
elapsed_time = run_benchmark(exec, benchmark, iterations)
metrics_by_exec[exec] = BenchmarkMetrics.new(iterations, elapsed_time)
end
BenchmarkResult.new(benchmark.name, metrics_by_exec)
end
puts if @verbose
- case @result_format
- when 'time'
- ExecutionTimeReporter.report(@execs, results)
+ case @measure_type
+ when 'loop_count'
+ LoopCountReporter.report(@execs, results)
when 'ips'
IpsReporter.report(@execs, results)
else
- raise "unsupported result format: #{@result_format.dump}"
+ raise "unexpected measure type: #{@measure_type.dump}"
end
end
private
# Estimate iterations to finish benchmark within `@duration`.
def calc_iterations(exec, benchmark)
- # TODO: Change to try from 1, 10, 100 ...
- base = 1000
- time = run_benchmark(exec, benchmark, base)
- (@duration / time * base).to_i
+ case @measure_type
+ when 'loop_count'
+ @measure_num || benchmark.loop_count || DEFAULT_LOOP_COUNT
+ when 'ips'
+ # TODO: Change to try from 1, 10, 100 ...
+ base = 1000
+ time = run_benchmark(exec, benchmark, base)
+ duration = @measure_num || DEFAULT_IPS_DURATION
+ (duration / time * base).to_i
+ else
+ raise "unexpected measure type: #{@measure_type.dump}"
+ end
end
def run_benchmark(exec, benchmark, iterations)
# TODO: raise error if negative
measure_script(exec.path, benchmark.benchmark_script(iterations)) -
@@ -73,21 +85,59 @@
cmd = "#{ruby} #{f.path}"
Benchmark.measure { system(cmd, out: File::NULL) }.real
end
end
+ class BenchmarkRoot
+ # @param [String] name
+ # @param [String] prelude
+ # @param [Integer,nil] loop_count
+ # @param [String,nil] benchmark - For running single instant benchmark
+ # @param [Array<Hash>] benchmarks - For running multiple benchmarks
+ def initialize(name:, prelude: '', loop_count: nil, benchmark: nil, benchmarks: [])
+ if benchmark
+ unless benchmarks.empty?
+ raise ArgumentError.new("Only either :benchmark or :benchmarks can be specified")
+ end
+ @benchmarks = [BenchmarkScript.new(name: name, prelude: prelude, benchmark: benchmark)]
+ else
+ @benchmarks = benchmarks.map do |hash|
+ BenchmarkScript.new(Hash[hash.map { |k, v| [k.to_sym, v] }]).tap do |b|
+ b.inherit_root(prelude: prelude, loop_count: loop_count)
+ end
+ end
+ end
+ end
+
+ # @return [Array<BenchmarkScript>]
+ attr_reader :benchmarks
+ end
+
class BenchmarkScript
# @param [String] name
# @param [String] prelude
- # @param [String] script
- def initialize(name:, prelude: '', benchmark:)
+ # @param [String] benchmark
+ def initialize(name:, prelude: '', loop_count: nil, benchmark:)
@name = name
@prelude = prelude
+ @loop_count = loop_count
@benchmark = benchmark
end
+
+ # @return [String]
attr_reader :name
+ # @return [Integer]
+ attr_reader :loop_count
+
+ def inherit_root(prelude:, loop_count:)
+ @prelude = "#{prelude}\n#{@prelude}"
+ if @loop_count.nil? && loop_count
+ @loop_count = loop_count
+ end
+ end
+
def overhead_script(iterations)
<<-RUBY
#{@prelude}
i = 0
while i < #{iterations}
@@ -135,10 +185,10 @@
:name, # @param [String]
:path, # @param [String]
)
end
- module ExecutionTimeReporter
+ module LoopCountReporter
class << self
# @param [Array<Executable>] execs
# @param [Array<BenchmarkResult>] results
def report(execs, results)
puts "benchmark results:"