lib/rubysl/benchmark/benchmark.rb in rubysl-benchmark-2.0.0 vs lib/rubysl/benchmark/benchmark.rb in rubysl-benchmark-2.0.1
- old
+ new
@@ -1,79 +1,76 @@
-=begin
-#
+#--
# benchmark.rb - a performance benchmarking library
#
-# $Id: benchmark.rb 11708 2007-02-12 23:01:19Z shyouhei $
+# $Id: benchmark.rb 43002 2013-09-20 16:05:48Z zzak $
#
# Created by Gotoken (gotoken@notwork.org).
#
# Documentation by Gotoken (original RD), Lyle Johnson (RDoc conversion), and
# Gavin Sinclair (editing).
+#++
#
-=end
-
# == Overview
#
# The Benchmark module provides methods for benchmarking Ruby code, giving
# detailed reports on the time taken for each task.
#
# The Benchmark module provides methods to measure and report the time
# used to execute Ruby code.
#
# * Measure the time to construct the string given by the expression
-# <tt>"a"*1_000_000</tt>:
+# <code>"a"*1_000_000_000</code>:
#
# require 'benchmark'
#
-# puts Benchmark.measure { "a"*1_000_000 }
+# puts Benchmark.measure { "a"*1_000_000_000 }
#
-# On my machine (FreeBSD 3.2 on P5, 100MHz) this generates:
+# On my machine (OSX 10.8.3 on i5 1.7 GHz) this generates:
#
-# 1.166667 0.050000 1.216667 ( 0.571355)
+# 0.350000 0.400000 0.750000 ( 0.835234)
#
# This report shows the user CPU time, system CPU time, the sum of
# the user and system CPU times, and the elapsed real time. The unit
# of time is seconds.
#
# * Do some experiments sequentially using the #bm method:
#
# require 'benchmark'
#
-# n = 50000
+# n = 5000000
# Benchmark.bm do |x|
# x.report { for i in 1..n; a = "1"; end }
# x.report { n.times do ; a = "1"; end }
# x.report { 1.upto(n) do ; a = "1"; end }
# end
#
# The result:
#
# user system total real
-# 1.033333 0.016667 1.016667 ( 0.492106)
-# 1.483333 0.000000 1.483333 ( 0.694605)
-# 1.516667 0.000000 1.516667 ( 0.711077)
+# 1.010000 0.000000 1.010000 ( 1.014479)
+# 1.000000 0.000000 1.000000 ( 0.998261)
+# 0.980000 0.000000 0.980000 ( 0.981335)
#
# * Continuing the previous example, put a label in each report:
#
# require 'benchmark'
#
-# n = 50000
+# n = 5000000
# Benchmark.bm(7) do |x|
# x.report("for:") { for i in 1..n; a = "1"; end }
# x.report("times:") { n.times do ; a = "1"; end }
# x.report("upto:") { 1.upto(n) do ; a = "1"; end }
# end
#
# The result:
#
# user system total real
-# for: 1.050000 0.000000 1.050000 ( 0.503462)
-# times: 1.533333 0.016667 1.550000 ( 0.735473)
-# upto: 1.500000 0.016667 1.516667 ( 0.711239)
+# for: 1.010000 0.000000 1.010000 ( 1.015688)
+# times: 1.000000 0.000000 1.000000 ( 1.003611)
+# upto: 1.030000 0.000000 1.030000 ( 1.028098)
#
-#
# * The times for some benchmarks depend on the order in which items
# are run. These differences are due to the cost of memory
# allocation and garbage collection. To avoid these discrepancies,
# the #bmbm method is provided. For example, to compare ways to
# sort an array of floats:
@@ -88,142 +85,134 @@
# end
#
# The result:
#
# Rehearsal -----------------------------------------
-# sort! 11.928000 0.010000 11.938000 ( 12.756000)
-# sort 13.048000 0.020000 13.068000 ( 13.857000)
-# ------------------------------- total: 25.006000sec
+# sort! 1.490000 0.010000 1.500000 ( 1.490520)
+# sort 1.460000 0.000000 1.460000 ( 1.463025)
+# -------------------------------- total: 2.960000sec
#
# user system total real
-# sort! 12.959000 0.010000 12.969000 ( 13.793000)
-# sort 12.007000 0.000000 12.007000 ( 12.791000)
+# sort! 1.460000 0.000000 1.460000 ( 1.460465)
+# sort 1.450000 0.010000 1.460000 ( 1.448327)
#
-#
# * Report statistics of sequential experiments with unique labels,
# using the #benchmark method:
#
# require 'benchmark'
+# include Benchmark # we need the CAPTION and FORMAT constants
#
-# n = 50000
-# Benchmark.benchmark(" "*7 + CAPTION, 7, FMTSTR, ">total:", ">avg:") do |x|
+# n = 5000000
+# Benchmark.benchmark(CAPTION, 7, FORMAT, ">total:", ">avg:") do |x|
# tf = x.report("for:") { for i in 1..n; a = "1"; end }
# tt = x.report("times:") { n.times do ; a = "1"; end }
# tu = x.report("upto:") { 1.upto(n) do ; a = "1"; end }
# [tf+tt+tu, (tf+tt+tu)/3]
# end
#
# The result:
#
# user system total real
-# for: 1.016667 0.016667 1.033333 ( 0.485749)
-# times: 1.450000 0.016667 1.466667 ( 0.681367)
-# upto: 1.533333 0.000000 1.533333 ( 0.722166)
-# >total: 4.000000 0.033333 4.033333 ( 1.889282)
-# >avg: 1.333333 0.011111 1.344444 ( 0.629761)
+# for: 0.950000 0.000000 0.950000 ( 0.952039)
+# times: 0.980000 0.000000 0.980000 ( 0.984938)
+# upto: 0.950000 0.000000 0.950000 ( 0.946787)
+# >total: 2.880000 0.000000 2.880000 ( 2.883764)
+# >avg: 0.960000 0.000000 0.960000 ( 0.961255)
module Benchmark
BENCHMARK_VERSION = "2002-04-25" # :nodoc:
- def Benchmark.times # :nodoc:
- Process.times
- end
-
-
- # Invokes the block with a <tt>Benchmark::Report</tt> object, which
+ # Invokes the block with a Benchmark::Report object, which
# may be used to collect and report on the results of individual
- # benchmark tests. Reserves <i>label_width</i> leading spaces for
- # labels on each line. Prints _caption_ at the top of the
- # report, and uses _fmt_ to format each line.
+ # benchmark tests. Reserves +label_width+ leading spaces for
+ # labels on each line. Prints +caption+ at the top of the
+ # report, and uses +format+ to format each line.
+ # Returns an array of Benchmark::Tms objects.
+ #
# If the block returns an array of
- # <tt>Benchmark::Tms</tt> objects, these will be used to format
- # additional lines of output. If _label_ parameters are
+ # Benchmark::Tms objects, these will be used to format
+ # additional lines of output. If +label+ parameters are
# given, these are used to label these extra lines.
#
# _Note_: Other methods provide a simpler interface to this one, and are
# suitable for nearly all benchmarking requirements. See the examples in
# Benchmark, and the #bm and #bmbm methods.
#
# Example:
#
# require 'benchmark'
- # include Benchmark # we need the CAPTION and FMTSTR constants
+ # include Benchmark # we need the CAPTION and FORMAT constants
#
- # n = 50000
- # Benchmark.benchmark(" "*7 + CAPTION, 7, FMTSTR, ">total:", ">avg:") do |x|
+ # n = 5000000
+ # Benchmark.benchmark(CAPTION, 7, FORMAT, ">total:", ">avg:") do |x|
# tf = x.report("for:") { for i in 1..n; a = "1"; end }
# tt = x.report("times:") { n.times do ; a = "1"; end }
# tu = x.report("upto:") { 1.upto(n) do ; a = "1"; end }
# [tf+tt+tu, (tf+tt+tu)/3]
# end
#
- # <i>Generates:</i>
+ # Generates:
#
# user system total real
- # for: 1.016667 0.016667 1.033333 ( 0.485749)
- # times: 1.450000 0.016667 1.466667 ( 0.681367)
- # upto: 1.533333 0.000000 1.533333 ( 0.722166)
- # >total: 4.000000 0.033333 4.033333 ( 1.889282)
- # >avg: 1.333333 0.011111 1.344444 ( 0.629761)
+ # for: 0.970000 0.000000 0.970000 ( 0.970493)
+ # times: 0.990000 0.000000 0.990000 ( 0.989542)
+ # upto: 0.970000 0.000000 0.970000 ( 0.972854)
+ # >total: 2.930000 0.000000 2.930000 ( 2.932889)
+ # >avg: 0.976667 0.000000 0.976667 ( 0.977630)
#
- def benchmark(caption="", label_width=nil, fmtstr=nil, *labels) # :yield: report
- raise ArgumentError, "no block" unless block_given?
-
- # swap
- sync, STDOUT.sync = STDOUT.sync, true
-
+ def benchmark(caption = "", label_width = nil, format = nil, *labels) # :yield: report
+ sync = STDOUT.sync
+ STDOUT.sync = true
label_width ||= 0
- fmtstr ||= FMTSTR
-
- print caption
- results = yield(Report.new(label_width, fmtstr))
-
- if Array === results
- results.grep(Tms).each do |t|
- lbl = labels.shift || t.label || ""
- print lbl.ljust(label_width), t.format(fmtstr)
- end
- end
-
- STDOUT.sync = sync
- results
+ label_width += 1
+ format ||= FORMAT
+ print ' '*label_width + caption unless caption.empty?
+ report = Report.new(label_width, format)
+ results = yield(report)
+ Array === results and results.grep(Tms).each {|t|
+ print((labels.shift || t.label || "").ljust(label_width), t.format(format))
+ }
+ report.list
+ ensure
+ STDOUT.sync = sync unless sync.nil?
end
- # A simple interface to the #benchmark method, #bm is generates sequential reports
- # with labels. The parameters have the same meaning as for #benchmark.
+ # A simple interface to the #benchmark method, #bm generates sequential
+ # reports with labels. The parameters have the same meaning as for
+ # #benchmark.
#
# require 'benchmark'
#
- # n = 50000
+ # n = 5000000
# Benchmark.bm(7) do |x|
# x.report("for:") { for i in 1..n; a = "1"; end }
# x.report("times:") { n.times do ; a = "1"; end }
# x.report("upto:") { 1.upto(n) do ; a = "1"; end }
# end
#
- # <i>Generates:</i>
+ # Generates:
#
# user system total real
- # for: 1.050000 0.000000 1.050000 ( 0.503462)
- # times: 1.533333 0.016667 1.550000 ( 0.735473)
- # upto: 1.500000 0.016667 1.516667 ( 0.711239)
+ # for: 0.960000 0.000000 0.960000 ( 0.957966)
+ # times: 0.960000 0.000000 0.960000 ( 0.960423)
+ # upto: 0.950000 0.000000 0.950000 ( 0.954864)
#
- def bm(label_width=0, *labels, &blk) # :yield: report
- benchmark " " * label_width + CAPTION, label_width, FMTSTR, *labels, &blk
+ def bm(label_width = 0, *labels, &blk) # :yield: report
+ benchmark(CAPTION, label_width, FORMAT, *labels, &blk)
end
# Sometimes benchmark results are skewed because code executed
# earlier encounters different garbage collection overheads than
# that run later. #bmbm attempts to minimize this effect by running
# the tests twice, the first time as a rehearsal in order to get the
# runtime environment stable, the second time for
- # real. <tt>GC.start</tt> is executed before the start of each of
+ # real. GC.start is executed before the start of each of
# the real timings; the cost of this is not included in the
# timings. In reality, though, there's only so much that #bmbm can
# do, and the results are not guaranteed to be isolated from garbage
# collection and other effects.
#
@@ -237,171 +226,165 @@
# Benchmark.bmbm do |x|
# x.report("sort!") { array.dup.sort! }
# x.report("sort") { array.dup.sort }
# end
#
- # <i>Generates:</i>
+ # Generates:
#
# Rehearsal -----------------------------------------
- # sort! 11.928000 0.010000 11.938000 ( 12.756000)
- # sort 13.048000 0.020000 13.068000 ( 13.857000)
- # ------------------------------- total: 25.006000sec
+ # sort! 1.440000 0.010000 1.450000 ( 1.446833)
+ # sort 1.440000 0.000000 1.440000 ( 1.448257)
+ # -------------------------------- total: 2.890000sec
#
# user system total real
- # sort! 12.959000 0.010000 12.969000 ( 13.793000)
- # sort 12.007000 0.000000 12.007000 ( 12.791000)
+ # sort! 1.460000 0.000000 1.460000 ( 1.458065)
+ # sort 1.450000 0.000000 1.450000 ( 1.455963)
#
# #bmbm yields a Benchmark::Job object and returns an array of
# Benchmark::Tms objects.
#
- def bmbm(width=0, &blk) # :yield: job
+ def bmbm(width = 0) # :yield: job
job = Job.new(width)
- yield job
- width = job.width
+ yield(job)
+ width = job.width + 1
+ sync = STDOUT.sync
+ STDOUT.sync = true
- sync, STDOUT.sync = STDOUT.sync, true
-
# rehearsal
- print "Rehearsal "
- puts '-' * (width+CAPTION.length - "Rehearsal ".length)
- list = []
- job.list.each do |label_item|
- label, item = label_item
+ puts 'Rehearsal '.ljust(width+CAPTION.length,'-')
+ ets = job.list.inject(Tms.new) { |sum,(label,item)|
print label.ljust(width)
- res = Benchmark.measure(label, &item)
+ res = Benchmark.measure(&item)
print res.format
- list.push res
- end
- sum = Tms.new; list.each{ |i| sum += i }
- ets = sum.format("total: %tsec")
+ sum + res
+ }.format("total: %tsec")
+ print " #{ets}\n\n".rjust(width+CAPTION.length+2,'-')
- printf "%s %s\n\n", "-"*(width+CAPTION.length-ets.length-1), ets
-
# take
- print ' ' * width, CAPTION
- list = []
- ary = []
-
- job.list.each do |label_item|
- label, item = label_item
+ print ' '*width + CAPTION
+ job.list.map { |label,item|
GC.start
print label.ljust(width)
- res = Benchmark.measure(label, &item)
- print res.format
- ary.push res
- list.push [label, res]
- end
-
- STDOUT.sync = sync
- ary
+ Benchmark.measure(label, &item).tap { |res| print res }
+ }
+ ensure
+ STDOUT.sync = sync unless sync.nil?
end
#
# Returns the time used to execute the given block as a
# Benchmark::Tms object.
#
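  # For example (a small illustration; the numbers shown are made up and
  # will vary by machine):
  #
  #    t = Benchmark.measure { "a" * 1_000_000 }
  #    t.utime            # user CPU seconds, e.g. 0.010000
  #    t.real             # elapsed wall-clock seconds, e.g. 0.012345
  #    print t            # prints the times in the CAPTION/FORMAT layout
  #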
- def measure(label="") # :yield:
- t0, r0 = Benchmark.times, Time.now
+ def measure(label = "") # :yield:
+ t0, r0 = Process.times, Time.now
yield
- t1, r1 = Benchmark.times, Time.now
- Benchmark::Tms.new(t1.tutime - t0.tutime,
- t1.tstime - t0.tstime,
+ t1, r1 = Process.times, Time.now
+ Benchmark::Tms.new(t1.utime - t0.utime,
+ t1.stime - t0.stime,
t1.cutime - t0.cutime,
t1.cstime - t0.cstime,
- r1.to_f - r0.to_f,
+ r1 - r0,
label)
end
#
# Returns the elapsed real time used to execute the given block.
#
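  # For example (the value is approximate and will differ slightly
  # between runs):
  #
  #    Benchmark.realtime { sleep 0.5 }   #=> roughly 0.5
  #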
- def realtime(&blk) # :yield:
- Benchmark.measure(&blk).real
+ def realtime # :yield:
+ r0 = Time.now
+ yield
+ Time.now - r0
end
+ module_function :benchmark, :measure, :realtime, :bm, :bmbm
+
#
# A Job is a sequence of labelled blocks to be processed by the
# Benchmark.bmbm method. It is of little direct interest to the user.
#
class Job # :nodoc:
#
# Returns an initialized Job instance.
# Usually, one doesn't call this method directly, as new
# Job objects are created by the #bmbm method.
- # _width_ is a initial value for the label offset used in formatting;
- # the #bmbm method passes its _width_ argument to this constructor.
+    # +width+ is an initial value for the label offset used in formatting;
+ # the #bmbm method passes its +width+ argument to this constructor.
#
def initialize(width)
@width = width
@list = []
end
#
# Registers the given label and block pair in the job list.
#
- def item(label="", &blk) # :yield:
- raise ArgmentError, "no block" unless block_given?
-
- label.concat ' '
+ def item(label = "", &blk) # :yield:
+ raise ArgumentError, "no block" unless block_given?
+ label = label.to_s
w = label.length
@width = w if @width < w
- @list.push [label, blk]
+ @list << [label, blk]
self
end
alias report item
# An array of 2-element arrays, consisting of label and block pairs.
attr_reader :list
- # Length of the widest label in the #list, plus one.
+ # Length of the widest label in the #list.
attr_reader :width
end
- module_function :benchmark, :measure, :realtime, :bm, :bmbm
-
#
# This class is used by the Benchmark.benchmark and Benchmark.bm methods.
# It is of little direct interest to the user.
#
class Report # :nodoc:
#
# Returns an initialized Report instance.
# Usually, one doesn't call this method directly, as new
# Report objects are created by the #benchmark and #bm methods.
- # _width_ and _fmtstr_ are the label offset and
+ # +width+ and +format+ are the label offset and
# format string used by Tms#format.
#
- def initialize(width=0, fmtstr=nil)
- @width, @fmtstr = width, fmtstr
+ def initialize(width = 0, format = nil)
+ @width, @format, @list = width, format, []
end
#
- # Prints the _label_ and measured time for the block,
- # formatted by _fmt_. See Tms#format for the
+ # Prints the +label+ and measured time for the block,
+ # formatted by +format+. See Tms#format for the
# formatting rules.
#
- def item(label="", *fmt, &blk) # :yield:
- print label.ljust(@width)
- res = Benchmark.measure(label, &blk)
- print res.format(@fmtstr, *fmt)
+ def item(label = "", *format, &blk) # :yield:
+ print label.to_s.ljust(@width)
+ @list << res = Benchmark.measure(label, &blk)
+ print res.format(@format, *format)
res
end
alias report item
+
+ # An array of Benchmark::Tms objects representing each item.
+ attr_reader :list
end
#
# A data object, representing the times associated with a benchmark
# measurement.
#
class Tms
+
+ # Default caption, see also Benchmark::CAPTION
CAPTION = " user system total real\n"
- FMTSTR = "%10.6u %10.6y %10.6t %10.6r\n"
+ # Default format string, see also Benchmark::FORMAT
+ FORMAT = "%10.6u %10.6y %10.6t %10.6r\n"
+
# User CPU time
attr_reader :utime
# System CPU time
attr_reader :stime
@@ -413,31 +396,30 @@
attr_reader :cstime
# Elapsed real time
attr_reader :real
- # Total time, that is _utime_ + _stime_ + _cutime_ + _cstime_
+ # Total time, that is +utime+ + +stime+ + +cutime+ + +cstime+
attr_reader :total
# Label
attr_reader :label
#
# Returns an initialized Tms object which has
- # _u_ as the user CPU time, _s_ as the system CPU time,
- # _cu_ as the children's user CPU time, _cs_ as the children's
- # system CPU time, _real_ as the elapsed real time and _l_
- # as the label.
+ # +utime+ as the user CPU time, +stime+ as the system CPU time,
+ # +cutime+ as the children's user CPU time, +cstime+ as the children's
+ # system CPU time, +real+ as the elapsed real time and +label+ as the label.
#
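    # For instance, a hand-built record (the values here are arbitrary,
    # chosen only for illustration):
    #
    #    t = Benchmark::Tms.new(1.0, 0.5, 0.0, 0.0, 1.6, "example")
    #    t.total   #=> 1.5
    #    t.label   #=> "example"
    #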
- def initialize(u=0.0, s=0.0, cu=0.0, cs=0.0, real=0.0, l=nil)
- @utime, @stime, @cutime, @cstime, @real, @label = u, s, cu, cs, real, l
+ def initialize(utime = 0.0, stime = 0.0, cutime = 0.0, cstime = 0.0, real = 0.0, label = nil)
+ @utime, @stime, @cutime, @cstime, @real, @label = utime, stime, cutime, cstime, real, label.to_s
@total = @utime + @stime + @cutime + @cstime
end
#
# Returns a new Tms object whose times are the sum of the times for this
- # Tms object, plus the time required to execute the code block (_blk_).
+ # Tms object, plus the time required to execute the code block (+blk+).
#
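    # For instance (illustrative):
    #
    #    t = Benchmark.measure { "a" * 1_000_000 }
    #    t = t.add { "b" * 1_000_000 }   # t now also includes the second block's times
    #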
def add(&blk) # :yield:
self + Benchmark.measure(&blk)
end
@@ -494,24 +476,23 @@
# <tt>%Y</tt>:: Replaced by the children's system CPU time, as reported by Tms#cstime
# <tt>%t</tt>:: Replaced by the total CPU time, as reported by Tms#total
# <tt>%r</tt>:: Replaced by the elapsed real time, as reported by Tms#real
# <tt>%n</tt>:: Replaced by the label string, as reported by Tms#label (Mnemonic: n of "*n*ame")
#
- # If _fmtstr_ is not given, FMTSTR is used as default value, detailing the
+    # If +format+ is not given, FORMAT is used as the default value, detailing the
# user, system and real elapsed time.
#
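    # For example (the output shown is illustrative; real values differ):
    #
    #    t = Benchmark.measure("concat") { "a" * 1_000_000 }
    #    t.format("%n: %10.6u user %10.6r real\n")
    #    #=> "concat:   0.010000 user (  0.012345) real\n"
    #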
- def format(arg0=nil, *args)
- fmtstr = (arg0 || FMTSTR).dup
- fmtstr.gsub!(/(%[-+\.\d]*)n/) { "#{$1}s" % label }
- fmtstr.gsub!(/(%[-+\.\d]*)u/) { "#{$1}f" % utime }
- fmtstr.gsub!(/(%[-+\.\d]*)y/) { "#{$1}f" % stime }
- fmtstr.gsub!(/(%[-+\.\d]*)U/) { "#{$1}f" % cutime }
- fmtstr.gsub!(/(%[-+\.\d]*)Y/) { "#{$1}f" % cstime }
- fmtstr.gsub!(/(%[-+\.\d]*)t/) { "#{$1}f" % total }
- fmtstr.gsub!(/(%[-+\.\d]*)r/) { "(#{$1}f)" % real }
-
- arg0 ? Kernel.format(fmtstr, *args) : fmtstr
+ def format(format = nil, *args)
+ str = (format || FORMAT).dup
+ str.gsub!(/(%[-+.\d]*)n/) { "#{$1}s" % label }
+ str.gsub!(/(%[-+.\d]*)u/) { "#{$1}f" % utime }
+ str.gsub!(/(%[-+.\d]*)y/) { "#{$1}f" % stime }
+ str.gsub!(/(%[-+.\d]*)U/) { "#{$1}f" % cutime }
+ str.gsub!(/(%[-+.\d]*)Y/) { "#{$1}f" % cstime }
+ str.gsub!(/(%[-+.\d]*)t/) { "#{$1}f" % total }
+ str.gsub!(/(%[-+.\d]*)r/) { "(#{$1}f)" % real }
+ format ? str % args : str
end
#
# Same as #format.
#
@@ -528,51 +509,60 @@
def to_a
[@label, @utime, @stime, @cutime, @cstime, @real]
end
protected
+
+ #
+ # Returns a new Tms object obtained by memberwise operation +op+
+ # of the individual times for this Tms object with those of the other
+ # Tms object.
+ #
+ # +op+ can be a mathematical operation such as <tt>+</tt>, <tt>-</tt>,
+ # <tt>*</tt>, <tt>/</tt>
+ #
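    # For instance (a sketch), the ">avg:" line in the overview example is
    # built this way:
    #
    #    avg = (tf + tt + tu) / 3   # memberwise sums, then memberwise division by 3
    #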
def memberwise(op, x)
case x
when Benchmark::Tms
Benchmark::Tms.new(utime.__send__(op, x.utime),
stime.__send__(op, x.stime),
cutime.__send__(op, x.cutime),
cstime.__send__(op, x.cstime),
real.__send__(op, x.real)
- )
+ )
else
Benchmark::Tms.new(utime.__send__(op, x),
stime.__send__(op, x),
cutime.__send__(op, x),
cstime.__send__(op, x),
real.__send__(op, x)
- )
+ )
end
end
end
# The default caption string (heading above the output times).
CAPTION = Benchmark::Tms::CAPTION
# The default format string used to display times. See also Benchmark::Tms#format.
- FMTSTR = Benchmark::Tms::FMTSTR
+ FORMAT = Benchmark::Tms::FORMAT
end
if __FILE__ == $0
include Benchmark
n = ARGV[0].to_i.nonzero? || 50000
puts %Q([#{n} times iterations of `a = "1"'])
- benchmark(" " + CAPTION, 7, FMTSTR) do |x|
- x.report("for:") {for i in 1..n; a = "1"; end} # Benchmark.measure
- x.report("times:") {n.times do ; a = "1"; end}
- x.report("upto:") {1.upto(n) do ; a = "1"; end}
+ benchmark(CAPTION, 7, FORMAT) do |x|
+ x.report("for:") {for _ in 1..n; _ = "1"; end} # Benchmark.measure
+ x.report("times:") {n.times do ; _ = "1"; end}
+ x.report("upto:") {1.upto(n) do ; _ = "1"; end}
end
benchmark do
[
- measure{for i in 1..n; a = "1"; end}, # Benchmark.measure
- measure{n.times do ; a = "1"; end},
- measure{1.upto(n) do ; a = "1"; end}
+ measure{for _ in 1..n; _ = "1"; end}, # Benchmark.measure
+ measure{n.times do ; _ = "1"; end},
+ measure{1.upto(n) do ; _ = "1"; end}
]
end
end