#!/usr/bin/env ratch # Run cross comparison tests. # # This tool runs unit tests in pairs to make # sure there is cross library compatibility. require 'facets/hash/rekey' require 'facets/string/tabs' require 'facets/progressbar' require 'test/unit/ui/testrunnermediator' #require 'test/unit' ::Test::Unit.run = true # Don't autorun tests! main :crosstest do run_crosstests end # Run unit-tests. The tests are run in pairs in a separate interpretor # to prevent script clash. This makes for a more robust test # facility and discover potential conflicts between test scripts. # # tests Test files (eg. test/tc_**/*.rb) [test/**/*] # libs Directories to include in load path. # ('./lib' is always included) # live Deactive use of local libs and test against install. # reqs List of files to require prior to running tests. # # To isolate tests this tool marshals test results across a # stdout->stdin shell pipe. This prevents interfence of one # pair of script's tests on another. But consequently it is # not always possible to send debug info to stdout in the tests # themselves (eg. #p and #puts). def run_crosstests info = configuration['test'] if commandline.arguments.empty? tests = info['files'] || 'test/unit/**/test_*.rb' else tests = commandline.arguments end libs = info['libpath'] reqs = info['require'] live = info['live'] #extract = info['extract'] tests = [ tests ].flatten libs = [ libs ].flatten results = TestResults.new #(style) # get test files test_libs = libs.join(':') test_files = Dir.multiglob(*tests) if test_files.empty? puts "Tests [NONE]" return end # run tests if trace? pbar = nil size = test_files.collect{|f| f.size}.max * 2 + 5 dots = '.' * size else pbar = Console::ProgressBar.new('Testing', test_files.size ** 2) pbar.inc end test_files.each do |test_file| next unless File.file?(test_file) test_files.each do |test_file_x| next unless File.file?(test_file_x) next if test_file == test_file_x pbar.inc if pbar unless pbar out = test_file +' : ' + test_file_x print out + dots[out.size..-1] $stdout.flush end r = fork_test( test_file, test_file_x, :libs=>libs, :reqs=>reqs, :live=>live ) unless pbar puts r.passed? ? "[PASS]" : "[FAIL]" end results << r end end pbar.finish if pbar # display results puts results fails, errrs = results.failure_count, results.error_count #CHECKLIST << :test if fails > 0 #CHECKLIST << :test if errrs > 0 return (fails + errrs > 0) end # Run a test in a separate process. # # Currently send program output to null device. # Could send to a logger in future version. # # Key parameters are libs, reqs, live. def fork_test( file, xfile, keys ) keys = keys.rekey(:to_s) libs = keys['lib'] || [] reqs = keys['req'] || [] live = keys['live'] src = '' unless live l = File.join( Dir.pwd, 'lib' ) if File.directory?( l ) src << %{$:.unshift('#{l}')\n} end libs.each { |r| src << %{$:.unshift('#{r}')\n} } end src << %{ #require 'test/unit' require 'test/unit/collector' require 'test/unit/collector/objectspace' require 'test/unit/ui/testrunnermediator' } reqs.each do |fix| src << %Q{ require '#{fix}' } end src << %{ def warn(*null); end # silence warnings output = STDOUT.dup STDOUT.reopen( PLATFORM =~ /mswin/ ? "NUL" : "/dev/null" ) load('#{file}') load('#{xfile}') tests = Test::Unit::Collector::ObjectSpace.new.collect runner = Test::Unit::UI::TestRunnerMediator.new( tests ) result = runner.run_suite begin marshalled = Marshal.dump(result) rescue TypeError => e $stderr << "MARSHAL ERROR\n" $stderr << "TEST: #{file}\n" $stderr << "XTEST: #{xfile}\n" $stderr << "DATA:" << result.inspect exit -1 end output << marshalled STDOUT.reopen(output) output.close } result = IO.popen("ruby","w+") do |ruby| ruby.puts src ruby.close_write ruby.read end begin marsh = Marshal.load(result) rescue ArgumentError $stderr << "\nCannot load marshalled test data.\n" $stderr << result << "\n" exit -1 end return marsh end # # Support class for collecting test results. # class TestResults attr_reader :assertion_count, :run_count, :failure_count, :error_count attr_accessor :style def initialize( style=nil ) @style = style @results = [] @assertion_count = 0 @run_count = 0 @failure_count = 0 @error_count = 0 end # Add a result to the results collection. def <<( result ) @results << result @assertion_count += result.assertion_count @run_count += result.run_count @failure_count += result.failure_count @error_count += result.error_count end # def errors errors = [] @results.each do |r| unless r.passed? errors << r.instance_variable_get('@errors') end end errors.reject! { |e| e == [] } errors end # def failures failures = [] @results.each do |r| unless r.passed? failures << r.instance_variable_get('@failures') end end failures.reject! { |e| e == [] } failures end # Output format for test results. def to_s return @results.to_s if style == 'pease' s = [] # Display failures unless failures.empty? s << '' s << "FAILURES:" failures.reverse.each do |fails| fails.reverse.each do |failure| #puts s << %{ - test : #{failure.test_name}} s << %{ location : #{failure.location}} if failure.message.index("\n") s << %{ message : >} s << failure.message.tabto(6) else s << %{ message : #{failure.message}} end end end end # Display errors unless errors.empty? s << '' s << "ERRORS:" errors.reverse.each do |errs| errs.reverse.each do |err| s << '' s << %{ - test : #{err.test_name}} s << %{ message : #{err.exception.message}} s << %{ backtrace :} err.exception.backtrace[0...-1].each { |bt| s << %Q{ - #{bt}} } end end end # Display final results s << '' s << "Summary:" s << " Tests : #{@run_count}" s << " Assertions : #{@assertion_count}" s << " Failures : #{@failure_count}" s << " Errors : #{@error_count}" s << '' s << '' return s.join("\n") end # Delegate missing call to the results array. def method_missing(*a,&b) @results.send(*a,&b) end end