lib/genevalidator/output.rb in genevalidator-1.6.1 vs lib/genevalidator/output.rb in genevalidator-1.6.2
- old
+ new
@@ -1,269 +1,261 @@
-require 'genevalidator/version'
-require 'fileutils'
require 'erb'
-require 'yaml'
-require 'thread'
+require 'fileutils'
+require 'forwardable'
+require 'json'
+
+require 'genevalidator/version'
+
module GeneValidator
class Output
- attr_accessor :prediction_len
+ extend Forwardable
+ def_delegators GeneValidator, :opt, :config, :mutex, :mutex_html,
+ :mutex_json
attr_accessor :prediction_def
attr_accessor :nr_hits
# list of +ValidationReport+ objects
attr_accessor :validations
- attr_accessor :filename
- attr_accessor :html_path
- attr_accessor :yaml_path
attr_accessor :idx
- attr_accessor :start_idx
attr_accessor :overall_score
attr_accessor :fails
attr_accessor :successes
- attr_accessor :mutex
- attr_accessor :mutex_yaml
- attr_accessor :mutex_html
-
##
# Initilizes the object
# Params:
- # +mutex+: +Mutex+ for exclusive access to the console
- # +mutex_yaml+: +Mutex+ for exclusive access to the YAML file
- # +mutex_html+: +Mutex+ for exclusive access to the HTML file
- # +filename+: name of the fasta input file
- # +html_path+: path of the html folder
- # +yaml_path+: path where the yaml output wil be saved
- # +idx+: idnex of the current query
- # +start_idx+: number of the sequence from the file to start with
- def initialize(mutex, mutex_yaml, mutex_html, filename, html_path,
- yaml_path, idx = 0, start_idx = 0)
- @prediction_len = 0
- @prediction_def = 'no_definition'
- @nr_hits = 0
+ # +current_idx+: index of the current query
+ def initialize(current_idx, no_of_hits, definition)
+ @opt = opt
+ @config = config
+ @config[:run_no] += 1
- @filename = filename
- @html_path = html_path
- @yaml_path = yaml_path
- @idx = idx
- @start_idx = start_idx
+ @prediction_def = definition
+ @nr_hits = no_of_hits
+ @idx = current_idx
- @mutex = mutex
- @mutex_yaml = mutex_yaml
- @mutex_html = mutex_html
+ @app_html = File.join(@config[:html_path], 'files/table.html')
end
def print_output_console
- if @idx == @start_idx
- header = sprintf('%3s|%s|%20s|%5s', 'No', 'Score', 'Identifier',
- 'No_Hits')
- validations.map do |v|
- header << "|#{v.short_header}"
- end
- puts header
+ mutex.synchronize do
+ print_console_header unless @config[:console_header_printed]
+ short_def = @prediction_def.scan(/([^ ]+)/)[0][0]
+ print format("%3s\t%5s\t%20s\t%7s\t", @idx, @overall_score, short_def,
+ @nr_hits)
+      puts validations.map(&:print).join("\t").gsub('&nbsp;', ' ')
end
-
- short_def = @prediction_def.scan(/([^ ]+)/)[0][0]
- validation_outputs = validations.map(&:print)
-
- output = sprintf('%3s|%d|%20s|%5s|', @idx, @overall_score,
- short_def, @nr_hits)
- validation_outputs.each do |item|
- output << item
- output << '|'
- end
-
- @mutex.synchronize do
-        puts output.gsub('&nbsp;', ' ')
- end
end
- def print_output_file_yaml
- file_yaml = "#{@yaml_path}/#{@filename}.yaml"
- report = validations
- if @idx == @start_idx
- @mutex_yaml.synchronize do
- File.open(file_yaml, 'w') do |f|
- YAML.dump({ @prediction_def.scan(/([^ ]+)/)[0][0] => report }, f)
- end
- end
- else
- @mutex_yaml.synchronize do
- hash = {} # YAML.load_file(file_yaml)
- hash[@prediction_def.scan(/([^ ]+)/)[0][0]] = report
- File.open(file_yaml, 'a') do |f|
- new_report = hash.to_yaml
- f.write(new_report[4..new_report.length - 1])
- end
- end
- end
+ def print_console_header
+ @config[:console_header_printed] = true
+ print format("%3s\t%5s\t%20s\t%7s\t", 'No', 'Score', 'Identifier', 'No_Hits')
+ puts validations.map(&:short_header).join("\t")
end
def generate_html
- if @fails == 0
- bg_icon = 'success'
- else
- bg_icon = 'danger'
+ mutex_html.synchronize do
+ output_html = output_filename
+ query_erb = File.join(@config[:aux], 'template_query.erb')
+ template_file = File.open(query_erb, 'r').read
+ erb = ERB.new(template_file, 0, '>')
+ File.open(output_html, 'a') { |f| f.write(erb.result(binding)) }
+ File.open(@app_html, 'a') { |f| f.write(erb.result(binding)) }
end
+ end
- index_file = "#{@html_path}/results.html"
- table_file = "#{@html_path}/files/table.html"
+ def output_filename
+ i = (@config[:run_no].to_f / @config[:output_max]).ceil
+ output_html = File.join(@config[:html_path], "results#{i}.html")
+ write_html_header(output_html)
+ output_html
+ end
- aux_dir = File.join(File.dirname(File.expand_path(__FILE__)), '../../aux')
+ def write_html_header(output_html)
+ head_erb = File.join(@config[:aux], 'template_header.erb')
+ head_table_erb = File.join(@config[:aux], 'app_template_header.erb')
+ set_up_html(head_erb, output_html) unless File.exist?(output_html)
+ set_up_html(head_table_erb, @app_html) unless File.exist?(@app_html)
+ end
- # if it's the first time I write in the html file
- if @idx == @start_idx
- @mutex_html.synchronize do
- template_header = File.join(aux_dir, 'template_header.erb')
- template_file = File.open(template_header, 'r').read
- erb = ERB.new(template_file, 0, '>')
+ def set_up_html(erb_file, output_file)
+ return if File.exist?(output_file)
+ template_contents = File.open(erb_file, 'r').read
+ erb = ERB.new(template_contents, 0, '>')
+ File.open(output_file, 'w+') { |f| f.write(erb.result(binding)) }
+ end
- # Creating a Separate output file for the web app
- app_template_header = File.join(aux_dir, 'app_template_header.erb')
- table_template_file = File.open(app_template_header, 'r').read
- erb_table = ERB.new(table_template_file, 0, '>')
+ def generate_json
+ mutex_json.synchronize do
+ row = { idx: @idx, overall_score: @overall_score,
+ definition: @prediction_def, no_hits: @nr_hits }
+ row = create_validation_hashes(row)
+ write_row_json(row)
+ @config[:json_output] << row
+ end
+ end
- File.open(index_file, 'w+') do |file|
- file.write(erb.result(binding))
- end
-
- File.open(table_file, 'w+') do |file|
- file.write(erb_table.result(binding))
- end
+ def create_validation_hashes(row)
+ row[:validations] = {}
+ @validations.each do |item|
+ val = { header: item.header, description: item.description,
+                status: item.color, print: item.print.gsub('&nbsp;', ' ') }
+ if item.color != 'warning'
+ explain = { approach: item.approach, explanation: item.explanation,
+ conclusion: item.conclusion }
+ val.merge!(explain)
end
+ val[:graphs] = create_graphs_hash(item) unless item.plot_files.nil?
+ row[:validations][item.short_header] = val
end
+ row
+ end
- toggle = "toggle#{@idx}"
+ def create_graphs_hash(item)
+ graphs = []
+ item.plot_files.each do |g|
+ graphs << { data: g.data, type: g.type, title: g.title,
+ footer: g.footer, xtitle: g.xtitle,
+ ytitle: g.ytitle, aux1: g.aux1, aux2: g.aux2 }
+ end
+ graphs
+ end
- @mutex_yaml.synchronize do
- template_query = File.join(aux_dir, 'template_query.erb')
- template_file = File.open(template_query, 'r').read
- erb = ERB.new(template_file, 0, '>')
+ def write_row_json(row)
+ row_json = File.join(@config[:plot_dir],
+ "#{@config[:filename]}_#{@idx}.json")
+ File.open(row_json, 'w') { |f| f.write(row.to_json) }
+ end
- File.open(index_file, 'a') do |file|
- file.write(erb.result(binding))
- end
-
- File.open(table_file, 'a') do |file|
- file.write(erb.result(binding))
- end
- end
+ def self.write_json_file(array, json_file)
+ File.open(json_file, 'w') { |f| f.write(array.to_json) }
end
##
# Method that closes the gas in the html file and writes the overall
# evaluation
# Param:
# +all_query_outputs+: array with +ValidationTest+ objects
# +html_path+: path of the html folder
# +filemane+: name of the fasta input file
- # def self.print_footer(all_query_outputs, html_path, filename)
- def self.print_footer(no_queries, scores, good_predictions, bad_predictions,
- nee, no_mafft, no_internet, map_errors, running_times,
- html_path, filename)
- # compute the statistics
- # overall_evaluation = overall_evaluation(all_query_outputs, filename)
- overall_evaluation = overall_evaluation(no_queries, good_predictions,
- bad_predictions, nee, no_mafft,
- no_internet, map_errors,
- running_times)
+ def self.print_footer(overview, config)
+ overall_evaluation = overview(overview)
- less = overall_evaluation[0]
- less = less.gsub("\n", '<br>').gsub("'", %q(\\\'))
+ create_plot_json(overview[:scores], config[:plot_dir])
- # print to console
- evaluation = ''
- overall_evaluation.each { |e| evaluation << "\n#{e}" }
- puts evaluation
- puts ''
+ less = overall_evaluation[0].gsub("\n", '<br>').gsub("'", %q(\\\'))
- # print to html
- # make the historgram with the resulted scores
-      statistics_filename = "#{html_path}/files/json/#{filename}_statistics.json"
- f = File.open(statistics_filename, 'w')
+ eval = print_summary_to_console(overall_evaluation, config[:summary])
+ evaluation = eval.gsub("\n", '<br>').gsub("'", %q(\\\'))
- f.write(
- [scores.group_by { |a| a }.map { |k, vs| { 'key' => k,
- 'value' => vs.length,
- 'main' => false } }].to_json)
- f.close
+ footer_erb = File.join(config[:aux], 'template_footer.erb')
-      plot_statistics = Plot.new("files/json/#{filename}_statistics.json",
- :simplebars,
- 'Overall evaluation',
- '',
- 'validation score',
- 'number of queries',
- 10)
+ no_of_results_files = (config[:run_no].to_f / config[:output_max]).ceil
+ template_file = File.open(footer_erb, 'r').read
+ erb = ERB.new(template_file, 0, '>')
- evaluation = evaluation.gsub("\n", '<br>').gsub("'", %q(\\\'))
+ output_files = []
+ (1..no_of_results_files).each { |i| output_files << "results#{i}.html" }
- index_file = "#{html_path}/results.html"
- table_file = "#{html_path}/files/table.html"
- aux_dir = File.join(File.dirname(File.expand_path(__FILE__)), '../../aux')
+ (1..no_of_results_files).each do |i|
+ results_html = File.join(config[:html_path], "results#{i}.html")
+ File.open(results_html, 'a+') { |f| f.write(erb.result(binding)) }
+ end
- template_footer = File.join(aux_dir, 'template_footer.erb')
- app_template_footer = File.join(aux_dir, 'app_template_footer.erb')
+ turn_off_sorting(config[:html_path]) if no_of_results_files > 1
- template_file = File.open(template_footer, 'r').read
- erb = ERB.new(template_file, 0, '>')
- File.open(index_file, 'a+') do |file|
- file.write(erb.result(binding))
- end
+ # write footer for the app
+ app_footer_erb = File.join(config[:aux], 'app_template_footer.erb')
+ table_html = File.join(config[:html_path], 'files/table.html')
+ table_footer_template = File.open(app_footer_erb, 'r').read
+ table_erb = ERB.new(table_footer_template, 0, '>')
+ File.open(table_html, 'a+') { |f| f.write(table_erb.result(binding)) }
+ end
- table_footer_template = File.open(app_template_footer, 'r').read
- table_erb = ERB.new(table_footer_template, 0, '>')
- File.open(table_file, 'a+') do |file|
- file.write(table_erb.result(binding))
+ def self.turn_off_sorting(html_path)
+ script_file = File.join(html_path, 'files/js/script.js')
+ temp_file = File.join(html_path, 'files/js/script.temp.js')
+ File.open(temp_file, 'w') do |out_file|
+ out_file.puts File.readlines(script_file)[30..-1].join
end
+ FileUtils.mv(temp_file, script_file)
end
+ def self.print_summary_to_console(overall_evaluation, summary)
+ # print to console
+ eval = ''
+ overall_evaluation.each { |e| eval << "\n#{e}" }
+ $stderr.puts eval if summary
+ $stderr.puts ''
+ eval
+ end
+
+ # make the historgram with the resulted scores
+ def self.create_plot_json(scores, plot_dir)
+ plot_file = File.join(plot_dir, 'overview.json')
+ data = [scores.group_by { |a| a }.map { |k, vs| { 'key' => k, 'value' => vs.length, 'main' => false } }]
+ hash = { data: data, type: :simplebars, title: 'Overall Evaluation',
+ footer: '', xtitle: 'Validation Score',
+ ytitle: 'Number of Queries', aux1: 10, aux2: '' }
+ File.open(plot_file, 'w') { |f| f.write hash.to_json }
+ end
+
##
# Calculates an overall evaluation of the output
# Params:
# +all_query_outputs+: Array of +ValidationTest+ objects
# Output
# Array of Strigs with the reports
- def self.overall_evaluation(no_queries, good_scores, bad_scores,
- no_evidence, no_mafft, no_internet, map_errors,
- running_times)
- good_pred = (good_scores == 1) ? 'One' : "#{good_scores} are"
- bad_pred = (bad_scores == 1) ? 'One' : "#{bad_scores} are"
+ def self.overview(o)
+ eval = general_overview(o)
+ error_eval = errors_overview(o)
+ time_eval = time_overview(o)
+ overall_evaluation = [eval, error_eval, time_eval]
+ overall_evaluation.select { |e| e != '' }
+ end
+
+ def self.general_overview(o)
+ good_pred = (o[:good_scores] == 1) ? 'One' : "#{o[:good_scores]} are"
+ bad_pred = (o[:bad_scores] == 1) ? 'One' : "#{o[:bad_scores]} are"
+
eval = "Overall Query Score Evaluation:\n" \
- "#{no_queries} predictions were validated, from which there" \
+ "#{o[:no_queries]} predictions were validated, from which there" \
" were:\n" \
"#{good_pred} good prediction(s),\n" \
"#{bad_pred} possibly weak prediction(s).\n"
- if no_evidence != 0
- eval << "#{no_evidence} could not be evaluated due to the lack of" \
+ if o[:nee] != 0 # nee = no evidence
+ eval << "#{o[:nee]} could not be evaluated due to the lack of" \
' evidence.'
end
+ eval
+ end
+ def self.errors_overview(o)
# errors per validation
error_eval = ''
- map_errors.each do |k, v|
+ o[:map_errors].each do |k, v|
error_eval << "\nWe couldn't run #{k} Validation for #{v} queries"
end
-
- if no_mafft >= (no_queries - no_evidence)
+ if o[:no_mafft] >= (o[:no_queries] - o[:nee])
error_eval << "\nWe couldn't run MAFFT multiple alignment"
end
- if no_internet >= (no_queries - no_evidence)
+ if o[:no_internet] >= (o[:no_queries] - o[:nee])
error_eval << "\nWe couldn't make use of your internet connection"
end
+ error_eval
+ end
+ def self.time_overview(o)
time_eval = ''
- running_times.each do |key, value|
- average_time = value.x / (value.y + 0.0)
+ o[:run_time].each do |key, value|
+ average_time = value.x / (value.y).to_f
time_eval << "\nAverage running time for #{key} Validation:" \
" #{average_time.round(3)}s per validation"
end
-
- overall_evaluation = [eval, error_eval, time_eval]
- overall_evaluation.select { |e| e != '' }
+ time_eval
end
end
end