require 'erb'
require 'fileutils'
require 'forwardable'
require 'json'
require 'genevalidator/version'
module GeneValidator
class Output
extend Forwardable
def_delegators GeneValidator, :opt, :config, :mutex, :mutex_html,
:mutex_json
attr_accessor :prediction_def
attr_accessor :nr_hits
# list of +ValidationReport+ objects
attr_accessor :validations
attr_accessor :idx
attr_accessor :overall_score
attr_accessor :fails
attr_accessor :successes
##
# Initializes the object
# Params:
# +current_idx+: index of the current query
def initialize(current_idx, no_of_hits, definition)
  # +opt+ and +config+ are delegated to the GeneValidator module via
  # def_delegators; cache them on the instance for later use.
  @opt = opt
  @config = config
  # Global run counter; used by #output_filename to decide which
  # resultsN.html page this query belongs to.
  # NOTE(review): incremented outside any mutex — confirm Output objects
  # are only constructed from a single thread.
  @config[:run_no] += 1
  @prediction_def = definition
  @nr_hits = no_of_hits
  @idx = current_idx
end
##
# Prints a one-line summary of this query's validations to the console.
# Thread-safe: the whole write is guarded by the shared +mutex+.
def print_output_console
  mutex.synchronize do
    print_console_header unless @config[:console_header_printed]
    # First whitespace-delimited token of the FASTA definition line.
    short_def = @prediction_def.scan(/([^ ]+)/)[0][0]
    print format("%3s\t%5s\t%20s\t%7s\t", @idx, @overall_score, short_def,
                 @nr_hits)
    # Validation print strings are HTML-oriented; convert '&nbsp;'
    # entities to plain spaces for terminal output.
    puts validations.map(&:print).join("\t").gsub('&nbsp;', ' ')
  end
end
##
# Writes the column headings for the console report (once per run) and
# marks them as printed in the shared config.
def print_console_header
  @config[:console_header_printed] = true
  heading = format("%3s\t%5s\t%20s\t%7s\t", 'No', 'Score', 'Identifier',
                   'No_Hits')
  print heading
  puts validations.map(&:short_header).join("\t")
end
##
# Renders this query's validation results into the current results HTML
# page. The ERB template is evaluated against +binding+, so it can read
# this method's instance variables and locals; restructuring this method
# could therefore change what the template sees.
def generate_html
  mutex_html.synchronize do
    output_html = output_filename
    query_erb = File.join(@config[:aux], 'template_query.erb')
    template_file = File.open(query_erb, 'r').read
    # NOTE(review): the positional (safe_level, trim_mode) ERB.new
    # arguments are deprecated since Ruby 2.6 and removed in newer erb
    # releases — consider ERB.new(src, trim_mode: '>') when the minimum
    # supported Ruby allows.
    erb = ERB.new(template_file, 0, '>')
    File.open(output_html, 'a') { |f| f.write(erb.result(binding)) }
  end
end
##
# Returns the path of the results HTML page for the current query and
# makes sure that page already has its header written.
# One page holds up to +output_max+ queries.
def output_filename
  page_no = (@config[:run_no].to_f / @config[:output_max]).ceil
  html_file = File.join(@config[:html_path], "results#{page_no}.html")
  write_html_header(html_file)
  html_file
end
##
# Writes the HTML header template into +output_html+ the first time the
# page is created; existing pages are left untouched.
def write_html_header(output_html)
  return if File.exist?(output_html)
  head_erb = File.join(@config[:aux], 'template_header.erb')
  set_up_html(head_erb, output_html)
end
##
# Renders +erb_file+ into a freshly created +output_file+; does nothing
# if the output file already exists. The template is evaluated against
# +binding+, so it may reference instance variables and this method's
# locals.
def set_up_html(erb_file, output_file)
  return if File.exist?(output_file)
  template_contents = File.open(erb_file, 'r').read
  # NOTE(review): positional (safe_level, trim_mode) args are deprecated
  # since Ruby 2.6 — see generate_html.
  erb = ERB.new(template_contents, 0, '>')
  File.open(output_file, 'w+') { |f| f.write(erb.result(binding)) }
end
##
# Builds the JSON row for this query, writes it to its own per-query
# file and appends it to the accumulated JSON output.
# Thread-safe: guarded by the shared +mutex_json+.
def generate_json
  mutex_json.synchronize do
    row = create_validation_hashes(
      idx: @idx, overall_score: @overall_score,
      definition: @prediction_def, no_hits: @nr_hits
    )
    write_row_json(row)
    @config[:json_output] << row
  end
end
##
# Adds a +:validations+ hash (keyed by each validation's short header)
# to +row+ and returns the mutated row.
def create_validation_hashes(row)
  row[:validations] = @validations.each_with_object({}) do |item, acc|
    entry = add_basic_validation_info(item)
    # Warnings carry no approach/explanation/conclusion text.
    entry.merge!(add_explanation_data(item)) unless item.color == 'warning'
    entry[:graphs] = create_graphs_hash(item) unless item.plot_files.nil?
    acc[item.short_header] = entry
  end
  row
end
def add_basic_validation_info(item)
{ header: item.header, description: item.description, status: item.color,
print: item.print.gsub(' ', ' ') }
end
##
# Extracts the explanatory text of a validation result.
# Output:
# Hash with :approach, :explanation and :conclusion keys
def add_explanation_data(item)
  {
    approach: item.approach,
    explanation: item.explanation,
    conclusion: item.conclusion
  }
end
##
# Converts each plot attached to a validation into a plain hash suitable
# for JSON serialisation.
# Output:
# Array of Hashes, one per plot file
def create_graphs_hash(item)
  item.plot_files.map do |plot|
    { data: plot.data, type: plot.type, title: plot.title,
      footer: plot.footer, xtitle: plot.xtitle, ytitle: plot.ytitle,
      aux1: plot.aux1, aux2: plot.aux2 }
  end
end
##
# Serialises +row+ to JSON in the plot directory, one file per query
# ("<input filename>_<query index>.json").
def write_row_json(row)
  json_path = File.join(@config[:plot_dir],
                        "#{@config[:filename]}_#{@idx}.json")
  File.open(json_path, 'w') { |f| f.write(row.to_json) }
end
##
# Serialises +array+ as JSON into +json_file+, overwriting any previous
# contents.
def self.write_json_file(array, json_file)
  File.write(json_file, array.to_json)
end
##
# Method that closes the tags in the html file and writes the overall
# evaluation
# Param:
# +all_query_outputs+: array with +ValidationTest+ objects
# +html_path+: path of the html folder
# +filename+: name of the fasta input file
def self.print_footer(overview, config)
  set_overall_evaluation(overview, config)
  footer_erb = File.join(config[:aux], 'template_footer.erb')
  # One results page is produced per +output_max+ queries.
  no_of_results_files = (config[:run_no].to_f / config[:output_max]).ceil
  template_file = File.open(footer_erb, 'r').read
  erb = ERB.new(template_file, 0, '>')
  output_files = []
  (1..no_of_results_files).each { |i| output_files << "results#{i}.html" }
  # erb.result(binding) is evaluated inside the loop so the template can
  # see both +output_files+ and the current page number +i+.
  (1..no_of_results_files).each do |i|
    results_html = File.join(config[:html_path], "results#{i}.html")
    File.open(results_html, 'a+') { |f| f.write(erb.result(binding)) }
  end
  # Client-side table sorting only works for a single page; disable it
  # when the results were split across multiple pages.
  turn_off_sorting(config[:html_path]) if no_of_results_files > 1
end
##
# Produces the overall evaluation text, prints it to the console and
# stores it in the overview JSON.
# The text is later embedded into single-quoted JavaScript string
# literals, so newlines must become '<br>' tags and single quotes must
# be escaped (the original "\n" -> "\n" replacement was a no-op caused
# by a stripped '<br>' tag).
def self.set_overall_evaluation(overview, config)
  overall_evaluation = overview(overview)
  less = overall_evaluation[0].gsub("\n", '<br>').gsub("'", %q(\\\'))
  eval = print_summary_to_console(overall_evaluation, config[:summary])
  evaluation = eval.gsub("\n", '<br>').gsub("'", %q(\\\'))
  create_overview_json(overview[:scores], config[:plot_dir], less,
                       evaluation)
end
##
# Disables client-side table sorting by removing the initTableSorter()
# call from the compiled JavaScript bundle. The patched script is
# written to a temp file first and then moved into place.
def self.turn_off_sorting(html_path)
  script_file = File.join(html_path,
                          'files/js/genevalidator.compiled.min.js')
  patched = File.read(script_file).gsub(',initTableSorter(),', ',')
  tmp_file = "#{script_file}.tmp"
  File.open(tmp_file, 'w') { |f| f.puts patched }
  FileUtils.mv(tmp_file, script_file)
end
##
# Joins the evaluation sections into one newline-terminated string,
# optionally prints it to STDERR (when +summary+ is truthy) and returns
# the string.
def self.print_summary_to_console(overall_evaluation, summary)
  text = overall_evaluation.map { |section| "#{section}\n" }.join
  $stderr.puts text if summary
  $stderr.puts ''
  text
end
# make the histogram with the resulting scores
##
# Writes overview.json: a histogram of all query scores plus the
# overall evaluation texts, consumed by the HTML report's plotting code.
def self.create_overview_json(scores, plot_dir, less, evaluation)
  plot_file = File.join(plot_dir, 'overview.json')
  # One bar per distinct score, counting how often it occurred.
  bars = scores.group_by { |score| score }.map do |score, occurrences|
    { 'key' => score, 'value' => occurrences.length, 'main' => false }
  end
  hash = { data: [bars], type: :simplebars,
           title: 'Overall GeneValidator Score Evaluation',
           footer: '', xtitle: 'Validation Score',
           ytitle: 'Number of Queries', aux1: 10, aux2: '', less: less,
           evaluation: evaluation }
  File.open(plot_file, 'w') { |f| f.write hash.to_json }
end
##
# Calculates an overall evaluation of the output
# Params:
# +all_query_outputs+: Array of +ValidationTest+ objects
# Output
# Array of Strings with the reports
##
# Assembles the overall report: general statistics, per-validation
# errors and running times, dropping any section that is empty.
# Output:
# Array of non-empty report Strings
def self.overview(o)
  sections = [general_overview(o), errors_overview(o), time_overview(o)]
  sections.reject { |section| section == '' }
end
##
# Builds the textual summary of how many predictions scored well,
# poorly, or could not be evaluated.
# Params:
# +o+: overview Hash (:no_queries, :good_scores, :bad_scores, :nee)
def self.general_overview(o)
  good_pred = o[:good_scores] == 1 ? 'One' : "#{o[:good_scores]} are"
  bad_pred = o[:bad_scores] == 1 ? 'One' : "#{o[:bad_scores]} are"
  summary = "Overall Query Score Evaluation:\n" \
            "#{o[:no_queries]} predictions were validated, from which there" \
            " were:\n" \
            "#{good_pred} good prediction(s),\n" \
            "#{bad_pred} possibly weak prediction(s).\n"
  # nee = "no evidence": queries that could not be evaluated at all.
  if o[:nee] != 0
    summary << "#{o[:nee]} could not be evaluated due to the lack of" \
               ' evidence.'
  end
  summary
end
# errors per validation
##
# Reports validations that could not be run, plus environment-wide
# failures (MAFFT / internet) when they affected every evaluated query.
# Output:
# String report (empty when nothing failed)
def self.errors_overview(o)
  report = ''
  o[:map_errors].each do |validation, count|
    report << "\nWe couldn't run #{validation} Validation for #{count} queries"
  end
  evaluated = o[:no_queries] - o[:nee]
  if o[:no_mafft] >= evaluated
    report << "\nWe couldn't run MAFFT multiple alignment"
  end
  if o[:no_internet] >= evaluated
    report << "\nWe couldn't make use of your internet connection"
  end
  report
end
##
# Reports the mean running time per validation type.
# Params:
# +o+: overview Hash whose :run_time maps validation name to an object
#      with +x+ (total time) and +y+ (number of runs)
def self.time_overview(o)
  o[:run_time].reduce('') do |report, (validation, timing)|
    mean = timing.x / timing.y.to_f
    report << "\nAverage running time for #{validation} Validation:" \
              " #{mean.round(3)}s per validation"
  end
end
end
end