require "isodoc"
require_relative "metadata"
require "fileutils"
require_relative "base_convert"
module IsoDoc
  module NIST
    # A {Converter} implementation that generates HTML output for NIST
    # documents, and a document schema encapsulation of the document for
    # validation. Most methods here override hooks defined in the
    # IsoDoc::HtmlConvert base class; shared NIST rendering logic is mixed
    # in from BaseConvert at the bottom of the class.
    class HtmlConvert < IsoDoc::HtmlConvert
      def initialize(options)
        # Point the base class at this gem's directory so html_doc_path
        # resolves the stylesheets/templates listed in
        # default_file_locations below.
        @libdir = File.dirname(__FILE__)
        super
      end

      # Pre-conversion hook: record how many bibliography sections
      # (`<references>` elements) the document has before delegating to the
      # base conversion. @bibliographycount is presumably consumed by the
      # shared rendering code in BaseConvert — confirm against that module.
      def convert1(docxml, filename, dir)
        @bibliographycount = docxml.xpath(ns("//references")).size
        super
      end

      # Font families for the generated HTML. Documents in Simplified
      # Chinese script ("Hans") get SimSun/SimHei; everything else uses
      # Libre Baskerville for both body and headings, with Space Mono for
      # monospace text.
      def default_fonts(options)
        {
          bodyfont: (options[:script] == "Hans" ? '"SimSun",serif' :
                     '"Libre Baskerville",serif'),
          headerfont: (options[:script] == "Hans" ? '"SimHei",sans-serif' :
                       '"Libre Baskerville",serif'),
          monospacefont: '"Space Mono",monospace'
        }
      end

      # Locations of the NIST-specific stylesheet, cover page, intro page,
      # and scripts, all resolved relative to @libdir (set in initialize).
      def default_file_locations(_options)
        {
          htmlstylesheet: html_doc_path("htmlstyle.scss"),
          htmlcoverpage: html_doc_path("html_nist_titlepage.html"),
          htmlintropage: html_doc_path("html_nist_intro.html"),
          scripts: html_doc_path("scripts.html"),
        }
      end

      # Returns an empty (frozen) string: this flavour injects no Google
      # Fonts <link> markup into the HTML head, presumably overriding a
      # non-empty default in the base class — confirm against
      # IsoDoc::HtmlConvert.
      def googlefonts
        <<~HEAD.freeze
        HEAD
      end

      # Emits the JavaScript `toclevel()` function used by the generated
      # page to decide which headings appear in the table of contents: each
      # heading tag from toclevel_classes (defined outside this class),
      # excluding empty headings, term numbers, elements opted out via
      # .noTOC, and the abstract/intro/foreword titles.
      def toclevel
        ret = toclevel_classes.map do |l|
          "#{l}:not(:empty):not(.TermNum):not(.noTOC):not(.AbstractTitle):"\
          "not(.IntroTitle):not(.ForewordTitle)"
        end
        <<~HEAD.freeze
          function toclevel() { return "#{ret.join(',')}";}
        HEAD
      end

      # Builds the HTML table of contents into the <div id="toc">
      # placeholder, if present. The XPath filters mirror the CSS
      # exclusions in #toclevel above. Headings without an id are assigned
      # a synthetic "toc<N>" id so the TOC entries can link to them.
      def html_toc(docxml)
        idx = docxml.at("//div[@id = 'toc']") or return docxml
        # NOTE: this string literal is exactly "\n" — the closing quote
        # must stay at the start of the next line.
        toc = "
"
        path = toclevel_classes.map do |l|
          "//main//#{l}[not(@class = 'TermNum')][not(@class = 'noTOC')]"\
          "[text()][not(@class = 'AbstractTitle')]"\
          "[not(@class = 'IntroTitle')][not(@class = 'ForewordTitle')]"
        end
        docxml.xpath(path.join(" | ")).each_with_index do |h, tocidx|
          h["id"] ||= "toc#{tocidx}"
          # html_toc_entry is presumably supplied by the base class or
          # BaseConvert — confirm.
          toc += html_toc_entry(h.name, h)
        end
        # As above, the interpolated literal ends with a bare "\n".
        idx.children = "#{toc}
"
        docxml
      end

      # Constructs the <body> element with NIST-specific attributes and
      # delegates the three body sections to make_body1/2/3.
      def make_body(xml, docxml)
        body_attr = { lang: "EN-US", link: "blue", vlink: "#954F72",
                      "xml:lang": "EN-US", class: "container" }
        xml.body **body_attr do |body|
          make_body1(body, docxml)
          make_body2(body, docxml)
          make_body3(body, docxml)
        end
      end

      # Relocates the authority (boilerplate) content rendered elsewhere in
      # the page into its placeholder <div id="authority">, then normalises
      # the class attributes of the numbered authority subsections.
      # NOTE(review): this method is order-sensitive — authority6 is
      # removed and re-homed BEFORE the main authority div is moved, and
      # the class assignments run last, after all moves.
      def authority_cleanup(docxml)
        # Bail out silently unless both the placeholder and the rendered
        # authority content exist.
        dest = docxml.at("//div[@id = 'authority']") || return
        auth = docxml.at("//div[@class = 'authority']") || return
        # Demote authority headings so they render as intro titles (and are
        # excluded from the TOC by the filters above).
        auth.xpath(".//h1 | .//h2").each { |h| h["class"] = "IntroTitle" }
        # authority6 has its own placeholder (class="authority6"); detach
        # the rendered div (id="authority6") and substitute it in.
        dest1 = docxml.xpath("//div[@class = 'authority6']")
        auth1 = docxml&.at("//div[@id = 'authority6']")&.remove
        dest1 and auth1 and dest1.each { |d| d.replace(auth1) }
        # Move the main authority block into its placeholder.
        dest.replace(auth.remove)
        # Mirror each subsection's id into its class attribute; note that
        # authority3a deliberately maps onto class "authority3".
        a = docxml.at("//div[@id = 'authority1']") and a["class"] = "authority1"
        a = docxml.at("//div[@id = 'authority2']") and a["class"] = "authority2"
        a = docxml.at("//div[@id = 'authority3']") and a["class"] = "authority3"
        a = docxml.at("//div[@id = 'authority3a']") and a["class"] = "authority3"
        a = docxml.at("//div[@id = 'authority4']") and a["class"] = "authority4"
        a = docxml.at("//div[@id = 'authority5']") and a["class"] = "authority5"
        a = docxml.at("//div[@id = 'authority6']") and a["class"] = "authority6"
      end

      # Extends the base HTML cleanup with NIST-specific term and
      # requirement cleanup (both presumably defined in BaseConvert —
      # confirm). Returns the (mutated) document.
      def cleanup(docxml)
        super
        term_cleanup(docxml)
        requirement_cleanup(docxml)
        docxml
      end

      # Renders the main document container. The call order fixes the NIST
      # front-matter sequence: foreword, abstract, keywords, boilerplate,
      # then the preface and the document body, followed by footnotes and
      # reviewer comments.
      def make_body3(body, docxml)
        body.div **{ class: "main-section" } do |div3|
          foreword docxml, div3
          abstract docxml, div3
          keywords docxml, div3
          boilerplate docxml, div3
          preface docxml, div3
          middle docxml, div3
          footnotes div3
          comments div3
        end
      end

      # Renders a term definition as a <dl class="terms_dl"> pair: the term
      # and its references in <dt>, the remainder of the definition in
      # <dd>. The two helpers are presumably supplied by BaseConvert —
      # confirm.
      def termdef_parse(node, out)
        out.dl **{ class: "terms_dl" } do |dl|
          dl.dt do |dt|
            term_and_termref_parse(node, dt)
          end
          dl.dd do |dd|
            term_rest_parse(node, dd)
          end
        end
      end

      # Mixed in last; methods defined directly on this class take
      # precedence over same-named methods in the module.
      include BaseConvert
    end
  end
end