require "isodoc"
require_relative "metadata"
require "fileutils"
require_relative "base_convert"
require_relative "init"
require_relative "word_convert_toc"

module IsoDoc
  module NIST
    # A {Converter} implementation that generates Word output, and a document
    # schema encapsulation of the document for validation
    class WordConvert < IsoDoc::WordConvert
      # Initialises the converter with Word/HTML table-of-contents depth
      # options; both default to 3 levels when unset or zero.
      # @param options [Hash] converter options; reads :doctoclevels and
      #   :htmltoclevels (string or integer, coerced with #to_i)
      def initialize(options)
        @libdir = File.dirname(__FILE__)
        super
        # .to_i yields 0 for nil/non-numeric input, so the .zero? guard
        # applies the default of 3 in both "missing" and "invalid" cases.
        @wordToClevels = options[:doctoclevels].to_i
        @wordToClevels = 3 if @wordToClevels.zero?
        @htmlToClevels = options[:htmltoclevels].to_i
        @htmlToClevels = 3 if @htmlToClevels.zero?
      end

      # Captures the document series and bibliography count from the XML,
      # then swaps in the CSWP stylesheet/cover-page assets when the series
      # abbreviation is "NIST CSWP", before delegating to the superclass.
      # @param docxml [Nokogiri::XML::Document] the Metanorma XML document
      # @param filename [String] output file name (passed through to super)
      # @param dir [String] output directory (passed through to super)
      def convert1(docxml, filename, dir)
        @series = docxml&.at(ns("//bibdata/series/abbreviation"))&.text
        @bibliographycount =
          docxml.xpath(ns("//bibliography/references | //annex/references | "\
                          "//bibliography/clause/references")).size
        if @series == "NIST CSWP"
          # CSWP (Cybersecurity White Paper) documents use their own Word
          # stylesheets, cover page, intro page and header templates.
          @wordstylesheet_name = html_doc_path("wordstyle_cswp.scss")
          @standardstylesheet_name = html_doc_path("nist_cswp.scss")
          @wordcoverpage = html_doc_path("word_nist_titlepage_cswp.html")
          @wordintropage = html_doc_path("word_nist_intro_cswp.html")
          @header = html_doc_path("header_cswp.html")
        end
        super
      end

      # Font selection for the generated document: CJK fonts for the "Hans"
      # script, Times New Roman / Arial otherwise; Courier New for monospace.
      # @param options [Hash] reads :script
      # @return [Hash] :bodyfont, :headerfont, :monospacefont CSS font stacks
      def default_fonts(options)
        {
          bodyfont: (options[:script] == "Hans" ? '"SimSun",serif' : '"Times New Roman",serif'),
          headerfont: (options[:script] == "Hans" ? '"SimHei",sans-serif' : '"Arial",sans-serif'),
          monospacefont: '"Courier New",monospace'
        }
      end

      # Default asset locations (non-CSWP variants; convert1 overrides these
      # for the CSWP series) plus Word list style identifiers.
      # @return [Hash] stylesheet/template paths and :ulstyle/:olstyle codes
      def default_file_locations(_options)
        {
          wordstylesheet: html_doc_path("wordstyle.scss"),
          standardstylesheet: html_doc_path("nist.scss"),
          header: html_doc_path("header.html"),
          wordcoverpage: html_doc_path("word_nist_titlepage.html"),
          wordintropage: html_doc_path("word_nist_intro.html"),
          ulstyle: "l3",
          olstyle: "l2"
        }
      end

      # Builds the HTML <body> with Word-specific attributes and delegates
      # to the three make_body* stages (cover, front matter, main content —
      # make_body1/make_body3 are defined elsewhere; TODO confirm roles).
      def make_body(xml, docxml)
        body_attr = { lang: "EN-US", link: "blue", vlink: "#954F72" }
        xml.body **body_attr do |body|
          make_body1(body, docxml)
          make_body2(body, docxml)
          make_body3(body, docxml)
        end
      end

      # Renders the "WordSection2" front-matter division: document info,
      # preface blocks, foreword, abstract, keywords, boilerplate and
      # preface content, ending with a placeholder paragraph and a section
      # break. Resets @prefacenum before emitting preface material.
      def make_body2(body, docxml)
        body.div **{ class: "WordSection2" } do |div2|
          @prefacenum = 0
          info docxml, div2
          preface_block docxml, div2
          foreword docxml, div2
          abstract docxml, div2
          keywords docxml, div2
          boilerplate docxml, div2
          preface docxml, div2
          div2.p { |p| p << " " } # placeholder
        end
        section_break(body)
      end

      # Final Word-document assembly: populates the template, runs the
      # XHTML round-trip cleanup, appends any landscape-page CSS to the
      # Word stylesheet, and hands off to Html2Doc. Temp files (header and
      # stylesheet) are unlinked afterwards.
      # @param result [String] the rendered document markup
      # @param filename [String] output path for Html2Doc
      # @param dir [String] working directory for Html2Doc
      # @param header [Tempfile, nil] header file for the Word document
      def toWord(result, filename, dir, header)
        result = populate_template(result, :word)
        result = from_xhtml(word_cleanup(to_xhtml(result)))
        unless @landscapestyle.nil? || @landscapestyle.empty?
          # Reopen the stylesheet tempfile and append the landscape styles.
          @wordstylesheet&.open
          @wordstylesheet&.write(@landscapestyle)
          @wordstylesheet&.close
        end
        Html2Doc.process(result, filename: filename,
                         stylesheet: @wordstylesheet&.path,
                         header_file: header&.path, dir: dir,
                         asciimathdelims: [@openmathdelim, @closemathdelim],
                         liststyles: { ul: @ulstyle, ol: @olstyle, steps: "l4" })
        header&.unlink
        @wordstylesheet&.unlink
      end

      # Normalises the rendered boilerplate ("authority") sections:
      # demotes authority h1/h2 headings to IntroTitle paragraphs, moves the
      # 'authority6' div into its placeholder destination(s), relocates the
      # whole authority block to the top of WordSection2 for non-CSWP
      # documents, and finally applies the id->class mapping below.
      # NOTE(review): when dest1 matches multiple placeholders, each
      # d.replace(auth1) reuses the same removed node — verify only one
      # placeholder is ever present, else later replacements would move it.
      def authority_cleanup(docxml)
        docxml&.xpath("//div[@class = 'authority']//h1 | "\
                      "//div[@class = 'authority']//h2")&.each do |h|
          h.name = "p"
          h["class"] = "IntroTitle"
        end
        dest1 = docxml.xpath("//div[@class = 'authority6' and "\
                             "not(@id = 'authority6')]")
        auth1 = docxml&.at("//div[@id = 'authority6']")&.remove
        dest1 and auth1 and dest1.each { |d| d.replace(auth1) }
        insert = docxml.at("//div[@class = 'WordSection2']")
        if @series != "NIST CSWP"
          # Bail out of the whole method if there is no authority block.
          auth = docxml&.at("//div[@class = 'authority']")&.remove || return
          insert.children.first.add_previous_sibling(auth)
        end
        authority_cleanup1(docxml)
      end

      # Copies each authority* div's id into its class attribute (note both
      # 'authority3' and 'authority3a' map to class "authority3").
      def authority_cleanup1(docxml)
        a = docxml.at("//div[@id = 'authority1']") and a["class"] = "authority1"
        a = docxml.at("//div[@id = 'authority2']") and a["class"] = "authority2"
        a = docxml.at("//div[@id = 'authority3']") and a["class"] = "authority3"
        a = docxml.at("//div[@id = 'authority3a']") and a["class"] = "authority3"
        a = docxml.at("//div[@id = 'authority4']") and a["class"] = "authority4"
        a = docxml.at("//div[@id = 'authority5']") and a["class"] = "authority5"
        a = docxml.at("//div[@id = 'authority6']") and a["class"] = "authority6"
      end

      # Post-render cleanup pipeline; ORDER MATTERS — annex and preface
      # heading cleanup must run before toc_insert (see inline comments).
      def cleanup(docxml)
        super
        term_cleanup(docxml)
        requirement_cleanup(docxml)
        h1_cleanup(docxml)
        word_annex_cleanup(docxml) # need it earlier
        word_preface_cleanup(docxml) # ditto, since early ToC insertion
        toc_insert(docxml, @wordToClevels)
      end

      # create fallback h1 class to deal with page breaks
      def h1_cleanup(docxml)
        docxml.xpath("//h1[not(@class)]").each do |h|
          h["class"] = "NormalTitle"
        end
      end

      # Converts annex headings at one level (h1..h6 inside a 'Section3'
      # ancestor) into styled paragraphs with class "h<i>Annex".
      # @param i [Integer] heading level, 1..6
      def word_annex_cleanup1(docxml, i)
        docxml.xpath("//h#{i}[ancestor::*[@class = 'Section3']]").each do |h2|
          h2.name = "p"
          h2["class"] = "h#{i}Annex"
        end
      end

      # Applies word_annex_cleanup1 to every heading level 1 through 6.
      def word_annex_cleanup(docxml)
        word_annex_cleanup1(docxml, 1)
        word_annex_cleanup1(docxml, 2)
        word_annex_cleanup1(docxml, 3)
        word_annex_cleanup1(docxml, 4)
        word_annex_cleanup1(docxml, 5)
        word_annex_cleanup1(docxml, 6)
      end

      # Demotes preface and authority headings to styled paragraphs
      # (h1 -> p.h1Preface, h2 inside authority divs -> p.h2Preface) so
      # they do not register as Word outline headings.
      def word_preface_cleanup(docxml)
        docxml.xpath("//h1[@class = 'AbstractTitle'] | "\
                     "//h1[@class = 'IntroTitle'] | "\
                     "//h1[@class = 'ForewordTitle'] | //h1[parent::div/@class = 'authority']").each do |h2|
          h2.name = "p"
          h2["class"] = "h1Preface"
        end
        docxml.xpath("//h2[ancestor::div/@class = 'authority']").each do |h2|
          h2.name = "p"
          h2["class"] = "h2Preface"
        end
      end

      # Runs the superclass Word cleanup, then re-applies preface heading
      # cleanup (super may have reintroduced h1/h2 headings).
      # @return [Nokogiri::XML::Document] the cleaned document
      def word_cleanup(docxml)
        super
        word_preface_cleanup(docxml)
        docxml
      end

      # Renders a single term definition as a two-cell table row
      # (term/reference on the left, definition body on the right) instead
      # of a definition list.
      def termdef_parse(node, out)
        out.table **{ class: "terms_dl" } do |dl|
          dl.tr do |tr|
            tr.td **{ valign: "top", align: "left" } do |dt|
              term_and_termref_parse(node, dt)
            end
            tr.td **{ valign: "top" } do |dd|
              term_rest_parse(node, dd)
            end
          end
        end
      end

      # Renders a glossary (dt/dd pairs) as a "terms_dl" table: each dt/dd
      # pair becomes one row; any child elements that are not dt/dd are
      # parsed after the table.
      def glossary_parse(node, out)
        out.table **attr_code(id: node["id"], class: "terms_dl") do |t|
          # each_slice(2) pairs consecutive dt/dd elements; assumes they
          # strictly alternate — TODO confirm against the input schema.
          node.elements.select { |n| dt_dd? n }.each_slice(2) do |dt, dd|
            t.tr do |v|
              v.td **attr_code(id: dt["id"], valign: "top", align: "left") do |term|
                dt_parse(dt, term)
              end
              v.td **attr_code(id: dd["id"], valign: "top") do |listitem|
                dd.children.each { |n| parse(n, listitem) }
              end
            end
          end
        end
        node.elements.reject { |n| dt_dd? n }.each { |n| parse(n, out) }
      end

      include BaseConvert
      include Init
    end
  end
end