Sha256: 6a2937b29fbbb70757a2e9f7011eeb09a76dfe09910efcc8540108981e42fba5
Contents?: true
Size: 1.75 KB
Versions: 1
Compression:
Stored size: 1.75 KB
Contents
# -*- encoding: utf-8 -*-

require 'test_helper'
require 'hexapdf/tokenizer'
require 'stringio'
require_relative 'common_tokenizer_tests'

describe HexaPDF::Tokenizer do
  # Shared examples exercising the common tokenizer contract.
  include CommonTokenizerTests

  # Creates the tokenizer under test over a binary StringIO so each
  # example controls the exact byte stream being tokenized.
  def create_tokenizer(str)
    @tokenizer = HexaPDF::Tokenizer.new(StringIO.new(str.b))
  end

  it "handles object references" do
    create_tokenizer("1 0 R 2 15 R ")
    # "num gen R" sequences are resolved into Reference objects by next_token...
    assert_equal(HexaPDF::Reference.new(1, 0), @tokenizer.next_token)
    assert_equal(HexaPDF::Reference.new(2, 15), @tokenizer.next_token)
    # ...and, after rewinding, next_object must produce the same references.
    @tokenizer.pos = 0
    assert_equal(HexaPDF::Reference.new(1, 0), @tokenizer.next_object)
    assert_equal(HexaPDF::Reference.new(2, 15), @tokenizer.next_object)
  end

  it "next_token: should not fail when resetting the position (due to use of an internal buffer)" do
    # Repeating the input many times forces the internal read buffer to be
    # refilled repeatedly, which is where position handling could break.
    create_tokenizer("0 1 2 3 4 " * 4000)
    4.times do
      1000.times do
        5.times {|i| assert_equal(i, @tokenizer.next_token) }
      end
    end
  end

  it "has a special token scanning method for use with xref reconstruction" do
    create_tokenizer(<<-EOF.chomp.gsub(/^ {8}/, ''))
        % Comment
        true
        123 50 obj
        (ignored)
        /Ignored
        [/Ignored]
        <</Ignored /Values>>
    EOF
    # Helper that advances the scanner past the current line's terminator.
    scan_to_newline = proc { @tokenizer.scan_until(/(\n|\r\n?)+|\z/) }

    # A comment is neither an integer nor a keyword, so nil is returned
    # and the rest of the line has to be skipped manually.
    assert_nil(@tokenizer.next_integer_or_keyword)
    scan_to_newline.call
    assert_equal(true, @tokenizer.next_integer_or_keyword)
    assert_equal(123, @tokenizer.next_integer_or_keyword)
    assert_equal(50, @tokenizer.next_integer_or_keyword)
    assert_equal('obj', @tokenizer.next_integer_or_keyword)
    # The four remaining lines contain other object types, none of which
    # qualifies as an integer or keyword.
    4.times do
      assert_nil(@tokenizer.next_integer_or_keyword)
      scan_to_newline.call
    end
    assert_equal(HexaPDF::Tokenizer::NO_MORE_TOKENS, @tokenizer.next_integer_or_keyword)
  end
end
Version data entries
1 entries across 1 versions & 1 rubygems
Version | Path |
---|---|
hexapdf-0.14.3 | test/hexapdf/test_tokenizer.rb |