Sha256: f8b11e4e7cfdb7329741373d0afa97e1d7088deef9ebd486ecac42d2c9a6f8f1

Contents?: true

Size: 1.49 KB

Versions: 5

Compression:

Stored size: 1.49 KB

Contents

module Tokenizers
  # The base indexing tokenizer.
  #
  # Override in indexing subclasses and define in configuration.
  #
  class Index < Base
    
    # Default preprocessing hook for indexing.
    #
    # Applies, in order:
    #   1. Umlaut substitution (only if a substituter is configured).
    #   2. Downcasing.
    #   3. Removal of illegal expressions.
    #   4. Contraction.
    #   5. Removal of non-single stopwords (stopwords that occur together
    #      with other words).
    #
    def preprocess text
      text = substituter.substitute(text) if substituter?
      text.downcase!
      remove_illegals text
      contract text
      # Unlike the query tokenizer, single stopwords are kept here:
      # an indexed thing whose name is just "UND" (a stopword) should
      # not lose its name.
      #
      remove_non_single_stopwords text
      text
    end
    
    # Default pretokenizing hook for indexing.
    #
    # Splits the text into words, then runs pattern normalization
    # on each word, returning the array of words.
    #
    # TODO Rename into wordize? Or some such?
    #
    def pretokenize text
      words = split text
      words.collect! { |w| normalize_with_patterns w; w }
    end
    
    # Does not actually return a token, but a
    # symbol "token".
    #
    def token_for text
      symbolize(text)
    end
    
    # Destructively rejects tokens that are too short (or blank).
    #
    # Override in subclasses to redefine behaviour.
    #
    def reject tokens
      tokens.reject! { |t| t.to_s.length < 2 }
    end
    
  end
end

Version data entries

5 entries across 5 versions & 1 rubygem

Version Path
picky-0.9.4 lib/picky/tokenizers/index.rb
picky-0.9.3 lib/picky/tokenizers/index.rb
picky-0.9.2 lib/picky/tokenizers/index.rb
picky-0.9.1 lib/picky/tokenizers/index.rb
picky-0.9.0 lib/picky/tokenizers/index.rb