# -*- coding: utf-8 -*-
"""
    pygments.lexers.agile
    ~~~~~~~~~~~~~~~~~~~~~

    Lexers for agile languages.

    :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re

from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, \
     LexerContext, include, combined, do_insertions, bygroups, using
from pygments.token import Error, Text, Other, \
     Comment, Operator, Keyword, Name, String, Number, Generic, Punctuation
from pygments.util import get_bool_opt, get_list_opt, shebang_matches
from pygments import unistring as uni

__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
           'RubyLexer', 'RubyConsoleLexer', 'PerlLexer', 'LuaLexer',
           'MiniDLexer', 'IoLexer', 'TclLexer', 'ClojureLexer',
           'Python3Lexer', 'Python3TracebackLexer']

# b/w compatibility
from pygments.lexers.functional import SchemeLexer

line_re = re.compile('.*?\n')


class PythonLexer(RegexLexer):
    """
    For `Python <http://www.python.org>`_ source code.
    """

    name = 'Python'
    aliases = ['python', 'py']
    filenames = ['*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac']
    mimetypes = ['text/x-python', 'application/x-python']

    tokens = {
        'root': [
            (r'\n', Text),
            (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
            (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
            (r'[^\S\n]+', Text),
            (r'#.*$', Comment),
            (r'[]{}:(),;[]', Punctuation),
            (r'\\\n', Text),
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
            include('keywords'),
            (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
            (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
            (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
             'fromimport'),
            (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
             'import'),
            include('builtins'),
            include('backtick'),
            ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
            ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
            ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
            ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
            ('[uU]?"""', String, combined('stringescape', 'tdqs')),
            ("[uU]?'''", String, combined('stringescape', 'tsqs')),
            ('[uU]?"', String, combined('stringescape', 'dqs')),
            ("[uU]?'", String, combined('stringescape', 'sqs')),
            include('name'),
            include('numbers'),
        ],
        'keywords': [
            (r'(assert|break|continue|del|elif|else|except|exec|'
             r'finally|for|global|if|lambda|pass|print|raise|'
             r'return|try|while|yield|as|with)\b', Keyword),
        ],
        'builtins': [
            (r'(?<!\.)(__import__|abs|all|any|apply|basestring|bin|bool|'
             r'buffer|bytearray|bytes|callable|chr|classmethod|cmp|coerce|'
             r'compile|complex|delattr|dict|dir|divmod|enumerate|eval|'
             r'execfile|exit|file|filter|float|frozenset|getattr|globals|'
             r'hasattr|hash|hex|id|input|int|intern|isinstance|issubclass|'
             r'iter|len|list|locals|long|map|max|min|next|object|oct|open|'
             r'ord|pow|property|range|raw_input|reduce|reload|repr|reversed|'
             r'round|set|setattr|slice|sorted|staticmethod|str|sum|super|'
             r'tuple|type|unichr|unicode|vars|xrange|zip)\b', Name.Builtin),
            (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True'
             r')\b', Name.Builtin.Pseudo),
            (r'(?<!\.)(ArithmeticError|AssertionError|AttributeError|'
             r'BaseException|DeprecationWarning|EOFError|EnvironmentError|'
             r'Exception|FloatingPointError|FutureWarning|GeneratorExit|'
             r'IOError|ImportError|ImportWarning|IndentationError|'
             r'IndexError|KeyError|KeyboardInterrupt|LookupError|'
             r'MemoryError|NameError|NotImplementedError|OSError|'
             r'OverflowError|PendingDeprecationWarning|ReferenceError|'
             r'RuntimeError|RuntimeWarning|StandardError|StopIteration|'
             r'SyntaxError|SyntaxWarning|SystemError|SystemExit|TypeError|'
             r'UnboundLocalError|UnicodeDecodeError|UnicodeEncodeError|'
             r'UnicodeError|UnicodeTranslateError|UnicodeWarning|'
             r'UserWarning|ValueError|Warning|ZeroDivisionError'
             r')\b', Name.Exception),
        ],
        'numbers': [
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+', Number.Float),
            (r'0\d+', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer),
        ],
        'backtick': [
            ('`.*?`', String.Backtick),
        ],
        'name': [
            (r'@[a-zA-Z0-9_.]+', Name.Decorator),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'funcname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop'),
        ],
        'classname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'),
        ],
        'import': [
            (r'((?:\s|\\\s)+)(as)((?:\s|\\\s)+)',
             bygroups(Text, Keyword.Namespace, Text)),
            (r'[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace),
            (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
            (r'', Text, '#pop'),  # all else: go back
        ],
        'fromimport': [
            (r'((?:\s|\\\s)+)(import)\b',
             bygroups(Text, Keyword.Namespace), '#pop'),
            (r'[a-zA-Z_.][a-zA-Z0-9_.]*', Name.Namespace),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape),
        ],
        'strings': [
            (r'%(\([a-zA-Z0-9_]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
            (r'[^\\\'"%\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'%', String),
            # newlines are an error (use "nl" state)
        ],
        'nl': [
            (r'\n', String),
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\\\|\\"|\\\n', String.Escape),  # included here for raw strings
            include('strings'),
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r"\\\\|\\'|\\\n", String.Escape),  # included here for raw strings
            include('strings'),
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl'),
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            include('strings'),
            include('nl'),
        ],
    }

    def analyse_text(text):
        return shebang_matches(text, r'pythonw?(2\.\d)?')


class Python3Lexer(RegexLexer):
    """
    For `Python <http://www.python.org>`_ source code (version 3.0).

    *New in Pygments 0.10.*
    """

    name = 'Python 3'
    aliases = ['python3', 'py3']
    filenames = []  # Nothing until Python 3 gets widespread
    mimetypes = ['text/x-python3', 'application/x-python3']

    flags = re.MULTILINE | re.UNICODE

    uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)

    tokens = PythonLexer.tokens.copy()
    tokens['keywords'] = [
        (r'(assert|break|continue|del|elif|else|except|'
         r'finally|for|global|if|lambda|pass|raise|'
         r'return|try|while|yield|as|with|True|False|None)\b', Keyword),
    ]
    tokens['builtins'] = [
        (r'(?<!\.)(__import__|abs|all|any|bin|bool|bytearray|bytes|'
         r'chr|classmethod|cmp|compile|complex|delattr|dict|dir|'
         r'divmod|enumerate|eval|filter|float|format|frozenset|getattr|'
         r'globals|hasattr|hash|hex|id|input|int|isinstance|issubclass|'
         r'iter|len|list|locals|map|max|memoryview|min|next|object|oct|'
         r'open|ord|pow|print|property|range|repr|reversed|round|set|'
         r'setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|'
         r'vars|zip)\b', Name.Builtin),
        (r'(?<!\.)(self|Ellipsis|NotImplemented)\b', Name.Builtin.Pseudo),
        (r'(?<!\.)(ArithmeticError|AssertionError|AttributeError|'
         r'BaseException|BufferError|BytesWarning|DeprecationWarning|'
         r'EOFError|EnvironmentError|Exception|FloatingPointError|'
         r'FutureWarning|GeneratorExit|IOError|ImportError|'
         r'ImportWarning|IndentationError|IndexError|KeyError|'
         r'KeyboardInterrupt|LookupError|MemoryError|NameError|'
         r'NotImplementedError|OSError|OverflowError|'
         r'PendingDeprecationWarning|ReferenceError|RuntimeError|'
         r'RuntimeWarning|StopIteration|SyntaxError|SyntaxWarning|'
         r'SystemError|SystemExit|TypeError|UnboundLocalError|'
         r'UnicodeDecodeError|UnicodeEncodeError|UnicodeError|'
         r'UnicodeTranslateError|UnicodeWarning|UserWarning|ValueError|'
         r'Warning|ZeroDivisionError)\b', Name.Exception),
    ]
    tokens['numbers'] = [
        (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
        (r'0[oO][0-7]+', Number.Oct),
        (r'0[bB][01]+', Number.Bin),
        (r'0[xX][a-fA-F0-9]+', Number.Hex),
        (r'\d+', Number.Integer),
    ]
    tokens['backtick'] = []
    tokens['name'] = [
        (r'@[a-zA-Z0-9_]+', Name.Decorator),
        (uni_name, Name),
    ]
    tokens['funcname'] = [
        (uni_name, Name.Function, '#pop'),
    ]
    tokens['classname'] = [
        (uni_name, Name.Class, '#pop'),
    ]
    tokens['import'] = [
        (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
        (r'\.', Name.Namespace),
        (uni_name, Name.Namespace),
        (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
        (r'', Text, '#pop'),  # all else: go back
    ]
    tokens['fromimport'] = [
        (r'(\s+)(import)\b', bygroups(Text, Keyword), '#pop'),
        (r'\.', Name.Namespace),
        (uni_name, Name.Namespace),
    ]

    def analyse_text(text):
        return shebang_matches(text, r'pythonw?3(\.\d)?')


class PythonConsoleLexer(Lexer):
    """
    For Python console output or doctests, such as:

    .. sourcecode:: pycon

        >>> a = 'foo'
        >>> print a
        foo
        >>> 1 / 0
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
        ZeroDivisionError: integer division or modulo by zero

    Additional options:

    `python3`
        Use Python 3 lexer for code.  Default is ``False``.
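
    For example, to highlight a saved console session with Python 3
    semantics (a minimal sketch using the standard ``highlight`` helper;
    ``transcript.txt`` is just a stand-in for any saved session):

    .. sourcecode:: python

        from pygments import highlight
        from pygments.lexers import PythonConsoleLexer
        from pygments.formatters import TerminalFormatter

        session = open('transcript.txt').read()
        print highlight(session, PythonConsoleLexer(python3=True),
                        TerminalFormatter())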

    *New in Pygments 1.0.*
    """
    name = 'Python console session'
    aliases = ['pycon']
    mimetypes = ['text/x-python-doctest']

    def __init__(self, **options):
        self.python3 = get_bool_opt(options, 'python3', False)
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        if self.python3:
            pylexer = Python3Lexer(**self.options)
            tblexer = Python3TracebackLexer(**self.options)
        else:
            pylexer = PythonLexer(**self.options)
            tblexer = PythonTracebackLexer(**self.options)

        curcode = ''
        insertions = []
        curtb = ''
        tbindex = 0
        tb = 0
        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith('>>> ') or line.startswith('... '):
                tb = 0
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:4])]))
                curcode += line[4:]
            elif line.rstrip() == '...' and not tb:
                # only a new >>> prompt can end an exception block
                # otherwise an ellipsis in place of the traceback frames
                # will be mishandled
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, '...')]))
                curcode += line[3:]
            else:
                if curcode:
                    for item in do_insertions(insertions,
                                    pylexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                if (line.startswith('Traceback (most recent call last):') or
                    re.match(r'  File "[^"]+", line \d+\n$', line)):
                    tb = 1
                    curtb = line
                    tbindex = match.start()
                elif line == 'KeyboardInterrupt\n':
                    yield match.start(), Name.Class, line
                elif tb:
                    curtb += line
                    if not (line.startswith(' ') or line.strip() == '...'):
                        tb = 0
                        for i, t, v in tblexer.get_tokens_unprocessed(curtb):
                            yield tbindex+i, t, v
                else:
                    yield match.start(), Generic.Output, line
        if curcode:
            for item in do_insertions(insertions,
                                      pylexer.get_tokens_unprocessed(curcode)):
                yield item


class PythonTracebackLexer(RegexLexer):
    """
    For Python tracebacks.

    *New in Pygments 0.7.*
    """

    name = 'Python Traceback'
    aliases = ['pytb']
    filenames = ['*.pytb']
    mimetypes = ['text/x-python-traceback']

    tokens = {
        'root': [
            (r'^Traceback \(most recent call last\):\n',
             Generic.Traceback, 'intb'),
            # SyntaxError starts with this.
            (r'^(?=  File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
            (r'^.*\n', Other),
        ],
        'intb': [
            (r'^(  File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
             bygroups(Text, Name.Builtin, Text, Number, Text,
                      Name.Identifier, Text)),
            (r'^(  File )("[^"]+")(, line )(\d+)(\n)',
             bygroups(Text, Name.Builtin, Text, Number, Text)),
            (r'^(    )(.+)(\n)',
             bygroups(Text, using(PythonLexer), Text)),
            (r'^([ \t]*)(\.\.\.)(\n)',
             bygroups(Text, Comment, Text)),  # for doctests...
            (r'^(.+)(: )(.+)(\n)',
             bygroups(Name.Class, Text, Name.Identifier, Text), '#pop'),
            (r'^([a-zA-Z_][a-zA-Z0-9_]*)(:?\n)',
             bygroups(Name.Class, Text), '#pop')
        ],
    }


class Python3TracebackLexer(RegexLexer):
    """
    For Python 3.0 tracebacks, with support for chained exceptions.

    *New in Pygments 1.0.*
    """

    name = 'Python 3.0 Traceback'
    aliases = ['py3tb']
    filenames = ['*.py3tb']
    mimetypes = ['text/x-python3-traceback']

    tokens = {
        'root': [
            (r'\n', Text),
            (r'^Traceback \(most recent call last\):\n',
             Generic.Traceback, 'intb'),
            (r'^During handling of the above exception, another '
             r'exception occurred:\n\n', Generic.Traceback),
            (r'^The above exception was the direct cause of the '
             r'following exception:\n\n', Generic.Traceback),
        ],
        'intb': [
            (r'^(  File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
             bygroups(Text, Name.Builtin, Text, Number, Text,
                      Name.Identifier, Text)),
            (r'^(    )(.+)(\n)',
             bygroups(Text, using(Python3Lexer), Text)),
            (r'^([ \t]*)(\.\.\.)(\n)',
             bygroups(Text, Comment, Text)),  # for doctests...
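            # an exception line is either "ExceptionName: message" or a bare
            # exception name; both pop back to 'root', where a chained
            # "Traceback ..." header can push 'intb' again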
            (r'^(.+)(: )(.+)(\n)',
             bygroups(Name.Class, Text, Name.Identifier, Text), '#pop'),
            (r'^([a-zA-Z_][a-zA-Z0-9_]*)(:?\n)',
             bygroups(Name.Class, Text), '#pop')
        ],
    }


class RubyLexer(ExtendedRegexLexer):
    """
    For `Ruby <http://www.ruby-lang.org>`_ source code.
    """

    name = 'Ruby'
    aliases = ['rb', 'ruby']
    filenames = ['*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx']
    mimetypes = ['text/x-ruby', 'application/x-ruby']

    flags = re.DOTALL | re.MULTILINE

    def heredoc_callback(self, match, ctx):
        # okay, this is the hardest part of parsing Ruby...
        # match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line

        start = match.start(1)
        yield start, Operator, match.group(1)                   # <<-?
        yield match.start(2), String.Heredoc, match.group(2)    # quote ", ', `
        yield match.start(3), Name.Constant, match.group(3)     # heredoc name
        yield match.start(4), String.Heredoc, match.group(4)    # quote again

        heredocstack = ctx.__dict__.setdefault('heredocstack', [])
        outermost = not bool(heredocstack)
        heredocstack.append((match.group(1) == '<<-', match.group(3)))

        ctx.pos = match.start(5)
        ctx.end = match.end(5)
        # this may find other heredocs
        for i, t, v in self.get_tokens_unprocessed(context=ctx):
            yield i, t, v
        ctx.pos = match.end()

        if outermost:
            # this is the outer heredoc again, now we can process them all
            for tolerant, hdname in heredocstack:
                lines = []
                for match in line_re.finditer(ctx.text, ctx.pos):
                    if tolerant:
                        check = match.group().strip()
                    else:
                        check = match.group().rstrip()
                    if check == hdname:
                        for amatch in lines:
                            yield amatch.start(), String.Heredoc, amatch.group()
                        yield match.start(), Name.Constant, match.group()
                        ctx.pos = match.end()
                        break
                    else:
                        lines.append(match)
                else:
                    # end of heredoc not found -- error!
                    for amatch in lines:
                        yield amatch.start(), Error, amatch.group()
            ctx.end = len(ctx.text)
            del heredocstack[:]

    def gen_rubystrings_rules():
        def intp_regex_callback(self, match, ctx):
            yield match.start(1), String.Regex, match.group(1)    # begin
            nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
            for i, t, v in self.get_tokens_unprocessed(context=nctx):
                yield match.start(3)+i, t, v
            yield match.start(4), String.Regex, match.group(4)    # end[mixounse]*
            ctx.pos = match.end()

        def intp_string_callback(self, match, ctx):
            yield match.start(1), String.Other, match.group(1)
            nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
            for i, t, v in self.get_tokens_unprocessed(context=nctx):
                yield match.start(3)+i, t, v
            yield match.start(4), String.Other, match.group(4)    # end
            ctx.pos = match.end()

        states = {}
        states['strings'] = [
            # easy ones
            (r'\:([a-zA-Z_][\w_]*[\!\?]?|\*\*?|[-+]@?|'
             r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)', String.Symbol),
            (r":'(\\\\|\\'|[^'])*'", String.Symbol),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r':"', String.Symbol, 'simple-sym'),
            (r'"', String.Double, 'simple-string'),
            (r'(?<!\.)`', String.Backtick, 'simple-backtick'),
        ]

        # double-quoted string and symbol
        for name, ttype, end in ('string', String.Double, '"'), \
                                ('sym', String.Symbol, '"'), \
                                ('backtick', String.Backtick, '`'):
            states['simple-'+name] = [
                include('string-intp-escaped'),
                (r'[^\\%s#]+' % end, ttype),
                (r'[\\#]', ttype),
                (end, ttype, '#pop'),
            ]

        # braced quoted strings
        for lbrace, rbrace, name in ('\\{', '\\}', 'cb'), \
                                    ('\\[', '\\]', 'sb'), \
                                    ('\\(', '\\)', 'pa'), \
                                    ('<', '>', 'ab'):
            states[name+'-intp-string'] = [
                (r'\\[\\' + lbrace + rbrace + ']', String.Other),
                (r'(?<!\\)' + lbrace, String.Other, '#push'),
                (r'(?<!\\)' + rbrace, String.Other, '#pop'),
                include('string-intp-escaped'),
                (r'[\\#' + lbrace + rbrace + ']', String.Other),
                (r'[^\\#' + lbrace + rbrace + ']+', String.Other),
            ]
            states['strings'].append((r'%[QWx]?' + lbrace, String.Other,
                                      name+'-intp-string'))
            states[name+'-string'] = [
                (r'\\[\\' + lbrace + rbrace + ']', String.Other),
                (lbrace, String.Other, '#push'),
                (rbrace, String.Other, '#pop'),
                (r'[\\#' + lbrace + rbrace + ']', String.Other),
                (r'[^\\#' + lbrace + rbrace + ']+', String.Other),
            ]
            states['strings'].append((r'%[qsw]' + lbrace, String.Other,
                                      name+'-string'))
            states[name+'-regex'] = [
                (r'\\[\\' + lbrace + rbrace + ']', String.Regex),
                (lbrace, String.Regex, '#push'),
                (rbrace + '[mixounse]*', String.Regex, '#pop'),
                include('string-intp'),
                (r'[\\#' + lbrace + rbrace + ']', String.Regex),
                (r'[^\\#' + lbrace + rbrace + ']+', String.Regex),
            ]
            states['strings'].append((r'%r' + lbrace, String.Regex,
                                      name+'-regex'))

        # these must come after %<brace>!
        states['strings'] += [
            # %r regex
            (r'(%r([^a-zA-Z0-9]))((?:\\\2|(?!\2).)*)(\2[mixounse]*)',
             intp_regex_callback),
            # regular fancy strings with qsw
            (r'%[qsw]([^a-zA-Z0-9])((?:\\\1|(?!\1).)*)\1', String.Other),
            (r'(%[QWx]([^a-zA-Z0-9]))((?:\\\2|(?!\2).)*)(\2)',
             intp_string_callback),
            # special forms of fancy strings after operators or
            # in method calls with braces
            (r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
             bygroups(Text, String.Other, None)),
            # and because of fixed width lookbehinds the whole thing a
            # second time for line startings...
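            # (e.g. a "% foo " string opening a line is caught here, since
            # the fixed-width lookbehind above cannot match at line start)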
            (r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
             bygroups(Text, String.Other, None)),
            # all regular fancy strings without qsw
            (r'(%([^a-zA-Z0-9\s]))((?:\\\2|(?!\2).)*)(\2)',
             intp_string_callback),
        ]

        return states

    tokens = {
        'root': [
            (r'#.*?$', Comment.Single),
            (r'=begin\s.*?\n=end', Comment.Multiline),
            # keywords
            (r'(BEGIN|END|alias|begin|break|case|defined\?|'
             r'do|else|elsif|end|ensure|for|if|in|next|redo|'
             r'rescue|raise|retry|return|super|then|undef|unless|until|when|'
             r'while|yield)\b', Keyword),
            # start of function, class and module names
            (r'(module)(\s+)([a-zA-Z_][a-zA-Z0-9_]*(::[a-zA-Z_][a-zA-Z0-9_]*)*)',
             bygroups(Keyword, Text, Name.Namespace)),
            (r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
            (r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            # special methods
            (r'(initialize|new|loop|include|extend|raise|attr_reader|'
             r'attr_writer|attr_accessor|attr|catch|throw|private|'
             r'module_function|public|protected|true|false|nil)\b',
             Keyword.Pseudo),
            (r'(not|and|or)\b', Operator.Word),
            (r'(autoload|block_given|const_defined|eql|equal|frozen|include|'
             r'instance_of|is_a|iterator|kind_of|method_defined|nil|'
             r'private_method_defined|protected_method_defined|'
             r'public_method_defined|respond_to|tainted)\?', Name.Builtin),
            (r'(chomp|chop|exit|gsub|sub)!', Name.Builtin),
            (r'(?<!\.)(abort|at_exit|autoload|binding|callcc|caller|'
             r'catch|chomp|chop|eval|exec|exit|extend|fail|fork|'
             r'format|freeze|getc|gets|global_variables|gsub|'
             r'lambda|load|local_variables|loop|'
             r'open|p|print|printf|proc|putc|puts|raise|rand|'
             r'readline|readlines|require|scan|select|self|send|sleep|'
             r'split|sprintf|srand|sub|syscall|system|'
             r'test|throw|to_a|to_s|trace_var|trap|untrace_var|'
             r'warn)\b', Name.Builtin),
            # normal heredoc
            (r'(?<!\w)(<<-?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)',
             heredoc_callback),
            # empty string heredocs
            (r'(<<-?)("|\')()(\2)(.*?\n)', heredoc_callback),
            (r'__END__', Comment.Preproc, 'end-part'),
            # multiline regex (after keywords or assignments)
            (r'(?:^|(?<=[=<>~!])|'
             r'(?<=(?:\s|;)when\s)|'
             r'(?<=(?:\s|;)or\s)|'
             r'(?<=(?:\s|;)and\s)|'
             r'(?<=(?:\s|;|\.)index\s)|'
             r'(?<=(?:\s|;|\.)scan\s)|'
             r'(?<=(?:\s|;|\.)sub\s)|'
             r'(?<=(?:\s|;|\.)sub!\s)|'
             r'(?<=(?:\s|;|\.)gsub\s)|'
             r'(?<=(?:\s|;|\.)gsub!\s)|'
             r'(?<=(?:\s|;|\.)match\s)|'
             r'(?<=(?:\s|;)if\s)|'
             r'(?<=(?:\s|;)elsif\s)|'
             r'(?<=^when\s)|'
             r'(?<=^index\s)|'
             r'(?<=^scan\s)|'
             r'(?<=^sub\s)|'
             r'(?<=^gsub\s)|'
             r'(?<=^sub!\s)|'
             r'(?<=^gsub!\s)|'
             r'(?<=^match\s)|'
             r'(?<=^if\s)|'
             r'(?<=^elsif\s)'
             r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'),
            # multiline regex (in method calls)
            (r'(?<=\(|,)/', String.Regex, 'multiline-regex'),
            # multiline regex (this time the funny no whitespace rule)
            (r'(\s+)(/[^\s=])', String.Regex, 'multiline-regex'),
            # lex numbers and ignore following regular expressions which
            # are division operators in fact (grrrr. i hate that. any
            # better ideas?)
            # since pygments 0.7 we also eat a "?" operator after numbers
            # so that the char operator does not work. Chars are not allowed
            # there so that you can use the ternary operator.
            # stupid example:
            #   x>=0?n[x]:""
            (r'(0_?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
             bygroups(Number.Oct, Text, Operator)),
            (r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
             bygroups(Number.Hex, Text, Operator)),
            (r'(0b[01]+(?:_[01]+)*)(\s*)([/?])?',
             bygroups(Number.Bin, Text, Operator)),
            (r'([\d]+(?:_\d+)*)(\s*)([/?])?',
             bygroups(Number.Integer, Text, Operator)),
            # Names
            (r'@@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Class),
            (r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Instance),
            (r'\$[a-zA-Z0-9_]+', Name.Variable.Global),
            (r'\$[!@&`\'+~=/\\,;.<>_*$?:"]', Name.Variable.Global),
            (r'\$-[0adFiIlpvw]', Name.Variable.Global),
            (r'::', Operator),
            include('strings'),
            # chars
            (r'\?(\\[MC]-)*'  # modifiers
             r'(\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)'
             r'(?!\w)',
             String.Char),
            (r'[A-Z][a-zA-Z0-9_]+', Name.Constant),
            # this is needed because ruby attributes can look
            # like keywords (class) or like method names ending in ? or !
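            # (e.g. in ``obj.class`` the trailing word is lexed as a plain
            # Name, not as the ``class`` keyword)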
            (r'(\.|::)([a-zA-Z_]\w*[\!\?]?|[*%&^`~+-/\[<>=])',
             bygroups(Operator, Name)),
            (r'[a-zA-Z_][\w_]*[\!\?]?', Name),
            (r'(\[|\]|\*\*|<>?|>=|<=|<=>|=~|={3}|'
             r'!~|&&?|\|\||\.{1,3})', Operator),
            (r'[-+/*%=<>&!^|~]=?', Operator),
            (r'[(){};,/?:\\]', Punctuation),
            (r'\s+', Text)
        ],
        'funcname': [
            (r'\(', Punctuation, 'defexpr'),
            (r'(?:([a-zA-Z_][a-zA-Z0-9_]*)(\.))?'
             r'([a-zA-Z_][\w_]*[\!\?]?|\*\*?|[-+]@?|'
             r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)',
             bygroups(Name.Class, Operator, Name.Function), '#pop'),
            (r'', Text, '#pop')
        ],
        'classname': [
            (r'\(', Punctuation, 'defexpr'),
            (r'<<', Operator, '#pop'),
            (r'[A-Z_][\w_]*', Name.Class, '#pop'),
            (r'', Text, '#pop')
        ],
        'defexpr': [
            (r'(\))(\.|::)?', bygroups(Punctuation, Operator), '#pop'),
            (r'\(', Operator, '#push'),
            include('root')
        ],
        'in-intp': [
            ('}', String.Interpol, '#pop'),
            include('root'),
        ],
        'string-intp': [
            (r'#{', String.Interpol, 'in-intp'),
            (r'#@@?[a-zA-Z_][a-zA-Z0-9_]*', String.Interpol),
            (r'#\$[a-zA-Z_][a-zA-Z0-9_]*', String.Interpol)
        ],
        'string-intp-escaped': [
            include('string-intp'),
            (r'\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})',
             String.Escape)
        ],
        'interpolated-regex': [
            include('string-intp'),
            (r'[\\#]', String.Regex),
            (r'[^\\#]+', String.Regex),
        ],
        'interpolated-string': [
            include('string-intp'),
            (r'[\\#]', String.Other),
            (r'[^\\#]+', String.Other),
        ],
        'multiline-regex': [
            include('string-intp'),
            (r'\\\\', String.Regex),
            (r'\\/', String.Regex),
            (r'[\\#]', String.Regex),
            (r'[^\\/#]+', String.Regex),
            (r'/[mixounse]*', String.Regex, '#pop'),
        ],
        'end-part': [
            (r'.+', Comment.Preproc, '#pop')
        ]
    }
    tokens.update(gen_rubystrings_rules())

    def analyse_text(text):
        return shebang_matches(text, r'ruby(1\.\d)?')


class RubyConsoleLexer(Lexer):
    """
    For Ruby interactive console (**irb**) output like:

    .. sourcecode:: rbcon

        irb(main):001:0> a = 1
        => 1
        irb(main):002:0> puts a
        1
        => nil
    """
    name = 'Ruby irb session'
    aliases = ['rbcon', 'irb']
    mimetypes = ['text/x-ruby-shellsession']

    _prompt_re = re.compile(r'irb\([a-zA-Z_][a-zA-Z0-9_]*\):\d{3}:\d+[>*"\'] '
                            r'|>> |\?> ')

    def get_tokens_unprocessed(self, text):
        rblexer = RubyLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            m = self._prompt_re.match(line)
            if m is not None:
                end = m.end()
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:end])]))
                curcode += line[end:]
            else:
                if curcode:
                    for item in do_insertions(insertions,
                                    rblexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                yield match.start(), Generic.Output, line
        if curcode:
            for item in do_insertions(insertions,
                                      rblexer.get_tokens_unprocessed(curcode)):
                yield item


class PerlLexer(RegexLexer):
    """
    For `Perl <http://perl.org>`_ source code.
    """

    name = 'Perl'
    aliases = ['perl', 'pl']
    filenames = ['*.pl', '*.pm']
    mimetypes = ['text/x-perl', 'application/x-perl']

    flags = re.DOTALL | re.MULTILINE
    # TODO: hand this to a Perl hacker who knows how to parse Perl...
    tokens = {
        'balanced-regex': [
            (r'/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex, '#pop'),
            (r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'),
            (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
            (r'{(\\\\|\\}|[^}])*}[egimosx]*', String.Regex, '#pop'),
            (r'<(\\\\|\\>|[^>])*>[egimosx]*', String.Regex, '#pop'),
            (r'\[(\\\\|\\\]|[^\]])*\][egimosx]*', String.Regex, '#pop'),
            (r'\((\\\\|\\\)|[^\)])*\)[egimosx]*', String.Regex, '#pop'),
            (r'@(\\\\|\\\@|[^\@])*@[egimosx]*', String.Regex, '#pop'),
            (r'%(\\\\|\\\%|[^\%])*%[egimosx]*', String.Regex, '#pop'),
            (r'\$(\\\\|\\\$|[^\$])*\$[egimosx]*', String.Regex, '#pop'),
        ],
        'root': [
            (r'\#.*?$', Comment.Single),
            (r'^=[a-zA-Z0-9]+\s+.*?\n=cut', Comment.Multiline),
            (r'(case|continue|do|else|elsif|for|foreach|if|last|my|'
             r'next|our|redo|reset|then|unless|until|while|use|'
             r'print|new|BEGIN|END|return)\b', Keyword),
            (r'(format)(\s+)([a-zA-Z0-9_]+)(\s*)(=)(\s*\n)',
             bygroups(Keyword, Text, Name, Text, Punctuation, Text), 'format'),
            (r'(eq|lt|gt|le|ge|ne|not|and|or|cmp)\b', Operator.Word),
            # common delimiters
            (r's/(\\\\|\\/|[^/])*/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex),
            (r's!(\\\\|\\!|[^!])*!(\\\\|\\!|[^!])*![egimosx]*', String.Regex),
            (r's\\(\\\\|[^\\])*\\(\\\\|[^\\])*\\[egimosx]*', String.Regex),
            (r's@(\\\\|\\@|[^@])*@(\\\\|\\@|[^@])*@[egimosx]*', String.Regex),
            (r's%(\\\\|\\%|[^%])*%(\\\\|\\%|[^%])*%[egimosx]*', String.Regex),
            # balanced delimiters
            (r's{(\\\\|\\}|[^}])*}\s*', String.Regex, 'balanced-regex'),
            (r's<(\\\\|\\>|[^>])*>\s*', String.Regex, 'balanced-regex'),
            (r's\[(\\\\|\\\]|[^\]])*\]\s*', String.Regex, 'balanced-regex'),
            (r's\((\\\\|\\\)|[^\)])*\)\s*', String.Regex, 'balanced-regex'),

            (r'm?/(\\\\|\\/|[^/\n])*/[gcimosx]*', String.Regex),
            (r'm(?=[/!\\{<\[\(@%\$])', String.Regex, 'balanced-regex'),
            (r'((?<==~)|(?<=\())\s*/(\\\\|\\/|[^/])*/[gcimosx]*',
             String.Regex),
            (r'\s+', Text),
            (r'(abs|accept|alarm|atan2|bind|binmode|bless|caller|chdir|'
             r'chmod|chomp|chop|chown|chr|chroot|close|closedir|connect|'
             r'continue|cos|crypt|dbmclose|dbmopen|defined|delete|die|'
             r'dump|each|endgrent|endhostent|endnetent|endprotoent|'
             r'endpwent|endservent|eof|eval|exec|exists|exit|exp|fcntl|'
             r'fileno|flock|fork|format|formline|getc|getgrent|getgrgid|'
             r'getgrnam|gethostbyaddr|gethostbyname|gethostent|getlogin|'
             r'getnetbyaddr|getnetbyname|getnetent|getpeername|getpgrp|'
             r'getppid|getpriority|getprotobyname|getprotobynumber|'
             r'getprotoent|getpwent|getpwnam|getpwuid|getservbyname|'
             r'getservbyport|getservent|getsockname|getsockopt|glob|gmtime|'
             r'goto|grep|hex|import|index|int|ioctl|join|keys|kill|last|'
             r'lc|lcfirst|length|link|listen|local|localtime|log|lstat|'
             r'map|mkdir|msgctl|msgget|msgrcv|msgsnd|my|next|no|oct|open|'
             r'opendir|ord|our|pack|package|pipe|pop|pos|printf|'
             r'prototype|push|quotemeta|rand|read|readdir|'
             r'readline|readlink|readpipe|recv|redo|ref|rename|require|'
             r'reverse|rewinddir|rindex|rmdir|scalar|seek|seekdir|'
             r'select|semctl|semget|semop|send|setgrent|sethostent|setnetent|'
             r'setpgrp|setpriority|setprotoent|setpwent|setservent|'
             r'setsockopt|shift|shmctl|shmget|shmread|shmwrite|shutdown|'
             r'sin|sleep|socket|socketpair|sort|splice|split|sprintf|sqrt|'
             r'srand|stat|study|substr|symlink|syscall|sysopen|sysread|'
             r'sysseek|system|syswrite|tell|telldir|tie|tied|time|times|tr|'
             r'truncate|uc|ucfirst|umask|undef|unlink|unpack|unshift|untie|'
             r'utime|values|vec|wait|waitpid|wantarray|warn|write'
             r')\b', Name.Builtin),
            (r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b',
             Name.Builtin.Pseudo),
            (r'<<([\'"]?)([a-zA-Z_][a-zA-Z0-9_]*)\1;?\n.*?\n\2\n',
             String),
            (r'__END__', Comment.Preproc, 'end-part'),
            (r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global),
            (r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", Name.Variable.Global),
            (r'[$@%#]+', Name.Variable, 'varname'),
            (r'0_?[0-7]+(_[0-7]+)*', Number.Oct),
            (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex),
            (r'0b[01]+(_[01]+)*', Number.Bin),
            (r'\d+', Number.Integer),
            (r"'(\\\\|\\'|[^'])*'", String),
            (r'"(\\\\|\\"|[^"])*"', String),
            (r'`(\\\\|\\`|[^`])*`', String.Backtick),
            (r'<([^\s>]+)>', String.Regex),
            (r'(q|qq|qw|qr|qx)\{', String.Other, 'cb-string'),
            (r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'),
            (r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'),
            (r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'),
            (r'(q|qq|qw|qr|qx)([^a-zA-Z0-9])(.|\n)*?\2', String.Other),
            (r'package\s+', Keyword, 'modulename'),
            (r'sub\s+', Keyword, 'funcname'),
            (r'(\[\]|\*\*|::|<<|>>|>=|<=|<=>|={3}|!=|=~|'
             r'!~|&&?|\|\||\.{1,3})', Operator),
            (r'[-+/*%=<>&^|!\\~]=?', Operator),
            (r'[\(\)\[\]:;,<>/\?\{\}]', Punctuation),  # yes, there's no
                                                       # shortage of
                                                       # punctuation in Perl!
            (r'(?=\w)', Name, 'name'),
        ],
        'format': [
            (r'\.\n', String.Interpol, '#pop'),
            (r'[^\n]*\n', String.Interpol),
        ],
        'varname': [
            (r'\s+', Text),
            (r'\{', Punctuation, '#pop'),    # hash syntax?
            (r'\)|,', Punctuation, '#pop'),  # argument specifier
            (r'[a-zA-Z0-9_]+::', Name.Namespace),
            (r'[a-zA-Z0-9_:]+', Name.Variable, '#pop'),
        ],
        'name': [
            (r'[a-zA-Z0-9_]+::', Name.Namespace),
            (r'[a-zA-Z0-9_:]+', Name, '#pop'),
            (r'[A-Z_]+(?=[^a-zA-Z0-9_])', Name.Constant, '#pop'),
            (r'(?=[^a-zA-Z0-9_])', Text, '#pop'),
        ],
        'modulename': [
            (r'[a-zA-Z_][\w_]*', Name.Namespace, '#pop')
        ],
        'funcname': [
            (r'[a-zA-Z_][\w_]*[\!\?]?', Name.Function),
            (r'\s+', Text),
            # argument declaration
            (r'(\([$@%]*\))(\s*)', bygroups(Punctuation, Text)),
            (r'.*?{', Punctuation, '#pop'),
            (r';', Punctuation, '#pop'),
        ],
        'cb-string': [
            (r'\\[\{\}\\]', String.Other),
            (r'\\', String.Other),
            (r'\{', String.Other, 'cb-string'),
            (r'\}', String.Other, '#pop'),
            (r'[^\{\}\\]+', String.Other)
        ],
        'rb-string': [
            (r'\\[\(\)\\]', String.Other),
            (r'\\', String.Other),
            (r'\(', String.Other, 'rb-string'),
            (r'\)', String.Other, '#pop'),
            (r'[^\(\)]+', String.Other)
        ],
        'sb-string': [
            (r'\\[\[\]\\]', String.Other),
            (r'\\', String.Other),
            (r'\[', String.Other, 'sb-string'),
            (r'\]', String.Other, '#pop'),
            (r'[^\[\]]+', String.Other)
        ],
        'lt-string': [
            (r'\\[\<\>\\]', String.Other),
            (r'\\', String.Other),
            (r'\<', String.Other, 'lt-string'),
            (r'\>', String.Other, '#pop'),
            (r'[^\<\>]+', String.Other)
        ],
        'end-part': [
            (r'.+', Comment.Preproc, '#pop')
        ]
    }

    def analyse_text(text):
        if shebang_matches(text, r'perl(\d\.\d\.\d)?'):
            return True
        if 'my $' in text:
            return 0.9
        return 0.1  # who knows, might still be Perl!


class LuaLexer(RegexLexer):
    """
    For `Lua <http://www.lua.org>`_ source code.

    Additional options accepted:

    `func_name_highlighting`
        If given and ``True``, highlight builtin function names
        (default: ``True``).
    `disabled_modules`
        If given, must be a list of module names whose function names
        should not be highlighted. By default all modules are
        highlighted.

        To get a list of allowed modules have a look into the
        `_luabuiltins` module:

        .. sourcecode:: pycon

            >>> from pygments.lexers._luabuiltins import MODULES
            >>> MODULES.keys()
            ['string', 'coroutine', 'modules', 'io', 'basic', ...]
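
    For example, to turn off builtin highlighting for the ``io`` module
    (a minimal sketch; ``TerminalFormatter`` is just one possible
    formatter, and the Lua snippet is made up):

    .. sourcecode:: python

        from pygments import highlight
        from pygments.lexers import LuaLexer
        from pygments.formatters import TerminalFormatter

        code = 'io.write("hello")'
        print highlight(code, LuaLexer(disabled_modules=['io']),
                        TerminalFormatter())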
""" name = 'Lua' aliases = ['lua'] filenames = ['*.lua'] mimetypes = ['text/x-lua', 'application/x-lua'] tokens = { 'root': [ # lua allows a file to start with a shebang (r'#!(.*?)$', Comment.Preproc), (r'', Text, 'base'), ], 'base': [ (r'(?s)--\[(=*)\[.*?\]\1\]', Comment.Multiline), ('--.*$', Comment.Single), (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float), (r'(?i)\d+e[+-]?\d+', Number.Float), ('(?i)0x[0-9a-f]*', Number.Hex), (r'\d+', Number.Integer), (r'\n', Text), (r'[^\S\n]', Text), (r'(?s)\[(=*)\[.*?\]\1\]', String.Multiline), (r'[\[\]\{\}\(\)\.,:;]', Punctuation), (r'(==|~=|<=|>=|\.\.|\.\.\.|[=+\-*/%^<>#])', Operator), (r'(and|or|not)\b', Operator.Word), ('(break|do|else|elseif|end|for|if|in|repeat|return|then|until|' r'while)\b', Keyword), (r'(local)\b', Keyword.Declaration), (r'(true|false|nil)\b', Keyword.Constant), (r'(function)(\s+)', bygroups(Keyword, Text), 'funcname'), (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), (r'[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)?', Name), # multiline strings (r'(?s)\[(=*)\[(.*?)\]\1\]', String), ("'", String.Single, combined('stringescape', 'sqs')), ('"', String.Double, combined('stringescape', 'dqs')) ], 'funcname': [ ('[A-Za-z_][A-Za-z0-9_]*', Name.Function, '#pop'), # inline function ('\(', Punctuation, '#pop'), ], 'classname': [ ('[A-Za-z_][A-Za-z0-9_]*', Name.Class, '#pop') ], # if I understand correctly, every character is valid in a lua string, # so this state is only for later corrections 'string': [ ('.', String) ], 'stringescape': [ (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape) ], 'sqs': [ ("'", String, '#pop'), include('string') ], 'dqs': [ ('"', String, '#pop'), include('string') ] } def __init__(self, **options): self.func_name_highlighting = get_bool_opt( options, 'func_name_highlighting', True) self.disabled_modules = get_list_opt(options, 'disabled_modules', []) self._functions = set() if self.func_name_highlighting: from pygments.lexers._luabuiltins import MODULES for mod, func in MODULES.iteritems(): if mod not in self.disabled_modules: self._functions.update(func) RegexLexer.__init__(self, **options) def get_tokens_unprocessed(self, text): for index, token, value in \ RegexLexer.get_tokens_unprocessed(self, text): if token is Name: if value in self._functions: yield index, Name.Builtin, value continue elif '.' in value: a, b = value.split('.') yield index, Name, a yield index + len(a), Punctuation, u'.' yield index + len(a) + 1, Name, b continue yield index, token, value class MiniDLexer(RegexLexer): """ For `MiniD `_ (a D-like scripting language) source. 
""" name = 'MiniD' filenames = ['*.md'] aliases = ['minid'] mimetypes = ['text/x-minidsrc'] tokens = { 'root': [ (r'\n', Text), (r'\s+', Text), # Comments (r'//(.*?)\n', Comment.Single), (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), (r'/\+', Comment.Multiline, 'nestedcomment'), # Keywords (r'(as|assert|break|case|catch|class|continue|coroutine|default' r'|do|else|finally|for|foreach|function|global|namespace' r'|if|import|in|is|local|module|return|super|switch' r'|this|throw|try|vararg|while|with|yield)\b', Keyword), (r'(false|true|null)\b', Keyword.Constant), # FloatLiteral (r'([0-9][0-9_]*)?\.[0-9_]+([eE][+\-]?[0-9_]+)?', Number.Float), # IntegerLiteral # -- Binary (r'0[Bb][01_]+', Number), # -- Octal (r'0[Cc][0-7_]+', Number.Oct), # -- Hexadecimal (r'0[xX][0-9a-fA-F_]+', Number.Hex), # -- Decimal (r'(0|[1-9][0-9_]*)', Number.Integer), # CharacterLiteral (r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-9]{1,3}""" r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""", String.Char ), # StringLiteral # -- WysiwygString (r'@"(""|.)*"', String), # -- AlternateWysiwygString (r'`(``|.)*`', String), # -- DoubleQuotedString (r'"(\\\\|\\"|[^"])*"', String), # Tokens ( r'(~=|\^=|%=|\*=|==|!=|>>>=|>>>|>>=|>>|>=|<=>|\?=|-\>' r'|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.|/=)' r'|[-/.&$@|\+<>!()\[\]{}?,;:=*%^~#\\]', Punctuation ), # Identifier (r'[a-zA-Z_]\w*', Name), ], 'nestedcomment': [ (r'[^+/]+', Comment.Multiline), (r'/\+', Comment.Multiline, '#push'), (r'\+/', Comment.Multiline, '#pop'), (r'[+/]', Comment.Multiline), ], } class IoLexer(RegexLexer): """ For `Io `_ (a small, prototype-based programming language) source. *New in Pygments 0.10.* """ name = 'Io' filenames = ['*.io'] aliases = ['io'] mimetypes = ['text/x-iosrc'] tokens = { 'root': [ (r'\n', Text), (r'\s+', Text), # Comments (r'//(.*?)\n', Comment.Single), (r'#(.*?)\n', Comment.Single), (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), (r'/\+', Comment.Multiline, 'nestedcomment'), # DoubleQuotedString (r'"(\\\\|\\"|[^"])*"', String), # Operators (r'::=|:=|=|\(|\)|;|,|\*|-|\+|>|<|@|!|/|\||\^|\.|%|&|\[|\]|\{|\}', Operator), # keywords (r'(clone|do|doFile|doString|method|for|if|else|elseif|then)\b', Keyword), # constants (r'(nil|false|true)\b', Name.Constant), # names ('(Object|list|List|Map|args|Sequence|Coroutine|File)\b', Name.Builtin), ('[a-zA-Z_][a-zA-Z0-9_]*', Name), # numbers (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), (r'\d+', Number.Integer) ], 'nestedcomment': [ (r'[^+/]+', Comment.Multiline), (r'/\+', Comment.Multiline, '#push'), (r'\+/', Comment.Multiline, '#pop'), (r'[+/]', Comment.Multiline), ] } class TclLexer(RegexLexer): """ For Tcl source code. 
    *New in Pygments 0.10.*
    """

    keyword_cmds_re = (
        r'\b(after|apply|array|break|catch|continue|elseif|else|error|'
        r'eval|expr|for|foreach|global|if|namespace|proc|rename|return|'
        r'set|switch|then|trace|unset|update|uplevel|upvar|variable|'
        r'vwait|while)\b'
    )

    builtin_cmds_re = (
        r'\b(append|bgerror|binary|cd|chan|clock|close|concat|dde|dict|'
        r'encoding|eof|exec|exit|fblocked|fconfigure|fcopy|file|'
        r'fileevent|flush|format|gets|glob|history|http|incr|info|interp|'
        r'join|lappend|lassign|lindex|linsert|list|llength|load|loadTk|'
        r'lrange|lrepeat|lreplace|lreverse|lsearch|lset|lsort|mathfunc|'
        r'mathop|memory|msgcat|open|package|pid|pkg::create|pkg_mkIndex|'
        r'platform|platform::shell|puts|pwd|re_syntax|read|refchan|'
        r'regexp|registry|regsub|scan|seek|socket|source|split|string|'
        r'subst|tell|time|tm|unknown|unload)\b'
    )

    name = 'Tcl'
    aliases = ['tcl']
    filenames = ['*.tcl']
    mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl']

    def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""):
        return [
            (keyword_cmds_re, Keyword, 'params' + context),
            (builtin_cmds_re, Name.Builtin, 'params' + context),
            (r'([\w\.\-]+)', Name.Variable, 'params' + context),
            (r'#', Comment, 'comment'),
        ]

    tokens = {
        'root': [
            include('command'),
            include('basic'),
            include('data'),
            (r'}', Keyword),  # HACK: somehow we miscounted our braces
        ],
        'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
        'command-in-brace': _gen_command_rules(keyword_cmds_re,
                                               builtin_cmds_re,
                                               "-in-brace"),
        'command-in-bracket': _gen_command_rules(keyword_cmds_re,
                                                 builtin_cmds_re,
                                                 "-in-bracket"),
        'command-in-paren': _gen_command_rules(keyword_cmds_re,
                                               builtin_cmds_re,
                                               "-in-paren"),
        'basic': [
            (r'\(', Keyword, 'paren'),
            (r'\[', Keyword, 'bracket'),
            (r'\{', Keyword, 'brace'),
            (r'"', String.Double, 'string'),
            (r'(eq|ne|in|ni)\b', Operator.Word),
            (r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator),
        ],
        'data': [
            (r'\s+', Text),
            (r'0x[a-fA-F0-9]+', Number.Hex),
            (r'0[0-7]+', Number.Oct),
            (r'\d+\.\d+', Number.Float),
            (r'\d+', Number.Integer),
            (r'\$([\w\.\-\:]+)', Name.Variable),
            (r'([\w\.\-\:]+)', Text),
        ],
        'params': [
            (r';', Keyword, '#pop'),
            (r'\n', Text, '#pop'),
            (r'(else|elseif|then)', Keyword),
            include('basic'),
            include('data'),
        ],
        'params-in-brace': [
            (r'}', Keyword, ('#pop', '#pop')),
            include('params')
        ],
        'params-in-paren': [
            (r'\)', Keyword, ('#pop', '#pop')),
            include('params')
        ],
        'params-in-bracket': [
            (r'\]', Keyword, ('#pop', '#pop')),
            include('params')
        ],
        'string': [
            (r'\[', String.Double, 'string-square'),
            (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\])', String.Double),
            (r'"', String.Double, '#pop')
        ],
        'string-square': [
            (r'\[', String.Double, 'string-square'),
            (r'(?s)(\\\\|\\[0-7]+|\\.|\\\n|[^\]\\])', String.Double),
            (r'\]', String.Double, '#pop')
        ],
        'brace': [
            (r'}', Keyword, '#pop'),
            include('command-in-brace'),
            include('basic'),
            include('data'),
        ],
        'paren': [
            (r'\)', Keyword, '#pop'),
            include('command-in-paren'),
            include('basic'),
            include('data'),
        ],
        'bracket': [
            (r'\]', Keyword, '#pop'),
            include('command-in-bracket'),
            include('basic'),
            include('data'),
        ],
        'comment': [
            (r'.*[^\\]\n', Comment, '#pop'),
            (r'.*\\\n', Comment),
        ],
    }

    def analyse_text(text):
        return shebang_matches(text, r'(tcl)')


class ClojureLexer(RegexLexer):
    """
    Lexer for `Clojure <http://clojure.org/>`_ source code.
    *New in Pygments 0.11.*
    """
    name = 'Clojure'
    aliases = ['clojure', 'clj']
    filenames = ['*.clj']
    mimetypes = ['text/x-clojure', 'application/x-clojure']

    keywords = [
        'fn', 'def', 'defn', 'defmacro', 'defmethod', 'defmulti', 'defn-',
        'defstruct', 'if', 'cond', 'let', 'for'
    ]
    builtins = [
        '.', '..', '*', '+', '-', '->', '..', '/', '<', '<=', '=', '==', '>',
        '>=', 'accessor', 'agent', 'agent-errors', 'aget', 'alength',
        'all-ns', 'alter', 'and', 'append-child', 'apply', 'array-map',
        'aset', 'aset-boolean', 'aset-byte', 'aset-char', 'aset-double',
        'aset-float', 'aset-int', 'aset-long', 'aset-short', 'assert',
        'assoc', 'await', 'await-for', 'bean', 'binding', 'bit-and',
        'bit-not', 'bit-or', 'bit-shift-left', 'bit-shift-right', 'bit-xor',
        'boolean', 'branch?', 'butlast', 'byte', 'cast', 'char', 'children',
        'class', 'clear-agent-errors', 'comment', 'commute', 'comp',
        'comparator', 'complement', 'concat', 'conj', 'cons', 'constantly',
        'construct-proxy', 'contains?', 'count', 'create-ns',
        'create-struct', 'cycle', 'dec', 'deref', 'difference', 'disj',
        'dissoc', 'distinct', 'doall', 'doc', 'dorun', 'doseq', 'dosync',
        'dotimes', 'doto', 'double', 'down', 'drop', 'drop-while', 'edit',
        'end?', 'ensure', 'eval', 'every?', 'false?', 'ffirst', 'file-seq',
        'filter', 'find', 'find-doc', 'find-ns', 'find-var', 'first',
        'float', 'flush', 'fnseq', 'frest', 'gensym', 'get',
        'get-proxy-class', 'hash-map', 'hash-set', 'identical?', 'identity',
        'if-let', 'import', 'in-ns', 'inc', 'index', 'insert-child',
        'insert-left', 'insert-right', 'inspect-table', 'inspect-tree',
        'instance?', 'int', 'interleave', 'intersection', 'into',
        'into-array', 'iterate', 'join', 'key', 'keys', 'keyword',
        'keyword?', 'last', 'lazy-cat', 'lazy-cons', 'left', 'lefts',
        'line-seq', 'list', 'list*', 'load', 'load-file', 'locking', 'long',
        'loop', 'macroexpand', 'macroexpand-1', 'make-array', 'make-node',
        'map', 'map-invert', 'map?', 'mapcat', 'max', 'max-key', 'memfn',
        'merge', 'merge-with', 'meta', 'min', 'min-key', 'name', 'namespace',
        'neg?', 'new', 'newline', 'next', 'nil?', 'node', 'not', 'not-any?',
        'not-every?', 'not=', 'ns-imports', 'ns-interns', 'ns-map',
        'ns-name', 'ns-publics', 'ns-refers', 'ns-resolve', 'ns-unmap',
        'nth', 'nthrest', 'or', 'parse', 'partial', 'path', 'peek', 'pop',
        'pos?', 'pr', 'pr-str', 'print', 'print-str', 'println',
        'println-str', 'prn', 'prn-str', 'project', 'proxy',
        'proxy-mappings', 'quot', 'rand', 'rand-int', 'range', 're-find',
        're-groups', 're-matcher', 're-matches', 're-pattern', 're-seq',
        'read', 'read-line', 'reduce', 'ref', 'ref-set', 'refer', 'rem',
        'remove', 'remove-method', 'remove-ns', 'rename', 'rename-keys',
        'repeat', 'replace', 'replicate', 'resolve', 'rest',
        'resultset-seq', 'reverse', 'rfirst', 'right', 'rights', 'root',
        'rrest', 'rseq', 'second', 'select', 'select-keys', 'send',
        'send-off', 'seq', 'seq-zip', 'seq?', 'set', 'short', 'slurp',
        'some', 'sort', 'sort-by', 'sorted-map', 'sorted-map-by',
        'sorted-set', 'special-symbol?', 'split-at', 'split-with', 'str',
        'string?', 'struct', 'struct-map', 'subs', 'subvec', 'symbol',
        'symbol?', 'sync', 'take', 'take-nth', 'take-while', 'test', 'time',
        'to-array', 'to-array-2d', 'tree-seq', 'true?', 'union', 'up',
        'update-proxy', 'val', 'vals', 'var-get', 'var-set', 'var?',
        'vector', 'vector-zip', 'vector?', 'when', 'when-first', 'when-let',
        'when-not', 'with-local-vars', 'with-meta', 'with-open',
        'with-out-str', 'xml-seq', 'xml-zip', 'zero?', 'zipmap', 'zipper']

    # valid names for identifiers
    # well, names can't consist entirely of numbers, but
    # this should be good enough for now
    valid_name = r'[a-zA-Z0-9!$%&*+,/:<=>?@^_~-]+'

    tokens = {
        'root': [
            # the comments - always starting with semicolon
            # and going to the end of the line
            (r';.*$', Comment.Single),

            # whitespaces - usually not relevant
            (r'\s+', Text),

            # numbers
            (r'-?\d+\.\d+', Number.Float),
            (r'-?\d+', Number.Integer),
            # support for uncommon kinds of numbers -
            # have to figure out what the characters mean
            #(r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number),

            # strings, symbols and characters
            (r'"(\\\\|\\"|[^"])*"', String),
            (r"'" + valid_name, String.Symbol),
            (r"\\([()/'\".'_!§$%& ?;=#+-]{1}|[a-zA-Z0-9]+)", String.Char),

            # constants
            (r'(#t|#f)', Name.Constant),

            # special operators
            (r"('|#|`|,@|,|\.)", Operator),

            # highlight the keywords
            ('(%s)' % '|'.join([
                re.escape(entry) + ' ' for entry in keywords]),
             Keyword),

            # first variable in a quoted string like
            # '(this is syntactic sugar)
            (r"(?<='\()" + valid_name, Name.Variable),
            (r"(?<=#\()" + valid_name, Name.Variable),

            # highlight the builtins
            ("(?<=\()(%s)" % '|'.join([
                re.escape(entry) + ' ' for entry in builtins]),
             Name.Builtin),

            # the remaining functions
            (r'(?<=\()' + valid_name, Name.Function),
            # find the remaining variables
            (valid_name, Name.Variable),

            # Clojure accepts vector notation
            (r'(\[|\])', Punctuation),

            # Clojure accepts map notation
            (r'(\{|\})', Punctuation),

            # the famous parentheses!
            (r'(\(|\))', Punctuation),
        ],
    }
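

# A minimal self-test sketch: running this module directly lexes a few
# invented snippets with some of the lexers above and prints the first
# tokens of each, via the standard ``get_tokens`` API.  The sample code
# strings are made up for illustration only.
if __name__ == '__main__':
    _samples = [
        (PythonLexer(), "def spam():\n    return 42\n"),
        (RubyLexer(), "def spam\n  42\nend\n"),
        (LuaLexer(), "function spam() return 42 end\n"),
    ]
    for _lexer, _code in _samples:
        print _lexer.name
        for _ttype, _value in list(_lexer.get_tokens(_code))[:5]:
            print '    %-25s %r' % (_ttype, _value)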