moin  1.9.0~rc2
parsers.py
# -*- coding: utf-8 -*-
"""
    pygments.lexers.parsers
    ~~~~~~~~~~~~~~~~~~~~~~~

    Lexers for parser generators.

    :copyright: Copyright 2006-2009 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re

from pygments.lexer import RegexLexer, DelegatingLexer, \
    include, bygroups, using, this
from pygments.token import Error, Punctuation, Generic, Other, \
    Text, Comment, Operator, Keyword, Name, String, Number, Whitespace
from pygments.lexers.compiled import JavaLexer, CLexer, CppLexer, \
    ObjectiveCLexer, DLexer
from pygments.lexers.dotnet import CSharpLexer
from pygments.lexers.agile import RubyLexer, PythonLexer, PerlLexer
from pygments.lexers.web import ActionScriptLexer
# Use TextLexer during development to just focus on one part of a delegating
# lexer.
from pygments.lexers.special import TextLexer

__all__ = ['RagelLexer', 'RagelEmbeddedLexer', 'RagelCLexer', 'RagelDLexer',
           'RagelCppLexer', 'RagelObjectiveCLexer', 'RagelRubyLexer',
           'RagelJavaLexer', 'AntlrLexer', 'AntlrPythonLexer',
           'AntlrPerlLexer', 'AntlrRubyLexer', 'AntlrCppLexer',
           #'AntlrCLexer',
           'AntlrCSharpLexer', 'AntlrObjectiveCLexer',
           'AntlrJavaLexer', 'AntlrActionScriptLexer']


class RagelLexer(RegexLexer):
    """
    A pure `Ragel <http://www.complang.org/ragel/>`_ lexer.  Use this for
    fragments of Ragel.  For ``.rl`` files, use RagelEmbeddedLexer instead
    (or one of the language-specific subclasses).

    *New in Pygments 1.1.*
    """

    name = 'Ragel'
    aliases = ['ragel']
    filenames = []

    tokens = {
        'whitespace': [
            (r'\s+', Whitespace)
        ],
        'comments': [
            (r'\#.*$', Comment),
        ],
        'keywords': [
            (r'(access|action|alphtype)\b', Keyword),
            (r'(getkey|write|machine|include)\b', Keyword),
            (r'(any|ascii|extend|alpha|digit|alnum|lower|upper)\b', Keyword),
            (r'(xdigit|cntrl|graph|print|punct|space|zlen|empty)\b', Keyword)
        ],
        'numbers': [
            (r'0x[0-9A-Fa-f]+', Number.Hex),
            (r'[+-]?[0-9]+', Number.Integer),
        ],
        'literals': [
            (r'"(\\\\|\\"|[^"])*"', String), # double quote string
            (r"'(\\\\|\\'|[^'])*'", String), # single quote string
            (r'\[(\\\\|\\\]|[^\]])*\]', String), # square bracket literals
            (r'/(?!\*)(\\\\|\\/|[^/])*/', String.Regex), # regular expressions
        ],
        'identifiers': [
            (r'[a-zA-Z_][a-zA-Z_0-9]*', Name.Variable),
        ],
        'operators': [
            (r',', Operator), # Join
            (r'\||&|-|--', Operator), # Union, Intersection and Subtraction
            (r'\.|<:|:>|:>>', Operator), # Concatenation
            (r':', Operator), # Label
            (r'->', Operator), # Epsilon Transition
            (r'(>|\$|%|<|@|<>)(/|eof\b)', Operator), # EOF Actions
            (r'(>|\$|%|<|@|<>)(!|err\b)', Operator), # Global Error Actions
            (r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator), # Local Error Actions
            (r'(>|\$|%|<|@|<>)(~|to\b)', Operator), # To-State Actions
            (r'(>|\$|%|<|@|<>)(\*|from\b)', Operator), # From-State Actions
            (r'>|@|\$|%', Operator), # Transition Actions and Priorities
            (r'\*|\?|\+|{[0-9]*,[0-9]*}', Operator), # Repetition
            (r'!|\^', Operator), # Negation
            (r'\(|\)', Operator), # Grouping
        ],
        'root': [
            include('literals'),
            include('whitespace'),
            include('comments'),
            include('keywords'),
            include('numbers'),
            include('identifiers'),
            include('operators'),
            (r'{', Punctuation, 'host'),
            (r'=', Operator),
            (r';', Punctuation),
        ],
        'host': [
            (r'(' + r'|'.join(( # keep host code in largest possible chunks
                r'[^{}\'"/#]+', # exclude unsafe characters
                r'[^\\][\\][{}]', # allow escaped { or }

                # strings and comments may safely contain unsafe characters
                r'"(\\\\|\\"|[^"])*"', # double quote string
                r"'(\\\\|\\'|[^'])*'", # single quote string
                r'//.*$\n?', # single line comment
                r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
                r'\#.*$\n?', # ruby comment

                # regular expression: There's no reason for it to start
                # with a * and this stops confusion with comments.
                r'/(?!\*)(\\\\|\\/|[^/])*/',

                # / is safe now that we've handled regex and javadoc comments
                r'/',
            )) + r')+', Other),

            (r'{', Punctuation, '#push'),
            (r'}', Punctuation, '#pop'),
        ],
    }


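# Illustrative usage sketch (not part of the original module): highlighting a
# standalone Ragel fragment with RagelLexer.  The fragment and the helper name
# below are made up for demonstration purposes.
def _demo_ragel_fragment():
    from pygments import highlight
    from pygments.formatters import HtmlFormatter

    fragment = '''
        action hello { puts "hello" }
        main := ( 'h' . 'i' ) $hello ;
    '''
    # highlight() returns the formatted markup as a string.
    return highlight(fragment, RagelLexer(), HtmlFormatter())

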
class RagelEmbeddedLexer(RegexLexer):
    """
    A lexer for `Ragel`_ embedded in a host language file.

    This will only highlight Ragel statements. If you want host language
    highlighting then call the language-specific Ragel lexer.

    *New in Pygments 1.1.*
    """

    name = 'Embedded Ragel'
    aliases = ['ragel-em']
    filenames = ['*.rl']

    tokens = {
        'root': [
            (r'(' + r'|'.join(( # keep host code in largest possible chunks
                r'[^%\'"/#]+', # exclude unsafe characters
                r'%(?=[^%]|$)', # a single % sign is okay, just not 2 of them

                # strings and comments may safely contain unsafe characters
                r'"(\\\\|\\"|[^"])*"', # double quote string
                r"'(\\\\|\\'|[^'])*'", # single quote string
                r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
                r'//.*$\n?', # single line comment
                r'\#.*$\n?', # ruby/ragel comment
                r'/(?!\*)(\\\\|\\/|[^/])*/', # regular expression

                # / is safe now that we've handled regex and javadoc comments
                r'/',
            )) + r')+', Other),

            # Single Line FSM.
            # Please don't put a quoted newline in a single line FSM.
            # That's just mean. It will break this.
            (r'(%%)(?![{%])(.*)($|;)(\n?)', bygroups(Punctuation,
                                                     using(RagelLexer),
                                                     Punctuation, Text)),

            # Multi Line FSM.
            (r'(%%%%|%%){', Punctuation, 'multi-line-fsm'),
        ],
        'multi-line-fsm': [
            (r'(' + r'|'.join(( # keep ragel code in largest possible chunks.
                r'(' + r'|'.join((
                    r'[^}\'"\[/#]', # exclude unsafe characters
                    r'}(?=[^%]|$)', # } is okay as long as it's not followed by %
                    r'}%(?=[^%]|$)', # ...well, one %'s okay, just not two...
                    r'[^\\][\\][{}]', # ...and } is okay if it's escaped

                    # allow / if it's preceded with one of these symbols
                    # (ragel EOF actions)
                    r'(>|\$|%|<|@|<>)/',

                    # specifically allow regex followed immediately by *
                    # so it doesn't get mistaken for a comment
                    r'/(?!\*)(\\\\|\\/|[^/])*/\*',

                    # allow / as long as it's not followed by another / or by a *
                    r'/(?=[^/\*]|$)',

                    # We want to match as many of these as we can in one block.
                    # Not sure if we need the + sign here,
                    # does it help performance?
                    )) + r')+',

                # strings and comments may safely contain unsafe characters
                r'"(\\\\|\\"|[^"])*"', # double quote string
                r"'(\\\\|\\'|[^'])*'", # single quote string
                r"\[(\\\\|\\\]|[^\]])*\]", # square bracket literal
                r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
                r'//.*$\n?', # single line comment
                r'\#.*$\n?', # ruby/ragel comment
            )) + r')+', using(RagelLexer)),

            (r'}%%', Punctuation, '#pop'),
        ]
    }

    def analyse_text(text):
        return '@LANG: indep' in text or 0.1


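# Development aid sketch (not part of the original module): as the TextLexer
# import comment above suggests, pairing TextLexer with RagelEmbeddedLexer in a
# DelegatingLexer highlights only the Ragel sections and passes host code
# through as plain text.  The class below is hypothetical and intentionally
# left out of __all__.
class _RagelOnlyLexer(DelegatingLexer):
    """Highlight just the Ragel parts; host code is rendered as plain text."""

    name = 'Ragel Only (development aid)'
    aliases = []
    filenames = []

    def __init__(self, **options):
        super(_RagelOnlyLexer, self).__init__(TextLexer, RagelEmbeddedLexer,
                                              **options)

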
class RagelRubyLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in a Ruby host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in Ruby Host'
    aliases = ['ragel-ruby', 'ragel-rb']
    filenames = ['*.rl']

    def __init__(self, **options):
        super(RagelRubyLexer, self).__init__(RubyLexer, RagelEmbeddedLexer,
                                             **options)

    def analyse_text(text):
        return '@LANG: ruby' in text


class RagelCLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in a C host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in C Host'
    aliases = ['ragel-c']
    filenames = ['*.rl']

    def __init__(self, **options):
        super(RagelCLexer, self).__init__(CLexer, RagelEmbeddedLexer,
                                          **options)

    def analyse_text(text):
        return '@LANG: c' in text


class RagelDLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in a D host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in D Host'
    aliases = ['ragel-d']
    filenames = ['*.rl']

    def __init__(self, **options):
        super(RagelDLexer, self).__init__(DLexer, RagelEmbeddedLexer, **options)

    def analyse_text(text):
        return '@LANG: d' in text


class RagelCppLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in a CPP host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in CPP Host'
    aliases = ['ragel-cpp']
    filenames = ['*.rl']

    def __init__(self, **options):
        super(RagelCppLexer, self).__init__(CppLexer, RagelEmbeddedLexer, **options)

    def analyse_text(text):
        return '@LANG: c++' in text


class RagelObjectiveCLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in an Objective C host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in Objective C Host'
    aliases = ['ragel-objc']
    filenames = ['*.rl']

    def __init__(self, **options):
        super(RagelObjectiveCLexer, self).__init__(ObjectiveCLexer,
                                                   RagelEmbeddedLexer,
                                                   **options)

    def analyse_text(text):
        return '@LANG: objc' in text


class RagelJavaLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in a Java host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in Java Host'
    aliases = ['ragel-java']
    filenames = ['*.rl']

    def __init__(self, **options):
        super(RagelJavaLexer, self).__init__(JavaLexer, RagelEmbeddedLexer,
                                             **options)

    def analyse_text(text):
        return '@LANG: java' in text


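# Illustrative sketch (not part of the original module): the '@LANG: ...'
# markers checked by the analyse_text methods above are what lets
# pygments.lexers.guess_lexer pick a host-specific Ragel lexer.  The snippet
# and helper name are made up.
def _demo_guess_ragel_host():
    from pygments.lexers import guess_lexer

    source = '''
    # @LANG: ruby
    %%{
        machine hello;
        main := 'hi';
    }%%
    '''
    # With the marker present this should normally resolve to RagelRubyLexer.
    return guess_lexer(source)

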
class AntlrLexer(RegexLexer):
    """
    Generic `ANTLR`_ Lexer.
    Should not be called directly, instead
    use DelegatingLexer for your target language.

    *New in Pygments 1.1.*

    .. _ANTLR: http://www.antlr.org/
    """

    name = 'ANTLR'
    aliases = ['antlr']
    filenames = []

    _id =          r'[A-Za-z][A-Za-z_0-9]*'
    _TOKEN_REF =   r'[A-Z][A-Za-z_0-9]*'
    _RULE_REF =    r'[a-z][A-Za-z_0-9]*'
    _STRING_LITERAL = r'\'(?:\\\\|\\\'|[^\']*)\''
    _INT = r'[0-9]+'

    tokens = {
        'whitespace': [
            (r'\s+', Whitespace),
        ],
        'comments': [
            (r'//.*$', Comment),
            (r'/\*(.|\n)*?\*/', Comment),
        ],
        'root': [
            include('whitespace'),
            include('comments'),

            (r'(lexer|parser|tree)?(\s*)(grammar\b)(\s*)(' + _id + ')(;)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Class,
                      Punctuation)),
            # optionsSpec
            (r'options\b', Keyword, 'options'),
            # tokensSpec
            (r'tokens\b', Keyword, 'tokens'),
            # attrScope
            (r'(scope)(\s*)(' + _id + ')(\s*)({)',
             bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
                      Punctuation), 'action'),
            # exception
            (r'(catch|finally)\b', Keyword, 'exception'),
            # action
            (r'(@' + _id + ')(\s*)(::)?(\s*)(' + _id + ')(\s*)({)',
             bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
                      Name.Label, Whitespace, Punctuation), 'action'),
            # rule
            (r'((?:protected|private|public|fragment)\b)?(\s*)(' + _id + ')(!)?', \
             bygroups(Keyword, Whitespace, Name.Label, Punctuation),
             ('rule-alts', 'rule-prelims')),
        ],
        'exception': [
            (r'\n', Whitespace, '#pop'),
            (r'\s', Whitespace),
            include('comments'),

            (r'\[', Punctuation, 'nested-arg-action'),
            (r'\{', Punctuation, 'action'),
        ],
        'rule-prelims': [
            include('whitespace'),
            include('comments'),

            (r'returns\b', Keyword),
            (r'\[', Punctuation, 'nested-arg-action'),
            (r'\{', Punctuation, 'action'),
            # throwsSpec
            (r'(throws)(\s+)(' + _id + ')',
             bygroups(Keyword, Whitespace, Name.Label)),
            (r'(?:(,)(\s*)(' + _id + '))+',
             bygroups(Punctuation, Whitespace, Name.Label)), # Additional throws
            # optionsSpec
            (r'options\b', Keyword, 'options'),
            # ruleScopeSpec - scope followed by target language code or name of action
            # TODO finish implementing other possibilities for scope
            # L173 ANTLRv3.g from ANTLR book
            (r'(scope)(\s+)({)', bygroups(Keyword, Whitespace, Punctuation),
            'action'),
            (r'(scope)(\s+)(' + _id + ')(\s*)(;)',
             bygroups(Keyword, Whitespace, Name.Label, Whitespace, Punctuation)),
            # ruleAction
            (r'(@' + _id + ')(\s*)({)',
             bygroups(Name.Label, Whitespace, Punctuation), 'action'),
            # finished prelims, go to rule alts!
            (r':', Punctuation, '#pop')
        ],
        'rule-alts': [
            include('whitespace'),
            include('comments'),

            # These might need to go in a separate 'block' state triggered by (
            (r'options\b', Keyword, 'options'),
            (r':', Punctuation),

            # literals
            (r"'(\\\\|\\'|[^'])*'", String),
            (r'"(\\\\|\\"|[^"])*"', String),
            (r'<<([^>]|>[^>])>>', String),
            # identifiers
            # Tokens start with capital letter.
            (r'\$?[A-Z_][A-Za-z_0-9]*', Name.Constant),
            # Rules start with small letter.
            (r'\$?[a-z_][A-Za-z_0-9]*', Name.Variable),
            # operators
            (r'(\+|\||->|=>|=|\(|\)|\.\.|\.|\?|\*|\^|!|\#|~)', Operator),
            (r',', Punctuation),
            (r'\[', Punctuation, 'nested-arg-action'),
            (r'\{', Punctuation, 'action'),
            (r';', Punctuation, '#pop')
        ],
        'tokens': [
            include('whitespace'),
            include('comments'),
            (r'{', Punctuation),
            (r'(' + _TOKEN_REF + r')(\s*)(=)?(\s*)(' + _STRING_LITERAL
             + ')?(\s*)(;)',
             bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
                      String, Whitespace, Punctuation)),
            (r'}', Punctuation, '#pop'),
        ],
        'options': [
            include('whitespace'),
            include('comments'),
            (r'{', Punctuation),
            (r'(' + _id + r')(\s*)(=)(\s*)(' +
             '|'.join((_id, _STRING_LITERAL, _INT, '\*'))+ ')(\s*)(;)',
             bygroups(Name.Variable, Whitespace, Punctuation, Whitespace,
                      Text, Whitespace, Punctuation)),
            (r'}', Punctuation, '#pop'),
        ],
        'action': [
            (r'(' + r'|'.join(( # keep host code in largest possible chunks
                r'[^\${}\'"/\\]+', # exclude unsafe characters

                # strings and comments may safely contain unsafe characters
                r'"(\\\\|\\"|[^"])*"', # double quote string
                r"'(\\\\|\\'|[^'])*'", # single quote string
                r'//.*$\n?', # single line comment
                r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment

                # regular expression: There's no reason for it to start
                # with a * and this stops confusion with comments.
                r'/(?!\*)(\\\\|\\/|[^/])*/',

                # backslashes are okay, as long as we are not backslashing a %
                r'\\(?!%)',

                # Now that we've handled regex and javadoc comments
                # it's safe to let / through.
                r'/',
            )) + r')+', Other),
            (r'(\\)(%)', bygroups(Punctuation, Other)),
            (r'(\$[a-zA-Z]+)(\.?)(text|value)?',
             bygroups(Name.Variable, Punctuation, Name.Property)),
            (r'{', Punctuation, '#push'),
            (r'}', Punctuation, '#pop'),
        ],
        'nested-arg-action': [
            (r'(' + r'|'.join(( # keep host code in largest possible chunks.
                r'[^\$\[\]\'"/]+', # exclude unsafe characters

                # strings and comments may safely contain unsafe characters
                r'"(\\\\|\\"|[^"])*"', # double quote string
                r"'(\\\\|\\'|[^'])*'", # single quote string
                r'//.*$\n?', # single line comment
                r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment

                # regular expression: There's no reason for it to start
                # with a * and this stops confusion with comments.
                r'/(?!\*)(\\\\|\\/|[^/])*/',

                # Now that we've handled regex and javadoc comments
                # it's safe to let / through.
                r'/',
            )) + r')+', Other),


            (r'\[', Punctuation, '#push'),
            (r'\]', Punctuation, '#pop'),
            (r'(\$[a-zA-Z]+)(\.?)(text|value)?',
             bygroups(Name.Variable, Punctuation, Name.Property)),
            (r'(\\\\|\\\]|\\\[|[^\[\]])+', Other),
        ]
    }

    def analyse_text(text):
        return re.search(r'^\s*grammar\s+[a-zA-Z0-9]+\s*;', text, re.M)


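# Illustrative usage sketch (not part of the original module): lexing a small
# ANTLR grammar fragment with the generic AntlrLexer.  The grammar is made up;
# for complete grammar files one of the target-specific lexers below is the
# better choice, since it also highlights the embedded action code.
def _demo_antlr_fragment():
    from pygments import highlight
    from pygments.formatters import HtmlFormatter

    grammar = '''
    grammar Hello;
    greeting : 'hello' ID ';' ;
    ID : ('a'..'z')+ ;
    '''
    return highlight(grammar, AntlrLexer(), HtmlFormatter())

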
# http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets

# TH: I'm not aware of any language features of C++ that will cause
# incorrect lexing of C files.  Antlr doesn't appear to make a distinction,
# so just assume they're C++.  No idea how to make Objective C work in the
# future.

#class AntlrCLexer(DelegatingLexer):
#    """
#    ANTLR with C Target
#
#    *New in Pygments 1.1*
#    """
#
#    name = 'ANTLR With C Target'
#    aliases = ['antlr-c']
#    filenames = ['*.G', '*.g']
#
#    def __init__(self, **options):
#        super(AntlrCLexer, self).__init__(CLexer, AntlrLexer, **options)
#
#    def analyse_text(text):
#        return re.match(r'^\s*language\s*=\s*C\s*;', text)

class AntlrCppLexer(DelegatingLexer):
    """
    `ANTLR`_ with CPP Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With CPP Target'
    aliases = ['antlr-cpp']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrCppLexer, self).__init__(CppLexer, AntlrLexer, **options)

    def analyse_text(text):
        return AntlrLexer.analyse_text(text) and \
               re.search(r'^\s*language\s*=\s*C\s*;', text, re.M)


class AntlrObjectiveCLexer(DelegatingLexer):
    """
    `ANTLR`_ with Objective-C Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With ObjectiveC Target'
    aliases = ['antlr-objc']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrObjectiveCLexer, self).__init__(ObjectiveCLexer,
                                                   AntlrLexer, **options)

    def analyse_text(text):
        return AntlrLexer.analyse_text(text) and \
               re.search(r'^\s*language\s*=\s*ObjC\s*;', text, re.M)


class AntlrCSharpLexer(DelegatingLexer):
    """
    `ANTLR`_ with C# Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With C# Target'
    aliases = ['antlr-csharp', 'antlr-c#']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrCSharpLexer, self).__init__(CSharpLexer, AntlrLexer,
                                               **options)

    def analyse_text(text):
        return AntlrLexer.analyse_text(text) and \
               re.search(r'^\s*language\s*=\s*CSharp2\s*;', text, re.M)


class AntlrPythonLexer(DelegatingLexer):
    """
    `ANTLR`_ with Python Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With Python Target'
    aliases = ['antlr-python']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrPythonLexer, self).__init__(PythonLexer, AntlrLexer,
                                               **options)

    def analyse_text(text):
        return AntlrLexer.analyse_text(text) and \
               re.search(r'^\s*language\s*=\s*Python\s*;', text, re.M)


class AntlrJavaLexer(DelegatingLexer):
    """
    `ANTLR`_ with Java Target

    *New in Pygments 1.1*
    """

    name = 'ANTLR With Java Target'
    aliases = ['antlr-java']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrJavaLexer, self).__init__(JavaLexer, AntlrLexer,
                                             **options)

    def analyse_text(text):
        # Antlr language is Java by default
        return AntlrLexer.analyse_text(text) and 0.9


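# Illustrative sketch (not part of the original module): ANTLR's default target
# language is Java, which is why AntlrJavaLexer.analyse_text above returns a
# high score (0.9) for any recognisable grammar.  When the target is known, the
# lexer can also be selected explicitly through one of its aliases.
def _demo_antlr_java_target():
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    from pygments.lexers import get_lexer_by_name

    grammar = '''
    grammar Calc;
    expr : INT ('+' INT)* ;
    INT : ('0'..'9')+ ;
    '''
    # 'antlr-java' is one of the aliases declared by AntlrJavaLexer above.
    return highlight(grammar, get_lexer_by_name('antlr-java'), HtmlFormatter())

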
class AntlrRubyLexer(DelegatingLexer):
    """
    `ANTLR`_ with Ruby Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With Ruby Target'
    aliases = ['antlr-ruby', 'antlr-rb']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrRubyLexer, self).__init__(RubyLexer, AntlrLexer,
                                             **options)

    def analyse_text(text):
        return AntlrLexer.analyse_text(text) and \
               re.search(r'^\s*language\s*=\s*Ruby\s*;', text, re.M)


class AntlrPerlLexer(DelegatingLexer):
    """
    `ANTLR`_ with Perl Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With Perl Target'
    aliases = ['antlr-perl']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrPerlLexer, self).__init__(PerlLexer, AntlrLexer,
                                             **options)

    def analyse_text(text):
        return AntlrLexer.analyse_text(text) and \
               re.search(r'^\s*language\s*=\s*Perl5\s*;', text, re.M)


class AntlrActionScriptLexer(DelegatingLexer):
    """
    `ANTLR`_ with ActionScript Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With ActionScript Target'
    aliases = ['antlr-as', 'antlr-actionscript']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrActionScriptLexer, self).__init__(ActionScriptLexer,
                                                     AntlrLexer, **options)

    def analyse_text(text):
        return AntlrLexer.analyse_text(text) and \
               re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M)
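

# Illustrative sketch (not part of the original module): guess_lexer_for_filename
# narrows the candidates to the lexers registered for '*.g'/'*.G' above and then
# lets each candidate's analyse_text look for the "language = ...;" option.  The
# grammar and helper name are made up.
def _demo_guess_antlr_target():
    from pygments.lexers import guess_lexer_for_filename

    grammar = '''
    grammar Hello;
    options {
        language = Ruby;
    }
    greeting : 'hello' ;
    '''
    # With the Ruby target declared, this should normally resolve to
    # AntlrRubyLexer.
    return guess_lexer_for_filename('Hello.g', grammar)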