templates.py (moin 1.9.0~rc2)

# -*- coding: utf-8 -*-
"""
    pygments.lexers.templates
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Lexers for various template engines' markup.

    :copyright: Copyright 2006-2009 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re
try:
    set
except NameError:
    from sets import Set as set

from pygments.lexers.web import \
     PhpLexer, HtmlLexer, XmlLexer, JavascriptLexer, CssLexer
from pygments.lexers.agile import PythonLexer
from pygments.lexers.compiled import JavaLexer
from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
     include, using, this
from pygments.token import Error, Punctuation, \
     Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
from pygments.util import html_doctype_matches, looks_like_xml

__all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
           'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer',
           'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer',
           'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer',
           'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer',
           'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer',
           'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer',
           'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer',
           'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer',
           'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MakoLexer',
           'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer',
           'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
           'CheetahXmlLexer', 'CheetahJavascriptLexer',
           'EvoqueLexer', 'EvoqueHtmlLexer', 'EvoqueXmlLexer']


class ErbLexer(Lexer):
    """
    Generic `ERB <http://ruby-doc.org/core/classes/ERB.html>`_ (Ruby Templating)
    lexer.

    Just highlights Ruby code between the preprocessor directives; other data
    is left untouched by the lexer.

    All options are also forwarded to the `RubyLexer`.
    """

    name = 'ERB'
    aliases = ['erb']
    mimetypes = ['application/x-ruby-templating']

    _block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)

    def __init__(self, **options):
        from pygments.lexers.agile import RubyLexer
        self.ruby_lexer = RubyLexer(**options)
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """
        Since ERB doesn't allow "<%" and other tags inside of Ruby
        blocks, we can get away with a simple split-based approach here,
        even though it, too, would fail on such (invalid) input.
        """
        tokens = self._block_re.split(text)
        tokens.reverse()
        state = idx = 0
        try:
            while True:
                # text
                if state == 0:
                    val = tokens.pop()
                    yield idx, Other, val
                    idx += len(val)
                    state = 1
                # block starts
                elif state == 1:
                    tag = tokens.pop()
                    # literals
                    if tag in ('<%%', '%%>'):
                        yield idx, Other, tag
                        idx += 3
                        state = 0
                    # comment
                    elif tag == '<%#':
                        yield idx, Comment.Preproc, tag
                        val = tokens.pop()
                        yield idx + 3, Comment, val
                        idx += 3 + len(val)
                        state = 2
                    # blocks or output
                    elif tag in ('<%', '<%=', '<%-'):
                        yield idx, Comment.Preproc, tag
                        idx += len(tag)
                        data = tokens.pop()
                        r_idx = 0
                        for r_idx, r_token, r_value in \
                            self.ruby_lexer.get_tokens_unprocessed(data):
                            yield r_idx + idx, r_token, r_value
                        idx += len(data)
                        state = 2
                    elif tag in ('%>', '-%>'):
                        yield idx, Error, tag
                        idx += len(tag)
                        state = 0
                    # % raw ruby statements
                    else:
                        yield idx, Comment.Preproc, tag[0]
                        r_idx = 0
                        for r_idx, r_token, r_value in \
                            self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
                            yield idx + 1 + r_idx, r_token, r_value
                        idx += len(tag)
                        state = 0
                # block ends
                elif state == 2:
                    tag = tokens.pop()
                    if tag not in ('%>', '-%>'):
                        yield idx, Other, tag
                    else:
                        yield idx, Comment.Preproc, tag
                    idx += len(tag)
                    state = 0
        except IndexError:
            return

    def analyse_text(text):
        if '<%' in text and '%>' in text:
            return 0.4

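# Example (an illustrative sketch, not part of the original module): the
# state machine above alternates between text, tag, and block states, so
# a small snippet comes out roughly as follows.
#
#     from pygments.lexers import ErbLexer
#     for idx, token, value in ErbLexer().get_tokens_unprocessed(
#             'Hello <%= @name %>!'):
#         print(idx, token, value)
#     # 0  Token.Other           'Hello '
#     # 6  Token.Comment.Preproc '<%='
#     # ...Ruby tokens for ' @name ' starting at index 9...
#     # 16 Token.Comment.Preproc '%>'
#     # 18 Token.Other           '!'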

class SmartyLexer(RegexLexer):
    """
    Generic `Smarty <http://smarty.php.net/>`_ template lexer.

    Just highlights Smarty code between the preprocessor directives; other
    data is left untouched by the lexer.
    """

    name = 'Smarty'
    aliases = ['smarty']
    filenames = ['*.tpl']
    mimetypes = ['application/x-smarty']

    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            (r'[^{]+', Other),
            (r'(\{)(\*.*?\*)(\})',
             bygroups(Comment.Preproc, Comment, Comment.Preproc)),
            (r'(\{php\})(.*?)(\{/php\})',
             bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
                      Comment.Preproc)),
            (r'(\{)(/?[a-zA-Z_][a-zA-Z0-9_]*)(\s*)',
             bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
            (r'\{', Comment.Preproc, 'smarty')
        ],
        'smarty': [
            (r'\s+', Text),
            (r'\}', Comment.Preproc, '#pop'),
            (r'#[a-zA-Z_][a-zA-Z0-9_]*#', Name.Variable),
            (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z0-9_]+)*', Name.Variable),
            (r'[~!%^&*()+=|\[\]:;,.<>/?{}@-]', Operator),
            (r'(true|false|null)\b', Keyword.Constant),
            (r"[0-9](\.[0-9]*)?([eE][+-]?[0-9]+)?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
00175             (r'"(\\\\|\\"|[^"])*"', String.Double),
00176             (r"'(\\\\|\\'|[^'])*'", String.Single),
00177             (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Attribute)
00178         ]
00179     }
00180 
    def analyse_text(text):
        rv = 0.0
        if re.search(r'\{if\s+.*?\}.*?\{/if\}', text):
            rv += 0.15
        if re.search(r'\{include\s+file=.*?\}', text):
            rv += 0.15
        if re.search(r'\{foreach\s+.*?\}.*?\{/foreach\}', text):
            rv += 0.15
        if re.search(r'\{\$.*?\}', text):
            rv += 0.01
        return rv

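# Example (illustrative sketch, not part of the original module):
# analyse_text() is the hook pygments.lexers.guess_lexer() uses to score
# candidate lexers, so Smarty-looking input should float this family of
# lexers to the top.
#
#     from pygments.lexers import guess_lexer
#     lexer = guess_lexer('{include file="header.tpl"}\n'
#                         '{if $user}Hello {$user.name}{/if}\n')
#     # {include file=...} and {if}...{/if} score 0.15 each and {$...}
#     # adds 0.01, so a Smarty(-based) lexer is the likely guess here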

class DjangoLexer(RegexLexer):
    """
    Generic `django <http://www.djangoproject.com/documentation/templates/>`_
    and `jinja <http://wsgiarea.pocoo.org/jinja/>`_ template lexer.

    It just highlights Django/Jinja code between the preprocessor directives;
    other data is left untouched by the lexer.
    """

    name = 'Django/Jinja'
    aliases = ['django', 'jinja']
    mimetypes = ['application/x-django-templating', 'application/x-jinja']

    flags = re.M | re.S

    tokens = {
        'root': [
            (r'[^{]+', Other),
            (r'\{\{', Comment.Preproc, 'var'),
            # jinja/django comments
            (r'\{[*#].*?[*#]\}', Comment),
            # django comments
            (r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Comment, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # raw jinja blocks
            (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Text, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # filter blocks
            (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
             'block'),
            (r'(\{%)(-?\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Comment.Preproc, Text, Keyword), 'block'),
            (r'\{', Other)
        ],
        'varnames': [
            (r'(\|)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Operator, Text, Name.Function)),
            (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Keyword, Text, Name.Function)),
            (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
            (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
             r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
             Keyword),
            (r'(loop|block|super|forloop)\b', Name.Builtin),
            (r'[a-zA-Z][a-zA-Z0-9_]*', Name.Variable),
            (r'\.[a-zA-Z0-9_]+', Name.Variable),
            (r':?"(\\\\|\\"|[^"])*"', String.Double),
            (r":?'(\\\\|\\'|[^'])*'", String.Single),
            (r'([{}()\[\]+\-*/,:]|[><=]=?)', Operator),
00250             (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
00251              r"0[xX][0-9a-fA-F]+[Ll]?", Number),
        ],
        'var': [
            (r'\s+', Text),
            (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames')
        ],
        'block': [
            (r'\s+', Text),
            (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames'),
            (r'.', Punctuation)
        ]
    }

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\{%\s*(block|extends)', text) is not None:
            rv += 0.4
        if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
            rv += 0.1
        if re.search(r'\{\{.*?\}\}', text) is not None:
            rv += 0.1
        return rv

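# Example (illustrative sketch, not part of the original module): '{{'
# enters the 'var' state and '{%' enters 'block', so both delimiters come
# back as Comment.Preproc wrapping Keyword/Name.Variable runs.
#
#     from pygments.lexers import DjangoLexer
#     tokens = list(DjangoLexer().get_tokens(
#         '{% for item in items %}{{ item.name }}{% endfor %}'))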

class MyghtyLexer(RegexLexer):
    """
    Generic `myghty templates`_ lexer. Code that isn't Myghty
    markup is yielded as `Token.Other`.

    *New in Pygments 0.6.*

    .. _myghty templates: http://www.myghty.org/
    """

    name = 'Myghty'
    aliases = ['myghty']
    filenames = ['*.myt', 'autodelegate']
    mimetypes = ['application/x-myghty']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(<%(def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
             bygroups(Name.Tag, None, Text, Name.Function, Name.Tag,
                      using(this), Name.Tag)),
            (r'(<%(\w+))(.*?)(>)(.*?)(</%\2\s*>)(?s)',
             bygroups(Name.Tag, None, Name.Function, Name.Tag,
                      using(PythonLexer), Name.Tag)),
            (r'(<&[^|])(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'(<&\|)(.*?)(,.*?)?(&>)(?s)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'</&>', Name.Tag),
            (r'(<%!?)(.*?)(%>)(?s)',
             bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
            (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
            (r'(?<=^)(%)([^\n]*)(\n|\Z)',
             bygroups(Name.Tag, using(PythonLexer), Other)),
            (r"""(?sx)
                 (.+?)               # anything, followed by:
                 (?:
                  (?<=\n)(?=[%#]) |  # an eval or comment line
                  (?=</?[%&]) |      # a substitution or block or
                                     # call start or end
                                     # - don't consume
                  (\\\n) |           # an escaped newline
                  \Z                 # end of string
                 )""", bygroups(Other, Operator)),
        ]
    }


class MyghtyHtmlLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `HtmlLexer`.

    *New in Pygments 0.6.*
    """

    name = 'HTML+Myghty'
    aliases = ['html+myghty']
    mimetypes = ['text/html+myghty']

    def __init__(self, **options):
        super(MyghtyHtmlLexer, self).__init__(HtmlLexer, MyghtyLexer,
                                              **options)

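# Note (illustrative summary, not part of the original module): in a
# DelegatingLexer such as the subclasses throughout this file, the
# language lexer (here MyghtyLexer) runs first, everything it yields as
# Token.Other is buffered and re-lexed with the root lexer (here
# HtmlLexer), and the two token streams are then merged by position.
#
#     from pygments.lexers import MyghtyHtmlLexer
#     tokens = list(MyghtyHtmlLexer().get_tokens(
#         '<p><% "hello" %></p>'))  # HTML markup and Python code both lex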

class MyghtyXmlLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `XmlLexer`.

    *New in Pygments 0.6.*
    """

    name = 'XML+Myghty'
    aliases = ['xml+myghty']
    mimetypes = ['application/xml+myghty']

    def __init__(self, **options):
        super(MyghtyXmlLexer, self).__init__(XmlLexer, MyghtyLexer,
                                             **options)


class MyghtyJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `JavascriptLexer`.

    *New in Pygments 0.6.*
    """

    name = 'JavaScript+Myghty'
    aliases = ['js+myghty', 'javascript+myghty']
    mimetypes = ['application/x-javascript+myghty',
                 'text/x-javascript+myghty',
                 'text/javascript+myghty']

    def __init__(self, **options):
        super(MyghtyJavascriptLexer, self).__init__(JavascriptLexer,
                                                    MyghtyLexer, **options)


class MyghtyCssLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `CssLexer`.

    *New in Pygments 0.6.*
    """

    name = 'CSS+Myghty'
    aliases = ['css+myghty']
    mimetypes = ['text/css+myghty']

    def __init__(self, **options):
        super(MyghtyCssLexer, self).__init__(CssLexer, MyghtyLexer,
                                             **options)


class MakoLexer(RegexLexer):
    """
    Generic `mako templates`_ lexer. Code that isn't Mako
    markup is yielded as `Token.Other`.

    *New in Pygments 0.7.*

    .. _mako templates: http://www.makotemplates.org/
    """

    name = 'Mako'
    aliases = ['mako']
    filenames = ['*.mao']
    mimetypes = ['application/x-mako']

    tokens = {
        'root': [
            (r'(\s*)(%)(\s*end(?:\w+))(\n|\Z)',
             bygroups(Text, Comment.Preproc, Keyword, Other)),
            (r'(\s*)(%)([^\n]*)(\n|\Z)',
             bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
            (r'(\s*)(##[^\n]*)(\n|\Z)',
             bygroups(Text, Comment.Preproc, Other)),
            (r'(?s)<%doc>.*?</%doc>', Comment.Preproc),
            (r'(<%)([\w\.\:]+)',
             bygroups(Comment.Preproc, Name.Builtin), 'tag'),
            (r'(</%)([\w\.\:]+)(>)',
             bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
            (r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'),
            (r'(<%(?:!?))(.*?)(%>)(?s)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'(\$\{)(.*?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'''(?sx)
                (.+?)                # anything, followed by:
                (?:
                 (?<=\n)(?=%|\#\#) | # an eval or comment line
                 (?=\#\*) |          # multiline comment
                 (?=</?%) |          # a python block
                                     # call start or end
                 (?=\$\{) |          # a substitution
                 (?<=\n)(?=\s*%) |
                                     # - don't consume
                 (\\\n) |            # an escaped newline
                 \Z                  # end of string
                )
            ''', bygroups(Other, Operator)),
            (r'\s+', Text),
        ],
        'ondeftags': [
            (r'<%', Comment.Preproc),
            (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
            include('tag'),
        ],
        'tag': [
            (r'((?:\w+)\s*=)\s*(".*?")',
             bygroups(Name.Attribute, String)),
            (r'/?\s*>', Comment.Preproc, '#pop'),
            (r'\s+', Text),
        ],
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }

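# Example (illustrative sketch, not part of the original module): control
# lines starting with '%' are handed to PythonLexer by the second 'root'
# rule, '% endfor' hits the dedicated end-tag rule, and ${...} goes
# through the substitution rule.
#
#     from pygments.lexers import MakoLexer
#     tokens = list(MakoLexer().get_tokens(
#         '% for u in users:\n  ${u.name}\n% endfor\n'))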

class MakoHtmlLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `HtmlLexer`.

    *New in Pygments 0.7.*
    """

    name = 'HTML+Mako'
    aliases = ['html+mako']
    mimetypes = ['text/html+mako']

    def __init__(self, **options):
        super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
                                            **options)


class MakoXmlLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `XmlLexer`.

    *New in Pygments 0.7.*
    """

    name = 'XML+Mako'
    aliases = ['xml+mako']
    mimetypes = ['application/xml+mako']

    def __init__(self, **options):
        super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
                                           **options)


class MakoJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `JavascriptLexer`.

    *New in Pygments 0.7.*
    """

    name = 'JavaScript+Mako'
    aliases = ['js+mako', 'javascript+mako']
    mimetypes = ['application/x-javascript+mako',
                 'text/x-javascript+mako',
                 'text/javascript+mako']

    def __init__(self, **options):
        super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
                                                  MakoLexer, **options)


class MakoCssLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `CssLexer`.

    *New in Pygments 0.7.*
    """

    name = 'CSS+Mako'
    aliases = ['css+mako']
    mimetypes = ['text/css+mako']

    def __init__(self, **options):
        super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
                                           **options)


# Genshi and Cheetah lexers courtesy of Matt Good.

class CheetahPythonLexer(Lexer):
    """
    Lexer for handling Cheetah's special $ tokens in Python syntax.
    """

    def get_tokens_unprocessed(self, text):
        pylexer = PythonLexer(**self.options)
        for pos, type_, value in pylexer.get_tokens_unprocessed(text):
            if type_ == Token.Error and value == '$':
                type_ = Comment.Preproc
            yield pos, type_, value


class CheetahLexer(RegexLexer):
    """
    Generic `cheetah templates`_ lexer. Code that isn't Cheetah
    markup is yielded as `Token.Other`.  This also works for
    `spitfire templates`_, which use the same syntax.

    .. _cheetah templates: http://www.cheetahtemplate.org/
    .. _spitfire templates: http://code.google.com/p/spitfire/
    """

    name = 'Cheetah'
    aliases = ['cheetah', 'spitfire']
    filenames = ['*.tmpl', '*.spt']
    mimetypes = ['application/x-cheetah', 'application/x-spitfire']

    tokens = {
        'root': [
            (r'(##[^\n]*)$',
             bygroups(Comment)),
            (r'#[*](.|\n)*?[*]#', Comment),
            (r'#end[^#\n]*(?:#|$)', Comment.Preproc),
            (r'#slurp$', Comment.Preproc),
            (r'(#[a-zA-Z]+)([^#\n]*)(#|$)',
             bygroups(Comment.Preproc, using(CheetahPythonLexer),
                      Comment.Preproc)),
            # TODO support other Python syntax like $foo['bar']
            (r'(\$)([a-zA-Z_][a-zA-Z0-9_\.]*[a-zA-Z0-9_])',
             bygroups(Comment.Preproc, using(CheetahPythonLexer))),
            (r'(\$\{!?)(.*?)(\})(?s)',
             bygroups(Comment.Preproc, using(CheetahPythonLexer),
                      Comment.Preproc)),
            (r'''(?sx)
                (.+?)               # anything, followed by:
                (?:
                 (?=[#][#a-zA-Z]*) |   # an eval comment
                 (?=\$[a-zA-Z_{]) | # a substitution
                 \Z                 # end of string
                )
            ''', Other),
            (r'\s+', Text),
        ],
    }

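# Example (illustrative sketch, not part of the original module): '#'
# starts a directive and '$' a placeholder; CheetahPythonLexer above
# keeps the bare '$' from surfacing as an error token.
#
#     from pygments.lexers import CheetahLexer
#     tokens = list(CheetahLexer().get_tokens(
#         '#if $user\nHello $user.name\n#end if\n'))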

class CheetahHtmlLexer(DelegatingLexer):
    """
    Subclass of the `CheetahLexer` that highlights unlexed data
    with the `HtmlLexer`.
    """

    name = 'HTML+Cheetah'
    aliases = ['html+cheetah', 'html+spitfire']
    mimetypes = ['text/html+cheetah', 'text/html+spitfire']

    def __init__(self, **options):
        super(CheetahHtmlLexer, self).__init__(HtmlLexer, CheetahLexer,
                                               **options)


class CheetahXmlLexer(DelegatingLexer):
    """
    Subclass of the `CheetahLexer` that highlights unlexed data
    with the `XmlLexer`.
    """

    name = 'XML+Cheetah'
    aliases = ['xml+cheetah', 'xml+spitfire']
    mimetypes = ['application/xml+cheetah', 'application/xml+spitfire']

    def __init__(self, **options):
        super(CheetahXmlLexer, self).__init__(XmlLexer, CheetahLexer,
                                              **options)


class CheetahJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `CheetahLexer` that highlights unlexed data
    with the `JavascriptLexer`.
    """

    name = 'JavaScript+Cheetah'
    aliases = ['js+cheetah', 'javascript+cheetah',
               'js+spitfire', 'javascript+spitfire']
    mimetypes = ['application/x-javascript+cheetah',
                 'text/x-javascript+cheetah',
                 'text/javascript+cheetah',
                 'application/x-javascript+spitfire',
                 'text/x-javascript+spitfire',
                 'text/javascript+spitfire']

    def __init__(self, **options):
        super(CheetahJavascriptLexer, self).__init__(JavascriptLexer,
                                                     CheetahLexer, **options)


class GenshiTextLexer(RegexLexer):
    """
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ text
    templates.
    """

    name = 'Genshi Text'
    aliases = ['genshitext']
    mimetypes = ['application/x-genshi-text', 'text/x-genshi']

    tokens = {
        'root': [
            (r'[^#\$\s]+', Other),
            (r'^(\s*)(##.*)$', bygroups(Text, Comment)),
            (r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'),
            include('variable'),
            (r'[#\$\s]', Other),
        ],
        'directive': [
            (r'\n', Text, '#pop'),
            (r'(?:def|for|if)\s+.*', using(PythonLexer), '#pop'),
            (r'(choose|when|with)([^\S\n]+)(.*)',
             bygroups(Keyword, Text, using(PythonLexer)), '#pop'),
            (r'(choose|otherwise)\b', Keyword, '#pop'),
            (r'(end\w*)([^\S\n]*)(.*)',
             bygroups(Keyword, Text, Comment), '#pop'),
        ],
        'variable': [
            (r'(?<!\$)(\$\{)(.+?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)',
             Name.Variable),
        ]
    }

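# Example (illustrative sketch, not part of the original module): Genshi
# text templates mix '#'-directives, handled by the 'directive' state,
# with '$'-substitutions, handled by the 'variable' patterns.
#
#     from pygments.lexers import GenshiTextLexer
#     tokens = list(GenshiTextLexer().get_tokens(
#         '#for item in items\n  ${item.title}\n#end\n'))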

class GenshiMarkupLexer(RegexLexer):
    """
    Base lexer for Genshi markup, used by `HtmlGenshiLexer` and
    `GenshiLexer`.
    """

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'[^<\$]+', Other),
            (r'(<\?python)(.*?)(\?>)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # yield style and script blocks as Other
            (r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
            (r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
            (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
            include('variable'),
            (r'[<\$]', Other),
        ],
        'pytag': [
            (r'\s+', Text),
            (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'pyattr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'pyattr': [
            ('(")(.*?)(")', bygroups(String, using(PythonLexer), String),
             '#pop'),
            ("(')(.*?)(')", bygroups(String, using(PythonLexer), String),
             '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
        'tag': [
            (r'\s+', Text),
            (r'py:[a-zA-Z0-9_-]+\s*=', Name.Attribute, 'pyattr'),
            (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('"', String, 'attr-dstring'),
            ("'", String, 'attr-sstring'),
            (r'[^\s>]*', String, '#pop')
        ],
        'attr-dstring': [
            ('"', String, '#pop'),
            include('strings'),
            ("'", String)
        ],
        'attr-sstring': [
            ("'", String, '#pop'),
            include('strings'),
            ("'", String)
        ],
        'strings': [
            ('[^"\'$]+', String),
            include('variable')
        ],
        'variable': [
            (r'(?<!\$)(\$\{)(.+?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)',
             Name.Variable),
        ]
    }

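# Example (illustrative sketch, not part of the original module): Genshi
# markup embeds Python both in <?python ...?> processing instructions and
# in py:-prefixed attributes, and the states above route each through
# PythonLexer.
#
#     from pygments.lexers import HtmlGenshiLexer
#     tokens = list(HtmlGenshiLexer().get_tokens(
#         '<ul><li py:for="item in items">${item}</li></ul>'))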

class HtmlGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ HTML templates.
    """

    name = 'HTML+Genshi'
    aliases = ['html+genshi', 'html+kid']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+genshi']

    def __init__(self, **options):
        super(HtmlGenshiLexer, self).__init__(HtmlLexer, GenshiMarkupLexer,
                                              **options)

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
            rv += 0.2
        if re.search(r'py:(.*?)=["\']', text) is not None:
            rv += 0.2
        return rv + HtmlLexer.analyse_text(text) - 0.01


class GenshiLexer(DelegatingLexer):
    """
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ XML templates.
    """

    name = 'Genshi'
    aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid']
    filenames = ['*.kid']
    alias_filenames = ['*.xml']
    mimetypes = ['application/x-genshi', 'application/x-kid']

    def __init__(self, **options):
        super(GenshiLexer, self).__init__(XmlLexer, GenshiMarkupLexer,
                                          **options)

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
            rv += 0.2
        if re.search(r'py:(.*?)=["\']', text) is not None:
            rv += 0.2
        return rv + XmlLexer.analyse_text(text) - 0.01


class JavascriptGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights JavaScript code in Genshi text templates.
    """

    name = 'JavaScript+Genshi Text'
    aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext',
               'javascript+genshi']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+genshi',
                 'text/x-javascript+genshi',
                 'text/javascript+genshi']

    def __init__(self, **options):
        super(JavascriptGenshiLexer, self).__init__(JavascriptLexer,
                                                    GenshiTextLexer,
                                                    **options)

    def analyse_text(text):
        return GenshiLexer.analyse_text(text) - 0.05


class CssGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights CSS definitions in Genshi text templates.
    """

    name = 'CSS+Genshi Text'
    aliases = ['css+genshitext', 'css+genshi']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+genshi']

    def __init__(self, **options):
        super(CssGenshiLexer, self).__init__(CssLexer, GenshiTextLexer,
                                             **options)

    def analyse_text(text):
        return GenshiLexer.analyse_text(text) - 0.05


class RhtmlLexer(DelegatingLexer):
    """
    Subclass of the ERB lexer that highlights the unlexed data with the
    `HtmlLexer`.

    Nested JavaScript and CSS are highlighted too.
    """

    name = 'RHTML'
    aliases = ['rhtml', 'html+erb', 'html+ruby']
    filenames = ['*.rhtml']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+ruby']

    def __init__(self, **options):
        super(RhtmlLexer, self).__init__(HtmlLexer, ErbLexer, **options)

    def analyse_text(text):
        rv = ErbLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            # one more than the XmlErbLexer returns
            rv += 0.5
        return rv


class XmlErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights data outside preprocessor
    directives with the `XmlLexer`.
    """

    name = 'XML+Ruby'
    aliases = ['xml+erb', 'xml+ruby']
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+ruby']

    def __init__(self, **options):
        super(XmlErbLexer, self).__init__(XmlLexer, ErbLexer, **options)

    def analyse_text(text):
        rv = ErbLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights unlexed data with the `CssLexer`.
    """

    name = 'CSS+Ruby'
    aliases = ['css+erb', 'css+ruby']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+ruby']

    def __init__(self, **options):
        super(CssErbLexer, self).__init__(CssLexer, ErbLexer, **options)

    def analyse_text(text):
        return ErbLexer.analyse_text(text) - 0.05


class JavascriptErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Ruby'
    aliases = ['js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+ruby',
                 'text/x-javascript+ruby',
                 'text/javascript+ruby']

    def __init__(self, **options):
        super(JavascriptErbLexer, self).__init__(JavascriptLexer, ErbLexer,
                                                 **options)

    def analyse_text(text):
        return ErbLexer.analyse_text(text) - 0.05


class HtmlPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` that highlights unhandled data with the `HtmlLexer`.

    Nested JavaScript and CSS are highlighted too.
    """

    name = 'HTML+PHP'
    aliases = ['html+php']
    filenames = ['*.phtml']
    alias_filenames = ['*.php', '*.html', '*.htm', '*.xhtml',
                       '*.php[345]']
    mimetypes = ['application/x-php',
                 'application/x-httpd-php', 'application/x-httpd-php3',
                 'application/x-httpd-php4', 'application/x-httpd-php5']

    def __init__(self, **options):
        super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)

    def analyse_text(text):
        rv = PhpLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv


class XmlPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` that highlights unhandled data with the `XmlLexer`.
    """

    name = 'XML+PHP'
    aliases = ['xml+php']
    alias_filenames = ['*.xml', '*.php', '*.php[345]']
    mimetypes = ['application/xml+php']

    def __init__(self, **options):
        super(XmlPhpLexer, self).__init__(XmlLexer, PhpLexer, **options)

    def analyse_text(text):
        rv = PhpLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` which highlights unmatched data with the `CssLexer`.
    """

    name = 'CSS+PHP'
    aliases = ['css+php']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+php']

    def __init__(self, **options):
        super(CssPhpLexer, self).__init__(CssLexer, PhpLexer, **options)

    def analyse_text(text):
        return PhpLexer.analyse_text(text) - 0.05


class JavascriptPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` which highlights unmatched data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+PHP'
    aliases = ['js+php', 'javascript+php']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+php',
                 'text/x-javascript+php',
                 'text/javascript+php']

    def __init__(self, **options):
        super(JavascriptPhpLexer, self).__init__(JavascriptLexer, PhpLexer,
                                                 **options)

    def analyse_text(text):
        return PhpLexer.analyse_text(text)


class HtmlSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `HtmlLexer`.

    Nested JavaScript and CSS are highlighted too.
    """

    name = 'HTML+Smarty'
    aliases = ['html+smarty']
    alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl']
    mimetypes = ['text/html+smarty']

    def __init__(self, **options):
        super(HtmlSmartyLexer, self).__init__(HtmlLexer, SmartyLexer,
                                              **options)

    def analyse_text(text):
        rv = SmartyLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv


class XmlSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `XmlLexer`.
    """

    name = 'XML+Smarty'
    aliases = ['xml+smarty']
    alias_filenames = ['*.xml', '*.tpl']
    mimetypes = ['application/xml+smarty']

    def __init__(self, **options):
        super(XmlSmartyLexer, self).__init__(XmlLexer, SmartyLexer, **options)

    def analyse_text(text):
        rv = SmartyLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `CssLexer`.
    """

    name = 'CSS+Smarty'
    aliases = ['css+smarty']
    alias_filenames = ['*.css', '*.tpl']
    mimetypes = ['text/css+smarty']

    def __init__(self, **options):
        super(CssSmartyLexer, self).__init__(CssLexer, SmartyLexer, **options)

    def analyse_text(text):
        return SmartyLexer.analyse_text(text) - 0.05


class JavascriptSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Smarty'
    aliases = ['js+smarty', 'javascript+smarty']
    alias_filenames = ['*.js', '*.tpl']
    mimetypes = ['application/x-javascript+smarty',
                 'text/x-javascript+smarty',
                 'text/javascript+smarty']

    def __init__(self, **options):
        super(JavascriptSmartyLexer, self).__init__(JavascriptLexer,
                                                    SmartyLexer, **options)

    def analyse_text(text):
        return SmartyLexer.analyse_text(text) - 0.05


class HtmlDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `HtmlLexer`.

    Nested JavaScript and CSS are highlighted too.
    """

    name = 'HTML+Django/Jinja'
    aliases = ['html+django', 'html+jinja']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+django', 'text/html+jinja']

    def __init__(self, **options):
        super(HtmlDjangoLexer, self).__init__(HtmlLexer, DjangoLexer,
                                              **options)

    def analyse_text(text):
        rv = DjangoLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv


class XmlDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `XmlLexer`.
    """

    name = 'XML+Django/Jinja'
    aliases = ['xml+django', 'xml+jinja']
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+django', 'application/xml+jinja']

    def __init__(self, **options):
        super(XmlDjangoLexer, self).__init__(XmlLexer, DjangoLexer, **options)

    def analyse_text(text):
        rv = DjangoLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `CssLexer`.
    """

    name = 'CSS+Django/Jinja'
    aliases = ['css+django', 'css+jinja']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+django', 'text/css+jinja']

    def __init__(self, **options):
        super(CssDjangoLexer, self).__init__(CssLexer, DjangoLexer, **options)

    def analyse_text(text):
        return DjangoLexer.analyse_text(text) - 0.05


class JavascriptDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Django/Jinja'
    aliases = ['js+django', 'javascript+django',
               'js+jinja', 'javascript+jinja']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+django',
                 'application/x-javascript+jinja',
                 'text/x-javascript+django',
                 'text/x-javascript+jinja',
                 'text/javascript+django',
                 'text/javascript+jinja']

    def __init__(self, **options):
        super(JavascriptDjangoLexer, self).__init__(JavascriptLexer,
                                                    DjangoLexer, **options)

    def analyse_text(text):
        return DjangoLexer.analyse_text(text) - 0.05


class JspRootLexer(RegexLexer):
    """
    Base for the `JspLexer`. Yields `Token.Other` for areas outside of
    JSP tags.

    *New in Pygments 0.7.*
    """

    tokens = {
        'root': [
            (r'<%\S?', Keyword, 'sec'),
            # FIXME: I want to make these keywords but still parse attributes.
            (r'</?jsp:(forward|getProperty|include|plugin|setProperty|useBean).*?>',
             Keyword),
            (r'[^<]+', Other),
            (r'<', Other),
        ],
        'sec': [
            (r'%>', Keyword, '#pop'),
            # note: '\w\W' != '.' without DOTALL.
            (r'[\w\W]+?(?=%>|\Z)', using(JavaLexer)),
        ],
    }


class JspLexer(DelegatingLexer):
    """
    Lexer for Java Server Pages.

    *New in Pygments 0.7.*
    """
    name = 'Java Server Page'
    aliases = ['jsp']
    filenames = ['*.jsp']
    mimetypes = ['application/x-jsp']

    def __init__(self, **options):
        super(JspLexer, self).__init__(XmlLexer, JspRootLexer, **options)

    def analyse_text(text):
        rv = JavaLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        if '<%' in text and '%>' in text:
            rv += 0.1
        return rv

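# Example (illustrative sketch, not part of the original module):
# JspRootLexer lexes scriptlet bodies with JavaLexer and leaves
# everything else as Token.Other, which JspLexer then re-lexes as XML.
#
#     from pygments.lexers import JspLexer
#     tokens = list(JspLexer().get_tokens(
#         '<html><% int n = 1; %><p><%= n %></p></html>'))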

class EvoqueLexer(RegexLexer):
    """
    For files using the Evoque templating system.

    *New in Pygments 1.1.*
    """
    name = 'Evoque'
    aliases = ['evoque']
    filenames = ['*.evoque']
    mimetypes = ['application/x-evoque']

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'[^#$]+', Other),
            (r'#\[', Comment.Multiline, 'comment'),
            (r'\$\$', Other),
            # svn keywords
            (r'\$\w+:[^$\n]*\$', Comment.Multiline),
            # directives: begin, end
            (r'(\$)(begin|end)(\{(%)?)(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      String, Punctuation, None)),
            # directives: evoque, overlay
            # see doc for handling first name arg: /directives/evoque/
            #+ minor inconsistency: the "name" in e.g. $overlay{name=site_base}
            # should be using(PythonLexer), not passed out as String
            (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
             r'(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      String, using(PythonLexer), Punctuation, None)),
            # directives: if, for, prefer, test
            (r'(\$)(\w+)(\{(%)?)(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      using(PythonLexer), Punctuation, None)),
            # directive clauses (no {} expression)
            (r'(\$)(else|rof|fi)', bygroups(Punctuation, Name.Builtin)),
            # expressions
            (r'(\$\{(%)?)(.*?)((!)(.*?))?((?(2)%)\})',
             bygroups(Punctuation, None, using(PythonLexer),
                      Name.Builtin, None, None, Punctuation, None)),
            (r'#', Other),
        ],
        'comment': [
            (r'[^\]#]', Comment.Multiline),
            (r'#\[', Comment.Multiline, '#push'),
            (r'\]#', Comment.Multiline, '#pop'),
            (r'[\]#]', Comment.Multiline)
        ],
    }

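# Example (illustrative sketch, not part of the original module): every
# Evoque construct is '$'-prefixed; directives take an optional '%' quote
# modifier, which the conditional (?(4)%) groups above keep balanced.
#
#     from pygments.lexers import EvoqueLexer
#     tokens = list(EvoqueLexer().get_tokens(
#         '$if{user}Hello ${user}$fi'))
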
class EvoqueHtmlLexer(DelegatingLexer):
    """
    Subclass of the `EvoqueLexer` that highlights unlexed data with the
    `HtmlLexer`.

    *New in Pygments 1.1.*
    """
    name = 'HTML+Evoque'
    aliases = ['html+evoque']
    filenames = ['*.html']
    mimetypes = ['text/html+evoque']

    def __init__(self, **options):
        super(EvoqueHtmlLexer, self).__init__(HtmlLexer, EvoqueLexer,
                                              **options)


class EvoqueXmlLexer(DelegatingLexer):
    """
    Subclass of the `EvoqueLexer` that highlights unlexed data with the
    `XmlLexer`.

    *New in Pygments 1.1.*
    """
    name = 'XML+Evoque'
    aliases = ['xml+evoque']
    filenames = ['*.xml']
    mimetypes = ['application/xml+evoque']

    def __init__(self, **options):
        super(EvoqueXmlLexer, self).__init__(XmlLexer, EvoqueLexer,
                                             **options)