"Fossies" - the Fresh Open Source Software Archive

Member "eric6-20.9/eric/eric6/ThirdParty/Pygments/pygments/lexers/templates.py" (2 May 2020, 73612 Bytes) of package /linux/misc/eric6-20.9.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file.

# -*- coding: utf-8 -*-
"""
    pygments.lexers.templates
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Lexers for various template engines' markup.

    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re

from pygments.lexers.html import HtmlLexer, XmlLexer
from pygments.lexers.javascript import JavascriptLexer, LassoLexer
from pygments.lexers.css import CssLexer
from pygments.lexers.php import PhpLexer
from pygments.lexers.python import PythonLexer
from pygments.lexers.perl import PerlLexer
from pygments.lexers.jvm import JavaLexer, TeaLangLexer
from pygments.lexers.data import YamlLexer
from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
    include, using, this, default, combined
from pygments.token import Error, Punctuation, Whitespace, \
    Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
from pygments.util import html_doctype_matches, looks_like_xml

__all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
           'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer',
           'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer',
           'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer',
           'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer',
           'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer',
           'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer',
           'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer',
           'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer',
           'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MasonLexer', 'MakoLexer',
           'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer',
           'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
           'CheetahXmlLexer', 'CheetahJavascriptLexer', 'EvoqueLexer',
           'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer',
           'ColdfusionHtmlLexer', 'ColdfusionCFCLexer', 'VelocityLexer',
           'VelocityHtmlLexer', 'VelocityXmlLexer', 'SspLexer',
           'TeaTemplateLexer', 'LassoHtmlLexer', 'LassoXmlLexer',
           'LassoCssLexer', 'LassoJavascriptLexer', 'HandlebarsLexer',
           'HandlebarsHtmlLexer', 'YamlJinjaLexer', 'LiquidLexer',
           'TwigLexer', 'TwigHtmlLexer', 'Angular2Lexer', 'Angular2HtmlLexer']


class ErbLexer(Lexer):
    """
    Generic `ERB <http://ruby-doc.org/core/classes/ERB.html>`_ (Ruby Templating)
    lexer.

    Just highlights Ruby code between the preprocessor directives; other data
    is left untouched by the lexer.

    All options are also forwarded to the `RubyLexer`.
    """

    name = 'ERB'
    aliases = ['erb']
    mimetypes = ['application/x-ruby-templating']

    _block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)

    def __init__(self, **options):
        from pygments.lexers.ruby import RubyLexer
        self.ruby_lexer = RubyLexer(**options)
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """
        Since ERB doesn't allow "<%" and other tags inside of Ruby
        blocks, we can use a split-based approach here; it only fails
        on input that is invalid ERB for the same reason.
        """
        tokens = self._block_re.split(text)
        tokens.reverse()
        state = idx = 0
        try:
            while True:
                # text
                if state == 0:
                    val = tokens.pop()
                    yield idx, Other, val
                    idx += len(val)
                    state = 1
                # block starts
                elif state == 1:
                    tag = tokens.pop()
                    # literals
                    if tag in ('<%%', '%%>'):
                        yield idx, Other, tag
                        idx += 3
                        state = 0
                    # comment
                    elif tag == '<%#':
                        yield idx, Comment.Preproc, tag
                        val = tokens.pop()
                        yield idx + 3, Comment, val
                        idx += 3 + len(val)
                        state = 2
                    # blocks or output
                    elif tag in ('<%', '<%=', '<%-'):
                        yield idx, Comment.Preproc, tag
                        idx += len(tag)
                        data = tokens.pop()
                        r_idx = 0
                        for r_idx, r_token, r_value in \
                                self.ruby_lexer.get_tokens_unprocessed(data):
                            yield r_idx + idx, r_token, r_value
                        idx += len(data)
                        state = 2
                    elif tag in ('%>', '-%>'):
                        yield idx, Error, tag
                        idx += len(tag)
                        state = 0
                    # % raw ruby statements
                    else:
                        yield idx, Comment.Preproc, tag[0]
                        r_idx = 0
                        for r_idx, r_token, r_value in \
                                self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
                            yield idx + 1 + r_idx, r_token, r_value
                        idx += len(tag)
                        state = 0
                # block ends
                elif state == 2:
                    tag = tokens.pop()
                    if tag not in ('%>', '-%>'):
                        yield idx, Other, tag
                    else:
                        yield idx, Comment.Preproc, tag
                    idx += len(tag)
                    state = 0
        except IndexError:
            return

    def analyse_text(text):
        if '<%' in text and '%>' in text:
            return 0.4

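# A minimal usage sketch (hypothetical input; only this module's public API
# is assumed): Ruby code between '<%=' and '%>' is handed to RubyLexer,
# while the surrounding text is yielded as Token.Other.
#
#     from pygments.lexers.templates import ErbLexer
#     for index, token, value in ErbLexer().get_tokens_unprocessed(
#             '<p><%= @user.name %></p>'):
#         print(index, token, value)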

class SmartyLexer(RegexLexer):
    """
    Generic `Smarty <http://smarty.php.net/>`_ template lexer.

    Just highlights Smarty code between the preprocessor directives; other
    data is left untouched by the lexer.
    """

    name = 'Smarty'
    aliases = ['smarty']
    filenames = ['*.tpl']
    mimetypes = ['application/x-smarty']

    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            (r'[^{]+', Other),
            (r'(\{)(\*.*?\*)(\})',
             bygroups(Comment.Preproc, Comment, Comment.Preproc)),
            (r'(\{php\})(.*?)(\{/php\})',
             bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
                      Comment.Preproc)),
            (r'(\{)(/?[a-zA-Z_]\w*)(\s*)',
             bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
            (r'\{', Comment.Preproc, 'smarty')
        ],
        'smarty': [
            (r'\s+', Text),
            (r'\{', Comment.Preproc, '#push'),
            (r'\}', Comment.Preproc, '#pop'),
            (r'#[a-zA-Z_]\w*#', Name.Variable),
            (r'\$[a-zA-Z_]\w*(\.\w+)*', Name.Variable),
            (r'[~!%^&*()+=|\[\]:;,.<>/?@-]', Operator),
            (r'(true|false|null)\b', Keyword.Constant),
            (r"[0-9](\.[0-9]*)?([eE][+-]?[0-9]+)?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[a-zA-Z_]\w*', Name.Attribute)
        ]
    }

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\{if\s+.*?\}.*?\{/if\}', text):
            rv += 0.15
        if re.search(r'\{include\s+file=.*?\}', text):
            rv += 0.15
        if re.search(r'\{foreach\s+.*?\}.*?\{/foreach\}', text):
            rv += 0.15
        if re.search(r'\{\$.*?\}', text):
            rv += 0.01
        return rv

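# Scoring sketch for the heuristics above (hypothetical fragment): an
# {if}...{/if} block and an {include file=...} each add 0.15, and a {$var}
# reference adds 0.01, so this text scores 0.15 + 0.15 + 0.01.
#
#     from pygments.lexers.templates import SmartyLexer
#     text = '{if $foo}{$foo}{/if} {include file="x.tpl"}'
#     print(SmartyLexer.analyse_text(text))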

class VelocityLexer(RegexLexer):
    """
    Generic `Velocity <http://velocity.apache.org/>`_ template lexer.

    Just highlights Velocity directives and variable references; other
    data is left untouched by the lexer.
    """

    name = 'Velocity'
    aliases = ['velocity']
    filenames = ['*.vm', '*.fhtml']

    flags = re.MULTILINE | re.DOTALL

    identifier = r'[a-zA-Z_]\w*'

    tokens = {
        'root': [
            (r'[^{#$]+', Other),
            (r'(#)(\*.*?\*)(#)',
             bygroups(Comment.Preproc, Comment, Comment.Preproc)),
            (r'(##)(.*?$)',
             bygroups(Comment.Preproc, Comment)),
            (r'(#\{?)(' + identifier + r')(\}?)(\s?\()',
             bygroups(Comment.Preproc, Name.Function, Comment.Preproc, Punctuation),
             'directiveparams'),
            (r'(#\{?)(' + identifier + r')(\}|\b)',
             bygroups(Comment.Preproc, Name.Function, Comment.Preproc)),
            (r'\$!?\{?', Punctuation, 'variable')
        ],
        'variable': [
            (identifier, Name.Variable),
            (r'\(', Punctuation, 'funcparams'),
            (r'(\.)(' + identifier + r')',
             bygroups(Punctuation, Name.Variable), '#push'),
            (r'\}', Punctuation, '#pop'),
            default('#pop')
        ],
        'directiveparams': [
            (r'(&&|\|\||==?|!=?|[-<>+*%&|^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b',
             Operator),
            (r'\[', Operator, 'rangeoperator'),
            (r'\b' + identifier + r'\b', Name.Function),
            include('funcparams')
        ],
        'rangeoperator': [
            (r'\.\.', Operator),
            include('funcparams'),
            (r'\]', Operator, '#pop')
        ],
        'funcparams': [
            (r'\$!?\{?', Punctuation, 'variable'),
            (r'\s+', Text),
            (r'[,:]', Punctuation),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r"0[xX][0-9a-fA-F]+[Ll]?", Number),
            (r"\b[0-9]+\b", Number),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'\(', Punctuation, '#push'),
            (r'\)', Punctuation, '#pop'),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
            (r'\[', Punctuation, '#push'),
            (r'\]', Punctuation, '#pop'),
        ]
    }

    def analyse_text(text):
        rv = 0.0
        if re.search(r'#\{?macro\}?\(.*?\).*?#\{?end\}?', text):
            rv += 0.25
        if re.search(r'#\{?if\}?\(.+?\).*?#\{?end\}?', text):
            rv += 0.15
        if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text):
            rv += 0.15
        if re.search(r'\$!?\{?[a-zA-Z_]\w*(\([^)]*\))?'
                     r'(\.\w+(\([^)]*\))?)*\}?', text):
            rv += 0.01
        return rv

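# Rendering sketch (hypothetical fragment): the '#if(...)' directive is
# tokenized via the 'directiveparams' state and '$user.isAdmin()' via the
# 'variable' and 'funcparams' states defined above.
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     from pygments.lexers.templates import VelocityLexer
#     src = '#if($user.isAdmin())admin#end'
#     print(highlight(src, VelocityLexer(), TerminalFormatter()))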

class VelocityHtmlLexer(DelegatingLexer):
    """
    Subclass of the `VelocityLexer` that highlights unlexed data
    with the `HtmlLexer`.
    """

    name = 'HTML+Velocity'
    aliases = ['html+velocity']
    alias_filenames = ['*.html', '*.fhtml']
    mimetypes = ['text/html+velocity']

    def __init__(self, **options):
        super(VelocityHtmlLexer, self).__init__(HtmlLexer, VelocityLexer,
                                                **options)


class VelocityXmlLexer(DelegatingLexer):
    """
    Subclass of the `VelocityLexer` that highlights unlexed data
    with the `XmlLexer`.
    """

    name = 'XML+Velocity'
    aliases = ['xml+velocity']
    alias_filenames = ['*.xml', '*.vm']
    mimetypes = ['application/xml+velocity']

    def __init__(self, **options):
        super(VelocityXmlLexer, self).__init__(XmlLexer, VelocityLexer,
                                               **options)

    def analyse_text(text):
        rv = VelocityLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class DjangoLexer(RegexLexer):
    """
    Generic `django <http://www.djangoproject.com/documentation/templates/>`_
    and `jinja <https://jinja.pocoo.org/jinja/>`_ template lexer.

    It just highlights django/jinja code between the preprocessor directives;
    other data is left untouched by the lexer.
    """

    name = 'Django/Jinja'
    aliases = ['django', 'jinja']
    mimetypes = ['application/x-django-templating', 'application/x-jinja']

    flags = re.M | re.S

    tokens = {
        'root': [
            (r'[^{]+', Other),
            (r'\{\{', Comment.Preproc, 'var'),
            # jinja/django comments
            (r'\{[*#].*?[*#]\}', Comment),
            # django comments
            (r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Comment, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # raw jinja blocks
            (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Text, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # filter blocks
            (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_]\w*)',
             bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
             'block'),
            (r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
             bygroups(Comment.Preproc, Text, Keyword), 'block'),
            (r'\{', Other)
        ],
        'varnames': [
            (r'(\|)(\s*)([a-zA-Z_]\w*)',
             bygroups(Operator, Text, Name.Function)),
            (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_]\w*)',
             bygroups(Keyword, Text, Keyword, Text, Name.Function)),
            (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
            (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
             r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
             Keyword),
            (r'(loop|block|super|forloop)\b', Name.Builtin),
            (r'[a-zA-Z_][\w-]*', Name.Variable),
            (r'\.\w+', Name.Variable),
            (r':?"(\\\\|\\"|[^"])*"', String.Double),
            (r":?'(\\\\|\\'|[^'])*'", String.Single),
            (r'([{}()\[\]+\-*/%,:~]|[><=]=?|!=)', Operator),
            (r"[0-9](\.[0-9]*)?([eE][+-]?[0-9]+)?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
        ],
        'var': [
            (r'\s+', Text),
            (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames')
        ],
        'block': [
            (r'\s+', Text),
            (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames'),
            (r'.', Punctuation)
        ]
    }

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\{%\s*(block|extends)', text) is not None:
            rv += 0.4
        if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
            rv += 0.1
        if re.search(r'\{\{.*?\}\}', text) is not None:
            rv += 0.1
        return rv

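# Token-stream sketch (hypothetical template): '{{ ... }}' switches into the
# 'var' state and '{% ... %}' into the 'block' state defined above, with
# '|title' lexed as a filter via the shared 'varnames' rules.
#
#     from pygments.lexers.templates import DjangoLexer
#     tpl = '{% if user %}Hello {{ user.name|title }}!{% endif %}'
#     for _, token, value in DjangoLexer().get_tokens_unprocessed(tpl):
#         print(token, repr(value))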

class MyghtyLexer(RegexLexer):
    """
    Generic `myghty templates`_ lexer. Code that isn't Myghty
    markup is yielded as `Token.Other`.

    .. versionadded:: 0.6

    .. _myghty templates: http://www.myghty.org/
    """

    name = 'Myghty'
    aliases = ['myghty']
    filenames = ['*.myt', 'autodelegate']
    mimetypes = ['application/x-myghty']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(?s)(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Text, Name.Function, Name.Tag,
                      using(this), Name.Tag)),
            (r'(?s)(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Name.Function, Name.Tag,
                      using(PythonLexer), Name.Tag)),
            (r'(<&[^|])(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'(?s)(<&\|)(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'</&>', Name.Tag),
            (r'(?s)(<%!?)(.*?)(%>)',
             bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
            (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
            (r'(?<=^)(%)([^\n]*)(\n|\Z)',
             bygroups(Name.Tag, using(PythonLexer), Other)),
            (r"""(?sx)
                 (.+?)               # anything, followed by:
                 (?:
                  (?<=\n)(?=[%#]) |  # an eval or comment line
                  (?=</?[%&]) |      # a substitution or block or
                                     # call start or end
                                     # - don't consume
                  (\\\n) |           # an escaped newline
                  \Z                 # end of string
                 )""", bygroups(Other, Operator)),
        ]
    }


class MyghtyHtmlLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `HtmlLexer`.

    .. versionadded:: 0.6
    """

    name = 'HTML+Myghty'
    aliases = ['html+myghty']
    mimetypes = ['text/html+myghty']

    def __init__(self, **options):
        super(MyghtyHtmlLexer, self).__init__(HtmlLexer, MyghtyLexer,
                                              **options)


class MyghtyXmlLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `XmlLexer`.

    .. versionadded:: 0.6
    """

    name = 'XML+Myghty'
    aliases = ['xml+myghty']
    mimetypes = ['application/xml+myghty']

    def __init__(self, **options):
        super(MyghtyXmlLexer, self).__init__(XmlLexer, MyghtyLexer,
                                             **options)


class MyghtyJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `JavascriptLexer`.

    .. versionadded:: 0.6
    """

    name = 'JavaScript+Myghty'
    aliases = ['js+myghty', 'javascript+myghty']
    mimetypes = ['application/x-javascript+myghty',
                 'text/x-javascript+myghty',
                 'text/javascript+myghty']

    def __init__(self, **options):
        super(MyghtyJavascriptLexer, self).__init__(JavascriptLexer,
                                                    MyghtyLexer, **options)


class MyghtyCssLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `CssLexer`.

    .. versionadded:: 0.6
    """

    name = 'CSS+Myghty'
    aliases = ['css+myghty']
    mimetypes = ['text/css+myghty']

    def __init__(self, **options):
        super(MyghtyCssLexer, self).__init__(CssLexer, MyghtyLexer,
                                             **options)


class MasonLexer(RegexLexer):
    """
    Generic `mason templates`_ lexer. Based on the Myghty lexer. Code that
    isn't Mason markup is HTML.

    .. _mason templates: http://www.masonhq.com/

    .. versionadded:: 1.4
    """
    name = 'Mason'
    aliases = ['mason']
    filenames = ['*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler']
    mimetypes = ['application/x-mason']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(?s)(<%doc>)(.*?)(</%doc>)',
             bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
            (r'(?s)(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Text, Name.Function, Name.Tag,
                      using(this), Name.Tag)),
            (r'(?s)(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Name.Function, Name.Tag,
                      using(PerlLexer), Name.Tag)),
            (r'(?s)(<&[^|])(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
            (r'(?s)(<&\|)(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
            (r'</&>', Name.Tag),
            (r'(?s)(<%!?)(.*?)(%>)',
             bygroups(Name.Tag, using(PerlLexer), Name.Tag)),
            (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
            (r'(?<=^)(%)([^\n]*)(\n|\Z)',
             bygroups(Name.Tag, using(PerlLexer), Other)),
            (r"""(?sx)
                 (.+?)               # anything, followed by:
                 (?:
                  (?<=\n)(?=[%#]) |  # an eval or comment line
                  (?=</?[%&]) |      # a substitution or block or
                                     # call start or end
                                     # - don't consume
                  (\\\n) |           # an escaped newline
                  \Z                 # end of string
                 )""", bygroups(using(HtmlLexer), Operator)),
        ]
    }

    def analyse_text(text):
        result = 0.0
        if re.search(r'</%(class|doc|init)>', text) is not None:
            result = 1.0
        elif re.search(r'<&.+&>', text, re.DOTALL) is not None:
            result = 0.11
        return result


class MakoLexer(RegexLexer):
    """
    Generic `mako templates`_ lexer. Code that isn't Mako
    markup is yielded as `Token.Other`.

    .. versionadded:: 0.7

    .. _mako templates: http://www.makotemplates.org/
    """

    name = 'Mako'
    aliases = ['mako']
    filenames = ['*.mao']
    mimetypes = ['application/x-mako']

    tokens = {
        'root': [
            (r'(\s*)(%)(\s*end(?:\w+))(\n|\Z)',
             bygroups(Text, Comment.Preproc, Keyword, Other)),
            (r'(\s*)(%)([^\n]*)(\n|\Z)',
             bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
            (r'(\s*)(##[^\n]*)(\n|\Z)',
             bygroups(Text, Comment.Preproc, Other)),
            (r'(?s)<%doc>.*?</%doc>', Comment.Preproc),
            (r'(<%)([\w.:]+)',
             bygroups(Comment.Preproc, Name.Builtin), 'tag'),
            (r'(</%)([\w.:]+)(>)',
             bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
            (r'<%(?=([\w.:]+))', Comment.Preproc, 'ondeftags'),
            (r'(?s)(<%(?:!?))(.*?)(%>)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'(\$\{)(.*?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'''(?sx)
                (.+?)                # anything, followed by:
                (?:
                 (?<=\n)(?=%|\#\#) | # an eval or comment line
                 (?=\#\*) |          # multiline comment
                 (?=</?%) |          # a python block
                                     # call start or end
                 (?=\$\{) |          # a substitution
                 (?<=\n)(?=\s*%) |
                                     # - don't consume
                 (\\\n) |            # an escaped newline
                 \Z                  # end of string
                )
            ''', bygroups(Other, Operator)),
            (r'\s+', Text),
        ],
        'ondeftags': [
            (r'<%', Comment.Preproc),
            (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
            include('tag'),
        ],
        'tag': [
            (r'((?:\w+)\s*=)(\s*)(".*?")',
             bygroups(Name.Attribute, Text, String)),
            (r'/?\s*>', Comment.Preproc, '#pop'),
            (r'\s+', Text),
        ],
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }


class MakoHtmlLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `HtmlLexer`.

    .. versionadded:: 0.7
    """

    name = 'HTML+Mako'
    aliases = ['html+mako']
    mimetypes = ['text/html+mako']

    def __init__(self, **options):
        super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
                                            **options)


class MakoXmlLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `XmlLexer`.

    .. versionadded:: 0.7
    """

    name = 'XML+Mako'
    aliases = ['xml+mako']
    mimetypes = ['application/xml+mako']

    def __init__(self, **options):
        super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
                                           **options)


class MakoJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `JavascriptLexer`.

    .. versionadded:: 0.7
    """

    name = 'JavaScript+Mako'
    aliases = ['js+mako', 'javascript+mako']
    mimetypes = ['application/x-javascript+mako',
                 'text/x-javascript+mako',
                 'text/javascript+mako']

    def __init__(self, **options):
        super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
                                                  MakoLexer, **options)


class MakoCssLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `CssLexer`.

    .. versionadded:: 0.7
    """

    name = 'CSS+Mako'
    aliases = ['css+mako']
    mimetypes = ['text/css+mako']

    def __init__(self, **options):
        super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
                                           **options)


# Genshi and Cheetah lexers courtesy of Matt Good.

class CheetahPythonLexer(Lexer):
    """
    Lexer for handling Cheetah's special $ tokens in Python syntax.
    """

    def get_tokens_unprocessed(self, text):
        pylexer = PythonLexer(**self.options)
        for pos, type_, value in pylexer.get_tokens_unprocessed(text):
            if type_ == Token.Error and value == '$':
                type_ = Comment.Preproc
            yield pos, type_, value

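# Sketch of the remapping above (hypothetical input): a bare '$', which
# PythonLexer reports as Token.Error, comes back as Comment.Preproc.
#
#     lx = CheetahPythonLexer()
#     print(list(lx.get_tokens_unprocessed('$foo + 1')))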

class CheetahLexer(RegexLexer):
    """
    Generic `cheetah templates`_ lexer. Code that isn't Cheetah
    markup is yielded as `Token.Other`. This also works for
    `spitfire templates`_, which use the same syntax.

    .. _cheetah templates: http://www.cheetahtemplate.org/
    .. _spitfire templates: http://code.google.com/p/spitfire/
    """

    name = 'Cheetah'
    aliases = ['cheetah', 'spitfire']
    filenames = ['*.tmpl', '*.spt']
    mimetypes = ['application/x-cheetah', 'application/x-spitfire']

    tokens = {
        'root': [
            (r'(##[^\n]*)$',
             bygroups(Comment)),
            (r'#[*](.|\n)*?[*]#', Comment),
            (r'#end[^#\n]*(?:#|$)', Comment.Preproc),
            (r'#slurp$', Comment.Preproc),
            (r'(#[a-zA-Z]+)([^#\n]*)(#|$)',
             bygroups(Comment.Preproc, using(CheetahPythonLexer),
                      Comment.Preproc)),
            # TODO support other Python syntax like $foo['bar']
            (r'(\$)([a-zA-Z_][\w.]*\w)',
             bygroups(Comment.Preproc, using(CheetahPythonLexer))),
            (r'(?s)(\$\{!?)(.*?)(\})',
             bygroups(Comment.Preproc, using(CheetahPythonLexer),
                      Comment.Preproc)),
            (r'''(?sx)
                (.+?)               # anything, followed by:
                (?:
                 (?=\#[#a-zA-Z]*) | # an eval comment
                 (?=\$[a-zA-Z_{]) | # a substitution
                 \Z                 # end of string
                )
            ''', Other),
            (r'\s+', Text),
        ],
    }


class CheetahHtmlLexer(DelegatingLexer):
    """
    Subclass of the `CheetahLexer` that highlights unlexed data
    with the `HtmlLexer`.
    """

    name = 'HTML+Cheetah'
    aliases = ['html+cheetah', 'html+spitfire', 'htmlcheetah']
    mimetypes = ['text/html+cheetah', 'text/html+spitfire']

    def __init__(self, **options):
        super(CheetahHtmlLexer, self).__init__(HtmlLexer, CheetahLexer,
                                               **options)


class CheetahXmlLexer(DelegatingLexer):
    """
    Subclass of the `CheetahLexer` that highlights unlexed data
    with the `XmlLexer`.
    """

    name = 'XML+Cheetah'
    aliases = ['xml+cheetah', 'xml+spitfire']
    mimetypes = ['application/xml+cheetah', 'application/xml+spitfire']

    def __init__(self, **options):
        super(CheetahXmlLexer, self).__init__(XmlLexer, CheetahLexer,
                                              **options)


class CheetahJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `CheetahLexer` that highlights unlexed data
    with the `JavascriptLexer`.
    """

    name = 'JavaScript+Cheetah'
    aliases = ['js+cheetah', 'javascript+cheetah',
               'js+spitfire', 'javascript+spitfire']
    mimetypes = ['application/x-javascript+cheetah',
                 'text/x-javascript+cheetah',
                 'text/javascript+cheetah',
                 'application/x-javascript+spitfire',
                 'text/x-javascript+spitfire',
                 'text/javascript+spitfire']

    def __init__(self, **options):
        super(CheetahJavascriptLexer, self).__init__(JavascriptLexer,
                                                     CheetahLexer, **options)


class GenshiTextLexer(RegexLexer):
    """
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ text
    templates.
    """

    name = 'Genshi Text'
    aliases = ['genshitext']
    mimetypes = ['application/x-genshi-text', 'text/x-genshi']

    tokens = {
        'root': [
            (r'[^#$\s]+', Other),
            (r'^(\s*)(##.*)$', bygroups(Text, Comment)),
            (r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'),
            include('variable'),
            (r'[#$\s]', Other),
        ],
        'directive': [
            (r'\n', Text, '#pop'),
            (r'(?:def|for|if)\s+.*', using(PythonLexer), '#pop'),
            (r'(choose|when|with)([^\S\n]+)(.*)',
             bygroups(Keyword, Text, using(PythonLexer)), '#pop'),
            (r'(choose|otherwise)\b', Keyword, '#pop'),
            (r'(end\w*)([^\S\n]*)(.*)', bygroups(Keyword, Text, Comment), '#pop'),
        ],
        'variable': [
            (r'(?<!\$)(\$\{)(.+?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'(?<!\$)(\$)([a-zA-Z_][\w.]*)',
             Name.Variable),
        ]
    }


class GenshiMarkupLexer(RegexLexer):
    """
    Base lexer for Genshi markup, used by `HtmlGenshiLexer` and
    `GenshiLexer`.
    """

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'[^<$]+', Other),
            (r'(<\?python)(.*?)(\?>)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # yield style and script blocks as Other
            (r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
            (r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
            (r'<\s*[a-zA-Z0-9:.]+', Name.Tag, 'tag'),
            include('variable'),
            (r'[<$]', Other),
        ],
        'pytag': [
            (r'\s+', Text),
            (r'[\w:-]+\s*=', Name.Attribute, 'pyattr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'pyattr': [
            ('(")(.*?)(")', bygroups(String, using(PythonLexer), String), '#pop'),
            ("(')(.*?)(')", bygroups(String, using(PythonLexer), String), '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
        'tag': [
            (r'\s+', Text),
            (r'py:[\w-]+\s*=', Name.Attribute, 'pyattr'),
            (r'[\w:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('"', String, 'attr-dstring'),
            ("'", String, 'attr-sstring'),
            (r'[^\s>]*', String, '#pop')
        ],
        'attr-dstring': [
            ('"', String, '#pop'),
            include('strings'),
            ("'", String)
        ],
        'attr-sstring': [
            ("'", String, '#pop'),
            include('strings'),
            ("'", String)
        ],
        'strings': [
            ('[^"\'$]+', String),
            include('variable')
        ],
        'variable': [
            (r'(?<!\$)(\$\{)(.+?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'(?<!\$)(\$)([a-zA-Z_][\w\.]*)',
             Name.Variable),
        ]
    }


class HtmlGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ HTML templates.
    """

    name = 'HTML+Genshi'
    aliases = ['html+genshi', 'html+kid']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+genshi']

    def __init__(self, **options):
        super(HtmlGenshiLexer, self).__init__(HtmlLexer, GenshiMarkupLexer,
                                              **options)

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
            rv += 0.2
        if re.search(r'py:(.*?)=["\']', text) is not None:
            rv += 0.2
        return rv + HtmlLexer.analyse_text(text) - 0.01


class GenshiLexer(DelegatingLexer):
    """
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ XML templates.
    """

    name = 'Genshi'
    aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid']
    filenames = ['*.kid']
    alias_filenames = ['*.xml']
    mimetypes = ['application/x-genshi', 'application/x-kid']

    def __init__(self, **options):
        super(GenshiLexer, self).__init__(XmlLexer, GenshiMarkupLexer,
                                          **options)

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
            rv += 0.2
        if re.search(r'py:(.*?)=["\']', text) is not None:
            rv += 0.2
        return rv + XmlLexer.analyse_text(text) - 0.01


class JavascriptGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights javascript code in genshi text templates.
    """

    name = 'JavaScript+Genshi Text'
    aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext',
               'javascript+genshi']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+genshi',
                 'text/x-javascript+genshi',
                 'text/javascript+genshi']

    def __init__(self, **options):
        super(JavascriptGenshiLexer, self).__init__(JavascriptLexer,
                                                    GenshiTextLexer,
                                                    **options)

    def analyse_text(text):
        return GenshiLexer.analyse_text(text) - 0.05


class CssGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights CSS definitions in genshi text templates.
    """

    name = 'CSS+Genshi Text'
    aliases = ['css+genshitext', 'css+genshi']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+genshi']

    def __init__(self, **options):
        super(CssGenshiLexer, self).__init__(CssLexer, GenshiTextLexer,
                                             **options)

    def analyse_text(text):
        return GenshiLexer.analyse_text(text) - 0.05


class RhtmlLexer(DelegatingLexer):
    """
    Subclass of the ERB lexer that highlights the unlexed data with the
    html lexer.

    Nested Javascript and CSS are highlighted too.
    """

    name = 'RHTML'
    aliases = ['rhtml', 'html+erb', 'html+ruby']
    filenames = ['*.rhtml']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+ruby']

    def __init__(self, **options):
        super(RhtmlLexer, self).__init__(HtmlLexer, ErbLexer, **options)

    def analyse_text(text):
        rv = ErbLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            # one more than the XmlErbLexer returns
            rv += 0.5
        return rv

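# Composition sketch for these DelegatingLexer subclasses: the template
# lexer (ErbLexer for RhtmlLexer above) runs first, and everything it yields
# as Token.Other is then re-lexed with the root lexer (HtmlLexer here).
# Hypothetical fragment:
#
#     from pygments.lexers.templates import RhtmlLexer
#     page = '<ul><% @items.each do |i| %><li><%= i %></li><% end %></ul>'
#     for _, token, value in RhtmlLexer().get_tokens_unprocessed(page):
#         print(token, repr(value))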

class XmlErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights data outside preprocessor
    directives with the `XmlLexer`.
    """

    name = 'XML+Ruby'
    aliases = ['xml+erb', 'xml+ruby']
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+ruby']

    def __init__(self, **options):
        super(XmlErbLexer, self).__init__(XmlLexer, ErbLexer, **options)

    def analyse_text(text):
        rv = ErbLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights unlexed data with the `CssLexer`.
    """

    name = 'CSS+Ruby'
    aliases = ['css+erb', 'css+ruby']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+ruby']

    def __init__(self, **options):
        super(CssErbLexer, self).__init__(CssLexer, ErbLexer, **options)

    def analyse_text(text):
        return ErbLexer.analyse_text(text) - 0.05


class JavascriptErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Ruby'
    aliases = ['js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+ruby',
                 'text/x-javascript+ruby',
                 'text/javascript+ruby']

    def __init__(self, **options):
        super(JavascriptErbLexer, self).__init__(JavascriptLexer, ErbLexer,
                                                 **options)

    def analyse_text(text):
        return ErbLexer.analyse_text(text) - 0.05


class HtmlPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` that highlights unhandled data with the `HtmlLexer`.

    Nested Javascript and CSS are highlighted too.
    """

    name = 'HTML+PHP'
    aliases = ['html+php']
    filenames = ['*.phtml']
    alias_filenames = ['*.php', '*.html', '*.htm', '*.xhtml',
                       '*.php[345]']
    mimetypes = ['application/x-php',
                 'application/x-httpd-php', 'application/x-httpd-php3',
                 'application/x-httpd-php4', 'application/x-httpd-php5']

    def __init__(self, **options):
        super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)

    def analyse_text(text):
        rv = PhpLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv


class XmlPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` that highlights unhandled data with the `XmlLexer`.
    """

    name = 'XML+PHP'
    aliases = ['xml+php']
    alias_filenames = ['*.xml', '*.php', '*.php[345]']
    mimetypes = ['application/xml+php']

    def __init__(self, **options):
        super(XmlPhpLexer, self).__init__(XmlLexer, PhpLexer, **options)

    def analyse_text(text):
        rv = PhpLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` which highlights unmatched data with the `CssLexer`.
    """

    name = 'CSS+PHP'
    aliases = ['css+php']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+php']

    def __init__(self, **options):
        super(CssPhpLexer, self).__init__(CssLexer, PhpLexer, **options)

    def analyse_text(text):
        return PhpLexer.analyse_text(text) - 0.05


class JavascriptPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` which highlights unmatched data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+PHP'
    aliases = ['js+php', 'javascript+php']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+php',
                 'text/x-javascript+php',
                 'text/javascript+php']

    def __init__(self, **options):
        super(JavascriptPhpLexer, self).__init__(JavascriptLexer, PhpLexer,
                                                 **options)

    def analyse_text(text):
        return PhpLexer.analyse_text(text)


class HtmlSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `HtmlLexer`.

    Nested Javascript and CSS are highlighted too.
    """

    name = 'HTML+Smarty'
    aliases = ['html+smarty']
    alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl']
    mimetypes = ['text/html+smarty']

    def __init__(self, **options):
        super(HtmlSmartyLexer, self).__init__(HtmlLexer, SmartyLexer, **options)

    def analyse_text(text):
        rv = SmartyLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv


class XmlSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `XmlLexer`.
    """

    name = 'XML+Smarty'
    aliases = ['xml+smarty']
    alias_filenames = ['*.xml', '*.tpl']
    mimetypes = ['application/xml+smarty']

    def __init__(self, **options):
        super(XmlSmartyLexer, self).__init__(XmlLexer, SmartyLexer, **options)

    def analyse_text(text):
        rv = SmartyLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `CssLexer`.
    """

    name = 'CSS+Smarty'
    aliases = ['css+smarty']
    alias_filenames = ['*.css', '*.tpl']
    mimetypes = ['text/css+smarty']

    def __init__(self, **options):
        super(CssSmartyLexer, self).__init__(CssLexer, SmartyLexer, **options)

    def analyse_text(text):
        return SmartyLexer.analyse_text(text) - 0.05


class JavascriptSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Smarty'
    aliases = ['js+smarty', 'javascript+smarty']
    alias_filenames = ['*.js', '*.tpl']
    mimetypes = ['application/x-javascript+smarty',
                 'text/x-javascript+smarty',
                 'text/javascript+smarty']

    def __init__(self, **options):
        super(JavascriptSmartyLexer, self).__init__(JavascriptLexer, SmartyLexer,
                                                    **options)

    def analyse_text(text):
        return SmartyLexer.analyse_text(text) - 0.05


class HtmlDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `HtmlLexer`.

    Nested Javascript and CSS are highlighted too.
    """

    name = 'HTML+Django/Jinja'
    aliases = ['html+django', 'html+jinja', 'htmldjango']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+django', 'text/html+jinja']

    def __init__(self, **options):
        super(HtmlDjangoLexer, self).__init__(HtmlLexer, DjangoLexer, **options)

    def analyse_text(text):
        rv = DjangoLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv


class XmlDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `XmlLexer`.
    """

    name = 'XML+Django/Jinja'
    aliases = ['xml+django', 'xml+jinja']
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+django', 'application/xml+jinja']

    def __init__(self, **options):
        super(XmlDjangoLexer, self).__init__(XmlLexer, DjangoLexer, **options)

    def analyse_text(text):
        rv = DjangoLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `CssLexer`.
    """

    name = 'CSS+Django/Jinja'
    aliases = ['css+django', 'css+jinja']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+django', 'text/css+jinja']

    def __init__(self, **options):
        super(CssDjangoLexer, self).__init__(CssLexer, DjangoLexer, **options)

    def analyse_text(text):
        return DjangoLexer.analyse_text(text) - 0.05


class JavascriptDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Django/Jinja'
    aliases = ['js+django', 'javascript+django',
               'js+jinja', 'javascript+jinja']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+django',
                 'application/x-javascript+jinja',
                 'text/x-javascript+django',
                 'text/x-javascript+jinja',
                 'text/javascript+django',
                 'text/javascript+jinja']

    def __init__(self, **options):
        super(JavascriptDjangoLexer, self).__init__(JavascriptLexer, DjangoLexer,
                                                    **options)

    def analyse_text(text):
        return DjangoLexer.analyse_text(text) - 0.05


class JspRootLexer(RegexLexer):
    """
    Base for the `JspLexer`. Yields `Token.Other` for the area outside of
    JSP tags.

    .. versionadded:: 0.7
    """

    tokens = {
        'root': [
            (r'<%\S?', Keyword, 'sec'),
            # FIXME: I want to make these keywords but still parse attributes.
            (r'</?jsp:(forward|getProperty|include|plugin|setProperty|useBean).*?>',
             Keyword),
            (r'[^<]+', Other),
            (r'<', Other),
        ],
        'sec': [
            (r'%>', Keyword, '#pop'),
            # note: '\w\W' != '.' without DOTALL.
            (r'[\w\W]+?(?=%>|\Z)', using(JavaLexer)),
        ],
    }


class JspLexer(DelegatingLexer):
    """
    Lexer for Java Server Pages.

    .. versionadded:: 0.7
    """
    name = 'Java Server Page'
    aliases = ['jsp']
    filenames = ['*.jsp']
    mimetypes = ['application/x-jsp']

    def __init__(self, **options):
        super(JspLexer, self).__init__(XmlLexer, JspRootLexer, **options)

    def analyse_text(text):
        rv = JavaLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        if '<%' in text and '%>' in text:
            rv += 0.1
        return rv


class EvoqueLexer(RegexLexer):
    """
    For files using the Evoque templating system.

    .. versionadded:: 1.1
    """
    name = 'Evoque'
    aliases = ['evoque']
    filenames = ['*.evoque']
    mimetypes = ['application/x-evoque']

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'[^#$]+', Other),
            (r'#\[', Comment.Multiline, 'comment'),
            (r'\$\$', Other),
            # svn keywords
            (r'\$\w+:[^$\n]*\$', Comment.Multiline),
            # directives: begin, end
            (r'(\$)(begin|end)(\{(%)?)(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      String, Punctuation)),
            # directives: evoque, overlay
            # see doc for handling first name arg: /directives/evoque/
            # + minor inconsistency: the "name" in e.g. $overlay{name=site_base}
            # should be using(PythonLexer), not passed out as String
            (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
             r'(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      String, using(PythonLexer), Punctuation)),
            # directives: if, for, prefer, test
            (r'(\$)(\w+)(\{(%)?)(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      using(PythonLexer), Punctuation)),
            # directive clauses (no {} expression)
            (r'(\$)(else|rof|fi)', bygroups(Punctuation, Name.Builtin)),
            # expressions
            (r'(\$\{(%)?)(.*?)((!)(.*?))?((?(2)%)\})',
             bygroups(Punctuation, None, using(PythonLexer),
                      Name.Builtin, None, None, Punctuation)),
            (r'#', Other),
        ],
        'comment': [
            (r'[^\]#]', Comment.Multiline),
            (r'#\[', Comment.Multiline, '#push'),
            (r'\]#', Comment.Multiline, '#pop'),
            (r'[\]#]', Comment.Multiline)
        ],
    }


class EvoqueHtmlLexer(DelegatingLexer):
    """
    Subclass of the `EvoqueLexer` that highlights unlexed data with the
    `HtmlLexer`.

    .. versionadded:: 1.1
    """
    name = 'HTML+Evoque'
    aliases = ['html+evoque']
    filenames = ['*.html']
    mimetypes = ['text/html+evoque']

    def __init__(self, **options):
        super(EvoqueHtmlLexer, self).__init__(HtmlLexer, EvoqueLexer,
                                              **options)


class EvoqueXmlLexer(DelegatingLexer):
    """
    Subclass of the `EvoqueLexer` that highlights unlexed data with the
    `XmlLexer`.

    .. versionadded:: 1.1
    """
    name = 'XML+Evoque'
    aliases = ['xml+evoque']
    filenames = ['*.xml']
    mimetypes = ['application/xml+evoque']

    def __init__(self, **options):
        super(EvoqueXmlLexer, self).__init__(XmlLexer, EvoqueLexer,
                                             **options)
 1488 
 1489 
 1490 class ColdfusionLexer(RegexLexer):
 1491     """
 1492     ColdFusion statements (script syntax)
 1493     """
 1494     name = 'cfstatement'
 1495     aliases = ['cfs']
 1496     filenames = []
 1497     mimetypes = []
 1498     flags = re.IGNORECASE
 1499 
 1500     tokens = {
 1501         'root': [
 1502             (r'//.*?\n', Comment.Single),
 1503             (r'/\*(?:.|\n)*?\*/', Comment.Multiline),
 1504             (r'\+\+|--', Operator),
 1505             (r'[-+*/^&=!]', Operator),
 1506             (r'<=|>=|<|>|==', Operator),
 1507             (r'mod\b', Operator),
 1508             (r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
 1509             (r'\|\||&&', Operator),
 1510             (r'\?', Operator),
 1511             (r'"', String.Double, 'string'),
 1512             # There is a special rule for allowing html in single quoted
 1513             # strings, evidently.
 1514             (r"'.*?'", String.Single),
 1515             (r'\d+', Number),
 1516             (r'(if|else|len|var|xml|default|break|switch|component|property|function|do|'
 1517              r'try|catch|in|continue|for|return|while|required|any|array|binary|boolean|'
 1518              r'date|guid|numeric|query|string|struct|uuid|case)\b', Keyword),
 1519             (r'(true|false|null)\b', Keyword.Constant),
 1520             (r'(application|session|client|cookie|super|this|variables|arguments)\b',
 1521              Name.Constant),
 1522             (r'([a-z_$][\w.]*)(\s*)(\()',
 1523              bygroups(Name.Function, Text, Punctuation)),
 1524             (r'[a-z_$][\w.]*', Name.Variable),
 1525             (r'[()\[\]{};:,.\\]', Punctuation),
 1526             (r'\s+', Text),
 1527         ],
 1528         'string': [
 1529             (r'""', String.Double),
 1530             (r'#.+?#', String.Interp),
 1531             (r'[^"#]+', String.Double),
 1532             (r'#', String.Double),
 1533             (r'"', String.Double, '#pop'),
 1534         ],
 1535     }
 1536 
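# Editorial example (not part of the upstream file): a minimal sketch of the
# script-style statements ColdfusionLexer covers; the snippet is invented.
def _demo_coldfusion_lexer():
    stmt = 'if (len(name) gt 0) { return "hello " & name; }'
    # 'gt' is matched by the word-operator rule, '&' by the symbol operators,
    # and the double-quoted string enters the 'string' state.
    for ttype, value in ColdfusionLexer().get_tokens(stmt):
        print(ttype, repr(value))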
 1537 
 1538 class ColdfusionMarkupLexer(RegexLexer):
 1539     """
 1540     ColdFusion markup only
 1541     """
 1542     name = 'Coldfusion'
 1543     aliases = ['cf']
 1544     filenames = []
 1545     mimetypes = []
 1546 
 1547     tokens = {
 1548         'root': [
 1549             (r'[^<]+', Other),
 1550             include('tags'),
 1551             (r'<[^<>]*', Other),
 1552         ],
 1553         'tags': [
 1554             (r'<!---', Comment.Multiline, 'cfcomment'),
 1555             (r'(?s)<!--.*?-->', Comment),
 1556             (r'<cfoutput.*?>', Name.Builtin, 'cfoutput'),
 1557             (r'(?s)(<cfscript.*?>)(.+?)(</cfscript.*?>)',
 1558              bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
 1559             # negative lookbehind is for strings with embedded >
 1560             (r'(?s)(</?cf(?:component|include|if|else|elseif|loop|return|'
 1561              r'dbinfo|dump|abort|location|invoke|throw|file|savecontent|'
 1562              r'mailpart|mail|header|content|zip|image|lock|argument|try|'
 1563              r'catch|break|directory|http|set|function|param)\b)(.*?)((?<!\\)>)',
 1564              bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
 1565         ],
 1566         'cfoutput': [
 1567             (r'[^#<]+', Other),
 1568             (r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer),
 1569                                       Punctuation)),
 1570             # (r'<cfoutput.*?>', Name.Builtin, '#push'),
 1571             (r'</cfoutput.*?>', Name.Builtin, '#pop'),
 1572             include('tags'),
 1573             (r'(?s)<[^<>]*', Other),
 1574             (r'#', Other),
 1575         ],
 1576         'cfcomment': [
 1577             (r'<!---', Comment.Multiline, '#push'),
 1578             (r'--->', Comment.Multiline, '#pop'),
 1579             (r'([^<-]|<(?!!---)|-(?!-->))+', Comment.Multiline),
 1580         ],
 1581     }
 1582 
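# Editorial example (not part of the upstream file): a minimal sketch of the
# tag-level constructs ColdfusionMarkupLexer handles; the markup is invented.
def _demo_coldfusion_markup_lexer():
    sample = '<cfif len(name)><cfoutput>Hello #name#</cfoutput></cfif>'
    # '<cfif ...>' is matched by the builtin-tag rule (its body re-lexed with
    # ColdfusionLexer), and '#name#' is interpolated inside 'cfoutput'.
    for ttype, value in ColdfusionMarkupLexer().get_tokens(sample):
        print(ttype, repr(value))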
 1583 
 1584 class ColdfusionHtmlLexer(DelegatingLexer):
 1585     """
 1586     ColdFusion markup embedded in HTML
 1587     """
 1588     name = 'Coldfusion HTML'
 1589     aliases = ['cfm']
 1590     filenames = ['*.cfm', '*.cfml']
 1591     mimetypes = ['application/x-coldfusion']
 1592 
 1593     def __init__(self, **options):
 1594         super(ColdfusionHtmlLexer, self).__init__(HtmlLexer, ColdfusionMarkupLexer,
 1595                                                   **options)
 1596 
 1597 
 1598 class ColdfusionCFCLexer(DelegatingLexer):
 1599     """
 1600     ColdFusion markup/script components
 1601 
 1602     .. versionadded:: 2.0
 1603     """
 1604     name = 'Coldfusion CFC'
 1605     aliases = ['cfc']
 1606     filenames = ['*.cfc']
 1607     mimetypes = []
 1608 
 1609     def __init__(self, **options):
 1610         super(ColdfusionCFCLexer, self).__init__(ColdfusionHtmlLexer, ColdfusionLexer,
 1611                                                  **options)
 1612 
 1613 
 1614 class SspLexer(DelegatingLexer):
 1615     """
 1616     Lexer for Scalate Server Pages.
 1617 
 1618     .. versionadded:: 1.4
 1619     """
 1620     name = 'Scalate Server Page'
 1621     aliases = ['ssp']
 1622     filenames = ['*.ssp']
 1623     mimetypes = ['application/x-ssp']
 1624 
 1625     def __init__(self, **options):
 1626         super(SspLexer, self).__init__(XmlLexer, JspRootLexer, **options)
 1627 
 1628     def analyse_text(text):
 1629         rv = 0.0
 1630         if re.search(r'val \w+\s*:', text):
 1631             rv += 0.6
 1632         if looks_like_xml(text):
 1633             rv += 0.2
 1634         if '<%' in text and '%>' in text:
 1635             rv += 0.1
 1636         return rv
 1637 
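# Editorial example (not part of the upstream file): a minimal sketch of the
# analyse_text() heuristics above; the sample page is invented. The score is
# the sum of whichever of the three checks match, as used by guess_lexer().
def _demo_ssp_analyse_text():
    sample = '<p><% val name: String = "x" %></p>'
    print(SspLexer.analyse_text(sample))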
 1638 
 1639 class TeaTemplateRootLexer(RegexLexer):
 1640     """
 1641     Base for the `TeaTemplateLexer`. Yields `Token.Other` for area outside of
 1642     code blocks.
 1643 
 1644     .. versionadded:: 1.5
 1645     """
 1646 
 1647     tokens = {
 1648         'root': [
 1649             (r'<%\S?', Keyword, 'sec'),
 1650             (r'[^<]+', Other),
 1651             (r'<', Other),
 1652         ],
 1653         'sec': [
 1654             (r'%>', Keyword, '#pop'),
 1655             # note: '\w\W' != '.' without DOTALL.
 1656             (r'[\w\W]+?(?=%>|\Z)', using(TeaLangLexer)),
 1657         ],
 1658     }
 1659 
 1660 
 1661 class TeaTemplateLexer(DelegatingLexer):
 1662     """
 1663     Lexer for `Tea Templates <http://teatrove.org/>`_.
 1664 
 1665     .. versionadded:: 1.5
 1666     """
 1667     name = 'Tea'
 1668     aliases = ['tea']
 1669     filenames = ['*.tea']
 1670     mimetypes = ['text/x-tea']
 1671 
 1672     def __init__(self, **options):
 1673         super(TeaTemplateLexer, self).__init__(XmlLexer,
 1674                                                TeaTemplateRootLexer, **options)
 1675 
 1676     def analyse_text(text):
 1677         rv = TeaLangLexer.analyse_text(text) - 0.01
 1678         if looks_like_xml(text):
 1679             rv += 0.4
 1680         if '<%' in text and '%>' in text:
 1681             rv += 0.1
 1682         return rv
 1683 
 1684 
 1685 class LassoHtmlLexer(DelegatingLexer):
 1686     """
 1687     Subclass of the `LassoLexer` which highlights unhandled data with the
 1688     `HtmlLexer`.
 1689 
 1690     Nested JavaScript and CSS are also highlighted.
 1691 
 1692     .. versionadded:: 1.6
 1693     """
 1694 
 1695     name = 'HTML+Lasso'
 1696     aliases = ['html+lasso']
 1697     alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.lasso', '*.lasso[89]',
 1698                        '*.incl', '*.inc', '*.las']
 1699     mimetypes = ['text/html+lasso',
 1700                  'application/x-httpd-lasso',
 1701                  'application/x-httpd-lasso[89]']
 1702 
 1703     def __init__(self, **options):
 1704         super(LassoHtmlLexer, self).__init__(HtmlLexer, LassoLexer, **options)
 1705 
 1706     def analyse_text(text):
 1707         rv = LassoLexer.analyse_text(text) - 0.01
 1708         if html_doctype_matches(text):  # same as HTML lexer
 1709             rv += 0.5
 1710         return rv
 1711 
 1712 
 1713 class LassoXmlLexer(DelegatingLexer):
 1714     """
 1715     Subclass of the `LassoLexer` which highlights unhandled data with the
 1716     `XmlLexer`.
 1717 
 1718     .. versionadded:: 1.6
 1719     """
 1720 
 1721     name = 'XML+Lasso'
 1722     aliases = ['xml+lasso']
 1723     alias_filenames = ['*.xml', '*.lasso', '*.lasso[89]',
 1724                        '*.incl', '*.inc', '*.las']
 1725     mimetypes = ['application/xml+lasso']
 1726 
 1727     def __init__(self, **options):
 1728         super(LassoXmlLexer, self).__init__(XmlLexer, LassoLexer, **options)
 1729 
 1730     def analyse_text(text):
 1731         rv = LassoLexer.analyse_text(text) - 0.01
 1732         if looks_like_xml(text):
 1733             rv += 0.4
 1734         return rv
 1735 
 1736 
 1737 class LassoCssLexer(DelegatingLexer):
 1738     """
 1739     Subclass of the `LassoLexer` which highlights unhandled data with the
 1740     `CssLexer`.
 1741 
 1742     .. versionadded:: 1.6
 1743     """
 1744 
 1745     name = 'CSS+Lasso'
 1746     aliases = ['css+lasso']
 1747     alias_filenames = ['*.css']
 1748     mimetypes = ['text/css+lasso']
 1749 
 1750     def __init__(self, **options):
 1751         options['requiredelimiters'] = True
 1752         super(LassoCssLexer, self).__init__(CssLexer, LassoLexer, **options)
 1753 
 1754     def analyse_text(text):
 1755         rv = LassoLexer.analyse_text(text) - 0.05
 1756         if re.search(r'\w+:.+?;', text):
 1757             rv += 0.1
 1758         if 'padding:' in text:
 1759             rv += 0.1
 1760         return rv
 1761 
 1762 
 1763 class LassoJavascriptLexer(DelegatingLexer):
 1764     """
 1765     Subclass of the `LassoLexer` which highlights unhandled data with the
 1766     `JavascriptLexer`.
 1767 
 1768     .. versionadded:: 1.6
 1769     """
 1770 
 1771     name = 'JavaScript+Lasso'
 1772     aliases = ['js+lasso', 'javascript+lasso']
 1773     alias_filenames = ['*.js']
 1774     mimetypes = ['application/x-javascript+lasso',
 1775                  'text/x-javascript+lasso',
 1776                  'text/javascript+lasso']
 1777 
 1778     def __init__(self, **options):
 1779         options['requiredelimiters'] = True
 1780         super(LassoJavascriptLexer, self).__init__(JavascriptLexer, LassoLexer,
 1781                                                    **options)
 1782 
 1783     def analyse_text(text):
 1784         rv = LassoLexer.analyse_text(text) - 0.05
 1785         return rv
 1786 
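# Editorial example (not part of the upstream file): a minimal sketch showing
# why the CSS and JavaScript variants force 'requiredelimiters' above: only
# code inside Lasso delimiters such as [...] is treated as Lasso, so plain
# CSS passes through to CssLexer untouched. The stylesheet is invented.
def _demo_lasso_css_lexer():
    css = 'div { padding: [$pad]px; }'
    for ttype, value in LassoCssLexer().get_tokens(css):
        print(ttype, repr(value))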
 1787 
 1788 class HandlebarsLexer(RegexLexer):
 1789     """
 1790     Generic `Handlebars <http://handlebarsjs.com/>`_ template lexer.
 1791 
 1792     Highlights only the Handlebars template tags (the markup between `{{` and
 1793     `}}`); everything else is left for a delegating lexer.
 1794 
 1795     .. versionadded:: 2.0
 1796     """
 1797 
 1798     name = "Handlebars"
 1799     aliases = ['handlebars']
 1800 
 1801     tokens = {
 1802         'root': [
 1803             (r'[^{]+', Other),
 1804 
 1805             # Comment start {{!  }} or {{!--
 1806             (r'\{\{!.*\}\}', Comment),
 1807 
 1808             # HTML Escaping open {{{expression
 1809             (r'(\{\{\{)(\s*)', bygroups(Comment.Special, Text), 'tag'),
 1810 
 1811             # {{blockOpen {{#blockOpen {{/blockClose with optional tilde ~
 1812             (r'(\{\{)([#~/]+)([^\s}]*)',
 1813              bygroups(Comment.Preproc, Number.Attribute, Number.Attribute), 'tag'),
 1814             (r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'tag'),
 1815         ],
 1816 
 1817         'tag': [
 1818             (r'\s+', Text),
 1819             # HTML Escaping close }}}
 1820             (r'\}\}\}', Comment.Special, '#pop'),
 1821             # blockClose}}, includes optional tilde ~
 1822             (r'(~?)(\}\})', bygroups(Number, Comment.Preproc), '#pop'),
 1823 
 1824             # {{opt=something}}
 1825             (r'([^\s}]+)(=)', bygroups(Name.Attribute, Operator)),
 1826 
 1827             # Partials {{> ...}}
 1828             (r'(>)(\s*)(@partial-block)', bygroups(Keyword, Text, Keyword)),
 1829             (r'(#?>)(\s*)([\w-]+)', bygroups(Keyword, Text, Name.Variable)),
 1830             (r'(>)(\s*)(\()', bygroups(Keyword, Text, Punctuation),
 1831              'dynamic-partial'),
 1832 
 1833             include('generic'),
 1834         ],
 1835         'dynamic-partial': [
 1836             (r'\s+', Text),
 1837             (r'\)', Punctuation, '#pop'),
 1838 
 1839             (r'(lookup)(\s+)(\.|this)(\s+)', bygroups(Keyword, Text,
 1840                                                       Name.Variable, Text)),
 1841             (r'(lookup)(\s+)(\S+)', bygroups(Keyword, Text,
 1842                                              using(this, state='variable'))),
 1843             (r'[\w-]+', Name.Function),
 1844 
 1845             include('generic'),
 1846         ],
 1847         'variable': [
 1848             (r'[()/@a-zA-Z][\w-]*', Name.Variable),
 1849             (r'\.[\w-]+', Name.Variable),
 1850             (r'(this\/|\.\/|(\.\.\/)+)[\w-]+', Name.Variable),
 1851         ],
 1852         'generic': [
 1853             include('variable'),
 1854 
 1855             # borrowed from DjangoLexer
 1856             (r':?"(\\\\|\\"|[^"])*"', String.Double),
 1857             (r":?'(\\\\|\\'|[^'])*'", String.Single),
 1858             (r"[0-9](\.[0-9]*)?([eE][+-]?[0-9]+)?[flFLdD]?|"
 1859              r"0[xX][0-9a-fA-F]+[Ll]?", Number),
 1860         ]
 1861     }
 1862 
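# Editorial example (not part of the upstream file): a minimal sketch of the
# Handlebars constructs recognized above; the template is invented.
def _demo_handlebars_lexer():
    sample = '{{#each items}}{{> itemRow}} {{name}}{{/each}}'
    # '{{#each' takes the block-open rule, '{{>' the partial rule, and bare
    # '{{name}}' the plain-tag rule; all close via the 'tag' state.
    for ttype, value in HandlebarsLexer().get_tokens(sample):
        print(ttype, repr(value))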
 1863 
 1864 class HandlebarsHtmlLexer(DelegatingLexer):
 1865     """
 1866     Subclass of the `HandlebarsLexer` that highlights unlexed data with the
 1867     `HtmlLexer`.
 1868 
 1869     .. versionadded:: 2.0
 1870     """
 1871 
 1872     name = "HTML+Handlebars"
 1873     aliases = ["html+handlebars"]
 1874     filenames = ['*.handlebars', '*.hbs']
 1875     mimetypes = ['text/html+handlebars', 'text/x-handlebars-template']
 1876 
 1877     def __init__(self, **options):
 1878         super(HandlebarsHtmlLexer, self).__init__(HtmlLexer, HandlebarsLexer, **options)
 1879 
 1880 
 1881 class YamlJinjaLexer(DelegatingLexer):
 1882     """
 1883     Subclass of the `DjangoLexer` that highlights unlexed data with the
 1884     `YamlLexer`.
 1885 
 1886     Commonly used in SaltStack Salt states.
 1887 
 1888     .. versionadded:: 2.0
 1889     """
 1890 
 1891     name = 'YAML+Jinja'
 1892     aliases = ['yaml+jinja', 'salt', 'sls']
 1893     filenames = ['*.sls']
 1894     mimetypes = ['text/x-yaml+jinja', 'text/x-sls']
 1895 
 1896     def __init__(self, **options):
 1897         super(YamlJinjaLexer, self).__init__(YamlLexer, DjangoLexer, **options)
 1898 
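# Editorial example (not part of the upstream file): a minimal sketch of a
# Salt state run through YamlJinjaLexer; the .sls content is invented.
def _demo_yaml_jinja_lexer():
    sls = 'nginx:\n  pkg.installed:\n    - name: {{ pillar["nginx_pkg"] }}\n'
    # DjangoLexer claims the '{{ ... }}' expression; the remainder is
    # highlighted as YAML by the delegated YamlLexer.
    for ttype, value in YamlJinjaLexer().get_tokens(sls):
        print(ttype, repr(value))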
 1899 
 1900 class LiquidLexer(RegexLexer):
 1901     """
 1902     Lexer for `Liquid templates
 1903     <http://www.rubydoc.info/github/Shopify/liquid>`_.
 1904 
 1905     .. versionadded:: 2.0
 1906     """
 1907     name = 'liquid'
 1908     aliases = ['liquid']
 1909     filenames = ['*.liquid']
 1910 
 1911     tokens = {
 1912         'root': [
 1913             (r'[^{]+', Text),
 1914             # tags and block tags
 1915             (r'(\{%)(\s*)', bygroups(Punctuation, Whitespace), 'tag-or-block'),
 1916             # output tags
 1917             (r'(\{\{)(\s*)([^\s}]+)',
 1918              bygroups(Punctuation, Whitespace, using(this, state='generic')),
 1919              'output'),
 1920             (r'\{', Text)
 1921         ],
 1922 
 1923         'tag-or-block': [
 1924             # builtin logic blocks
 1925             (r'(if|unless|elsif|case)(?=\s+)', Keyword.Reserved, 'condition'),
 1926             (r'(when)(\s+)', bygroups(Keyword.Reserved, Whitespace),
 1927              combined('end-of-block', 'whitespace', 'generic')),
 1928             (r'(else)(\s*)(%\})',
 1929              bygroups(Keyword.Reserved, Whitespace, Punctuation), '#pop'),
 1930 
 1931             # other builtin blocks
 1932             (r'(capture)(\s+)([^\s%]+)(\s*)(%\})',
 1933              bygroups(Name.Tag, Whitespace, using(this, state='variable'),
 1934                       Whitespace, Punctuation), '#pop'),
 1935             (r'(comment)(\s*)(%\})',
 1936              bygroups(Name.Tag, Whitespace, Punctuation), 'comment'),
 1937             (r'(raw)(\s*)(%\})',
 1938              bygroups(Name.Tag, Whitespace, Punctuation), 'raw'),
 1939 
 1940             # end of block
 1941             (r'(end(case|unless|if))(\s*)(%\})',
 1942              bygroups(Keyword.Reserved, None, Whitespace, Punctuation), '#pop'),
 1943             (r'(end([^\s%]+))(\s*)(%\})',
 1944              bygroups(Name.Tag, None, Whitespace, Punctuation), '#pop'),
 1945 
 1946             # builtin tags (assign and include are handled together with usual tags)
 1947             (r'(cycle)(\s+)(?:([^\s:]*)(:))?(\s*)',
 1948              bygroups(Name.Tag, Whitespace,
 1949                       using(this, state='generic'), Punctuation, Whitespace),
 1950              'variable-tag-markup'),
 1951 
 1952             # other tags or blocks
 1953             (r'([^\s%]+)(\s*)', bygroups(Name.Tag, Whitespace), 'tag-markup')
 1954         ],
 1955 
 1956         'output': [
 1957             include('whitespace'),
 1958             (r'\}\}', Punctuation, '#pop'),  # end of output
 1959 
 1960             (r'\|', Punctuation, 'filters')
 1961         ],
 1962 
 1963         'filters': [
 1964             include('whitespace'),
 1965             (r'\}\}', Punctuation, ('#pop', '#pop')),  # end of filters and output
 1966 
 1967             (r'([^\s|:]+)(:?)(\s*)',
 1968              bygroups(Name.Function, Punctuation, Whitespace), 'filter-markup')
 1969         ],
 1970 
 1971         'filter-markup': [
 1972             (r'\|', Punctuation, '#pop'),
 1973             include('end-of-tag'),
 1974             include('default-param-markup')
 1975         ],
 1976 
 1977         'condition': [
 1978             include('end-of-block'),
 1979             include('whitespace'),
 1980 
 1981             (r'([^\s=!><]+)(\s*)([=!><]=?)(\s*)(\S+)(\s*)(%\})',
 1982              bygroups(using(this, state='generic'), Whitespace, Operator,
 1983                       Whitespace, using(this, state='generic'), Whitespace,
 1984                       Punctuation)),
 1985             (r'\b!', Operator),
 1986             (r'\bnot\b', Operator.Word),
 1987             (r'([\w.\'"]+)(\s+)(contains)(\s+)([\w.\'"]+)',
 1988              bygroups(using(this, state='generic'), Whitespace, Operator.Word,
 1989                       Whitespace, using(this, state='generic'))),
 1990 
 1991             include('generic'),
 1992             include('whitespace')
 1993         ],
 1994 
 1995         'generic-value': [
 1996             include('generic'),
 1997             include('end-at-whitespace')
 1998         ],
 1999 
 2000         'operator': [
 2001             (r'(\s*)((=|!|>|<)=?)(\s*)',
 2002              bygroups(Whitespace, Operator, None, Whitespace), '#pop'),
 2003             (r'(\s*)(\bcontains\b)(\s*)',
 2004              bygroups(Whitespace, Operator.Word, Whitespace), '#pop'),
 2005         ],
 2006 
 2007         'end-of-tag': [
 2008             (r'\}\}', Punctuation, '#pop')
 2009         ],
 2010 
 2011         'end-of-block': [
 2012             (r'%\}', Punctuation, ('#pop', '#pop'))
 2013         ],
 2014 
 2015         'end-at-whitespace': [
 2016             (r'\s+', Whitespace, '#pop')
 2017         ],
 2018 
 2019         # states for unknown markup
 2020         'param-markup': [
 2021             include('whitespace'),
 2022             # params with colons or equals
 2023             (r'([^\s=:]+)(\s*)(=|:)',
 2024              bygroups(Name.Attribute, Whitespace, Operator)),
 2025             # explicit variables
 2026             (r'(\{\{)(\s*)([^\s}]+)(\s*)(\}\})',
 2027              bygroups(Punctuation, Whitespace, using(this, state='variable'),
 2028                       Whitespace, Punctuation)),
 2029 
 2030             include('string'),
 2031             include('number'),
 2032             include('keyword'),
 2033             (r',', Punctuation)
 2034         ],
 2035 
 2036         'default-param-markup': [
 2037             include('param-markup'),
 2038             (r'.', Text)  # fallback for switches / variables / un-quoted strings / ...
 2039         ],
 2040 
 2041         'variable-param-markup': [
 2042             include('param-markup'),
 2043             include('variable'),
 2044             (r'.', Text)  # fallback
 2045         ],
 2046 
 2047         'tag-markup': [
 2048             (r'%\}', Punctuation, ('#pop', '#pop')),  # end of tag
 2049             include('default-param-markup')
 2050         ],
 2051 
 2052         'variable-tag-markup': [
 2053             (r'%\}', Punctuation, ('#pop', '#pop')),  # end of tag
 2054             include('variable-param-markup')
 2055         ],
 2056 
 2057         # states for different values types
 2058         'keyword': [
 2059             (r'\b(false|true)\b', Keyword.Constant)
 2060         ],
 2061 
 2062         'variable': [
 2063             (r'[a-zA-Z_]\w*', Name.Variable),
 2064             (r'(?<=\w)\.(?=\w)', Punctuation)
 2065         ],
 2066 
 2067         'string': [
 2068             (r"'[^']*'", String.Single),
 2069             (r'"[^"]*"', String.Double)
 2070         ],
 2071 
 2072         'number': [
 2073             (r'\d+\.\d+', Number.Float),
 2074             (r'\d+', Number.Integer)
 2075         ],
 2076 
 2077         'generic': [  # decides for variable, string, keyword or number
 2078             include('keyword'),
 2079             include('string'),
 2080             include('number'),
 2081             include('variable')
 2082         ],
 2083 
 2084         'whitespace': [
 2085             (r'[ \t]+', Whitespace)
 2086         ],
 2087 
 2088         # states for builtin blocks
 2089         'comment': [
 2090             (r'(\{%)(\s*)(endcomment)(\s*)(%\})',
 2091              bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
 2092                       Punctuation), ('#pop', '#pop')),
 2093             (r'.', Comment)
 2094         ],
 2095 
 2096         'raw': [
 2097             (r'[^{]+', Text),
 2098             (r'(\{%)(\s*)(endraw)(\s*)(%\})',
 2099              bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
 2100                       Punctuation), '#pop'),
 2101             (r'\{', Text)
 2102         ],
 2103     }
 2104 
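# Editorial example (not part of the upstream file): a minimal sketch of a
# Liquid tag and output expression passing through the states above; the
# template is invented.
def _demo_liquid_lexer():
    sample = '{% if user %}Hello {{ user.name }}!{% endif %}'
    # '{% if' enters 'tag-or-block' then 'condition'; '{{ user.name }}'
    # enters 'output'; '{% endif %}' is closed by the end-of-block rule.
    for ttype, value in LiquidLexer().get_tokens(sample):
        print(ttype, repr(value))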
 2105 
 2106 class TwigLexer(RegexLexer):
 2107     """
 2108     `Twig <http://twig.sensiolabs.org/>`_ template lexer.
 2109 
 2110     It just highlights Twig code between the preprocessor directives,
 2111     other data is left untouched by the lexer.
 2112 
 2113     .. versionadded:: 2.0
 2114     """
 2115 
 2116     name = 'Twig'
 2117     aliases = ['twig']
 2118     mimetypes = ['application/x-twig']
 2119 
 2120     flags = re.M | re.S
 2121 
 2122     # Note that a backslash is included in the following two patterns;
 2123     # PHP uses a backslash as a namespace separator.
 2124     _ident_char = r'[\\\w-]|[^\x00-\x7f]'
 2125     _ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])'
 2126     _ident_end = r'(?:' + _ident_char + ')*'
 2127     _ident_inner = _ident_begin + _ident_end
 2128 
 2129     tokens = {
 2130         'root': [
 2131             (r'[^{]+', Other),
 2132             (r'\{\{', Comment.Preproc, 'var'),
 2133             # twig comments
 2134             (r'\{\#.*?\#\}', Comment),
 2135             # raw twig blocks
 2136             (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
 2137              r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
 2138              bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
 2139                       Other, Comment.Preproc, Text, Keyword, Text,
 2140                       Comment.Preproc)),
 2141             (r'(\{%)(-?\s*)(verbatim)(\s*-?)(%\})(.*?)'
 2142              r'(\{%)(-?\s*)(endverbatim)(\s*-?)(%\})',
 2143              bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
 2144                       Other, Comment.Preproc, Text, Keyword, Text,
 2145                       Comment.Preproc)),
 2146             # filter blocks
 2147             (r'(\{%%)(-?\s*)(filter)(\s+)(%s)' % _ident_inner,
 2148              bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
 2149              'tag'),
 2150             (r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
 2151              bygroups(Comment.Preproc, Text, Keyword), 'tag'),
 2152             (r'\{', Other),
 2153         ],
 2154         'varnames': [
 2155             (r'(\|)(\s*)(%s)' % _ident_inner,
 2156              bygroups(Operator, Text, Name.Function)),
 2157             (r'(is)(\s+)(not)?(\s*)(%s)' % _ident_inner,
 2158              bygroups(Keyword, Text, Keyword, Text, Name.Function)),
 2159             (r'(?i)(true|false|none|null)\b', Keyword.Pseudo),
 2160             (r'(in|not|and|b-and|or|b-or|b-xor|is|'
 2161              r'if|elseif|else|import|'
 2162              r'constant|defined|divisibleby|empty|even|iterable|odd|sameas|'
 2163              r'matches|starts\s+with|ends\s+with)\b',
 2164              Keyword),
 2165             (r'(loop|block|parent)\b', Name.Builtin),
 2166             (_ident_inner, Name.Variable),
 2167             (r'\.' + _ident_inner, Name.Variable),
 2168             (r'\.[0-9]+', Number),
 2169             (r':?"(\\\\|\\"|[^"])*"', String.Double),
 2170             (r":?'(\\\\|\\'|[^'])*'", String.Single),
 2171             (r'([{}()\[\]+\-*/,:~%]|\.\.|\?|:|\*\*|\/\/|!=|[><=]=?)', Operator),
 2172             (r"[0-9](\.[0-9]*)?([eE][+-]?[0-9]+)?[flFLdD]?|"
 2173              r"0[xX][0-9a-fA-F]+[Ll]?", Number),
 2174         ],
 2175         'var': [
 2176             (r'\s+', Text),
 2177             (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
 2178             include('varnames')
 2179         ],
 2180         'tag': [
 2181             (r'\s+', Text),
 2182             (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
 2183             include('varnames'),
 2184             (r'.', Punctuation),
 2185         ],
 2186     }
 2187 
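# Editorial example (not part of the upstream file): a minimal sketch of the
# Twig constructs recognized above; the template is invented.
def _demo_twig_lexer():
    sample = '{% for item in items %}{{ item.name|upper }}{% endfor %}'
    # '{% for' enters the 'tag' state, '{{ item.name|upper }}' the 'var'
    # state, where '|upper' is matched as a filter (Name.Function).
    for ttype, value in TwigLexer().get_tokens(sample):
        print(ttype, repr(value))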
 2188 
 2189 class TwigHtmlLexer(DelegatingLexer):
 2190     """
 2191     Subclass of the `TwigLexer` that highlights unlexed data with the
 2192     `HtmlLexer`.
 2193 
 2194     .. versionadded:: 2.0
 2195     """
 2196 
 2197     name = "HTML+Twig"
 2198     aliases = ["html+twig"]
 2199     filenames = ['*.twig']
 2200     mimetypes = ['text/html+twig']
 2201 
 2202     def __init__(self, **options):
 2203         super(TwigHtmlLexer, self).__init__(HtmlLexer, TwigLexer, **options)
 2204 
 2205 
 2206 class Angular2Lexer(RegexLexer):
 2207     """
 2208     Generic
 2209     `angular2 <http://victorsavkin.com/post/119943127151/angular-2-template-syntax>`_
 2210     template lexer.
 2211 
 2212     Highlights only the Angular template tags (the markup between `{{` and `}}`,
 2213     plus the special attributes '(event)=', '[property]=', '[(twoWayBinding)]=').
 2214     Everything else is left for a delegating lexer.
 2215 
 2216     .. versionadded:: 2.1
 2217     """
 2218 
 2219     name = "Angular2"
 2220     aliases = ['ng2']
 2221 
 2222     tokens = {
 2223         'root': [
 2224             (r'[^{([*#]+', Other),
 2225 
 2226             # {{meal.name}}
 2227             (r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'ngExpression'),
 2228 
 2229             # (click)="deleteOrder()"; [value]="test"; [(twoWayTest)]="foo.bar"
 2230             (r'([([]+)([\w:.-]+)([\])]+)(\s*)(=)(\s*)',
 2231              bygroups(Punctuation, Name.Attribute, Punctuation, Text, Operator, Text),
 2232              'attr'),
 2233             (r'([([]+)([\w:.-]+)([\])]+)(\s*)',
 2234              bygroups(Punctuation, Name.Attribute, Punctuation, Text)),
 2235 
 2236             # *ngIf="..."; #f="ngForm"
 2237             (r'([*#])([\w:.-]+)(\s*)(=)(\s*)',
 2238              bygroups(Punctuation, Name.Attribute, Text, Operator, Text), 'attr'),
 2239             (r'([*#])([\w:.-]+)(\s*)',
 2240              bygroups(Punctuation, Name.Attribute, Text)),
 2241         ],
 2242 
 2243         'ngExpression': [
 2244             (r'\s+(\|\s+)?', Text),
 2245             (r'\}\}', Comment.Preproc, '#pop'),
 2246 
 2247             # Literals
 2248             (r':?(true|false)', String.Boolean),
 2249             (r':?"(\\\\|\\"|[^"])*"', String.Double),
 2250             (r":?'(\\\\|\\'|[^'])*'", String.Single),
 2251             (r"[0-9](\.[0-9]*)?([eE][+-]?[0-9]+)?[flFLdD]?|"
 2252              r"0[xX][0-9a-fA-F]+[Ll]?", Number),
 2253 
 2254             # Variable references
 2255             (r'[a-zA-Z][\w-]*(\(.*\))?', Name.Variable),
 2256             (r'\.[\w-]+(\(.*\))?', Name.Variable),
 2257 
 2258             # inline If
 2259             (r'(\?)(\s*)([^}\s]+)(\s*)(:)(\s*)([^}\s]+)(\s*)',
 2260              bygroups(Operator, Text, String, Text, Operator, Text, String, Text)),
 2261         ],
 2262         'attr': [
 2263             ('".*?"', String, '#pop'),
 2264             ("'.*?'", String, '#pop'),
 2265             (r'[^\s>]+', String, '#pop'),
 2266         ],
 2267     }
 2268 
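# Editorial example (not part of the upstream file): a minimal sketch of the
# bindings Angular2Lexer picks out of otherwise-plain markup; the template
# is invented.
def _demo_angular2_lexer():
    sample = '<p (click)="save()" [hidden]="done">{{ title }}</p>'
    # '(click)=' and '[hidden]=' hit the bracketed-attribute rules and push
    # 'attr' for the quoted value; '{{ title }}' enters 'ngExpression'.
    for ttype, value in Angular2Lexer().get_tokens(sample):
        print(ttype, repr(value))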
 2269 
 2270 class Angular2HtmlLexer(DelegatingLexer):
 2271     """
 2272     Subclass of the `Angular2Lexer` that highlights unlexed data with the
 2273     `HtmlLexer`.
 2274 
 2275     .. versionadded:: 2.1
 2276     """
 2277 
 2278     name = "HTML + Angular2"
 2279     aliases = ["html+ng2"]
 2280     filenames = ['*.ng2']
 2281 
 2282     def __init__(self, **options):
 2283         super(Angular2HtmlLexer, self).__init__(HtmlLexer, Angular2Lexer, **options)
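

# Editorial example (not part of the upstream file): a minimal sketch showing
# that every lexer in this module is also reachable through the Pygments
# registry via the aliases declared on each class.
def _demo_registry_lookup():
    from pygments.lexers import get_lexer_by_name
    return get_lexer_by_name('html+ng2')  # returns an Angular2HtmlLexer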