refactor: move brother_node development artifact to dev/test-nodes subdirectory

Development Artifact Cleanup:
 BROTHER_NODE REORGANIZATION: Moved development test node to appropriate location
- dev/test-nodes/brother_node/: Moved from root directory for better organization
- Contains development configuration, test logs, and test chain data
- No impact on production systems - purely development/testing artifact

 DEVELOPMENT ARTIFACTS IDENTIFIED:
- Chain ID: aitbc-brother-chain (test/development chain)
- Ports: 8010 (P2P) and 8011 (RPC) - different from production
- Environment: .env file with test configuration
- Logs: rpc.log and node.log from development testing session (March 15, 2026)

 ROOT DIRECTORY CLEANUP: Removed development clutter from production directory
- brother_node/ moved to dev/test-nodes/brother_node/
- Root directory now contains only production-ready components
- Development artifacts properly organized in dev/ subdirectory

DIRECTORY STRUCTURE IMPROVEMENT:
📁 dev/test-nodes/: Development and testing node configurations
🏗️ Root Directory: Clean production structure with only essential components
🧪 Development Isolation: Test environments separated from production

BENEFITS:
 Clean Production Directory: No development artifacts in root
 Better Organization: Development nodes grouped in dev/ subdirectory
 Clear Separation: Production vs development environments clearly distinguished
 Maintainability: Easier to identify and manage development components

RESULT: Successfully moved the brother_node development artifact to the dev/test-nodes/ subdirectory, cleaning up the root directory while preserving the development testing environment for future use.
This commit is contained in:
2026-03-30 17:09:06 +02:00
parent bf730dcb4a
commit 816e258d4c
11734 changed files with 2001707 additions and 0 deletions

View File

@@ -0,0 +1,362 @@
"""
pygments.lexers
~~~~~~~~~~~~~~~
Pygments lexers.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import types
import fnmatch
from os.path import basename
from pygments.lexers._mapping import LEXERS
from pygments.modeline import get_filetype_from_buffer
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound, guess_decode
# Deprecated lexer names mapped to their current replacements; resolved
# lazily in _automodule.__getattr__ so old import paths keep working.
COMPAT = {
    'Python3Lexer': 'PythonLexer',
    'Python3TracebackLexer': 'PythonTracebackLexer',
    'LeanLexer': 'Lean3Lexer',
}

# Public API: the helper functions plus every builtin lexer name and every
# compat alias, so `from pygments.lexers import SomeLexer` works.
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
           'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT)
_lexer_cache = {}
_pattern_cache = {}
def _fn_matches(fn, glob):
"""Return whether the supplied file name fn matches pattern filename."""
if glob not in _pattern_cache:
pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
return pattern.match(fn)
return _pattern_cache[glob].match(fn)
def _load_lexers(module_name):
    """Load a lexer (and all others in the module too)."""
    module = __import__(module_name, None, None, ['__all__'])
    for exported in module.__all__:
        lexer_cls = getattr(module, exported)
        _lexer_cache[lexer_cls.name] = lexer_cls
def get_all_lexers(plugins=True):
    """Yield ``(name, aliases, filenames, mimetypes)`` tuples for all known
    lexers.

    If *plugins* is true (the default), lexers supplied by entrypoint
    plugins are included as well; otherwise only builtin lexers are
    reported.
    """
    for info in LEXERS.values():
        yield info[1:]
    if plugins:
        for plugin_lexer in find_plugin_lexers():
            yield (plugin_lexer.name, plugin_lexer.aliases,
                   plugin_lexer.filenames, plugin_lexer.mimetypes)
def find_lexer_class(name):
    """
    Return the `Lexer` subclass whose *name* attribute equals the given
    *name* argument, or None when no such lexer is known.
    """
    try:
        return _lexer_cache[name]
    except KeyError:
        pass
    # builtin lexers first
    for module_name, lexer_name, _aliases, _filenames, _mimes in LEXERS.values():
        if lexer_name == name:
            _load_lexers(module_name)
            return _lexer_cache[name]
    # then lexers registered through setuptools entrypoints
    for plugin_cls in find_plugin_lexers():
        if plugin_cls.name == name:
            return plugin_cls
def find_lexer_class_by_name(_alias):
    """
    Return the `Lexer` subclass that has `alias` in its aliases list, without
    instantiating it.

    Like `get_lexer_by_name`, but does not instantiate the class.

    Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias
    is found.

    .. versionadded:: 2.2
    """
    if not _alias:
        raise ClassNotFound(f'no lexer for alias {_alias!r} found')
    wanted = _alias.lower()
    # builtin lexers first
    for module_name, name, aliases, _, _ in LEXERS.values():
        if wanted in aliases:
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name]
    # then lexers registered through setuptools entrypoints
    for plugin_cls in find_plugin_lexers():
        if wanted in plugin_cls.aliases:
            return plugin_cls
    raise ClassNotFound(f'no lexer for alias {_alias!r} found')
def get_lexer_by_name(_alias, **options):
    """
    Return an instance of a `Lexer` subclass that has `alias` in its
    aliases list. The lexer is given the `options` at its
    instantiation.

    Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias
    is found.
    """
    if not _alias:
        raise ClassNotFound(f'no lexer for alias {_alias!r} found')
    wanted = _alias.lower()
    # builtin lexers first
    for module_name, name, aliases, _, _ in LEXERS.values():
        if wanted in aliases:
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name](**options)
    # then lexers registered through setuptools entrypoints
    for plugin_cls in find_plugin_lexers():
        if wanted in plugin_cls.aliases:
            return plugin_cls(**options)
    raise ClassNotFound(f'no lexer for alias {_alias!r} found')
def load_lexer_from_file(filename, lexername="CustomLexer", **options):
    """Load a lexer from a file.

    This method expects a file located relative to the current working
    directory, which contains a Lexer class. By default, it expects the
    Lexer to be named CustomLexer; you can specify your own class name
    as the second argument to this function.

    Users should be very careful with the input, because this method
    is equivalent to running eval on the input file.

    Raises ClassNotFound if there are any problems importing the Lexer.

    .. versionadded:: 2.2
    """
    try:
        # This empty dict will contain the namespace for the exec'd file
        custom_namespace = {}
        with open(filename, 'rb') as f:
            exec(f.read(), custom_namespace)
        # Retrieve the class `lexername` from that namespace
        if lexername not in custom_namespace:
            # name the offending file in the error instead of a placeholder
            raise ClassNotFound(f'no valid {lexername} class found in {filename}')
        lexer_class = custom_namespace[lexername]
        # And finally instantiate it with the options
        return lexer_class(**options)
    except OSError as err:
        raise ClassNotFound(f'cannot read {filename}: {err}')
    except ClassNotFound:
        raise
    except Exception as err:
        raise ClassNotFound(f'error when loading custom lexer: {err}')
def find_lexer_class_for_filename(_fn, code=None):
    """Get a lexer for a filename.

    If multiple lexers match the filename pattern, use ``analyse_text()`` to
    figure out which one is more appropriate.

    Returns None if not found.
    """
    candidates = []
    fn = basename(_fn)
    # collect every builtin lexer whose filename glob matches
    for modname, name, _, filenames, _ in LEXERS.values():
        for pattern in filenames:
            if _fn_matches(fn, pattern):
                if name not in _lexer_cache:
                    _load_lexers(modname)
                candidates.append((_lexer_cache[name], pattern))
    # plus any plugin lexers that match
    for plugin_cls in find_plugin_lexers():
        for pattern in plugin_cls.filenames:
            if _fn_matches(fn, pattern):
                candidates.append((plugin_cls, pattern))

    if isinstance(code, bytes):
        # decode it, since all analyse_text functions expect unicode
        code = guess_decode(code)

    def get_rating(info):
        cls, pattern = info
        # explicit (non-glob) patterns get a bonus
        bonus = 0.5 if '*' not in pattern else 0
        # The class _always_ defines analyse_text because it's included in
        # the Lexer class. The default implementation returns None which
        # gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
        # to find lexers which need it overridden.
        if code:
            return cls.analyse_text(code) + bonus, cls.__name__
        return cls.priority + bonus, cls.__name__

    if candidates:
        # stable sort; the best-rated candidate ends up last
        candidates.sort(key=get_rating)
        return candidates[-1][0]
def get_lexer_for_filename(_fn, code=None, **options):
    """Get a lexer for a filename.

    Return a `Lexer` subclass instance that has a filename pattern
    matching `fn`. The lexer is given the `options` at its
    instantiation.

    Raise :exc:`pygments.util.ClassNotFound` if no lexer for that filename
    is found.

    If multiple lexers match the filename pattern, use their
    ``analyse_text()`` methods to figure out which one is more appropriate.
    """
    lexer_cls = find_lexer_class_for_filename(_fn, code)
    if not lexer_cls:
        raise ClassNotFound(f'no lexer for filename {_fn!r} found')
    return lexer_cls(**options)
def get_lexer_for_mimetype(_mime, **options):
    """
    Return a `Lexer` subclass instance that has `mime` in its mimetype
    list. The lexer is given the `options` at its instantiation.

    Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that
    mimetype is found.
    """
    # builtin lexers first
    for modname, name, _, _, mimetypes in LEXERS.values():
        if _mime in mimetypes:
            if name not in _lexer_cache:
                _load_lexers(modname)
            return _lexer_cache[name](**options)
    # then lexers registered through setuptools entrypoints
    for plugin_cls in find_plugin_lexers():
        if _mime in plugin_cls.mimetypes:
            return plugin_cls(**options)
    raise ClassNotFound(f'no lexer for mimetype {_mime!r} found')
def _iter_lexerclasses(plugins=True):
    """Yield every known lexer class: builtins in sorted key order, then
    (when *plugins* is true) lexers from entrypoint plugins."""
    for key in sorted(LEXERS):
        module_name, lexer_name = LEXERS[key][:2]
        if lexer_name not in _lexer_cache:
            _load_lexers(module_name)
        yield _lexer_cache[lexer_name]
    if plugins:
        yield from find_plugin_lexers()
def guess_lexer_for_filename(_fn, _text, **options):
    """
    As :func:`guess_lexer()`, but only lexers which have a pattern in `filenames`
    or `alias_filenames` that matches `filename` are taken into consideration.

    :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
    handle the content.
    """
    fn = basename(_fn)
    primary = {}
    matching_lexers = set()
    for lexer in _iter_lexerclasses():
        for filename in lexer.filenames:
            if _fn_matches(fn, filename):
                matching_lexers.add(lexer)
                primary[lexer] = True
        for filename in lexer.alias_filenames:
            if _fn_matches(fn, filename):
                matching_lexers.add(lexer)
                # a lexer matching both lists ends up with primary=False here,
                # because this assignment overwrites the True set above
                primary[lexer] = False
    if not matching_lexers:
        raise ClassNotFound(f'no lexer for filename {fn!r} found')
    if len(matching_lexers) == 1:
        # single candidate: no need to analyse the text
        return matching_lexers.pop()(**options)
    result = []
    for lexer in matching_lexers:
        rv = lexer.analyse_text(_text)
        if rv == 1.0:
            # a perfect score short-circuits the ranking below
            return lexer(**options)
        result.append((rv, lexer))

    def type_sort(t):
        # sort by:
        # - analyse score
        # - is primary filename pattern?
        # - priority
        # - last resort: class name
        return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
    result.sort(key=type_sort)

    # best-rated candidate sorts last
    return result[-1][1](**options)
def guess_lexer(_text, **options):
    """
    Return a `Lexer` subclass instance that's guessed from the text in
    `text`. For that, the :meth:`.analyse_text()` method of every known lexer
    class is called with the text as argument, and the lexer which returned the
    highest value will be instantiated and returned.

    :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
    handle the content.
    """
    if not isinstance(_text, str):
        inencoding = options.get('inencoding', options.get('encoding'))
        if inencoding:
            _text = _text.decode(inencoding or 'utf8')
        else:
            _text, _ = guess_decode(_text)

    # a vim modeline, if present, takes precedence over text analysis
    ft = get_filetype_from_buffer(_text)
    if ft is not None:
        try:
            return get_lexer_by_name(ft, **options)
        except ClassNotFound:
            pass

    best_score, best_cls = 0.0, None
    for lexer in _iter_lexerclasses():
        score = lexer.analyse_text(_text)
        if score == 1.0:
            # perfect confidence: stop scanning
            return lexer(**options)
        if score > best_score:
            best_score, best_cls = score, lexer
    if not best_score or best_cls is None:
        raise ClassNotFound('no lexer matching the text found')
    return best_cls(**options)
class _automodule(types.ModuleType):
    """Module subclass that imports lexer classes lazily on attribute access."""

    def __getattr__(self, name):
        entry = LEXERS.get(name)
        if entry:
            module_name, lexer_name = entry[0], entry[1]
            _load_lexers(module_name)
            lexer_cls = _lexer_cache[lexer_name]
            # cache the class on the module so future lookups bypass __getattr__
            setattr(self, name, lexer_cls)
            return lexer_cls
        replacement = COMPAT.get(name)
        if replacement is not None:
            # deprecated alias: resolve to the current lexer name
            return getattr(self, replacement)
        raise AttributeError(name)
# Replace this module object in sys.modules with an _automodule instance so
# that attribute access named like a lexer triggers the lazy import above.
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
# remove names that should not be visible on the public module object
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types

View File

@@ -0,0 +1,103 @@
"""
pygments.lexers._ada_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Ada builtins.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Ada reserved words, in the original source order.
KEYWORD_LIST = (
    'abort', 'abs', 'abstract', 'accept', 'access', 'aliased', 'all',
    'array', 'at', 'begin', 'body', 'case', 'constant', 'declare',
    'delay', 'delta', 'digits', 'do', 'else', 'elsif', 'end', 'entry',
    'exception', 'exit', 'interface', 'for', 'goto', 'if', 'is',
    'limited', 'loop', 'new', 'null', 'of', 'or', 'others', 'out',
    'overriding', 'pragma', 'protected', 'raise', 'range', 'record',
    'renames', 'requeue', 'return', 'reverse', 'select', 'separate',
    'some', 'subtype', 'synchronized', 'task', 'tagged', 'terminate',
    'then', 'type', 'until', 'when', 'while', 'xor',
)
# Predefined Ada type and identifier names highlighted as builtins.
BUILTIN_LIST = (
    'Address', 'Byte', 'Boolean', 'Character', 'Controlled', 'Count',
    'Cursor', 'Duration', 'File_Mode', 'File_Type', 'Float', 'Generator',
    'Integer', 'Long_Float', 'Long_Integer', 'Long_Long_Float',
    'Long_Long_Integer', 'Natural', 'Positive', 'Reference_Type',
    'Short_Float', 'Short_Integer', 'Short_Short_Float',
    'Short_Short_Integer', 'String', 'Wide_Character', 'Wide_String',
)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,231 @@
"""
pygments.lexers._cl_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ANSI Common Lisp builtins.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Standard function names of ANSI Common Lisp.
BUILTIN_FUNCTIONS = { # 638 functions
    '<', '<=', '=', '>', '>=', '-', '/', '/=', '*', '+', '1-', '1+',
    'abort', 'abs', 'acons', 'acos', 'acosh', 'add-method', 'adjoin',
    'adjustable-array-p', 'adjust-array', 'allocate-instance',
    'alpha-char-p', 'alphanumericp', 'append', 'apply', 'apropos',
    'apropos-list', 'aref', 'arithmetic-error-operands',
    'arithmetic-error-operation', 'array-dimension', 'array-dimensions',
    'array-displacement', 'array-element-type', 'array-has-fill-pointer-p',
    'array-in-bounds-p', 'arrayp', 'array-rank', 'array-row-major-index',
    'array-total-size', 'ash', 'asin', 'asinh', 'assoc', 'assoc-if',
    'assoc-if-not', 'atan', 'atanh', 'atom', 'bit', 'bit-and', 'bit-andc1',
    'bit-andc2', 'bit-eqv', 'bit-ior', 'bit-nand', 'bit-nor', 'bit-not',
    'bit-orc1', 'bit-orc2', 'bit-vector-p', 'bit-xor', 'boole',
    'both-case-p', 'boundp', 'break', 'broadcast-stream-streams',
    'butlast', 'byte', 'byte-position', 'byte-size', 'caaaar', 'caaadr',
    'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr',
    'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', 'call-next-method', 'car',
    'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
    'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr',
    'ceiling', 'cell-error-name', 'cerror', 'change-class', 'char', 'char<',
    'char<=', 'char=', 'char>', 'char>=', 'char/=', 'character',
    'characterp', 'char-code', 'char-downcase', 'char-equal',
    'char-greaterp', 'char-int', 'char-lessp', 'char-name',
    'char-not-equal', 'char-not-greaterp', 'char-not-lessp', 'char-upcase',
    'cis', 'class-name', 'class-of', 'clear-input', 'clear-output',
    'close', 'clrhash', 'code-char', 'coerce', 'compile',
    'compiled-function-p', 'compile-file', 'compile-file-pathname',
    'compiler-macro-function', 'complement', 'complex', 'complexp',
    'compute-applicable-methods', 'compute-restarts', 'concatenate',
    'concatenated-stream-streams', 'conjugate', 'cons', 'consp',
    'constantly', 'constantp', 'continue', 'copy-alist', 'copy-list',
    'copy-pprint-dispatch', 'copy-readtable', 'copy-seq', 'copy-structure',
    'copy-symbol', 'copy-tree', 'cos', 'cosh', 'count', 'count-if',
    'count-if-not', 'decode-float', 'decode-universal-time', 'delete',
    'delete-duplicates', 'delete-file', 'delete-if', 'delete-if-not',
    'delete-package', 'denominator', 'deposit-field', 'describe',
    'describe-object', 'digit-char', 'digit-char-p', 'directory',
    'directory-namestring', 'disassemble', 'documentation', 'dpb',
    'dribble', 'echo-stream-input-stream', 'echo-stream-output-stream',
    'ed', 'eighth', 'elt', 'encode-universal-time', 'endp',
    'enough-namestring', 'ensure-directories-exist',
    'ensure-generic-function', 'eq', 'eql', 'equal', 'equalp', 'error',
    'eval', 'evenp', 'every', 'exp', 'export', 'expt', 'fboundp',
    'fceiling', 'fdefinition', 'ffloor', 'fifth', 'file-author',
    'file-error-pathname', 'file-length', 'file-namestring',
    'file-position', 'file-string-length', 'file-write-date',
    'fill', 'fill-pointer', 'find', 'find-all-symbols', 'find-class',
    'find-if', 'find-if-not', 'find-method', 'find-package', 'find-restart',
    'find-symbol', 'finish-output', 'first', 'float', 'float-digits',
    'floatp', 'float-precision', 'float-radix', 'float-sign', 'floor',
    'fmakunbound', 'force-output', 'format', 'fourth', 'fresh-line',
    'fround', 'ftruncate', 'funcall', 'function-keywords',
    'function-lambda-expression', 'functionp', 'gcd', 'gensym', 'gentemp',
    'get', 'get-decoded-time', 'get-dispatch-macro-character', 'getf',
    'gethash', 'get-internal-real-time', 'get-internal-run-time',
    'get-macro-character', 'get-output-stream-string', 'get-properties',
    'get-setf-expansion', 'get-universal-time', 'graphic-char-p',
    'hash-table-count', 'hash-table-p', 'hash-table-rehash-size',
    'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test',
    'host-namestring', 'identity', 'imagpart', 'import',
    'initialize-instance', 'input-stream-p', 'inspect',
    'integer-decode-float', 'integer-length', 'integerp',
    'interactive-stream-p', 'intern', 'intersection',
    'invalid-method-error', 'invoke-debugger', 'invoke-restart',
    'invoke-restart-interactively', 'isqrt', 'keywordp', 'last', 'lcm',
    'ldb', 'ldb-test', 'ldiff', 'length', 'lisp-implementation-type',
    'lisp-implementation-version', 'list', 'list*', 'list-all-packages',
    'listen', 'list-length', 'listp', 'load',
    'load-logical-pathname-translations', 'log', 'logand', 'logandc1',
    'logandc2', 'logbitp', 'logcount', 'logeqv', 'logical-pathname',
    'logical-pathname-translations', 'logior', 'lognand', 'lognor',
    'lognot', 'logorc1', 'logorc2', 'logtest', 'logxor', 'long-site-name',
    'lower-case-p', 'machine-instance', 'machine-type', 'machine-version',
    'macroexpand', 'macroexpand-1', 'macro-function', 'make-array',
    'make-broadcast-stream', 'make-concatenated-stream', 'make-condition',
    'make-dispatch-macro-character', 'make-echo-stream', 'make-hash-table',
    'make-instance', 'make-instances-obsolete', 'make-list',
    'make-load-form', 'make-load-form-saving-slots', 'make-package',
    'make-pathname', 'make-random-state', 'make-sequence', 'make-string',
    'make-string-input-stream', 'make-string-output-stream', 'make-symbol',
    'make-synonym-stream', 'make-two-way-stream', 'makunbound', 'map',
    'mapc', 'mapcan', 'mapcar', 'mapcon', 'maphash', 'map-into', 'mapl',
    'maplist', 'mask-field', 'max', 'member', 'member-if', 'member-if-not',
    'merge', 'merge-pathnames', 'method-combination-error',
    'method-qualifiers', 'min', 'minusp', 'mismatch', 'mod',
    'muffle-warning', 'name-char', 'namestring', 'nbutlast', 'nconc',
    'next-method-p', 'nintersection', 'ninth', 'no-applicable-method',
    'no-next-method', 'not', 'notany', 'notevery', 'nreconc', 'nreverse',
    'nset-difference', 'nset-exclusive-or', 'nstring-capitalize',
    'nstring-downcase', 'nstring-upcase', 'nsublis', 'nsubst', 'nsubst-if',
    'nsubst-if-not', 'nsubstitute', 'nsubstitute-if', 'nsubstitute-if-not',
    'nth', 'nthcdr', 'null', 'numberp', 'numerator', 'nunion', 'oddp',
    'open', 'open-stream-p', 'output-stream-p', 'package-error-package',
    'package-name', 'package-nicknames', 'packagep',
    'package-shadowing-symbols', 'package-used-by-list', 'package-use-list',
    'pairlis', 'parse-integer', 'parse-namestring', 'pathname',
    'pathname-device', 'pathname-directory', 'pathname-host',
    'pathname-match-p', 'pathname-name', 'pathnamep', 'pathname-type',
    'pathname-version', 'peek-char', 'phase', 'plusp', 'position',
    'position-if', 'position-if-not', 'pprint', 'pprint-dispatch',
    'pprint-fill', 'pprint-indent', 'pprint-linear', 'pprint-newline',
    'pprint-tab', 'pprint-tabular', 'prin1', 'prin1-to-string', 'princ',
    'princ-to-string', 'print', 'print-object', 'probe-file', 'proclaim',
    'provide', 'random', 'random-state-p', 'rassoc', 'rassoc-if',
    'rassoc-if-not', 'rational', 'rationalize', 'rationalp', 'read',
    'read-byte', 'read-char', 'read-char-no-hang', 'read-delimited-list',
    'read-from-string', 'read-line', 'read-preserving-whitespace',
    'read-sequence', 'readtable-case', 'readtablep', 'realp', 'realpart',
    'reduce', 'reinitialize-instance', 'rem', 'remhash', 'remove',
    'remove-duplicates', 'remove-if', 'remove-if-not', 'remove-method',
    'remprop', 'rename-file', 'rename-package', 'replace', 'require',
    'rest', 'restart-name', 'revappend', 'reverse', 'room', 'round',
    'row-major-aref', 'rplaca', 'rplacd', 'sbit', 'scale-float', 'schar',
    'search', 'second', 'set', 'set-difference',
    'set-dispatch-macro-character', 'set-exclusive-or',
    'set-macro-character', 'set-pprint-dispatch', 'set-syntax-from-char',
    'seventh', 'shadow', 'shadowing-import', 'shared-initialize',
    'short-site-name', 'signal', 'signum', 'simple-bit-vector-p',
    'simple-condition-format-arguments', 'simple-condition-format-control',
    'simple-string-p', 'simple-vector-p', 'sin', 'sinh', 'sixth', 'sleep',
    'slot-boundp', 'slot-exists-p', 'slot-makunbound', 'slot-missing',
    'slot-unbound', 'slot-value', 'software-type', 'software-version',
    'some', 'sort', 'special-operator-p', 'sqrt', 'stable-sort',
    'standard-char-p', 'store-value', 'stream-element-type',
    'stream-error-stream', 'stream-external-format', 'streamp', 'string',
    'string<', 'string<=', 'string=', 'string>', 'string>=', 'string/=',
    'string-capitalize', 'string-downcase', 'string-equal',
    'string-greaterp', 'string-left-trim', 'string-lessp',
    'string-not-equal', 'string-not-greaterp', 'string-not-lessp',
    'stringp', 'string-right-trim', 'string-trim', 'string-upcase',
    'sublis', 'subseq', 'subsetp', 'subst', 'subst-if', 'subst-if-not',
    'substitute', 'substitute-if', 'substitute-if-not', 'subtypep','svref',
    'sxhash', 'symbol-function', 'symbol-name', 'symbolp', 'symbol-package',
    'symbol-plist', 'symbol-value', 'synonym-stream-symbol', 'syntax:',
    'tailp', 'tan', 'tanh', 'tenth', 'terpri', 'third',
    'translate-logical-pathname', 'translate-pathname', 'tree-equal',
    'truename', 'truncate', 'two-way-stream-input-stream',
    'two-way-stream-output-stream', 'type-error-datum',
    'type-error-expected-type', 'type-of', 'typep', 'unbound-slot-instance',
    'unexport', 'unintern', 'union', 'unread-char', 'unuse-package',
    'update-instance-for-different-class',
    'update-instance-for-redefined-class', 'upgraded-array-element-type',
    'upgraded-complex-part-type', 'upper-case-p', 'use-package',
    'user-homedir-pathname', 'use-value', 'values', 'values-list', 'vector',
    'vectorp', 'vector-pop', 'vector-push', 'vector-push-extend', 'warn',
    'wild-pathname-p', 'write', 'write-byte', 'write-char', 'write-line',
    'write-sequence', 'write-string', 'write-to-string', 'yes-or-no-p',
    'y-or-n-p', 'zerop',
}
# Common Lisp special operators (never redefinable as functions).
SPECIAL_FORMS = {
    'block', 'catch', 'declare', 'eval-when', 'flet', 'function', 'go',
    'if', 'labels', 'lambda', 'let', 'let*', 'load-time-value', 'locally',
    'macrolet', 'multiple-value-call', 'multiple-value-prog1', 'progn',
    'progv', 'quote', 'return-from', 'setq', 'symbol-macrolet', 'tagbody',
    'the', 'throw', 'unwind-protect',
}
# Standard macro names of ANSI Common Lisp.
MACROS = {
    'and', 'assert', 'call-method', 'case', 'ccase', 'check-type', 'cond',
    'ctypecase', 'decf', 'declaim', 'defclass', 'defconstant', 'defgeneric',
    'define-compiler-macro', 'define-condition', 'define-method-combination',
    'define-modify-macro', 'define-setf-expander', 'define-symbol-macro',
    'defmacro', 'defmethod', 'defpackage', 'defparameter', 'defsetf',
    'defstruct', 'deftype', 'defun', 'defvar', 'destructuring-bind', 'do',
    'do*', 'do-all-symbols', 'do-external-symbols', 'dolist', 'do-symbols',
    'dotimes', 'ecase', 'etypecase', 'formatter', 'handler-bind',
    'handler-case', 'ignore-errors', 'incf', 'in-package', 'lambda', 'loop',
    'loop-finish', 'make-method', 'multiple-value-bind', 'multiple-value-list',
    'multiple-value-setq', 'nth-value', 'or', 'pop',
    'pprint-exit-if-list-exhausted', 'pprint-logical-block', 'pprint-pop',
    'print-unreadable-object', 'prog', 'prog*', 'prog1', 'prog2', 'psetf',
    'psetq', 'push', 'pushnew', 'remf', 'restart-bind', 'restart-case',
    'return', 'rotatef', 'setf', 'shiftf', 'step', 'time', 'trace', 'typecase',
    'unless', 'untrace', 'when', 'with-accessors', 'with-compilation-unit',
    'with-condition-restarts', 'with-hash-table-iterator',
    'with-input-from-string', 'with-open-file', 'with-open-stream',
    'with-output-to-string', 'with-package-iterator', 'with-simple-restart',
    'with-slots', 'with-standard-io-syntax',
}
# Lambda-list keywords (the `&`-prefixed markers).
LAMBDA_LIST_KEYWORDS = {
    '&allow-other-keys',
    '&aux',
    '&body',
    '&environment',
    '&key',
    '&optional',
    '&rest',
    '&whole',
}
# Identifiers valid inside a `declare` expression, alphabetized.
DECLARATIONS = {
    'dynamic-extent', 'ftype', 'ignorable', 'ignore', 'inline',
    'notinline', 'optimize', 'special', 'type',
}
# Standard type specifiers (plus condition types) of ANSI Common Lisp.
BUILTIN_TYPES = {
    'atom', 'boolean', 'base-char', 'base-string', 'bignum', 'bit',
    'compiled-function', 'extended-char', 'fixnum', 'keyword', 'nil',
    'signed-byte', 'short-float', 'single-float', 'double-float', 'long-float',
    'simple-array', 'simple-base-string', 'simple-bit-vector', 'simple-string',
    'simple-vector', 'standard-char', 'unsigned-byte',

    # Condition Types
    'arithmetic-error', 'cell-error', 'condition', 'control-error',
    'division-by-zero', 'end-of-file', 'error', 'file-error',
    'floating-point-inexact', 'floating-point-overflow',
    'floating-point-underflow', 'floating-point-invalid-operation',
    'parse-error', 'package-error', 'print-not-readable', 'program-error',
    'reader-error', 'serious-condition', 'simple-condition', 'simple-error',
    'simple-type-error', 'simple-warning', 'stream-error', 'storage-condition',
    'style-warning', 'type-error', 'unbound-variable', 'unbound-slot',
    'undefined-function', 'warning',
}
# Standard class names of ANSI Common Lisp.
BUILTIN_CLASSES = {
    'array', 'broadcast-stream', 'bit-vector', 'built-in-class',
    'character', 'class', 'complex', 'concatenated-stream', 'cons',
    'echo-stream', 'file-stream', 'float', 'function', 'generic-function',
    'hash-table', 'integer', 'list', 'logical-pathname',
    'method-combination', 'method', 'null', 'number', 'package',
    'pathname', 'ratio', 'rational', 'readtable', 'real', 'random-state',
    'restart', 'sequence', 'standard-class', 'standard-generic-function',
    'standard-method', 'standard-object', 'string-stream', 'stream',
    'string', 'structure-class', 'structure-object', 'symbol',
    'synonym-stream', 't', 'two-way-stream', 'vector',
}

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,558 @@
"""
pygments.lexers._css_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file is autogenerated by scripts/get_css_properties.py
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
_css_properties = (
'-webkit-line-clamp',
'accent-color',
'align-content',
'align-items',
'align-self',
'alignment-baseline',
'all',
'animation',
'animation-delay',
'animation-direction',
'animation-duration',
'animation-fill-mode',
'animation-iteration-count',
'animation-name',
'animation-play-state',
'animation-timing-function',
'appearance',
'aspect-ratio',
'azimuth',
'backface-visibility',
'background',
'background-attachment',
'background-blend-mode',
'background-clip',
'background-color',
'background-image',
'background-origin',
'background-position',
'background-repeat',
'background-size',
'baseline-shift',
'baseline-source',
'block-ellipsis',
'block-size',
'block-step',
'block-step-align',
'block-step-insert',
'block-step-round',
'block-step-size',
'bookmark-label',
'bookmark-level',
'bookmark-state',
'border',
'border-block',
'border-block-color',
'border-block-end',
'border-block-end-color',
'border-block-end-style',
'border-block-end-width',
'border-block-start',
'border-block-start-color',
'border-block-start-style',
'border-block-start-width',
'border-block-style',
'border-block-width',
'border-bottom',
'border-bottom-color',
'border-bottom-left-radius',
'border-bottom-right-radius',
'border-bottom-style',
'border-bottom-width',
'border-boundary',
'border-collapse',
'border-color',
'border-end-end-radius',
'border-end-start-radius',
'border-image',
'border-image-outset',
'border-image-repeat',
'border-image-slice',
'border-image-source',
'border-image-width',
'border-inline',
'border-inline-color',
'border-inline-end',
'border-inline-end-color',
'border-inline-end-style',
'border-inline-end-width',
'border-inline-start',
'border-inline-start-color',
'border-inline-start-style',
'border-inline-start-width',
'border-inline-style',
'border-inline-width',
'border-left',
'border-left-color',
'border-left-style',
'border-left-width',
'border-radius',
'border-right',
'border-right-color',
'border-right-style',
'border-right-width',
'border-spacing',
'border-start-end-radius',
'border-start-start-radius',
'border-style',
'border-top',
'border-top-color',
'border-top-left-radius',
'border-top-right-radius',
'border-top-style',
'border-top-width',
'border-width',
'bottom',
'box-decoration-break',
'box-shadow',
'box-sizing',
'box-snap',
'break-after',
'break-before',
'break-inside',
'caption-side',
'caret',
'caret-color',
'caret-shape',
'chains',
'clear',
'clip',
'clip-path',
'clip-rule',
'color',
'color-adjust',
'color-interpolation-filters',
'color-scheme',
'column-count',
'column-fill',
'column-gap',
'column-rule',
'column-rule-color',
'column-rule-style',
'column-rule-width',
'column-span',
'column-width',
'columns',
'contain',
'contain-intrinsic-block-size',
'contain-intrinsic-height',
'contain-intrinsic-inline-size',
'contain-intrinsic-size',
'contain-intrinsic-width',
'container',
'container-name',
'container-type',
'content',
'content-visibility',
'continue',
'counter-increment',
'counter-reset',
'counter-set',
'cue',
'cue-after',
'cue-before',
'cursor',
'direction',
'display',
'dominant-baseline',
'elevation',
'empty-cells',
'fill',
'fill-break',
'fill-color',
'fill-image',
'fill-opacity',
'fill-origin',
'fill-position',
'fill-repeat',
'fill-rule',
'fill-size',
'filter',
'flex',
'flex-basis',
'flex-direction',
'flex-flow',
'flex-grow',
'flex-shrink',
'flex-wrap',
'float',
'float-defer',
'float-offset',
'float-reference',
'flood-color',
'flood-opacity',
'flow',
'flow-from',
'flow-into',
'font',
'font-family',
'font-feature-settings',
'font-kerning',
'font-language-override',
'font-optical-sizing',
'font-palette',
'font-size',
'font-size-adjust',
'font-stretch',
'font-style',
'font-synthesis',
'font-synthesis-small-caps',
'font-synthesis-style',
'font-synthesis-weight',
'font-variant',
'font-variant-alternates',
'font-variant-caps',
'font-variant-east-asian',
'font-variant-emoji',
'font-variant-ligatures',
'font-variant-numeric',
'font-variant-position',
'font-variation-settings',
'font-weight',
'footnote-display',
'footnote-policy',
'forced-color-adjust',
'gap',
'glyph-orientation-vertical',
'grid',
'grid-area',
'grid-auto-columns',
'grid-auto-flow',
'grid-auto-rows',
'grid-column',
'grid-column-end',
'grid-column-start',
'grid-row',
'grid-row-end',
'grid-row-start',
'grid-template',
'grid-template-areas',
'grid-template-columns',
'grid-template-rows',
'hanging-punctuation',
'height',
'hyphenate-character',
'hyphenate-limit-chars',
'hyphenate-limit-last',
'hyphenate-limit-lines',
'hyphenate-limit-zone',
'hyphens',
'image-orientation',
'image-rendering',
'image-resolution',
'initial-letter',
'initial-letter-align',
'initial-letter-wrap',
'inline-size',
'inline-sizing',
'input-security',
'inset',
'inset-block',
'inset-block-end',
'inset-block-start',
'inset-inline',
'inset-inline-end',
'inset-inline-start',
'isolation',
'justify-content',
'justify-items',
'justify-self',
'leading-trim',
'left',
'letter-spacing',
'lighting-color',
'line-break',
'line-clamp',
'line-grid',
'line-height',
'line-height-step',
'line-padding',
'line-snap',
'list-style',
'list-style-image',
'list-style-position',
'list-style-type',
'margin',
'margin-block',
'margin-block-end',
'margin-block-start',
'margin-bottom',
'margin-break',
'margin-inline',
'margin-inline-end',
'margin-inline-start',
'margin-left',
'margin-right',
'margin-top',
'margin-trim',
'marker',
'marker-end',
'marker-knockout-left',
'marker-knockout-right',
'marker-mid',
'marker-pattern',
'marker-segment',
'marker-side',
'marker-start',
'mask',
'mask-border',
'mask-border-mode',
'mask-border-outset',
'mask-border-repeat',
'mask-border-slice',
'mask-border-source',
'mask-border-width',
'mask-clip',
'mask-composite',
'mask-image',
'mask-mode',
'mask-origin',
'mask-position',
'mask-repeat',
'mask-size',
'mask-type',
'max-block-size',
'max-height',
'max-inline-size',
'max-lines',
'max-width',
'min-block-size',
'min-height',
'min-inline-size',
'min-intrinsic-sizing',
'min-width',
'mix-blend-mode',
'nav-down',
'nav-left',
'nav-right',
'nav-up',
'object-fit',
'object-overflow',
'object-position',
'object-view-box',
'offset',
'offset-anchor',
'offset-distance',
'offset-path',
'offset-position',
'offset-rotate',
'opacity',
'order',
'orphans',
'outline',
'outline-color',
'outline-offset',
'outline-style',
'outline-width',
'overflow',
'overflow-anchor',
'overflow-block',
'overflow-clip-margin',
'overflow-inline',
'overflow-wrap',
'overflow-x',
'overflow-y',
'overscroll-behavior',
'overscroll-behavior-block',
'overscroll-behavior-inline',
'overscroll-behavior-x',
'overscroll-behavior-y',
'padding',
'padding-block',
'padding-block-end',
'padding-block-start',
'padding-bottom',
'padding-inline',
'padding-inline-end',
'padding-inline-start',
'padding-left',
'padding-right',
'padding-top',
'page',
'page-break-after',
'page-break-before',
'page-break-inside',
'pause',
'pause-after',
'pause-before',
'perspective',
'perspective-origin',
'pitch',
'pitch-range',
'place-content',
'place-items',
'place-self',
'play-during',
'pointer-events',
'position',
'print-color-adjust',
'property-name',
'quotes',
'region-fragment',
'resize',
'rest',
'rest-after',
'rest-before',
'richness',
'right',
'rotate',
'row-gap',
'ruby-align',
'ruby-merge',
'ruby-overhang',
'ruby-position',
'running',
'scale',
'scroll-behavior',
'scroll-margin',
'scroll-margin-block',
'scroll-margin-block-end',
'scroll-margin-block-start',
'scroll-margin-bottom',
'scroll-margin-inline',
'scroll-margin-inline-end',
'scroll-margin-inline-start',
'scroll-margin-left',
'scroll-margin-right',
'scroll-margin-top',
'scroll-padding',
'scroll-padding-block',
'scroll-padding-block-end',
'scroll-padding-block-start',
'scroll-padding-bottom',
'scroll-padding-inline',
'scroll-padding-inline-end',
'scroll-padding-inline-start',
'scroll-padding-left',
'scroll-padding-right',
'scroll-padding-top',
'scroll-snap-align',
'scroll-snap-stop',
'scroll-snap-type',
'scrollbar-color',
'scrollbar-gutter',
'scrollbar-width',
'shape-image-threshold',
'shape-inside',
'shape-margin',
'shape-outside',
'spatial-navigation-action',
'spatial-navigation-contain',
'spatial-navigation-function',
'speak',
'speak-as',
'speak-header',
'speak-numeral',
'speak-punctuation',
'speech-rate',
'stress',
'string-set',
'stroke',
'stroke-align',
'stroke-alignment',
'stroke-break',
'stroke-color',
'stroke-dash-corner',
'stroke-dash-justify',
'stroke-dashadjust',
'stroke-dasharray',
'stroke-dashcorner',
'stroke-dashoffset',
'stroke-image',
'stroke-linecap',
'stroke-linejoin',
'stroke-miterlimit',
'stroke-opacity',
'stroke-origin',
'stroke-position',
'stroke-repeat',
'stroke-size',
'stroke-width',
'tab-size',
'table-layout',
'text-align',
'text-align-all',
'text-align-last',
'text-combine-upright',
'text-decoration',
'text-decoration-color',
'text-decoration-line',
'text-decoration-skip',
'text-decoration-skip-box',
'text-decoration-skip-ink',
'text-decoration-skip-inset',
'text-decoration-skip-self',
'text-decoration-skip-spaces',
'text-decoration-style',
'text-decoration-thickness',
'text-edge',
'text-emphasis',
'text-emphasis-color',
'text-emphasis-position',
'text-emphasis-skip',
'text-emphasis-style',
'text-group-align',
'text-indent',
'text-justify',
'text-orientation',
'text-overflow',
'text-shadow',
'text-space-collapse',
'text-space-trim',
'text-spacing',
'text-transform',
'text-underline-offset',
'text-underline-position',
'text-wrap',
'top',
'transform',
'transform-box',
'transform-origin',
'transform-style',
'transition',
'transition-delay',
'transition-duration',
'transition-property',
'transition-timing-function',
'translate',
'unicode-bidi',
'user-select',
'vertical-align',
'visibility',
'voice-balance',
'voice-duration',
'voice-family',
'voice-pitch',
'voice-range',
'voice-rate',
'voice-stress',
'voice-volume',
'volume',
'white-space',
'widows',
'width',
'will-change',
'word-boundary-detection',
'word-boundary-expansion',
'word-break',
'word-spacing',
'word-wrap',
'wrap-after',
'wrap-before',
'wrap-flow',
'wrap-inside',
'wrap-through',
'writing-mode',
'z-index',
)

View File

@@ -0,0 +1,918 @@
"""
pygments.lexers._googlesql_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Autogenerated data files for the GoogleSQL lexer.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Boolean/null literal keywords that GoogleSQL treats as constants rather
# than ordinary keywords (highlighted as Keyword.Constant by the lexer).
constants = [
    'FALSE',
    'NULL',
    'TRUE',
    'UNKNOWN',
]
# Everything below this line is auto-generated from the GoogleSQL source code.
# ----------------------------------------------------------------------------
functionnames = [
'ABS',
'ACOS',
'ACOSH',
'AEAD.DECRYPT_BYTES',
'AEAD.DECRYPT_STRING',
'AEAD.ENCRYPT',
'AEAD.ENVELOPE_DECRYPT_BYTES',
'AEAD.ENVELOPE_DECRYPT_STRING',
'AEAD.ENVELOPE_ENCRYPT',
'ALL_DIFFERENT',
'ANON_AVG',
'ANON_COUNT',
'ANON_COUNT',
'ANON_PERCENTILE_CONT',
'ANON_QUANTILES',
'ANON_STDDEV_POP',
'ANON_SUM',
'ANON_VAR_POP',
'ANY_VALUE',
'APPROX_COSINE_DISTANCE',
'APPROX_COUNT_DISTINCT',
'APPROX_DOT_PRODUCT',
'APPROX_EUCLIDEAN_DISTANCE',
'APPROX_QUANTILES',
'APPROX_TOP_COUNT',
'APPROX_TOP_SUM',
'ARRAY[KEY()]',
'ARRAY[SAFE_KEY()]',
'ARRAY_AGG',
'ARRAY_AVG',
'ARRAY_CONCAT',
'ARRAY_CONCAT_AGG',
'ARRAY_FILTER',
'ARRAY_FIND',
'ARRAY_FIND_ALL',
'ARRAY_FIRST',
'ARRAY_FIRST_N',
'ARRAY_INCLUDES',
'ARRAY_INCLUDES_ALL',
'ARRAY_INCLUDES_ANY',
'ARRAY_IS_DISTINCT',
'ARRAY_LAST',
'ARRAY_LAST_N',
'ARRAY_LENGTH',
'ARRAY_MAX',
'ARRAY_MIN',
'ARRAY_OFFSET',
'ARRAY_OFFSETS',
'ARRAY_REMOVE_FIRST_N',
'ARRAY_REMOVE_LAST_N',
'ARRAY_REVERSE',
'ARRAY_SLICE',
'ARRAY_SUM',
'ARRAY_TO_STRING',
'ARRAY_TRANSFORM',
'ARRAY_ZIP',
'ASCII',
'ASIN',
'ASINH',
'ATAN',
'ATAN2',
'ATANH',
'AVG',
'BIT_AND',
'BIT_COUNT',
'BIT_OR',
'BIT_XOR',
'BOOL',
'BOOL_ARRAY',
'BYTE_LENGTH',
'CASE',
'CAST',
'CBRT',
'CEIL',
'CEILING',
'CHARACTER_LENGTH',
'CHAR_LENGTH',
'CHR',
'COALESCE',
'CODE_POINTS_TO_BYTES',
'CODE_POINTS_TO_STRING',
'COLLATE',
'CONCAT',
'CORR',
'COS',
'COSH',
'COSINE_DISTANCE',
'COT',
'COTH',
'COUNT',
'COUNT(*)',
'COUNTIF',
'COVAR_POP',
'COVAR_SAMP',
'CSC',
'CSCH',
'CUME_DIST',
'CURRENT_DATE',
'CURRENT_DATETIME',
'CURRENT_TIME',
'CURRENT_TIMESTAMP',
'D3A_COUNT.EXTRACT',
'D3A_COUNT.INIT',
'D3A_COUNT.MERGE',
'D3A_COUNT.MERGE_PARTIAL',
'D3A_COUNT.TO_HLL',
'DATE',
'DATETIME',
'DATETIME_ADD',
'DATETIME_BUCKET',
'DATETIME_DIFF',
'DATETIME_SUB',
'DATETIME_TRUNC',
'DATE_ADD',
'DATE_BUCKET',
'DATE_DIFF',
'DATE_FROM_UNIX_DATE',
'DATE_SUB',
'DATE_TRUNC',
'DENSE_RANK',
'DESTINATION_NODE_ID',
'DETERMINISTIC_DECRYPT_BYTES',
'DETERMINISTIC_DECRYPT_STRING',
'DETERMINISTIC_ENCRYPT',
'DIV',
'DOT_PRODUCT',
'EDGES',
'EDIT_DISTANCE',
'ELEMENTWISE_AVG',
'ELEMENTWISE_SUM',
'ELEMENT_DEFINITION_NAME',
'ELEMENT_ID',
'ENDS_WITH',
'ENUM_VALUE_DESCRIPTOR_PROTO',
'ERROR',
'EUCLIDEAN_DISTANCE',
'EXP',
'EXTRACT',
'EXTRACT_FOR_DP_APPROX_COUNT_DISTINCT',
'FARM_FINGERPRINT',
'FILTER_FIELDS',
'FIRST_VALUE',
'FLATTEN',
'FLOAT32',
'FLOAT32_ARRAY',
'FLOAT64',
'FLOAT64_ARRAY',
'FLOOR',
'FORMAT',
'FORMAT_DATE',
'FORMAT_DATETIME',
'FORMAT_TIME',
'FORMAT_TIMESTAMP',
'FROM_BASE32',
'FROM_BASE64',
'FROM_HEX',
'GENERATE_ARRAY',
'GENERATE_DATE_ARRAY',
'GENERATE_RANGE_ARRAY',
'GENERATE_TIMESTAMP_ARRAY',
'GENERATE_UUID',
'GREATEST',
'GROUPING',
'HLL_COUNT.EXTRACT',
'HLL_COUNT.INIT',
'HLL_COUNT.MERGE',
'HLL_COUNT.MERGE_PARTIAL',
'IEEE_DIVIDE',
'IF',
'IFERROR',
'IFNULL',
'IN UNNEST',
'INITCAP',
'INIT_FOR_DP_APPROX_COUNT_DISTINCT',
'INSTR',
'INT64',
'INT64_ARRAY',
'IS DESTINATION OF',
'IS DISTINCT FROM',
'IS NOT DISTINCT FROM',
'IS SOURCE OF',
'ISERROR',
'IS_ACYCLIC',
'IS_INF',
'IS_NAN',
'IS_SIMPLE',
'IS_TRAIL',
'JSON_ARRAY',
'JSON_ARRAY_APPEND',
'JSON_ARRAY_INSERT',
'JSON_CONTAINS',
'JSON_EXTRACT',
'JSON_EXTRACT_ARRAY',
'JSON_EXTRACT_SCALAR',
'JSON_EXTRACT_STRING_ARRAY',
'JSON_KEYS',
'JSON_OBJECT',
'JSON_QUERY',
'JSON_QUERY_ARRAY',
'JSON_REMOVE',
'JSON_SET',
'JSON_STRIP_NULLS',
'JSON_TYPE',
'JSON_VALUE',
'JSON_VALUE_ARRAY',
'JUSTIFY_DAYS',
'JUSTIFY_HOURS',
'JUSTIFY_INTERVAL',
'KEYS.ADD_KEY_FROM_RAW_BYTES',
'KEYS.KEYSET_CHAIN',
'KEYS.KEYSET_FROM_JSON',
'KEYS.KEYSET_LENGTH',
'KEYS.KEYSET_TO_JSON',
'KEYS.NEW_KEYSET',
'KEYS.NEW_WRAPPED_KEYSET',
'KEYS.REWRAP_KEYSET',
'KEYS.ROTATE_KEYSET',
'KEYS.ROTATE_WRAPPED_KEYSET',
'KLL_QUANTILES.EXTRACT_FLOAT64',
'KLL_QUANTILES.EXTRACT_INT64',
'KLL_QUANTILES.EXTRACT_POINT_FLOAT64',
'KLL_QUANTILES.EXTRACT_POINT_INT64',
'KLL_QUANTILES.INIT_FLOAT64',
'KLL_QUANTILES.INIT_INT64',
'KLL_QUANTILES.MERGE_FLOAT64',
'KLL_QUANTILES.MERGE_INT64',
'KLL_QUANTILES.MERGE_PARTIAL',
'KLL_QUANTILES.MERGE_POINT_FLOAT64',
'KLL_QUANTILES.MERGE_POINT_INT64',
'L1_NORM',
'L2_NORM',
'LABELS',
'LAG',
'LAST_DAY',
'LAST_VALUE',
'LAX_BOOL',
'LAX_BOOL_ARRAY',
'LAX_FLOAT32',
'LAX_FLOAT32_ARRAY',
'LAX_FLOAT64',
'LAX_FLOAT64_ARRAY',
'LAX_INT64',
'LAX_INT64_ARRAY',
'LAX_STRING',
'LAX_STRING_ARRAY',
'LEAD',
'LEAST',
'LEFT',
'LENGTH',
'LIKE ALL',
'LIKE ALL UNNEST',
'LIKE ANY',
'LIKE ANY UNNEST',
'LN',
'LOG',
'LOG10',
'LOGICAL_AND',
'LOGICAL_OR',
'LOWER',
'LPAD',
'LTRIM',
'MAKE_INTERVAL',
'MANHATTAN_DISTANCE',
'MAP_CARDINALITY',
'MAP_CONTAINS_KEY',
'MAP_DELETE',
'MAP_EMPTY',
'MAP_ENTRIES_SORTED',
'MAP_ENTRIES_UNSORTED',
'MAP_FILTER',
'MAP_FROM_ARRAY',
'MAP_GET',
'MAP_INSERT',
'MAP_INSERT_OR_REPLACE',
'MAP_KEYS_SORTED',
'MAP_KEYS_UNSORTED',
'MAP_REPLACE',
'MAP_VALUES_SORTED',
'MAP_VALUES_SORTED_BY_KEY',
'MAP_VALUES_UNSORTED',
'MAX',
'MD5',
'MERGE_PARTIAL_FOR_DP_APPROX_COUNT_DISTINCT',
'MIN',
'MOD',
'NET.HOST',
'NET.IPV4_FROM_INT64',
'NET.IPV4_TO_INT64',
'NET.IP_FROM_STRING',
'NET.IP_NET_MASK',
'NET.IP_TO_STRING',
'NET.IP_TRUNC',
'NET.PUBLIC_SUFFIX',
'NET.REG_DOMAIN',
'NET.SAFE_IP_FROM_STRING',
'NEW_UUID',
'NODES',
'NORMALIZE',
'NORMALIZE_AND_CASEFOLD',
'NOT LIKE ALL',
'NOT LIKE ALL UNNEST',
'NOT LIKE ANY',
'NOT LIKE ANY UNNEST',
'NTH_VALUE',
'NTILE',
'NULLIF',
'NULLIFERROR',
'NULLIFZERO',
'OCTET_LENGTH',
'OFFSET',
'ORDINAL',
'PARSE_BIGNUMERIC',
'PARSE_DATE',
'PARSE_DATETIME',
'PARSE_JSON',
'PARSE_NUMERIC',
'PARSE_TIME',
'PARSE_TIMESTAMP',
'PATH',
'PATH_FIRST',
'PATH_LAST',
'PATH_LENGTH',
'PERCENTILE_CONT',
'PERCENTILE_DISC',
'PERCENT_RANK',
'PI',
'PIVOT',
'PI_BIGNUMERIC',
'PI_NUMERIC',
'POW',
'POWER',
'PROPERTY_EXISTS',
'PROPERTY_NAMES',
'PROTO_MAP_CONTAINS_KEY',
'PROTO_MODIFY_MAP',
'RAND',
'RANGE',
'RANGE_BUCKET',
'RANGE_CONTAINS',
'RANGE_END',
'RANGE_INTERSECT',
'RANGE_IS_END_UNBOUNDED',
'RANGE_IS_START_UNBOUNDED',
'RANGE_OVERLAPS',
'RANGE_START',
'RANK',
'REGEXP_CONTAINS',
'REGEXP_EXTRACT',
'REGEXP_EXTRACT_ALL',
'REGEXP_INSTR',
'REGEXP_REPLACE',
'REGEXP_SUBSTR',
'REPEAT',
'REPLACE',
'REVERSE',
'RIGHT',
'ROUND',
'ROW_NUMBER',
'RPAD',
'RTRIM',
'S2_CELLIDFROMPOINT',
'S2_COVERINGCELLIDS',
'SAFE_ADD',
'SAFE_CONVERT_BYTES_TO_STRING',
'SAFE_DIVIDE',
'SAFE_MULTIPLY',
'SAFE_NEGATE',
'SAFE_OFFSET',
'SAFE_ORDINAL',
'SAFE_SUBTRACT',
'SAFE_TO_JSON',
'SAME',
'SEC',
'SECH',
'SESSION_USER',
'SHA1',
'SHA256',
'SHA512',
'SIGN',
'SIN',
'SINH',
'SOUNDEX',
'SOURCE_NODE_ID',
'SPLIT',
'SPLIT_SUBSTR',
'SQRT',
'STARTS_WITH',
'STDDEV',
'STDDEV_POP',
'STDDEV_SAMP',
'STRING',
'STRING_AGG',
'STRING_ARRAY',
'STRPOS',
'ST_ANGLE',
'ST_AREA',
'ST_ASBINARY',
'ST_ASGEOJSON',
'ST_ASKML',
'ST_ASTEXT',
'ST_AZIMUTH',
'ST_BOUNDARY',
'ST_BOUNDINGBOX',
'ST_BUFFER',
'ST_BUFFERWITHTOLERANCE',
'ST_CENTROID',
'ST_CENTROID_AGG',
'ST_CLOSESTPOINT',
'ST_CLUSTERDBSCAN',
'ST_CONTAINS',
'ST_CONVEXHULL',
'ST_COVEREDBY',
'ST_COVERS',
'ST_DIFFERENCE',
'ST_DIMENSION',
'ST_DISJOINT',
'ST_DISTANCE',
'ST_DUMP',
'ST_DUMPPOINTS',
'ST_DWITHIN',
'ST_ENDPOINT',
'ST_EQUALS',
'ST_EXTENT',
'ST_EXTERIORRING',
'ST_GEOGFROM',
'ST_GEOGFROMGEOJSON',
'ST_GEOGFROMKML',
'ST_GEOGFROMTEXT',
'ST_GEOGFROMWKB',
'ST_GEOGPOINT',
'ST_GEOGPOINTFROMGEOHASH',
'ST_GEOHASH',
'ST_GEOMETRYTYPE',
'ST_HAUSDORFFDISTANCE',
'ST_HAUSDORFFDWITHIN',
'ST_INTERIORRINGS',
'ST_INTERSECTION',
'ST_INTERSECTS',
'ST_INTERSECTSBOX',
'ST_ISCLOSED',
'ST_ISCOLLECTION',
'ST_ISEMPTY',
'ST_ISRING',
'ST_LENGTH',
'ST_LINEINTERPOLATEPOINT',
'ST_LINELOCATEPOINT',
'ST_LINESUBSTRING',
'ST_MAKELINE',
'ST_MAKEPOLYGON',
'ST_MAKEPOLYGONORIENTED',
'ST_MAXDISTANCE',
'ST_NEAREST_NEIGHBORS',
'ST_NPOINTS',
'ST_NUMGEOMETRIES',
'ST_NUMPOINTS',
'ST_PERIMETER',
'ST_POINTN',
'ST_SIMPLIFY',
'ST_SNAPTOGRID',
'ST_STARTPOINT',
'ST_TOUCHES',
'ST_UNARYUNION',
'ST_UNION',
'ST_UNION_AGG',
'ST_WITHIN',
'ST_X',
'ST_Y',
'SUBSTR',
'SUBSTRING',
'SUM',
'TAN',
'TANH',
'TIME',
'TIMESTAMP',
'TIMESTAMP_ADD',
'TIMESTAMP_BUCKET',
'TIMESTAMP_DIFF',
'TIMESTAMP_FROM_UNIX_MICROS',
'TIMESTAMP_FROM_UNIX_MILLIS',
'TIMESTAMP_FROM_UNIX_SECONDS',
'TIMESTAMP_MICROS',
'TIMESTAMP_MILLIS',
'TIMESTAMP_SECONDS',
'TIMESTAMP_SUB',
'TIMESTAMP_TRUNC',
'TIME_ADD',
'TIME_DIFF',
'TIME_SUB',
'TIME_TRUNC',
'TO_BASE32',
'TO_BASE64',
'TO_CODE_POINTS',
'TO_HEX',
'TO_JSON',
'TO_JSON_STRING',
'TRANSLATE',
'TRIM',
'TRUNC',
'TYPEOF',
'UNICODE',
'UNIX_DATE',
'UNIX_MICROS',
'UNIX_MILLIS',
'UNIX_SECONDS',
'UNNEST',
'UNPIVOT',
'UPPER',
'VARIANCE',
'VAR_POP',
'VAR_SAMP',
'ZEROIFNULL',
]
keywords = [
'ABORT',
'ACCESS',
'ACTION',
'ACYCLIC',
'ADD',
'AFTER',
'AGGREGATE',
'ALL',
'ALTER',
'ALWAYS',
'ANALYZE',
'AND',
'ANY',
'APPROX',
'ARE',
'AS',
'ASC',
'ASCENDING',
'ASSERT',
'ASSERT_ROWS_MODIFIED',
'AT',
'BATCH',
'BEGIN',
'BETWEEN',
'BIGDECIMAL',
'BREAK',
'BY',
'CALL',
'CASCADE',
'CASE',
'CAST',
'CHECK',
'CLAMPED',
'CLONE',
'CLUSTER',
'COLLATE',
'COLUMN',
'COLUMNS',
'COMMIT',
'CONFLICT',
'CONNECTION',
'CONSTANT',
'CONSTRAINT',
'CONTAINS',
'CONTINUE',
'COPY',
'CORRESPONDING',
'CREATE',
'CROSS',
'CUBE',
'CURRENT',
'CYCLE',
'DATA',
'DATABASE',
'DAY',
'DAYOFWEEK',
'DAYOFYEAR',
'DECIMAL',
'DECLARE',
'DEFAULT',
'DEFINE',
'DEFINER',
'DELETE',
'DELETION',
'DEPTH',
'DESC',
'DESCENDING',
'DESCRIBE',
'DESCRIPTOR',
'DESTINATION',
'DETERMINISTIC',
'DISTINCT',
'DO',
'DROP',
'EDGE',
'ELSE',
'ELSEIF',
'END',
'ENFORCED',
'ERROR',
'ESCAPE',
'EXCEPT',
'EXCEPTION',
'EXCLUDE',
'EXECUTE',
'EXISTS',
'EXPLAIN',
'EXPORT',
'EXTEND',
'EXTERNAL',
'EXTRACT',
'FALSE',
'FETCH',
'FIELD',
'FILES',
'FILL',
'FILTER',
'FIRST',
'FOLLOWING',
'FOR',
'FOREIGN',
'FORK',
'FORMAT',
'FRIDAY',
'FROM',
'FULL',
'FUNCTION',
'GENERATED',
'GRANT',
'GRAPH',
'GRAPH_TABLE',
'GROUP',
'GROUPING',
'GROUPS',
'GROUP_ROWS',
'HAS',
'HASH',
'HAVING',
'HIDDEN',
'HOUR',
'IDENTITY',
'IF',
'IGNORE',
'IMMEDIATE',
'IMMUTABLE',
'IMPORT',
'IN',
'INCLUDE',
'INCREMENT',
'INDEX',
'INNER',
'INOUT',
'INPUT',
'INSERT',
'INTERLEAVE',
'INTERSECT',
'INTO',
'INVOKER',
'IS',
'ISOLATION',
'ISOWEEK ',
'ISOYEAR',
'ITERATE',
'JOIN',
'KEY',
'LABEL',
'LABELED',
'LANGUAGE',
'LAST',
'LATERAL',
'LEAVE',
'LEFT',
'LET',
'LEVEL',
'LIKE',
'LIMIT',
'LOAD',
'LOG',
'LOOKUP',
'LOOP',
'MACRO',
'MATCH',
'MATCHED',
'MATCH_RECOGNIZE',
'MATERIALIZED',
'MAX',
'MAXVALUE',
'MEASURES',
'MERGE',
'MESSAGE',
'METADATA',
'MICROSECOND',
'MILLISECOND',
'MIN',
'MINUTE',
'MINVALUE',
'MODEL',
'MODULE',
'MONDAY',
'MONTH',
'NAME',
'NANOSECOND',
'NATURAL',
'NEW',
'NEXT',
'NO',
'NODE',
'NOT',
'NOTHING',
'NULL',
'NULLS',
'NULL_FILTERED',
'OF',
'OFFSET',
'ON',
'ONEOF_CASE',
'ONLY',
'OPTIONAL',
'OPTIONS',
'OR',
'ORDER',
'OUT',
'OUTER',
'OUTPUT',
'OVER',
'OVERWRITE',
'PARENT',
'PARTITION',
'PARTITIONS',
'PAST',
'PATH',
'PATHS',
'PATTERN',
'PERCENT',
'PIVOT',
'POLICIES',
'POLICY',
'PRECEDING',
'PRIMARY',
'PRIVATE',
'PRIVILEGE',
'PRIVILEGES',
'PROCEDURE',
'PROJECT',
'PROPERTIES',
'PROPERTY',
'PUBLIC',
'QUALIFY',
'QUARTER',
'RAISE',
'RAW',
'READ',
'RECURSIVE',
'REFERENCES',
'REMOTE',
'REMOVE',
'RENAME',
'REPEAT',
'REPEATABLE',
'REPLACE',
'REPLACE_FIELDS',
'REPLICA',
'REPORT',
'RESPECT',
'RESTRICT',
'RESTRICTION',
'RETURN',
'RETURNS',
'REVOKE',
'RIGHT',
'ROLLBACK',
'ROLLUP',
'ROW',
'ROWS',
'RUN',
'SAFE_CAST',
'SATURDAY',
'SCHEMA',
'SEARCH',
'SECOND ',
'SECURITY',
'SELECT',
'SEQUENCE',
'SET',
'SETS',
'SHORTEST',
'SHOW',
'SIMPLE',
'SKIP',
'SNAPSHOT',
'SOME',
'SOURCE',
'SQL',
'STABLE',
'START',
'STATIC_DESCRIBE',
'STORED',
'STORING',
'STRICT',
'SUNDAY',
'SYSTEM',
'SYSTEM_TIME',
'TABLE',
'TABLES',
'TABLESAMPLE',
'TARGET',
'TEMP',
'TEMPORARY',
'THEN',
'THURSDAY',
'TO',
'TRAIL',
'TRANSACTION',
'TRANSFORM',
'TREAT',
'TRUE',
'TRUNCATE',
'TUESDAY',
'TYPE',
'UNBOUNDED',
'UNDROP',
'UNION',
'UNIQUE',
'UNKNOWN',
'UNNEST',
'UNPIVOT',
'UNTIL',
'UPDATE',
'USING',
'VALUE',
'VALUES',
'VECTOR',
'VIEW',
'VIEWS',
'VOLATILE',
'WALK',
'WEDNESDAY',
'WEEK',
'WEIGHT',
'WHEN',
'WHERE',
'WHILE',
'WINDOW',
'WITH',
'WITHIN',
'WRITE',
'YEAR',
'ZONE',
]
# GoogleSQL operator tokens: comparison, arithmetic, bit-shift, bitwise,
# and the '||' string/array concatenation operator.
operators = [
    '!=',
    '&',
    '*',
    '+',
    '-',
    '/',
    '<',
    '<<',
    '<=',
    '=',
    '>',
    '>=',
    '>>',
    '^',
    '|',
    '||',
    '~',
]
# Names of GoogleSQL scalar and composite data types (highlighted as
# Keyword.Type by the lexer).
types = [
    'ARRAY',
    'BIGNUMERIC',
    'BOOL',
    'BYTES',
    'DATE',
    'DATETIME',
    'DOUBLE',
    'ENUM',
    'EXTENDED',
    'FLOAT',
    'GEOGRAPHY',
    'GRAPH_ELEMENT',
    'GRAPH_PATH',
    'INT32',
    'INT64',
    'INTERVAL',
    'JSON',
    'MAP',
    'MEASURE',
    'NUMERIC',
    'PROTO',
    'RANGE',
    'STRING',
    'STRUCT',
    'TIME',
    'TIMESTAMP',
    'TIMESTAMP_PICOS',
    'TOKENLIST',
    'UINT32',
    'UINT64',
    'UUID',
]

View File

@@ -0,0 +1,411 @@
"""
pygments.lexers._julia_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Julia builtins.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# operators
# see https://github.com/JuliaLang/julia/blob/master/src/julia-parser.scm
# Julia v1.6.0-rc1
OPERATORS_LIST = [
# other
'->',
# prec-assignment
':=', '$=',
# prec-conditional, prec-lazy-or, prec-lazy-and
'?', '||', '&&',
# prec-colon
':',
# prec-plus
'$',
# prec-decl
'::',
]
DOTTED_OPERATORS_LIST = [
# prec-assignment
r'=', r'+=', r'-=', r'*=', r'/=', r'//=', r'\=', r'^=', r'÷=', r'%=', r'<<=',
r'>>=', r'>>>=', r'|=', r'&=', r'⊻=', r'', r'', r"'", r'~',
# prec-pair
'=>',
# prec-arrow
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'⬿', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'-->', r'<--', r'<-->',
# prec-comparison
r'>', r'<', r'>=', r'', r'<=', r'', r'==', r'===', r'', r'!=', r'', r'!==',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'⩿', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'⪿', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'<:', r'>:',
# prec-pipe
'<|', '|>',
# prec-colon
r'', r'', r'', r'', r'', r'',
# prec-plus
r'+', r'-', r'¦', r'|', r'', r'', r'', r'', r'++', r'', r'', r'', r'±', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
# prec-times
r'*', r'/', r'', r'÷', r'%', r'&', r'', r'', r'×', '\\', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'⦿', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'',
# prec-rational, prec-bitshift
'//', '>>', '<<', '>>>',
# prec-power
r'^', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'', r'',
r'', r'', r'',
# unary-ops, excluding unary-and-binary-ops
'!', r'¬', r'', r'', r''
]
# Generated with the following in Julia v1.6.0-rc1
'''
#!/usr/bin/env julia
import REPL.REPLCompletions
res = String["in", "isa", "where"]
for kw in collect(x.keyword for x in REPLCompletions.complete_keyword(""))
if !(contains(kw, " ") || kw == "struct")
push!(res, kw)
end
end
sort!(unique!(setdiff!(res, ["true", "false"])))
foreach(x -> println("\'", x, "\',"), res)
'''
# Julia reserved words, as produced by the REPL keyword-completion dump in
# the generating script above (compound keywords and 'struct' excluded;
# 'true'/'false' are handled as literals; 'in'/'isa'/'where' added by hand).
KEYWORD_LIST = (
    'baremodule',
    'begin',
    'break',
    'catch',
    'ccall',
    'const',
    'continue',
    'do',
    'else',
    'elseif',
    'end',
    'export',
    'finally',
    'for',
    'function',
    'global',
    'if',
    'import',
    'in',
    'isa',
    'let',
    'local',
    'macro',
    'module',
    'quote',
    'return',
    'try',
    'using',
    'where',
    'while',
)
# Generated with the following in Julia v1.6.0-rc1
'''
#!/usr/bin/env julia
import REPL.REPLCompletions
res = String[]
for compl in filter!(x -> isa(x, REPLCompletions.ModuleCompletion) && (x.parent === Base || x.parent === Core),
REPLCompletions.completions("", 0)[1])
try
v = eval(Symbol(compl.mod))
if (v isa Type || v isa TypeVar) && (compl.mod != "=>")
push!(res, compl.mod)
end
catch e
end
end
sort!(unique!(res))
foreach(x -> println("\'", x, "\',"), res)
'''
BUILTIN_LIST = (
'AbstractArray',
'AbstractChannel',
'AbstractChar',
'AbstractDict',
'AbstractDisplay',
'AbstractFloat',
'AbstractIrrational',
'AbstractMatch',
'AbstractMatrix',
'AbstractPattern',
'AbstractRange',
'AbstractSet',
'AbstractString',
'AbstractUnitRange',
'AbstractVecOrMat',
'AbstractVector',
'Any',
'ArgumentError',
'Array',
'AssertionError',
'BigFloat',
'BigInt',
'BitArray',
'BitMatrix',
'BitSet',
'BitVector',
'Bool',
'BoundsError',
'CapturedException',
'CartesianIndex',
'CartesianIndices',
'Cchar',
'Cdouble',
'Cfloat',
'Channel',
'Char',
'Cint',
'Cintmax_t',
'Clong',
'Clonglong',
'Cmd',
'Colon',
'Complex',
'ComplexF16',
'ComplexF32',
'ComplexF64',
'ComposedFunction',
'CompositeException',
'Condition',
'Cptrdiff_t',
'Cshort',
'Csize_t',
'Cssize_t',
'Cstring',
'Cuchar',
'Cuint',
'Cuintmax_t',
'Culong',
'Culonglong',
'Cushort',
'Cvoid',
'Cwchar_t',
'Cwstring',
'DataType',
'DenseArray',
'DenseMatrix',
'DenseVecOrMat',
'DenseVector',
'Dict',
'DimensionMismatch',
'Dims',
'DivideError',
'DomainError',
'EOFError',
'Enum',
'ErrorException',
'Exception',
'ExponentialBackOff',
'Expr',
'Float16',
'Float32',
'Float64',
'Function',
'GlobalRef',
'HTML',
'IO',
'IOBuffer',
'IOContext',
'IOStream',
'IdDict',
'IndexCartesian',
'IndexLinear',
'IndexStyle',
'InexactError',
'InitError',
'Int',
'Int128',
'Int16',
'Int32',
'Int64',
'Int8',
'Integer',
'InterruptException',
'InvalidStateException',
'Irrational',
'KeyError',
'LinRange',
'LineNumberNode',
'LinearIndices',
'LoadError',
'MIME',
'Matrix',
'Method',
'MethodError',
'Missing',
'MissingException',
'Module',
'NTuple',
'NamedTuple',
'Nothing',
'Number',
'OrdinalRange',
'OutOfMemoryError',
'OverflowError',
'Pair',
'PartialQuickSort',
'PermutedDimsArray',
'Pipe',
'ProcessFailedException',
'Ptr',
'QuoteNode',
'Rational',
'RawFD',
'ReadOnlyMemoryError',
'Real',
'ReentrantLock',
'Ref',
'Regex',
'RegexMatch',
'RoundingMode',
'SegmentationFault',
'Set',
'Signed',
'Some',
'StackOverflowError',
'StepRange',
'StepRangeLen',
'StridedArray',
'StridedMatrix',
'StridedVecOrMat',
'StridedVector',
'String',
'StringIndexError',
'SubArray',
'SubString',
'SubstitutionString',
'Symbol',
'SystemError',
'Task',
'TaskFailedException',
'Text',
'TextDisplay',
'Timer',
'Tuple',
'Type',
'TypeError',
'TypeVar',
'UInt',
'UInt128',
'UInt16',
'UInt32',
'UInt64',
'UInt8',
'UndefInitializer',
'UndefKeywordError',
'UndefRefError',
'UndefVarError',
'Union',
'UnionAll',
'UnitRange',
'Unsigned',
'Val',
'Vararg',
'VecElement',
'VecOrMat',
'Vector',
'VersionNumber',
'WeakKeyDict',
'WeakRef',
)
# Generated with the following in Julia v1.6.0-rc1
'''
#!/usr/bin/env julia
import REPL.REPLCompletions
res = String["true", "false"]
for compl in filter!(x -> isa(x, REPLCompletions.ModuleCompletion) && (x.parent === Base || x.parent === Core),
REPLCompletions.completions("", 0)[1])
try
v = eval(Symbol(compl.mod))
if !(v isa Function || v isa Type || v isa TypeVar || v isa Module || v isa Colon)
push!(res, compl.mod)
end
catch e
end
end
sort!(unique!(res))
foreach(x -> println("\'", x, "\',"), res)
'''
LITERAL_LIST = (
'ARGS',
'C_NULL',
'DEPOT_PATH',
'ENDIAN_BOM',
'ENV',
'Inf',
'Inf16',
'Inf32',
'Inf64',
'InsertionSort',
'LOAD_PATH',
'MergeSort',
'NaN',
'NaN16',
'NaN32',
'NaN64',
'PROGRAM_FILE',
'QuickSort',
'RoundDown',
'RoundFromZero',
'RoundNearest',
'RoundNearestTiesAway',
'RoundNearestTiesUp',
'RoundToZero',
'RoundUp',
'VERSION',
'devnull',
'false',
'im',
'missing',
'nothing',
'pi',
'stderr',
'stdin',
'stdout',
'true',
'undef',
'π',
'',
)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,285 @@
"""
pygments.lexers._lua_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names and modules of lua functions
It is able to re-generate itself, but for adding new functions you
probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
Run with `python -I` to regenerate.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
MODULES = {'basic': ('_G',
'_VERSION',
'assert',
'collectgarbage',
'dofile',
'error',
'getmetatable',
'ipairs',
'load',
'loadfile',
'next',
'pairs',
'pcall',
'print',
'rawequal',
'rawget',
'rawlen',
'rawset',
'select',
'setmetatable',
'tonumber',
'tostring',
'type',
'warn',
'xpcall'),
'bit32': ('bit32.arshift',
'bit32.band',
'bit32.bnot',
'bit32.bor',
'bit32.btest',
'bit32.bxor',
'bit32.extract',
'bit32.lrotate',
'bit32.lshift',
'bit32.replace',
'bit32.rrotate',
'bit32.rshift'),
'coroutine': ('coroutine.close',
'coroutine.create',
'coroutine.isyieldable',
'coroutine.resume',
'coroutine.running',
'coroutine.status',
'coroutine.wrap',
'coroutine.yield'),
'debug': ('debug.debug',
'debug.gethook',
'debug.getinfo',
'debug.getlocal',
'debug.getmetatable',
'debug.getregistry',
'debug.getupvalue',
'debug.getuservalue',
'debug.sethook',
'debug.setlocal',
'debug.setmetatable',
'debug.setupvalue',
'debug.setuservalue',
'debug.traceback',
'debug.upvalueid',
'debug.upvaluejoin'),
'io': ('io.close',
'io.flush',
'io.input',
'io.lines',
'io.open',
'io.output',
'io.popen',
'io.read',
'io.stderr',
'io.stdin',
'io.stdout',
'io.tmpfile',
'io.type',
'io.write'),
'math': ('math.abs',
'math.acos',
'math.asin',
'math.atan',
'math.atan2',
'math.ceil',
'math.cos',
'math.cosh',
'math.deg',
'math.exp',
'math.floor',
'math.fmod',
'math.frexp',
'math.huge',
'math.ldexp',
'math.log',
'math.max',
'math.maxinteger',
'math.min',
'math.mininteger',
'math.modf',
'math.pi',
'math.pow',
'math.rad',
'math.random',
'math.randomseed',
'math.sin',
'math.sinh',
'math.sqrt',
'math.tan',
'math.tanh',
'math.tointeger',
'math.type',
'math.ult'),
'modules': ('package.config',
'package.cpath',
'package.loaded',
'package.loadlib',
'package.path',
'package.preload',
'package.searchers',
'package.searchpath',
'require'),
'os': ('os.clock',
'os.date',
'os.difftime',
'os.execute',
'os.exit',
'os.getenv',
'os.remove',
'os.rename',
'os.setlocale',
'os.time',
'os.tmpname'),
'string': ('string.byte',
'string.char',
'string.dump',
'string.find',
'string.format',
'string.gmatch',
'string.gsub',
'string.len',
'string.lower',
'string.match',
'string.pack',
'string.packsize',
'string.rep',
'string.reverse',
'string.sub',
'string.unpack',
'string.upper'),
'table': ('table.concat',
'table.insert',
'table.move',
'table.pack',
'table.remove',
'table.sort',
'table.unpack'),
'utf8': ('utf8.char',
'utf8.charpattern',
'utf8.codepoint',
'utf8.codes',
'utf8.len',
'utf8.offset')}
if __name__ == '__main__': # pragma: no cover
import re
from urllib.request import urlopen
import pprint
# You can't generally find out what module a function belongs to if you
# have only its name. Because of this, here are some callback functions
# that recognize if a given function belongs to a specific module.
def module_callbacks():
    """Return a mapping of module name to a predicate on function names.

    Each predicate takes a full function name (e.g. ``'string.len'``) and
    reports whether that name belongs to the corresponding module.
    """
    def make_prefix_check(prefix):
        # Factory for the common "belongs iff name starts with prefix" case.
        def check(name):
            return name.startswith(prefix)
        return check

    def is_in_modules_module(name):
        # 'require' and 'module' are free functions of the package system;
        # everything else in it lives under the 'package' table.
        return name in ('require', 'module') or name.startswith('package')

    return {
        'coroutine': make_prefix_check('coroutine.'),
        'modules': is_in_modules_module,
        'string': make_prefix_check('string.'),
        'table': make_prefix_check('table.'),
        # NOTE: no trailing dot, matching the original behavior (also
        # catches bare 'math' prefixed names).
        'math': make_prefix_check('math'),
        'io': make_prefix_check('io.'),
        'os': make_prefix_check('os.'),
        'debug': make_prefix_check('debug.'),
    }
def get_newest_version():
    """Scrape www.lua.org/manual/ and return the newest manual version.

    Returns a version string such as ``'5.4'``, or ``None`` if no matching
    link is found on the index page.
    """
    pattern = re.compile(r'^<A HREF="(\d\.\d)/">(Lua )?\1</A>')
    # The manual index is served as ISO 8859-1; decode each line before
    # matching the version links.
    for raw_line in urlopen('http://www.lua.org/manual/'):
        match = pattern.match(raw_line.decode('iso-8859-1'))
        if match:
            return match.group(1)
def get_lua_functions(version):
    """Download the manual index for *version* and return its function names.

    Scrapes the ``#pdf-...`` anchors from the Lua reference manual front
    page, skipping the ``lua_*``/``LUA_*`` C API entries.
    """
    pattern = re.compile(r'^<A HREF="manual.html#pdf-(?!lua|LUA)([^:]+)">\1</A>')
    page = urlopen(f'http://www.lua.org/manual/{version}/')
    return [
        match.group(1)
        for match in (pattern.match(line.decode('iso-8859-1')) for line in page)
        if match
    ]
def get_function_module(name):
    """Return the module key under which *name* should be filed.

    First consults the recognizer callbacks from ``module_callbacks()``;
    otherwise falls back to the dotted prefix, or ``'basic'`` for bare
    (module-less) names.
    """
    for module_name, belongs_to in module_callbacks().items():
        if belongs_to(name):
            return module_name
    return name.split('.')[0] if '.' in name else 'basic'
def regenerate(filename, modules):
    """Rewrite *filename* in place with a freshly formatted MODULES dict.

    Everything before the ``MODULES = {`` line and everything from the
    ``if __name__ == '__main__':`` line onward is preserved verbatim; only
    the dict in between is replaced with ``pprint``-formatted *modules*.
    """
    with open(filename, encoding='utf-8') as handle:
        source = handle.read()
    head = source[:source.find('MODULES = {')]
    tail = source[source.find("if __name__ == '__main__':"):]
    body = f'MODULES = {pprint.pformat(modules)}\n\n'
    with open(filename, 'w', encoding='utf-8') as handle:
        handle.write(head + body + tail)
def run():
    """Download the Lua function indexes and regenerate this file.

    Merges the 5.2 index with the newest manual's index (function names
    are additive across versions), groups the names by module, and writes
    the result back into this module via ``regenerate``.
    """
    newest = get_newest_version()
    known = set()
    for ver in ('5.2', newest):
        print(f'> Downloading function index for Lua {ver}')
        found = get_lua_functions(ver)
        print('> %d functions found, %d new:' %
              (len(found), len(set(found) - known)))
        known |= set(found)
    grouped = {}
    for full_function_name in sorted(known):
        print(f'>> {full_function_name}')
        grouped.setdefault(get_function_module(full_function_name),
                           []).append(full_function_name)
    regenerate(__file__, {mod: tuple(names) for mod, names in grouped.items()})
run()

View File

@@ -0,0 +1,62 @@
"""
pygments.lexers._luau_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Includes the builtins for Luau and Roblox.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Standard-library namespaces treated as builtins when highlighting Luau
# source (per the module docstring, this covers Luau itself; Roblox-only
# names live in ROBLOX_BUILTINS below).
LUAU_BUILTINS = {
    'bit32',
    'buffer',
    'coroutine',
    'debug',
    'math',
    'os',
    'string',
    'table',
    'utf8',
}
# Additional global names treated as builtins for Roblox-flavoured Luau
# (the `task` library plus what appear to be Roblox datatype globals such
# as CFrame and Vector3 — presumably constructors; confirm against the
# Roblox API reference).
ROBLOX_BUILTINS = {
    'task',
    'Axes',
    'BrickColor',
    'CatalogSearchParams',
    'CFrame',
    'Color3',
    'ColorSequence',
    'ColorSequenceKeypoint',
    'DateTime',
    'DockWidgetPluginGuiInfo',
    'Faces',
    'FloatCurveKey',
    'Font',
    'Instance',
    'NumberRange',
    'NumberSequence',
    'NumberSequenceKeypoint',
    'OverlapParams',
    'PathWaypoint',
    'PhysicalProperties',
    'Random',
    'Ray',
    'RaycastParams',
    'RaycastResult',
    'RBXScriptConnection',
    'RBXScriptSignal',
    'Rect',
    'Region3',
    'Region3int16',
    'SharedTable',
    'TweenInfo',
    'UDim',
    'UDim2',
    'Vector2',
    'Vector2int16',
    'Vector3',
    'Vector3int16',
}

View File

@@ -0,0 +1,602 @@
# Automatically generated by scripts/gen_mapfiles.py.
# DO NOT EDIT BY HAND; run `tox -e mapfiles` instead.
LEXERS = {
'ABAPLexer': ('pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)),
'AMDGPULexer': ('pygments.lexers.amdgpu', 'AMDGPU', ('amdgpu',), ('*.isa',), ()),
'APLLexer': ('pygments.lexers.apl', 'APL', ('apl',), ('*.apl', '*.aplf', '*.aplo', '*.apln', '*.aplc', '*.apli', '*.dyalog'), ()),
'AbnfLexer': ('pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
'ActionScript3Lexer': ('pygments.lexers.actionscript', 'ActionScript 3', ('actionscript3', 'as3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'ActionScriptLexer': ('pygments.lexers.actionscript', 'ActionScript', ('actionscript', 'as'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'AdaLexer': ('pygments.lexers.ada', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AdlLexer': ('pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
'AgdaLexer': ('pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AheuiLexer': ('pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()),
'AlloyLexer': ('pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
'AmbientTalkLexer': ('pygments.lexers.ambient', 'AmbientTalk', ('ambienttalk', 'ambienttalk/2', 'at'), ('*.at',), ('text/x-ambienttalk',)),
'AmplLexer': ('pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()),
'Angular2HtmlLexer': ('pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()),
'Angular2Lexer': ('pygments.lexers.templates', 'Angular2', ('ng2',), (), ()),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-actionscript', 'antlr-as'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
'ArrowLexer': ('pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), ()),
'ArturoLexer': ('pygments.lexers.arturo', 'Arturo', ('arturo', 'art'), ('*.art',), ()),
'AscLexer': ('pygments.lexers.asc', 'ASCII armored', ('asc', 'pem'), ('*.asc', '*.pem', 'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk', 'id_rsa'), ('application/pgp-keys', 'application/pgp-encrypted', 'application/pgp-signature', 'application/pem-certificate-chain')),
'Asn1Lexer': ('pygments.lexers.asn1', 'ASN.1', ('asn1',), ('*.asn1',), ()),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asymptote', 'asy'), ('*.asy',), ('text/x-asymptote',)),
'AugeasLexer': ('pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()),
'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('autohotkey', 'ahk'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCBasicLexer': ('pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()),
'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BCLexer': ('pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
'BQNLexer': ('pygments.lexers.bqn', 'BQN', ('bqn',), ('*.bqn',), ()),
'BSTLexer': ('pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()),
'BareLexer': ('pygments.lexers.bare', 'BARE', ('bare',), ('*.bare',), ()),
'BaseMakefileLexer': ('pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell', 'openrc'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', '.kshrc', 'kshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')),
'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('batch', 'bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BddLexer': ('pygments.lexers.bdd', 'Bdd', ('bdd',), ('*.feature',), ('text/x-bdd',)),
'BefungeLexer': ('pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BerryLexer': ('pygments.lexers.berry', 'Berry', ('berry', 'be'), ('*.be',), ('text/x-berry', 'application/x-berry')),
'BibTeXLexer': ('pygments.lexers.bibtex', 'BibTeX', ('bibtex', 'bib'), ('*.bib',), ('text/x-bibtex',)),
'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BlueprintLexer': ('pygments.lexers.blueprint', 'Blueprint', ('blueprint',), ('*.blp',), ('text/x-blueprint',)),
'BnfLexer': ('pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)),
'BoaLexer': ('pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BoogieLexer': ('pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()),
'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BugsLexer': ('pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CAmkESLexer': ('pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()),
'CLexer': ('pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc', '*.x[bp]m'), ('text/x-chdr', 'text/x-csrc', 'image/x-xbitmap', 'image/x-xpixmap')),
'CMakeLexer': ('pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CPSALexer': ('pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
'CSSUL4Lexer': ('pygments.lexers.ul4', 'CSS+UL4', ('css+ul4',), ('*.cssul4',), ()),
'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#', 'cs'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
'CadlLexer': ('pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()),
'CapDLLexer': ('pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()),
'CapnProtoLexer': ('pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()),
'CarbonLexer': ('pygments.lexers.carbon', 'Carbon', ('carbon',), ('*.carbon',), ('text/x-carbon',)),
'CbmBasicV2Lexer': ('pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CddlLexer': ('pygments.lexers.cddl', 'CDDL', ('cddl',), ('*.cddl',), ('text/x-cddl',)),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'ChaiscriptLexer': ('pygments.lexers.scripting', 'ChaiScript', ('chaiscript', 'chai'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
'ChapelLexer': ('pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
'CharmciLexer': ('pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()),
'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('javascript+cheetah', 'js+cheetah', 'javascript+spitfire', 'js+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'CirruLexer': ('pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
'ClayLexer': ('pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'CleanLexer': ('pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj', '*.cljc'), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CodeQLLexer': ('pygments.lexers.codeql', 'CodeQL', ('codeql', 'ql'), ('*.ql', '*.qll'), ()),
'CoffeeScriptLexer': ('pygments.lexers.javascript', 'CoffeeScript', ('coffeescript', 'coffee-script', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionCFCLexer': ('pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'Comal80Lexer': ('pygments.lexers.comal', 'COMAL-80', ('comal', 'comal80'), ('*.cml', '*.comal'), ()),
'CommonLispLexer': ('pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
'ComponentPascalLexer': ('pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
'CoqLexer': ('pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CplintLexer': ('pygments.lexers.cplint', 'cplint', ('cplint',), ('*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl'), ('text/x-cplint',)),
'CppLexer': ('pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP', '*.tpp'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrmshLexer': ('pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
'CrocLexer': ('pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CryptolLexer': ('pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
'CrystalLexer': ('pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)),
'CsoundDocumentLexer': ('pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
'CsoundOrchestraLexer': ('pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()),
'CsoundScoreLexer': ('pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), ('*.css.j2', '*.css.jinja2'), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+ruby', 'css+erb'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CypherLexer': ('pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
'CythonLexer': ('pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'Dasm16Lexer': ('pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)),
'DaxLexer': ('pygments.lexers.dax', 'Dax', ('dax',), ('*.dax',), ()),
'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('debcontrol', 'control'), ('control',), ()),
'DebianSourcesLexer': ('pygments.lexers.installers', 'Debian Sources file', ('debian.sources',), ('*.sources',), ()),
'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
'DesktopLexer': ('pygments.lexers.configs', 'Desktop file', ('desktop',), ('*.desktop',), ('application/x-desktop',)),
'DevicetreeLexer': ('pygments.lexers.devicetree', 'Devicetree', ('devicetree', 'dts'), ('*.dts', '*.dtsi'), ('text/x-c',)),
'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DnsZoneLexer': ('pygments.lexers.dns', 'Zone', ('zone',), ('*.zone',), ('text/dns',)),
'DockerLexer': ('pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
'DtdLexer': ('pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EarlGreyLexer': ('pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)),
'EasytrieveLexer': ('pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
'EbnfLexer': ('pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'EiffelLexer': ('pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
'ElixirConsoleLexer': ('pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.eex', '*.exs', '*.leex'), ('text/x-elixir',)),
'ElmLexer': ('pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)),
'ElpiLexer': ('pygments.lexers.elpi', 'Elpi', ('elpi',), ('*.elpi',), ('text/x-elpi',)),
'EmacsLispLexer': ('pygments.lexers.lisp', 'EmacsLisp', ('emacs-lisp', 'elisp', 'emacs'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
'EmailLexer': ('pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), (), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), (), ('application/xml+evoque',)),
'ExeclineLexer': ('pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()),
'EzhilLexer': ('pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
'FSharpLexer': ('pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi', '*.fsx'), ('text/x-fsharp',)),
'FStarLexer': ('pygments.lexers.ml', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)),
'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FennelLexer': ('pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()),
'FiftLexer': ('pygments.lexers.fift', 'Fift', ('fift', 'fif'), ('*.fif',), ()),
'FishShellLexer': ('pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
'FlatlineLexer': ('pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)),
'FloScriptLexer': ('pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()),
'ForthLexer': ('pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)),
'FortranFixedLexer': ('pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran', 'f90'), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
'FreeFemLexer': ('pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)),
'FuncLexer': ('pygments.lexers.func', 'FunC', ('func', 'fc'), ('*.fc', '*.func'), ()),
'FutharkLexer': ('pygments.lexers.futhark', 'Futhark', ('futhark',), ('*.fut',), ('text/x-futhark',)),
'GAPConsoleLexer': ('pygments.lexers.algebra', 'GAP session', ('gap-console', 'gap-repl'), ('*.tst',), ()),
'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
'GDScriptLexer': ('pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')),
'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GSQLLexer': ('pygments.lexers.gsql', 'GSQL', ('gsql',), ('*.gsql',), ()),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GcodeLexer': ('pygments.lexers.gcodelexer', 'g-code', ('gcode',), ('*.gcode',), ()),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pygments.lexers.testing', 'Gherkin', ('gherkin', 'cucumber'), ('*.feature',), ('text/x-gherkin',)),
'GleamLexer': ('pygments.lexers.gleam', 'Gleam', ('gleam',), ('*.gleam',), ('text/x-gleam',)),
'GnuplotLexer': ('pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.go', 'Go', ('go', 'golang'), ('*.go',), ('text/x-gosrc',)),
'GoloLexer': ('pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
'GoodDataCLLexer': ('pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GoogleSqlLexer': ('pygments.lexers.sql', 'GoogleSQL', ('googlesql', 'zetasql'), ('*.googlesql', '*.googlesql.sql'), ('text/x-google-sql', 'text/x-google-sql-aux')),
'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GraphQLLexer': ('pygments.lexers.graphql', 'GraphQL', ('graphql',), ('*.graphql',), ()),
'GraphvizLexer': ('pygments.lexers.graphviz', 'Graphviz', ('graphviz', 'dot'), ('*.gv', '*.dot'), ('text/x-graphviz', 'text/vnd.graphviz')),
'GroffLexer': ('pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1-9]', '*.man', '*.1p', '*.3pm'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
'HLSLShaderLexer': ('pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)),
'HTMLUL4Lexer': ('pygments.lexers.ul4', 'HTML+UL4', ('html+ul4',), ('*.htmlul4',), ()),
'HamlLexer': ('pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
'HandlebarsHtmlLexer': ('pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
'HandlebarsLexer': ('pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
'HareLexer': ('pygments.lexers.hare', 'Hare', ('hare',), ('*.ha',), ('text/x-hare',)),
'HaskellLexer': ('pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pygments.lexers.haxe', 'Haxe', ('haxe', 'hxsl', 'hx'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HexdumpLexer': ('pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
'HsailLexer': ('pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
'HspecLexer': ('pygments.lexers.haskell', 'Hspec', ('hspec',), ('*Spec.hs',), ()),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), ('*.html.j2', '*.htm.j2', '*.xhtml.j2', '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2'), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HyLexer': ('pygments.lexers.lisp', 'Hy', ('hylang', 'hy'), ('*.hy',), ('text/x-hy', 'application/x-hy')),
'HybrisLexer': ('pygments.lexers.scripting', 'Hybris', ('hybris',), ('*.hyb',), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IconLexer': ('pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()),
'IdrisLexer': ('pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
'IgorLexer': ('pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'Inform6Lexer': ('pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
'Inform6TemplateLexer': ('pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
'Inform7Lexer': ('pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
'IniLexer': ('pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf', '.editorconfig'), ('text/x-ini', 'text/inf')),
'IoLexer': ('pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'IsabelleLexer': ('pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
'JLexer': ('pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
'JMESPathLexer': ('pygments.lexers.jmespath', 'JMESPath', ('jmespath', 'jp'), ('*.jp',), ()),
'JSLTLexer': ('pygments.lexers.jslt', 'JSLT', ('jslt',), ('*.jslt',), ('text/x-jslt',)),
'JagsLexer': ('pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JanetLexer': ('pygments.lexers.lisp', 'Janet', ('janet',), ('*.janet', '*.jdn'), ('text/x-janet', 'application/x-janet')),
'JasminLexer': ('pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), ('*.js.j2', '*.js.jinja2'), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pygments.lexers.javascript', 'JavaScript', ('javascript', 'js'), ('*.js', '*.jsm', '*.mjs', '*.cjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('javascript+php', 'js+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('javascript+smarty', 'js+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JavascriptUL4Lexer': ('pygments.lexers.ul4', 'Javascript+UL4', ('js+ul4',), ('*.jsul4',), ()),
'JclLexer': ('pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
'JsgfLexer': ('pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')),
'Json5Lexer': ('pygments.lexers.json5', 'JSON5', ('json5',), ('*.json5',), ()),
'JsonBareObjectLexer': ('pygments.lexers.data', 'JSONBareObject', (), (), ()),
'JsonLdLexer': ('pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
'JsonLexer': ('pygments.lexers.data', 'JSON', ('json', 'json-object'), ('*.json', '*.jsonl', '*.ndjson', 'Pipfile.lock'), ('application/json', 'application/json-object', 'application/x-ndjson', 'application/jsonl', 'application/json-seq')),
'JsonnetLexer': ('pygments.lexers.jsonnet', 'Jsonnet', ('jsonnet',), ('*.jsonnet', '*.libsonnet'), ()),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JsxLexer': ('pygments.lexers.jsx', 'JSX', ('jsx', 'react'), ('*.jsx', '*.react'), ('text/jsx', 'text/typescript-jsx')),
'JuliaConsoleLexer': ('pygments.lexers.julia', 'Julia console', ('jlcon', 'julia-repl'), (), ()),
'JuliaLexer': ('pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'JuttleLexer': ('pygments.lexers.javascript', 'Juttle', ('juttle',), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')),
'KLexer': ('pygments.lexers.q', 'K', ('k',), ('*.k',), ()),
'KalLexer': ('pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
'KconfigLexer': ('pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KernelLogLexer': ('pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()),
'KokaLexer': ('pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt', '*.kts'), ('text/x-kotlin',)),
'KuinLexer': ('pygments.lexers.kuin', 'Kuin', ('kuin',), ('*.kn',), ()),
'KustoLexer': ('pygments.lexers.kusto', 'Kusto', ('kql', 'kusto'), ('*.kql', '*.kusto', '.csl'), ()),
'LSLLexer': ('pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('javascript+lasso', 'js+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LdaprcLexer': ('pygments.lexers.ldap', 'LDAP configuration file', ('ldapconf', 'ldaprc'), ('.ldaprc', 'ldaprc', 'ldap.conf'), ('text/x-ldapconf',)),
'LdifLexer': ('pygments.lexers.ldap', 'LDIF', ('ldif',), ('*.ldif',), ('text/x-ldif',)),
'Lean3Lexer': ('pygments.lexers.lean', 'Lean', ('lean', 'lean3'), ('*.lean',), ('text/x-lean', 'text/x-lean3')),
'Lean4Lexer': ('pygments.lexers.lean', 'Lean4', ('lean4',), ('*.lean',), ('text/x-lean4',)),
'LessCssLexer': ('pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)),
'LighttpdConfLexer': ('pygments.lexers.configs', 'Lighttpd configuration file', ('lighttpd', 'lighty'), ('lighttpd.conf',), ('text/x-lighttpd-conf',)),
'LilyPondLexer': ('pygments.lexers.lilypond', 'LilyPond', ('lilypond',), ('*.ly',), ()),
'LimboLexer': ('pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
'LiquidLexer': ('pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
'LiterateAgdaLexer': ('pygments.lexers.haskell', 'Literate Agda', ('literate-agda', 'lagda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateCryptolLexer': ('pygments.lexers.haskell', 'Literate Cryptol', ('literate-cryptol', 'lcryptol', 'lcry'), ('*.lcry',), ('text/x-literate-cryptol',)),
'LiterateHaskellLexer': ('pygments.lexers.haskell', 'Literate Haskell', ('literate-haskell', 'lhaskell', 'lhs'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiterateIdrisLexer': ('pygments.lexers.haskell', 'Literate Idris', ('literate-idris', 'lidris', 'lidr'), ('*.lidr',), ('text/x-literate-idris',)),
'LiveScriptLexer': ('pygments.lexers.javascript', 'LiveScript', ('livescript', 'live-script'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LlvmMirBodyLexer': ('pygments.lexers.asm', 'LLVM-MIR Body', ('llvm-mir-body',), (), ()),
'LlvmMirLexer': ('pygments.lexers.asm', 'LLVM-MIR', ('llvm-mir',), ('*.mir',), ()),
'LogosLexer': ('pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'LuauLexer': ('pygments.lexers.scripting', 'Luau', ('luau',), ('*.luau',), ()),
'MCFunctionLexer': ('pygments.lexers.minecraft', 'MCFunction', ('mcfunction', 'mcf'), ('*.mcfunction',), ('text/mcfunction',)),
'MCSchemaLexer': ('pygments.lexers.minecraft', 'MCSchema', ('mcschema',), ('*.mcschema',), ('text/mcschema',)),
'MIMELexer': ('pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')),
'MIPSLexer': ('pygments.lexers.mips', 'MIPS', ('mips',), ('*.mips', '*.MIPS'), ()),
'MOOCodeLexer': ('pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MSDOSSessionLexer': ('pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
'Macaulay2Lexer': ('pygments.lexers.macaulay2', 'Macaulay2', ('macaulay2',), ('*.m2',), ()),
'MakefileLexer': ('pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('javascript+mako', 'js+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MapleLexer': ('pygments.lexers.maple', 'Maple', ('maple',), ('*.mpl', '*.mi', '*.mm'), ('text/x-maple',)),
'MaqlLexer': ('pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MarkdownLexer': ('pygments.lexers.markup', 'Markdown', ('markdown', 'md'), ('*.md', '*.markdown'), ('text/x-markdown',)),
'MaskLexer': ('pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MathematicaLexer': ('pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
'MatlabLexer': ('pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
'MaximaLexer': ('pygments.lexers.maxima', 'Maxima', ('maxima', 'macsyma'), ('*.mac', '*.max'), ()),
'MesonLexer': ('pygments.lexers.meson', 'Meson', ('meson', 'meson.build'), ('meson.build', 'meson_options.txt'), ('text/x-meson',)),
'MiniDLexer': ('pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
'MiniScriptLexer': ('pygments.lexers.scripting', 'MiniScript', ('miniscript', 'ms'), ('*.ms',), ('text/x-minicript', 'application/x-miniscript')),
'ModelicaLexer': ('pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MojoLexer': ('pygments.lexers.mojo', 'Mojo', ('mojo', '🔥'), ('*.mojo', '*.🔥'), ('text/x-mojo', 'application/x-mojo')),
'MonkeyLexer': ('pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MonteLexer': ('pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()),
'MoonScriptLexer': ('pygments.lexers.scripting', 'MoonScript', ('moonscript', 'moon'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MoselLexer': ('pygments.lexers.mosel', 'Mosel', ('mosel',), ('*.mos',), ()),
'MozPreprocCssLexer': ('pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
'MozPreprocHashLexer': ('pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
'MozPreprocJavascriptLexer': ('pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
'MozPreprocPercentLexer': ('pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
'MozPreprocXulLexer': ('pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
'MqlLexer': ('pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
'MscgenLexer': ('pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('javascript+myghty', 'js+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NCLLexer': ('pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)),
'NSISLexer': ('pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM', '*.nasm'), ('text/x-nasm',)),
'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NestedTextLexer': ('pygments.lexers.configs', 'NestedText', ('nestedtext', 'nt'), ('*.nt',), ()),
'NewLispLexer': ('pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nim',)),
'NitLexer': ('pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
'NixLexer': ('pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
'NodeConsoleLexer': ('pygments.lexers.javascript', 'Node.js REPL console session', ('nodejsrepl',), (), ('text/x-nodejsrepl',)),
'NotmuchLexer': ('pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()),
'NuSMVLexer': ('pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()),
'NumPyLexer': ('pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
'NumbaIRLexer': ('pygments.lexers.numbair', 'Numba_IR', ('numba_ir', 'numbair'), ('*.numba_ir',), ('text/x-numba_ir', 'text/x-numbair')),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OdinLexer': ('pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)),
'OmgIdlLexer': ('pygments.lexers.c_like', 'OMG Interface Definition Language', ('omg-idl',), ('*.idl', '*.pidl'), ()),
'OocLexer': ('pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'OpenScadLexer': ('pygments.lexers.openscad', 'OpenSCAD', ('openscad',), ('*.scad',), ('application/x-openscad',)),
'OrgLexer': ('pygments.lexers.markup', 'Org Mode', ('org', 'orgmode', 'org-mode'), ('*.org',), ('text/org',)),
'OutputLexer': ('pygments.lexers.special', 'Text output', ('output',), (), ()),
'PacmanConfLexer': ('pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()),
'PanLexer': ('pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
'ParaSailLexer': ('pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
'PawnLexer': ('pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
'PddlLexer': ('pygments.lexers.pddl', 'PDDL', ('pddl',), ('*.pddl',), ()),
'PegLexer': ('pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)),
'Perl6Lexer': ('pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', '*.perl'), ('text/x-perl', 'application/x-perl')),
'PhixLexer': ('pygments.lexers.phix', 'Phix', ('phix',), ('*.exw',), ('text/x-phix',)),
'PhpLexer': ('pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PigLexer': ('pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
'PikeLexer': ('pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
'PkgConfigLexer': ('pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PointlessLexer': ('pygments.lexers.pointless', 'Pointless', ('pointless',), ('*.ptls',), ()),
'PonyLexer': ('pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()),
'PortugolLexer': ('pygments.lexers.pascal', 'Portugol', ('portugol',), ('*.alg', '*.portugol'), ()),
'PostScriptLexer': ('pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresExplainLexer': ('pygments.lexers.sql', 'PostgreSQL EXPLAIN dialect', ('postgres-explain',), ('*.explain',), ('text/x-postgresql-explain',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'pwsh', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PowerShellSessionLexer': ('pygments.lexers.shell', 'PowerShell Session', ('pwsh-session', 'ps1con'), (), ()),
'PraatLexer': ('pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()),
'ProcfileLexer': ('pygments.lexers.procfile', 'Procfile', ('procfile',), ('Procfile',), ()),
'PrologLexer': ('pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PromQLLexer': ('pygments.lexers.promql', 'PromQL', ('promql',), ('*.promql',), ()),
'PromelaLexer': ('pygments.lexers.c_like', 'Promela', ('promela',), ('*.pml', '*.prom', '*.prm', '*.promela', '*.pr', '*.pm'), ('text/x-promela',)),
'PropertiesLexer': ('pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PrqlLexer': ('pygments.lexers.prql', 'PRQL', ('prql',), ('*.prql',), ('application/prql', 'application/x-prql')),
'PsyshConsoleLexer': ('pygments.lexers.php', 'PsySH console session for PHP', ('psysh',), (), ()),
'PtxLexer': ('pygments.lexers.ptx', 'PTX', ('ptx',), ('*.ptx',), ('text/x-ptx',)),
'PugLexer': ('pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')),
'PuppetLexer': ('pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python2Lexer': ('pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')),
'Python2TracebackLexer': ('pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.python', 'Python console session', ('pycon', 'python-console'), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3', 'bazel', 'starlark', 'pyi'), ('*.py', '*.pyw', '*.pyi', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
'PythonTracebackLexer': ('pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')),
'PythonUL4Lexer': ('pygments.lexers.ul4', 'Python+UL4', ('py+ul4',), ('*.pyul4',), ()),
'QBasicLexer': ('pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
'QLexer': ('pygments.lexers.q', 'Q', ('q',), ('*.q',), ()),
'QVToLexer': ('pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
'QlikLexer': ('pygments.lexers.qlik', 'Qlik', ('qlik', 'qlikview', 'qliksense', 'qlikscript'), ('*.qvs', '*.qvw'), ()),
'QmlLexer': ('pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
'RConsoleLexer': ('pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RNCCompactLexer': ('pygments.lexers.rnc', 'Relax-NG Compact', ('rng-compact', 'rnc'), ('*.rnc',), ()),
'RPMSpecLexer': ('pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', (), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'ReasonLexer': ('pygments.lexers.ml', 'ReasonML', ('reasonml', 'reason'), ('*.re', '*.rei'), ('text/x-reasonml',)),
'RebolLexer': ('pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
'RedLexer': ('pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
'RedcodeLexer': ('pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'RegoLexer': ('pygments.lexers.rego', 'Rego', ('rego',), ('*.rego',), ('text/x-rego',)),
'ResourceLexer': ('pygments.lexers.resource', 'ResourceBundle', ('resourcebundle', 'resource'), (), ()),
'RexxLexer': ('pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RideLexer': ('pygments.lexers.ride', 'Ride', ('ride',), ('*.ride',), ('text/x-ride',)),
'RitaLexer': ('pygments.lexers.rita', 'Rita', ('rita',), ('*.rita',), ('text/rita',)),
'RoboconfGraphLexer': ('pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
'RoboconfInstancesLexer': ('pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
'RobotFrameworkLexer': ('pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot', '*.resource'), ('text/x-robotframework',)),
'RqlLexer': ('pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
'RslLexer': ('pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
'RstLexer': ('pygments.lexers.markup', 'reStructuredText', ('restructuredtext', 'rst', 'rest'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RtsLexer': ('pygments.lexers.trafficscript', 'TrafficScript', ('trafficscript', 'rts'), ('*.rts',), ()),
'RubyConsoleLexer': ('pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('ruby', 'rb', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile', 'Vagrantfile'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust', 'text/x-rust')),
'SASLexer': ('pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SNBTLexer': ('pygments.lexers.minecraft', 'SNBT', ('snbt',), ('*.snbt',), ('text/snbt',)),
'SarlLexer': ('pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
'SassLexer': ('pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
'SaviLexer': ('pygments.lexers.savi', 'Savi', ('savi',), ('*.savi',), ()),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
'ScdocLexer': ('pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()),
'SchemeLexer': ('pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'SedLexer': ('pygments.lexers.textedit', 'Sed', ('sed', 'gsed', 'ssed'), ('*.sed', '*.[gs]sed'), ('text/x-sed',)),
'ShExCLexer': ('pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)),
'ShenLexer': ('pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
'SieveLexer': ('pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()),
'SilverLexer': ('pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
'SingularityLexer': ('pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()),
'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash',), ('*.sla',), ()),
'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
'SlurmBashLexer': ('pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartGameFormatLexer': ('pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SmithyLexer': ('pygments.lexers.smithy', 'Smithy', ('smithy',), ('*.smithy',), ()),
'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SnowballLexer': ('pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()),
'SolidityLexer': ('pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()),
'SoongLexer': ('pygments.lexers.soong', 'Soong', ('androidbp', 'bp', 'soong'), ('Android.bp',), ()),
'SophiaLexer': ('pygments.lexers.sophia', 'Sophia', ('sophia',), ('*.aes',), ()),
'SourcePawnLexer': ('pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pygments.lexers.installers', 'Debian Sourcelist', ('debsources', 'sourceslist', 'sources.list'), ('sources.list',), ()),
'SparqlLexer': ('pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
'SpiceLexer': ('pygments.lexers.spice', 'Spice', ('spice', 'spicelang'), ('*.spice',), ('text/x-spice',)),
'SqlJinjaLexer': ('pygments.lexers.templates', 'SQL+Jinja', ('sql+jinja',), ('*.sql', '*.sql.j2', '*.sql.jinja2'), ()),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SrcinfoLexer': ('pygments.lexers.srcinfo', 'Srcinfo', ('srcinfo',), ('.SRCINFO',), ()),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
'StataLexer': ('pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')),
'SuperColliderLexer': ('pygments.lexers.supercollider', 'SuperCollider', ('supercollider', 'sc'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')),
'SwiftLexer': ('pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'SystemdLexer': ('pygments.lexers.configs', 'Systemd', ('systemd',), ('*.service', '*.socket', '*.device', '*.mount', '*.automount', '*.swap', '*.target', '*.path', '*.timer', '*.slice', '*.scope'), ()),
'TAPLexer': ('pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
'TNTLexer': ('pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()),
'TOMLLexer': ('pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ('application/toml',)),
'TableGenLexer': ('pygments.lexers.tablegen', 'TableGen', ('tablegen', 'td'), ('*.td',), ()),
'TactLexer': ('pygments.lexers.tact', 'Tact', ('tact',), ('*.tact',), ()),
'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
'TalLexer': ('pygments.lexers.tal', 'Tal', ('tal', 'uxntal'), ('*.tal',), ('text/x-uxntal',)),
'TasmLexer': ('pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TcshSessionLexer': ('pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TealLexer': ('pygments.lexers.teal', 'teal', ('teal',), ('*.teal',), ()),
'TeraTermLexer': ('pygments.lexers.teraterm', 'Tera Term macro', ('teratermmacro', 'teraterm', 'ttl'), ('*.ttl',), ('text/x-teratermmacro',)),
'TermcapLexer': ('pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()),
'TerminfoLexer': ('pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()),
'TerraformLexer': ('pygments.lexers.configs', 'Terraform', ('terraform', 'tf', 'hcl'), ('*.tf', '*.hcl'), ('application/x-tf', 'application/x-terraform')),
'TexLexer': ('pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'ThingsDBLexer': ('pygments.lexers.thingsdb', 'ThingsDB', ('ti', 'thingsdb'), ('*.ti',), ()),
'ThriftLexer': ('pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
'TiddlyWiki5Lexer': ('pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), ('text/vnd.tiddlywiki',)),
'TlbLexer': ('pygments.lexers.tlb', 'Tl-b', ('tlb',), ('*.tlb',), ()),
'TlsLexer': ('pygments.lexers.tls', 'TLS Presentation Language', ('tls',), (), ()),
'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
'TransactSqlLexer': ('pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TsxLexer': ('pygments.lexers.jsx', 'TSX', ('tsx',), ('*.tsx',), ('text/typescript-tsx',)),
'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
'TwigHtmlLexer': ('pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
'TwigLexer': ('pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
'TypeScriptLexer': ('pygments.lexers.javascript', 'TypeScript', ('typescript', 'ts'), ('*.ts',), ('application/x-typescript', 'text/x-typescript')),
'TypoScriptCssDataLexer': ('pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()),
'TypoScriptHtmlDataLexer': ('pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()),
'TypoScriptLexer': ('pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)),
'TypstLexer': ('pygments.lexers.typst', 'Typst', ('typst',), ('*.typ',), ('text/x-typst',)),
'UL4Lexer': ('pygments.lexers.ul4', 'UL4', ('ul4',), ('*.ul4',), ()),
'UcodeLexer': ('pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()),
'UniconLexer': ('pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)),
'UnixConfigLexer': ('pygments.lexers.configs', 'Unix/Linux config files', ('unixconfig', 'linuxconfig'), (), ()),
'UrbiscriptLexer': ('pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'UrlEncodedLexer': ('pygments.lexers.html', 'urlencoded', ('urlencoded',), (), ('application/x-www-form-urlencoded',)),
'UsdLexer': ('pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()),
'VBScriptLexer': ('pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()),
'VCLLexer': ('pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
'VCLSnippetLexer': ('pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
'VCTreeStatusLexer': ('pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
'VGLLexer': ('pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet', 'lobas', 'oobas', 'sobas', 'visual-basic', 'visualbasic'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerifpalLexer': ('pygments.lexers.verifpal', 'Verifpal', ('verifpal',), ('*.vp',), ('text/x-verifpal',)),
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'VisualPrologGrammarLexer': ('pygments.lexers.vip', 'Visual Prolog Grammar', ('visualprologgrammar',), ('*.vipgrm',), ()),
'VisualPrologLexer': ('pygments.lexers.vip', 'Visual Prolog', ('visualprolog',), ('*.pro', '*.cl', '*.i', '*.pack', '*.ph'), ()),
'VueLexer': ('pygments.lexers.html', 'Vue', ('vue',), ('*.vue',), ()),
'VyperLexer': ('pygments.lexers.vyper', 'Vyper', ('vyper',), ('*.vy',), ()),
'WDiffLexer': ('pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()),
'WatLexer': ('pygments.lexers.webassembly', 'WebAssembly', ('wast', 'wat'), ('*.wat', '*.wast'), ()),
'WebIDLLexer': ('pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()),
'WgslLexer': ('pygments.lexers.wgsl', 'WebGPU Shading Language', ('wgsl',), ('*.wgsl',), ('text/wgsl',)),
'WhileyLexer': ('pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)),
'WikitextLexer': ('pygments.lexers.markup', 'Wikitext', ('wikitext', 'mediawiki'), (), ('text/x-wiki',)),
'WoWTocLexer': ('pygments.lexers.wowtoc', 'World of Warcraft TOC', ('wowtoc',), ('*.toc',), ()),
'WrenLexer': ('pygments.lexers.wren', 'Wren', ('wren',), ('*.wren',), ()),
'X10Lexer': ('pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
'XMLUL4Lexer': ('pygments.lexers.ul4', 'XML+UL4', ('xml+ul4',), ('*.xmlul4',), ()),
'XQueryLexer': ('pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), ('*.xml.j2', '*.xml.jinja2'), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+ruby', 'xml+erb'), (), ('application/xml+ruby',)),
'XmlLexer': ('pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XorgLexer': ('pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()),
'XppLexer': ('pygments.lexers.dotnet', 'X++', ('xpp', 'x++'), ('*.xpp',), ()),
'XsltLexer': ('pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'XtlangLexer': ('pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()),
'YamlJinjaLexer': ('pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2'), ('text/x-yaml+jinja', 'text/x-sls')),
'YamlLexer': ('pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
'YangLexer': ('pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)),
'YaraLexer': ('pygments.lexers.yara', 'YARA', ('yara', 'yar'), ('*.yar',), ('text/x-yara',)),
'ZeekLexer': ('pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()),
'ZephirLexer': ('pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
'ZigLexer': ('pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)),
'apdlexer': ('pygments.lexers.apdlexer', 'ANSYS parametric design language', ('ansys', 'apdl'), ('*.ans',), ()),
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,739 @@
"""
pygments.lexers._postgres_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Self-updating data files for PostgreSQL lexer.
Run with `python -I` to update itself.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Autogenerated: please edit them if you like wasting your time.
# PostgreSQL keywords, harvested from src/include/parser/kwlist.h by the
# updater at the bottom of this file.  Kept as a plain tuple literal of the
# form ``KEYWORDS = ( ... )`` so ``update_consts`` can locate and rewrite it.
KEYWORDS = (
    'ABORT', 'ABSOLUTE', 'ACCESS', 'ACTION', 'ADD', 'ADMIN', 'AFTER',
    'AGGREGATE', 'ALL', 'ALSO', 'ALTER', 'ALWAYS', 'ANALYSE', 'ANALYZE',
    'AND', 'ANY', 'ARRAY', 'AS', 'ASC', 'ASENSITIVE', 'ASSERTION',
    'ASSIGNMENT', 'ASYMMETRIC', 'AT', 'ATOMIC', 'ATTACH', 'ATTRIBUTE',
    'AUTHORIZATION', 'BACKWARD', 'BEFORE', 'BEGIN', 'BETWEEN', 'BIGINT',
    'BINARY', 'BIT', 'BOOLEAN', 'BOTH', 'BREADTH', 'BY', 'CACHE', 'CALL',
    'CALLED', 'CASCADE', 'CASCADED', 'CASE', 'CAST', 'CATALOG', 'CHAIN',
    'CHAR', 'CHARACTER', 'CHARACTERISTICS', 'CHECK', 'CHECKPOINT', 'CLASS',
    'CLOSE', 'CLUSTER', 'COALESCE', 'COLLATE', 'COLLATION', 'COLUMN',
    'COLUMNS', 'COMMENT', 'COMMENTS', 'COMMIT', 'COMMITTED', 'COMPRESSION',
    'CONCURRENTLY', 'CONFIGURATION', 'CONFLICT', 'CONNECTION', 'CONSTRAINT',
    'CONSTRAINTS', 'CONTENT', 'CONTINUE', 'CONVERSION', 'COPY', 'COST',
    'CREATE', 'CROSS', 'CSV', 'CUBE', 'CURRENT', 'CURRENT_CATALOG',
    'CURRENT_DATE', 'CURRENT_ROLE', 'CURRENT_SCHEMA', 'CURRENT_TIME',
    'CURRENT_TIMESTAMP', 'CURRENT_USER', 'CURSOR', 'CYCLE', 'DATA',
    'DATABASE', 'DAY', 'DEALLOCATE', 'DEC', 'DECIMAL', 'DECLARE', 'DEFAULT',
    'DEFAULTS', 'DEFERRABLE', 'DEFERRED', 'DEFINER', 'DELETE', 'DELIMITER',
    'DELIMITERS', 'DEPENDS', 'DEPTH', 'DESC', 'DETACH', 'DICTIONARY',
    'DISABLE', 'DISCARD', 'DISTINCT', 'DO', 'DOCUMENT', 'DOMAIN', 'DOUBLE',
    'DROP', 'EACH', 'ELSE', 'ENABLE', 'ENCODING', 'ENCRYPTED', 'END',
    'ENUM', 'ESCAPE', 'EVENT', 'EXCEPT', 'EXCLUDE', 'EXCLUDING',
    'EXCLUSIVE', 'EXECUTE', 'EXISTS', 'EXPLAIN', 'EXPRESSION', 'EXTENSION',
    'EXTERNAL', 'EXTRACT', 'FALSE', 'FAMILY', 'FETCH', 'FILTER', 'FINALIZE',
    'FIRST', 'FLOAT', 'FOLLOWING', 'FOR', 'FORCE', 'FOREIGN', 'FORWARD',
    'FREEZE', 'FROM', 'FULL', 'FUNCTION', 'FUNCTIONS', 'GENERATED',
    'GLOBAL', 'GRANT', 'GRANTED', 'GREATEST', 'GROUP', 'GROUPING', 'GROUPS',
    'HANDLER', 'HAVING', 'HEADER', 'HOLD', 'HOUR', 'IDENTITY', 'IF',
    'ILIKE', 'IMMEDIATE', 'IMMUTABLE', 'IMPLICIT', 'IMPORT', 'IN',
    'INCLUDE', 'INCLUDING', 'INCREMENT', 'INDEX', 'INDEXES', 'INHERIT',
    'INHERITS', 'INITIALLY', 'INLINE', 'INNER', 'INOUT', 'INPUT',
    'INSENSITIVE', 'INSERT', 'INSTEAD', 'INT', 'INTEGER', 'INTERSECT',
    'INTERVAL', 'INTO', 'INVOKER', 'IS', 'ISNULL', 'ISOLATION', 'JOIN',
    'KEY', 'LABEL', 'LANGUAGE', 'LARGE', 'LAST', 'LATERAL', 'LEADING',
    'LEAKPROOF', 'LEAST', 'LEFT', 'LEVEL', 'LIKE', 'LIMIT', 'LISTEN',
    'LOAD', 'LOCAL', 'LOCALTIME', 'LOCALTIMESTAMP', 'LOCATION', 'LOCK',
    'LOCKED', 'LOGGED', 'MAPPING', 'MATCH', 'MATERIALIZED', 'MAXVALUE',
    'METHOD', 'MINUTE', 'MINVALUE', 'MODE', 'MONTH', 'MOVE', 'NAME',
    'NAMES', 'NATIONAL', 'NATURAL', 'NCHAR', 'NEW', 'NEXT', 'NFC', 'NFD',
    'NFKC', 'NFKD', 'NO', 'NONE', 'NORMALIZE', 'NORMALIZED', 'NOT',
    'NOTHING', 'NOTIFY', 'NOTNULL', 'NOWAIT', 'NULL', 'NULLIF', 'NULLS',
    'NUMERIC', 'OBJECT', 'OF', 'OFF', 'OFFSET', 'OIDS', 'OLD', 'ON',
    'ONLY', 'OPERATOR', 'OPTION', 'OPTIONS', 'OR', 'ORDER', 'ORDINALITY',
    'OTHERS', 'OUT', 'OUTER', 'OVER', 'OVERLAPS', 'OVERLAY', 'OVERRIDING',
    'OWNED', 'OWNER', 'PARALLEL', 'PARSER', 'PARTIAL', 'PARTITION',
    'PASSING', 'PASSWORD', 'PLACING', 'PLANS', 'POLICY', 'POSITION',
    'PRECEDING', 'PRECISION', 'PREPARE', 'PREPARED', 'PRESERVE', 'PRIMARY',
    'PRIOR', 'PRIVILEGES', 'PROCEDURAL', 'PROCEDURE', 'PROCEDURES',
    'PROGRAM', 'PUBLICATION', 'QUOTE', 'RANGE', 'READ', 'REAL', 'REASSIGN',
    'RECHECK', 'RECURSIVE', 'REF', 'REFERENCES', 'REFERENCING', 'REFRESH',
    'REINDEX', 'RELATIVE', 'RELEASE', 'RENAME', 'REPEATABLE', 'REPLACE',
    'REPLICA', 'RESET', 'RESTART', 'RESTRICT', 'RETURN', 'RETURNING',
    'RETURNS', 'REVOKE', 'RIGHT', 'ROLE', 'ROLLBACK', 'ROLLUP', 'ROUTINE',
    'ROUTINES', 'ROW', 'ROWS', 'RULE', 'SAVEPOINT', 'SCHEMA', 'SCHEMAS',
    'SCROLL', 'SEARCH', 'SECOND', 'SECURITY', 'SELECT', 'SEQUENCE',
    'SEQUENCES', 'SERIALIZABLE', 'SERVER', 'SESSION', 'SESSION_USER',
    'SET', 'SETOF', 'SETS', 'SHARE', 'SHOW', 'SIMILAR', 'SIMPLE', 'SKIP',
    'SMALLINT', 'SNAPSHOT', 'SOME', 'SQL', 'STABLE', 'STANDALONE', 'START',
    'STATEMENT', 'STATISTICS', 'STDIN', 'STDOUT', 'STORAGE', 'STORED',
    'STRICT', 'STRIP', 'SUBSCRIPTION', 'SUBSTRING', 'SUPPORT', 'SYMMETRIC',
    'SYSID', 'SYSTEM', 'TABLE', 'TABLES', 'TABLESAMPLE', 'TABLESPACE',
    'TEMP', 'TEMPLATE', 'TEMPORARY', 'TEXT', 'THEN', 'TIES', 'TIME',
    'TIMESTAMP', 'TO', 'TRAILING', 'TRANSACTION', 'TRANSFORM', 'TREAT',
    'TRIGGER', 'TRIM', 'TRUE', 'TRUNCATE', 'TRUSTED', 'TYPE', 'TYPES',
    'UESCAPE', 'UNBOUNDED', 'UNCOMMITTED', 'UNENCRYPTED', 'UNION',
    'UNIQUE', 'UNKNOWN', 'UNLISTEN', 'UNLOGGED', 'UNTIL', 'UPDATE', 'USER',
    'USING', 'VACUUM', 'VALID', 'VALIDATE', 'VALIDATOR', 'VALUE', 'VALUES',
    'VARCHAR', 'VARIADIC', 'VARYING', 'VERBOSE', 'VERSION', 'VIEW',
    'VIEWS', 'VOLATILE', 'WHEN', 'WHERE', 'WHITESPACE', 'WINDOW', 'WITH',
    'WITHIN', 'WITHOUT', 'WORK', 'WRAPPER', 'WRITE', 'XML', 'XMLATTRIBUTES',
    'XMLCONCAT', 'XMLELEMENT', 'XMLEXISTS', 'XMLFOREST', 'XMLNAMESPACES',
    'XMLPARSE', 'XMLPI', 'XMLROOT', 'XMLSERIALIZE', 'XMLTABLE', 'YEAR',
    'YES', 'ZONE',
)
# Built-in data type names, harvested from doc/src/sgml/datatype.sgml by the
# updater at the bottom of this file; multi-word names such as
# 'double precision' are kept as single entries.
DATATYPES = (
    'bigint', 'bigserial', 'bit', 'bit varying', 'bool', 'boolean',
    'box', 'bytea', 'char', 'character', 'character varying', 'cidr',
    'circle', 'date', 'decimal', 'double precision', 'float4', 'float8',
    'inet', 'int', 'int2', 'int4', 'int8', 'integer', 'interval',
    'json', 'jsonb', 'line', 'lseg', 'macaddr', 'macaddr8', 'money',
    'numeric', 'path', 'pg_lsn', 'pg_snapshot', 'point', 'polygon',
    'real', 'serial', 'serial2', 'serial4', 'serial8', 'smallint',
    'smallserial', 'text', 'time', 'timestamp', 'timestamptz', 'timetz',
    'tsquery', 'tsvector', 'txid_snapshot', 'uuid', 'varbit', 'varchar',
    'with time zone', 'without time zone', 'xml',
)
# Pseudo-type names from the datatype docs; the literal below is rewritten
# by ``update_consts``, so it must keep the ``PSEUDO_TYPES = ( ... )`` shape.
PSEUDO_TYPES = (
    'any', 'anyarray', 'anycompatible', 'anycompatiblearray',
    'anycompatiblemultirange', 'anycompatiblenonarray',
    'anycompatiblerange', 'anyelement', 'anyenum', 'anymultirange',
    'anynonarray', 'anyrange', 'cstring', 'event_trigger', 'fdw_handler',
    'index_am_handler', 'internal', 'language_handler', 'pg_ddl_command',
    'record', 'table_am_handler', 'trigger', 'tsm_handler', 'unknown',
    'void',
)
# Names that double as keywords (e.g. 'trigger') are highlighted as
# keywords instead, so drop them from the pseudo-type set.
PSEUDO_TYPES = tuple(sorted(set(PSEUDO_TYPES) - {kw.lower() for kw in KEYWORDS}))
# Additional keywords recognised only in PL/pgSQL bodies (on top of KEYWORDS).
PLPGSQL_KEYWORDS = (
    'ALIAS', 'CONSTANT', 'DIAGNOSTICS', 'ELSIF', 'EXCEPTION', 'EXIT',
    'FOREACH', 'GET', 'LOOP', 'NOTICE', 'OPEN', 'PERFORM', 'QUERY', 'RAISE',
    'RETURN', 'REVERSE', 'SQLSTATE', 'WHILE',
)
# EXPLAIN output node labels; most of these come from the ExplainNode
# function in src/backend/commands/explain.c of the PostgreSQL sources.
EXPLAIN_KEYWORDS = (
    'Aggregate', 'Append', 'Bitmap Heap Scan', 'Bitmap Index Scan',
    'BitmapAnd', 'BitmapOr', 'CTE Scan', 'Custom Scan', 'Delete',
    'Foreign Scan', 'Function Scan', 'Gather Merge', 'Gather', 'Group',
    'GroupAggregate', 'Hash Join', 'Hash', 'HashAggregate',
    'Incremental Sort', 'Index Only Scan', 'Index Scan', 'Insert',
    'Limit', 'LockRows', 'Materialize', 'Memoize', 'Merge Append',
    'Merge Join', 'Merge', 'MixedAggregate', 'Named Tuplestore Scan',
    'Nested Loop', 'ProjectSet', 'Recursive Union', 'Result',
    'Sample Scan', 'Seq Scan', 'SetOp', 'Sort', 'SubPlan',
    'Subquery Scan', 'Table Function Scan', 'Tid Range Scan', 'Tid Scan',
    'Unique', 'Update', 'Values Scan', 'WindowAgg', 'WorkTable Scan',
)
if __name__ == '__main__':  # pragma: no cover
    import re
    from urllib.request import urlopen

    from pygments.util import format_lines

    # One man's constant is another man's variable.
    SOURCE_URL = 'https://github.com/postgres/postgres/raw/master'
    KEYWORDS_URL = SOURCE_URL + '/src/include/parser/kwlist.h'
    DATATYPES_URL = SOURCE_URL + '/doc/src/sgml/datatype.sgml'

    def update_myself():
        """Fetch the PostgreSQL sources and rewrite this file's constants in place."""
        content = urlopen(DATATYPES_URL).read().decode('utf-8', errors='ignore')
        data_file = list(content.splitlines())
        datatypes = parse_datatypes(data_file)
        pseudos = parse_pseudos(data_file)

        content = urlopen(KEYWORDS_URL).read().decode('utf-8', errors='ignore')
        keywords = parse_keywords(content)

        update_consts(__file__, 'DATATYPES', datatypes)
        update_consts(__file__, 'PSEUDO_TYPES', pseudos)
        update_consts(__file__, 'KEYWORDS', keywords)

    def parse_keywords(f):
        """Return the sorted, upper-cased keywords found in kwlist.h.

        Raises ValueError if no ``PG_KEYWORD`` entry is found, so a silent
        upstream format change does not go unnoticed.
        """
        kw = [m.group(1).upper()
              for m in re.finditer(r'PG_KEYWORD\("(.+?)"', f)]
        if not kw:
            raise ValueError('no keyword found')
        kw.sort()
        return kw

    def parse_datatypes(f):
        """Return the sorted type names from the datatype.sgml summary table."""
        dt = set()
        for line in f:
            if '<sect1' in line:
                # Only the summary table before the first subsection matters.
                break
            if '<entry><type>' not in line:
                continue

            # Parse a string such as
            #   time [ (<replaceable>p</replaceable>) ] [ without time zone ]
            # into the types "time" and "without time zone":
            # first strip the SGML markup ...
            line = re.sub("<replaceable>[^<]+</replaceable>", "", line)
            line = re.sub("<[^>]+>", "", line)

            # ... then split on the square brackets, dropping any segment
            # that still carries a parenthesised precision spec, and
            # normalise internal whitespace.
            segments = [seg for part in line.split('[')
                        for seg in part.split(']') if "(" not in seg]
            for segment in segments:
                for candidate in segment.split(','):
                    candidate = " ".join(candidate.split())
                    if candidate:
                        dt.add(candidate)
        return sorted(dt)

    def parse_pseudos(f):
        """Return the sorted pseudo-type names from datatype.sgml.

        Raises ValueError if the pseudo-types table (or its end) cannot be
        located, or if the table turns out to be empty.
        """
        dt = []
        re_start = re.compile(r'\s*<table id="datatype-pseudotypes-table">')
        re_entry = re.compile(r'\s*<entry><type>(.+?)</type></entry>')
        re_end = re.compile(r'\s*</table>')

        f = iter(f)
        # Skip ahead to the pseudo-types table.
        for line in f:
            if re_start.match(line) is not None:
                break
        else:
            raise ValueError('pseudo datatypes table not found')

        # Collect entries until the table is closed.
        for line in f:
            m = re_entry.match(line)
            if m is not None:
                dt.append(m.group(1))
            if re_end.match(line) is not None:
                break
        else:
            raise ValueError('end of pseudo datatypes table not found')

        if not dt:
            raise ValueError('pseudo datatypes not found')
        dt.sort()
        return dt

    def update_consts(filename, constname, content):
        """Rewrite the ``constname = (...)`` tuple literal in *filename* in place."""
        with open(filename, encoding='utf-8') as f:
            data = f.read()

        # Match the whole existing tuple literal, from the opening to the
        # closing parenthesis, so it can be swapped out wholesale.
        re_match = re.compile(rf'^{constname}\s*=\s*\($.*?^\s*\)$', re.M | re.S)
        m = re_match.search(data)
        if not m:
            raise ValueError(f'Could not find existing definition for {constname}')

        new_block = format_lines(constname, content)
        data = data[:m.start()] + new_block + data[m.end():]

        with open(filename, 'w', encoding='utf-8', newline='\n') as f:
            f.write(data)

    update_myself()

View File

@@ -0,0 +1,666 @@
"""
pygments.lexers._qlik_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Qlik builtins.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# operators
# see https://help.qlik.com/en-US/sense/August2021/Subsystems/Hub/Content/Sense_Hub/Scripting/Operators/operators.htm
OPERATORS_LIST = {
"words": [
# Bit operators
"bitnot",
"bitand",
"bitor",
"bitxor",
# Logical operators
"and",
"or",
"not",
"xor",
# Relational operators
"precedes",
"follows",
# String operators
"like",
],
"symbols": [
# Bit operators
">>",
"<<",
# Logical operators
# Numeric operators
"+",
"-",
"/",
"*",
# Relational operators
"<",
"<=",
">",
">=",
"=",
"<>",
# String operators
"&",
],
}
# SCRIPT STATEMENTS
# Control statements, statement prefixes, regular statements and their
# clause words, in the order the upstream docs list them.
# see https://help.qlik.com/en-US/sense/August2021/Subsystems/Hub/Content/Sense_Hub/Scripting/
STATEMENT_LIST = [
    # control statements
    "for", "each", "in", "next", "do", "while", "until", "unless", "loop",
    "return", "switch", "case", "default", "if", "else", "endif", "then",
    "end", "exit", "script", "switch",
    # prefixes
    "Add", "Buffer", "Concatenate", "Crosstable", "First", "Generic",
    "Hierarchy", "HierarchyBelongsTo", "Inner", "IntervalMatch", "Join",
    "Keep", "Left", "Mapping", "Merge", "NoConcatenate", "Outer",
    "Partial reload", "Replace", "Right", "Sample", "Semantic", "Unless",
    "When",
    # regular statements and their clause words
    "Alias",            # alias ... as ...
    "as", "AutoNumber", "Binary",
    "Comment field",    # comment fields ... using ...
    "Comment fields",   # comment field ... with ...
    "using", "with",
    "Comment table",    # comment table ... with ...
    "Comment tables",   # comment tables ... using ...
    "Connect",
    # connection kinds: ODBC/OLEDB/CUSTOM/LIB CONNECT TO ...
    # (fixed: the original had the typo "OLEBD", so OLEDB never matched)
    "ODBC", "OLEDB", "CUSTOM", "LIB",
    "Declare", "Derive", "From", "explicit", "implicit", "Direct Query",
    "dimension", "measure", "Directory", "Disconnect", "Drop field",
    "Drop fields", "Drop table", "Drop tables", "Execute", "FlushLog",
    "Force", "capitalization", "case upper", "case lower", "case mixed",
    "Load", "distinct", "from", "inline", "resident", "from_field",
    "autogenerate", "extension", "where", "group by", "order by", "asc",
    "desc", "Let", "Loosen Table", "Map", "NullAsNull", "NullAsValue",
    "Qualify", "Rem", "Rename field", "Rename fields", "Rename table",
    "Rename tables", "Search", "include", "exclude", "Section", "access",
    "application", "Select", "Set", "Sleep", "SQL", "SQLColumns",
    "SQLTables", "SQLTypes", "Star", "Store", "Tag", "Trace", "Unmap",
    "Unqualify", "Untag",
    # Qualifiers
    "total",
]
# Script functions
# see https://help.qlik.com/en-US/sense/August2021/Subsystems/Hub/Content/Sense_Hub/Scripting/functions-in-scripts-chart-expressions.htm
SCRIPT_FUNCTIONS = [
    # basic aggregation functions
    "FirstSortedValue", "Max", "Min", "Mode", "Only", "Sum",
    # counter aggregation functions
    "Count", "MissingCount", "NullCount", "NumericCount", "TextCount",
    # financial aggregation functions
    "IRR", "XIRR", "NPV", "XNPV",
    # statistical aggregation functions
    # (fixed: the original accidentally fused "LINEST_B" and "LINEST_df"
    # into one string via implicit concatenation — they are two functions)
    "Avg", "Correl", "Fractile", "FractileExc", "Kurtosis",
    "LINEST_B", "LINEST_df", "LINEST_f", "LINEST_m", "LINEST_r2",
    "LINEST_seb", "LINEST_sem", "LINEST_sey", "LINEST_ssreg",
    "Linest_ssresid", "Median", "Skew", "Stdev", "Sterr", "STEYX",
    # statistical test functions
    "Chi2Test_chi2", "Chi2Test_df", "Chi2Test_p",
    # two independent samples t-tests
    "ttest_conf", "ttest_df", "ttest_dif", "ttest_lower", "ttest_sig",
    "ttest_sterr", "ttest_t", "ttest_upper",
    # two independent weighted samples t-tests
    "ttestw_conf", "ttestw_df", "ttestw_dif", "ttestw_lower",
    "ttestw_sig", "ttestw_sterr", "ttestw_t", "ttestw_upper",
    # one sample t-tests
    "ttest1_conf", "ttest1_df", "ttest1_dif", "ttest1_lower",
    "ttest1_sig", "ttest1_sterr", "ttest1_t", "ttest1_upper",
    # one weighted sample t-tests
    "ttest1w_conf", "ttest1w_df", "ttest1w_dif", "ttest1w_lower",
    "ttest1w_sig", "ttest1w_sterr", "ttest1w_t", "ttest1w_upper",
    # one column format z-test functions
    "ztest_conf", "ztest_dif", "ztest_sig", "ztest_sterr", "ztest_z",
    "ztest_lower", "ztest_upper",
    # weighted two-column format z-test functions
    "ztestw_conf", "ztestw_dif", "ztestw_lower", "ztestw_sig",
    "ztestw_sterr", "ztestw_upper", "ztestw_z",
    # string aggregation functions
    "Concat", "FirstValue", "LastValue", "MaxString", "MinString",
    # synthetic dimension functions
    "ValueList", "ValueLoop",
    # color functions
    "ARGB", "HSL", "RGB", "Color", "Colormix1", "Colormix2", "SysColor",
    "ColorMapHue", "ColorMapJet", "black", "blue", "brown", "cyan",
    "darkgray", "green", "lightblue", "lightcyan", "lightgray",
    "lightgreen", "lightmagenta", "lightred", "magenta", "red", "white",
    "yellow",
    # conditional functions
    "alt", "class", "coalesce", "if", "match", "mixmatch", "pick",
    "wildmatch",
    # counter functions
    "autonumber", "autonumberhash128", "autonumberhash256", "IterNo",
    "RecNo", "RowNo",
    # integer expressions of time
    "second", "minute", "hour", "day", "week", "month", "year",
    "weekyear", "weekday",
    # timestamp functions
    "now", "today", "LocalTime",
    # make functions
    "makedate", "makeweekdate", "maketime",
    # other date functions
    "AddMonths", "AddYears", "yeartodate",
    # timezone functions
    "timezone", "GMT", "UTC", "daylightsaving", "converttolocaltime",
    # set time functions
    "setdateyear", "setdateyearmonth",
    # in... functions
    "inyear", "inyeartodate", "inquarter", "inquartertodate", "inmonth",
    "inmonthtodate", "inmonths", "inmonthstodate", "inweek",
    "inweektodate", "inlunarweek", "inlunarweektodate", "inday",
    "indaytotime",
    # start ... end functions
    "yearstart", "yearend", "yearname", "quarterstart", "quarterend",
    "quartername", "monthstart", "monthend", "monthname", "monthsstart",
    "monthsend", "monthsname", "weekstart", "weekend", "weekname",
    "lunarweekstart", "lunarweekend", "lunarweekname", "daystart",
    "dayend", "dayname",
    # day numbering functions
    "age", "networkdays", "firstworkdate", "lastworkdate",
    "daynumberofyear", "daynumberofquarter",
    # exponential and logarithmic
    "exp", "log", "log10", "pow", "sqr", "sqrt",
    # count functions
    "GetAlternativeCount", "GetExcludedCount", "GetNotSelectedCount",
    "GetPossibleCount", "GetSelectedCount",
    # field and selection functions
    "GetCurrentSelections", "GetFieldSelections", "GetObjectDimension",
    "GetObjectField", "GetObjectMeasure",
    # file functions
    "Attribute", "ConnectString", "FileBaseName", "FileDir",
    "FileExtension", "FileName", "FilePath", "FileSize", "FileTime",
    "GetFolderPath", "QvdCreateTime", "QvdFieldName", "QvdNoOfFields",
    "QvdNoOfRecords", "QvdTableName",
    # financial functions
    "FV", "nPer", "Pmt", "PV", "Rate",
    # formatting functions
    "ApplyCodepage", "Date", "Dual", "Interval", "Money", "Num", "Time",
    "Timestamp",
    # general numeric functions
    "bitcount", "div", "fabs", "fact", "frac", "sign",
    # combination and permutation functions
    "combin", "permut",
    # modulo functions
    "fmod", "mod",
    # parity functions
    "even", "odd",
    # rounding functions
    "ceil", "floor", "round",
    # geospatial functions
    "GeoAggrGeometry", "GeoBoundingBox", "GeoCountVertex",
    "GeoInvProjectGeometry", "GeoProjectGeometry", "GeoReduceGeometry",
    "GeoGetBoundingBox", "GeoGetPolygonCenter", "GeoMakePoint",
    "GeoProject",
    # interpretation functions
    "Date#", "Interval#", "Money#", "Num#", "Text", "Time#",
    "Timestamp#",
    # field functions
    "FieldIndex", "FieldValue", "FieldValueCount",
    # inter-record functions in the data load script
    "Exists", "LookUp", "Peek", "Previous",
    # logical functions
    "IsNum", "IsText",
    # mapping functions
    "ApplyMap", "MapSubstring",
    # mathematical functions
    "e", "false", "pi", "rand", "true",
    # NULL functions
    "EmptyIsNull", "IsNull", "Null",
    # basic range functions
    "RangeMax", "RangeMaxString", "RangeMin", "RangeMinString",
    "RangeMode", "RangeOnly", "RangeSum",
    # counter range functions
    "RangeCount", "RangeMissingCount", "RangeNullCount",
    "RangeNumericCount", "RangeTextCount",
    # statistical range functions
    "RangeAvg", "RangeCorrel", "RangeFractile", "RangeKurtosis",
    "RangeSkew", "RangeStdev",
    # financial range functions
    "RangeIRR", "RangeNPV", "RangeXIRR", "RangeXNPV",
    # statistical distribution
    "CHIDIST", "CHIINV", "NORMDIST", "NORMINV", "TDIST", "TINV",
    "FDIST", "FINV",
    # string functions
    "Capitalize", "Chr", "Evaluate", "FindOneOf", "Hash128", "Hash160",
    "Hash256", "Index", "KeepChar", "Left", "Len", "LevenshteinDist",
    "Lower", "LTrim", "Mid", "Ord", "PurgeChar", "Repeat", "Replace",
    "Right", "RTrim", "SubField", "SubStringCount", "TextBetween",
    "Trim", "Upper",
    # system functions
    "Author", "ClientPlatform", "ComputerName", "DocumentName",
    "DocumentPath", "DocumentTitle", "EngineVersion",
    "GetCollationLocale", "GetObjectField", "GetRegistryString",
    "IsPartialReload", "OSUser", "ProductVersion", "ReloadTime",
    "StateName",
    # table functions
    "FieldName", "FieldNumber", "NoOfFields", "NoOfRows", "NoOfTables",
    "TableName", "TableNumber",
]
# System variables and constants recognised in data load scripts.
# see https://help.qlik.com/en-US/sense/August2021/Subsystems/Hub/Content/Sense_Hub/Scripting/work-with-variables-in-data-load-editor.htm
CONSTANT_LIST = [
    # system variables
    "floppy", "cd", "include", "must_include", "hideprefix", "hidesuffix",
    "qvpath", "qvroot", "QvWorkPath", "QvWorkRoot", "StripComments",
    "Verbatim", "OpenUrlTimeout", "WinPath", "WinRoot", "CollationLocale",
    "CreateSearchIndexOnReload",
    # value handling variables
    "NullDisplay", "NullInterpret", "NullValue", "OtherSymbol",
    # currency formatting
    "MoneyDecimalSep", "MoneyFormat", "MoneyThousandSep",
    # number formatting
    "DecimalSep", "ThousandSep", "NumericalAbbreviation",
    # time formatting
    "DateFormat", "TimeFormat", "TimestampFormat", "MonthNames",
    "LongMonthNames", "DayNames", "LongDayNames", "FirstWeekDay",
    "BrokenWeeks", "ReferenceDay", "FirstMonthOfYear",
    # error variables
    "errormode", "scripterror", "scripterrorcount", "scripterrorlist",
    # other
    "null",
]

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,106 @@
"""
pygments.lexers._sql_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Data files for the SQL lexer.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
KEYWORDS = (
'ABORT', 'ABS', 'ABSOLUTE', 'ACCESS', 'ADA', 'ADD', 'ADMIN', 'AFTER',
'AGGREGATE', 'ALIAS', 'ALL', 'ALLOCATE', 'ALTER', 'ANALYSE', 'ANALYZE',
'AND', 'ANY', 'ARE', 'AS', 'ASC', 'ASENSITIVE', 'ASSERTION', 'ASSIGNMENT',
'ASYMMETRIC', 'AT', 'ATOMIC', 'AUTHORIZATION', 'AVG', 'BACKWARD',
'BEFORE', 'BEGIN', 'BETWEEN', 'BITVAR', 'BIT_LENGTH', 'BOTH', 'BREADTH',
'BY', 'C', 'CACHE', 'CALL', 'CALLED', 'CARDINALITY', 'CASCADE',
'CASCADED', 'CASE', 'CAST', 'CATALOG', 'CATALOG_NAME', 'CHAIN',
'CHARACTERISTICS', 'CHARACTER_LENGTH', 'CHARACTER_SET_CATALOG',
'CHARACTER_SET_NAME', 'CHARACTER_SET_SCHEMA', 'CHAR_LENGTH', 'CHECK',
'CHECKED', 'CHECKPOINT', 'CLASS', 'CLASS_ORIGIN', 'CLOB', 'CLOSE',
'CLUSTER', 'COALESCE', 'COBOL', 'COLLATE', 'COLLATION',
'COLLATION_CATALOG', 'COLLATION_NAME', 'COLLATION_SCHEMA', 'COLUMN',
'COLUMN_NAME', 'COMMAND_FUNCTION', 'COMMAND_FUNCTION_CODE', 'COMMENT',
'COMMIT', 'COMMITTED', 'COMPLETION', 'CONDITION_NUMBER', 'CONNECT',
'CONNECTION', 'CONNECTION_NAME', 'CONSTRAINT', 'CONSTRAINTS',
'CONSTRAINT_CATALOG', 'CONSTRAINT_NAME', 'CONSTRAINT_SCHEMA',
'CONSTRUCTOR', 'CONTAINS', 'CONTINUE', 'CONVERSION', 'CONVERT',
'COPY', 'CORRESPONDING', 'COUNT', 'CREATE', 'CREATEDB', 'CREATEUSER',
'CROSS', 'CUBE', 'CURRENT', 'CURRENT_DATE', 'CURRENT_PATH',
'CURRENT_ROLE', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'CURRENT_USER',
'CURSOR', 'CURSOR_NAME', 'CYCLE', 'DATA', 'DATABASE',
'DATETIME_INTERVAL_CODE', 'DATETIME_INTERVAL_PRECISION', 'DAY',
'DEALLOCATE', 'DECLARE', 'DEFAULT', 'DEFAULTS', 'DEFERRABLE',
'DEFERRED', 'DEFINED', 'DEFINER', 'DELETE', 'DELIMITER', 'DELIMITERS',
'DEREF', 'DESC', 'DESCRIBE', 'DESCRIPTOR', 'DESTROY', 'DESTRUCTOR',
'DETERMINISTIC', 'DIAGNOSTICS', 'DICTIONARY', 'DISCONNECT', 'DISPATCH',
'DISTINCT', 'DO', 'DOMAIN', 'DROP', 'DYNAMIC', 'DYNAMIC_FUNCTION',
'DYNAMIC_FUNCTION_CODE', 'EACH', 'ELSE', 'ELSIF', 'ENCODING',
'ENCRYPTED', 'END', 'END-EXEC', 'EQUALS', 'ESCAPE', 'EVERY', 'EXCEPTION',
'EXCEPT', 'EXCLUDING', 'EXCLUSIVE', 'EXEC', 'EXECUTE', 'EXISTING',
'EXISTS', 'EXPLAIN', 'EXTERNAL', 'EXTRACT', 'FALSE', 'FETCH', 'FINAL',
'FIRST', 'FOR', 'FORCE', 'FOREIGN', 'FORTRAN', 'FORWARD', 'FOUND', 'FREE',
'FREEZE', 'FROM', 'FULL', 'FUNCTION', 'G', 'GENERAL', 'GENERATED', 'GET',
'GLOBAL', 'GO', 'GOTO', 'GRANT', 'GRANTED', 'GROUP', 'GROUPING',
'HANDLER', 'HAVING', 'HIERARCHY', 'HOLD', 'HOST', 'IDENTITY', 'IF',
'IGNORE', 'ILIKE', 'IMMEDIATE', 'IMMEDIATELY', 'IMMUTABLE', 'IMPLEMENTATION', 'IMPLICIT',
'IN', 'INCLUDING', 'INCREMENT', 'INDEX', 'INDITCATOR', 'INFIX',
'INHERITS', 'INITIALIZE', 'INITIALLY', 'INNER', 'INOUT', 'INPUT',
'INSENSITIVE', 'INSERT', 'INSTANTIABLE', 'INSTEAD', 'INTERSECT', 'INTO',
'INVOKER', 'IS', 'ISNULL', 'ISOLATION', 'ITERATE', 'JOIN', 'KEY',
'KEY_MEMBER', 'KEY_TYPE', 'LANCOMPILER', 'LANGUAGE', 'LARGE', 'LAST',
'LATERAL', 'LEADING', 'LEFT', 'LENGTH', 'LESS', 'LEVEL', 'LIKE', 'LIMIT',
'LISTEN', 'LOAD', 'LOCAL', 'LOCALTIME', 'LOCALTIMESTAMP', 'LOCATION',
'LOCATOR', 'LOCK', 'LOWER', 'MAP', 'MATCH', 'MAX', 'MAXVALUE',
'MESSAGE_LENGTH', 'MESSAGE_OCTET_LENGTH', 'MESSAGE_TEXT', 'METHOD', 'MIN',
'MINUTE', 'MINVALUE', 'MOD', 'MODE', 'MODIFIES', 'MODIFY', 'MONTH',
'MORE', 'MOVE', 'MUMPS', 'NAMES', 'NATIONAL', 'NATURAL', 'NCHAR', 'NCLOB',
'NEW', 'NEXT', 'NO', 'NOCREATEDB', 'NOCREATEUSER', 'NONE', 'NOT',
'NOTHING', 'NOTIFY', 'NOTNULL', 'NULL', 'NULLABLE', 'NULLIF', 'OBJECT',
'OCTET_LENGTH', 'OF', 'OFF', 'OFFSET', 'OIDS', 'OLD', 'ON', 'ONLY',
'OPEN', 'OPERATION', 'OPERATOR', 'OPTION', 'OPTIONS', 'OR', 'ORDER',
'ORDINALITY', 'OUT', 'OUTER', 'OUTPUT', 'OVERLAPS', 'OVERLAY',
'OVERRIDING', 'OWNER', 'PAD', 'PARAMETER', 'PARAMETERS', 'PARAMETER_MODE',
'PARAMETER_NAME', 'PARAMETER_ORDINAL_POSITION',
'PARAMETER_SPECIFIC_CATALOG', 'PARAMETER_SPECIFIC_NAME',
'PARAMETER_SPECIFIC_SCHEMA', 'PARTIAL', 'PASCAL', 'PENDANT', 'PERIOD', 'PLACING',
'PLI', 'POSITION', 'POSTFIX', 'PRECEEDS', 'PRECISION', 'PREFIX', 'PREORDER',
'PREPARE', 'PRESERVE', 'PRIMARY', 'PRIOR', 'PRIVILEGES', 'PROCEDURAL',
'PROCEDURE', 'PUBLIC', 'READ', 'READS', 'RECHECK', 'RECURSIVE', 'REF',
'REFERENCES', 'REFERENCING', 'REINDEX', 'RELATIVE', 'RENAME',
'REPEATABLE', 'REPLACE', 'RESET', 'RESTART', 'RESTRICT', 'RESULT',
'RETURN', 'RETURNED_LENGTH', 'RETURNED_OCTET_LENGTH', 'RETURNED_SQLSTATE',
'RETURNS', 'REVOKE', 'RIGHT', 'ROLE', 'ROLLBACK', 'ROLLUP', 'ROUTINE',
'ROUTINE_CATALOG', 'ROUTINE_NAME', 'ROUTINE_SCHEMA', 'ROW', 'ROWS',
'ROW_COUNT', 'RULE', 'SAVE_POINT', 'SCALE', 'SCHEMA', 'SCHEMA_NAME',
'SCOPE', 'SCROLL', 'SEARCH', 'SECOND', 'SECURITY', 'SELECT', 'SELF',
'SENSITIVE', 'SERIALIZABLE', 'SERVER_NAME', 'SESSION', 'SESSION_USER',
'SET', 'SETOF', 'SETS', 'SHARE', 'SHOW', 'SIMILAR', 'SIMPLE', 'SIZE',
'SOME', 'SOURCE', 'SPACE', 'SPECIFIC', 'SPECIFICTYPE', 'SPECIFIC_NAME',
'SQL', 'SQLCODE', 'SQLERROR', 'SQLEXCEPTION', 'SQLSTATE', 'SQLWARNINIG',
'STABLE', 'START', 'STATE', 'STATEMENT', 'STATIC', 'STATISTICS', 'STDIN',
'STDOUT', 'STORAGE', 'STRICT', 'STRUCTURE', 'STYPE', 'SUBCLASS_ORIGIN',
'SUBLIST', 'SUBSTRING', 'SUCCEEDS', 'SUM', 'SYMMETRIC', 'SYSID', 'SYSTEM',
'SYSTEM_USER', 'TABLE', 'TABLE_NAME', ' TEMP', 'TEMPLATE', 'TEMPORARY',
'TERMINATE', 'THAN', 'THEN', 'TIME', 'TIMESTAMP', 'TIMEZONE_HOUR',
'TIMEZONE_MINUTE', 'TO', 'TOAST', 'TRAILING', 'TRANSACTION',
'TRANSACTIONS_COMMITTED', 'TRANSACTIONS_ROLLED_BACK', 'TRANSACTION_ACTIVE',
'TRANSFORM', 'TRANSFORMS', 'TRANSLATE', 'TRANSLATION', 'TREAT', 'TRIGGER',
'TRIGGER_CATALOG', 'TRIGGER_NAME', 'TRIGGER_SCHEMA', 'TRIM', 'TRUE',
'TRUNCATE', 'TRUSTED', 'TYPE', 'UNCOMMITTED', 'UNDER', 'UNENCRYPTED',
'UNION', 'UNIQUE', 'UNKNOWN', 'UNLISTEN', 'UNNAMED', 'UNNEST', 'UNTIL',
'UPDATE', 'UPPER', 'USAGE', 'USER', 'USER_DEFINED_TYPE_CATALOG',
'USER_DEFINED_TYPE_NAME', 'USER_DEFINED_TYPE_SCHEMA', 'USING', 'VACUUM',
'VALID', 'VALIDATOR', 'VALUES', 'VARIABLE', 'VERBOSE',
'VERSION', 'VERSIONS', 'VERSIONING', 'VIEW',
'VOLATILE', 'WHEN', 'WHENEVER', 'WHERE', 'WITH', 'WITHOUT', 'WORK',
'WRITE', 'YEAR', 'ZONE'
)
# Built-in SQL data type names.
DATATYPES = (
    'ARRAY', 'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR',
    'CHARACTER', 'DATE', 'DEC', 'DECIMAL', 'FLOAT', 'INT', 'INTEGER',
    'INTERVAL', 'NUMBER', 'NUMERIC', 'REAL', 'SERIAL', 'SMALLINT',
    'VARCHAR', 'VARYING', 'INT8', 'SERIAL8', 'TEXT'
)

View File

@@ -0,0 +1,648 @@
"""
pygments.lexers._stan_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names of functions for Stan used by
``pygments.lexers.math.StanLexer``. This is for Stan language version 2.29.0.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Control-flow and statement keywords of the Stan modelling language.
KEYWORDS = (
    'break', 'continue', 'else', 'for', 'if',
    'in', 'print', 'reject', 'return', 'while',
)
# Data types of the Stan language: scalars, containers, and the
# constrained matrix/vector types.
TYPES = (
    'cholesky_factor_corr', 'cholesky_factor_cov', 'corr_matrix',
    'cov_matrix', 'int', 'matrix', 'ordered', 'positive_ordered',
    'real', 'row_vector', 'simplex', 'unit_vector', 'vector',
    'void', 'array', 'complex',
)
# Built-in function names of the Stan language, in the original listing order.
FUNCTIONS = (
    'abs', 'acos', 'acosh', 'add_diag', 'algebra_solver',
    'algebra_solver_newton', 'append_array', 'append_col', 'append_row',
    'arg', 'asin', 'asinh', 'atan', 'atan2', 'atanh',
    'bernoulli_cdf', 'bernoulli_lccdf', 'bernoulli_lcdf',
    'bernoulli_logit_glm_lpmf', 'bernoulli_logit_glm_lupmf',
    'bernoulli_logit_glm_rng', 'bernoulli_logit_lpmf',
    'bernoulli_logit_lupmf', 'bernoulli_logit_rng', 'bernoulli_lpmf',
    'bernoulli_lupmf', 'bernoulli_rng',
    'bessel_first_kind', 'bessel_second_kind', 'beta',
    'beta_binomial_cdf', 'beta_binomial_lccdf', 'beta_binomial_lcdf',
    'beta_binomial_lpmf', 'beta_binomial_lupmf', 'beta_binomial_rng',
    'beta_cdf', 'beta_lccdf', 'beta_lcdf', 'beta_lpdf', 'beta_lupdf',
    'beta_proportion_lccdf', 'beta_proportion_lcdf', 'beta_proportion_rng',
    'beta_rng',
    'binary_log_loss', 'binomial_cdf', 'binomial_coefficient_log',
    'binomial_lccdf', 'binomial_lcdf', 'binomial_logit_lpmf',
    'binomial_logit_lupmf', 'binomial_lpmf', 'binomial_lupmf',
    'binomial_rng', 'block',
    'categorical_logit_glm_lpmf', 'categorical_logit_glm_lupmf',
    'categorical_logit_lpmf', 'categorical_logit_lupmf',
    'categorical_logit_rng', 'categorical_lpmf', 'categorical_lupmf',
    'categorical_rng',
    'cauchy_cdf', 'cauchy_lccdf', 'cauchy_lcdf', 'cauchy_lpdf',
    'cauchy_lupdf', 'cauchy_rng', 'cbrt', 'ceil',
    'chi_square_cdf', 'chi_square_lccdf', 'chi_square_lcdf',
    'chi_square_lpdf', 'chi_square_lupdf', 'chi_square_rng',
    'chol2inv', 'cholesky_decompose', 'choose', 'col', 'cols',
    'columns_dot_product', 'columns_dot_self', 'conj', 'cos', 'cosh',
    'cov_exp_quad', 'crossprod',
    'csr_extract_u', 'csr_extract_v', 'csr_extract_w',
    'csr_matrix_times_vector', 'csr_to_dense_matrix', 'cumulative_sum',
    'dae', 'dae_tol', 'determinant', 'diag_matrix', 'diag_post_multiply',
    'diag_pre_multiply', 'diagonal', 'digamma', 'dims',
    'dirichlet_lpdf', 'dirichlet_lupdf', 'dirichlet_rng',
    'discrete_range_cdf', 'discrete_range_lccdf', 'discrete_range_lcdf',
    'discrete_range_lpmf', 'discrete_range_lupmf', 'discrete_range_rng',
    'distance', 'dot_product', 'dot_self',
    'double_exponential_cdf', 'double_exponential_lccdf',
    'double_exponential_lcdf', 'double_exponential_lpdf',
    'double_exponential_lupdf', 'double_exponential_rng',
    'e', 'eigenvalues_sym', 'eigenvectors_sym', 'erf', 'erfc',
    'exp', 'exp2',
    'exp_mod_normal_cdf', 'exp_mod_normal_lccdf', 'exp_mod_normal_lcdf',
    'exp_mod_normal_lpdf', 'exp_mod_normal_lupdf', 'exp_mod_normal_rng',
    'expm1',
    'exponential_cdf', 'exponential_lccdf', 'exponential_lcdf',
    'exponential_lpdf', 'exponential_lupdf', 'exponential_rng',
    'fabs', 'falling_factorial', 'fdim', 'floor', 'fma', 'fmax',
    'fmin', 'fmod',
    'frechet_cdf', 'frechet_lccdf', 'frechet_lcdf', 'frechet_lpdf',
    'frechet_lupdf', 'frechet_rng',
    'gamma_cdf', 'gamma_lccdf', 'gamma_lcdf', 'gamma_lpdf',
    'gamma_lupdf', 'gamma_p', 'gamma_q', 'gamma_rng',
    'gaussian_dlm_obs_lpdf', 'gaussian_dlm_obs_lupdf',
    'generalized_inverse', 'get_imag', 'get_lp', 'get_real',
    'gumbel_cdf', 'gumbel_lccdf', 'gumbel_lcdf', 'gumbel_lpdf',
    'gumbel_lupdf', 'gumbel_rng',
    'head', 'hmm_hidden_state_prob', 'hmm_latent_rng', 'hmm_marginal',
    'hypergeometric_lpmf', 'hypergeometric_lupmf', 'hypergeometric_rng',
    'hypot',
    'identity_matrix', 'inc_beta', 'int_step', 'integrate_1d',
    'integrate_ode', 'integrate_ode_adams', 'integrate_ode_bdf',
    'integrate_ode_rk45',
    'inv', 'inv_chi_square_cdf', 'inv_chi_square_lccdf',
    'inv_chi_square_lcdf', 'inv_chi_square_lpdf', 'inv_chi_square_lupdf',
    'inv_chi_square_rng', 'inv_cloglog', 'inv_erfc',
    'inv_gamma_cdf', 'inv_gamma_lccdf', 'inv_gamma_lcdf',
    'inv_gamma_lpdf', 'inv_gamma_lupdf', 'inv_gamma_rng',
    'inv_logit', 'inv_Phi', 'inv_sqrt', 'inv_square',
    'inv_wishart_lpdf', 'inv_wishart_lupdf', 'inv_wishart_rng',
    'inverse', 'inverse_spd', 'is_inf', 'is_nan',
    'lambert_w0', 'lambert_wm1', 'lbeta', 'lchoose', 'ldexp', 'lgamma',
    'linspaced_array', 'linspaced_int_array', 'linspaced_row_vector',
    'linspaced_vector',
    'lkj_corr_cholesky_lpdf', 'lkj_corr_cholesky_lupdf',
    'lkj_corr_cholesky_rng', 'lkj_corr_lpdf', 'lkj_corr_lupdf',
    'lkj_corr_rng',
    'lmgamma', 'lmultiply', 'log', 'log10', 'log1m', 'log1m_exp',
    'log1m_inv_logit', 'log1p', 'log1p_exp', 'log2',
    'log_determinant', 'log_diff_exp', 'log_falling_factorial',
    'log_inv_logit', 'log_inv_logit_diff', 'log_mix',
    'log_modified_bessel_first_kind', 'log_rising_factorial',
    'log_softmax', 'log_sum_exp',
    'logistic_cdf', 'logistic_lccdf', 'logistic_lcdf', 'logistic_lpdf',
    'logistic_lupdf', 'logistic_rng', 'logit',
    'loglogistic_cdf', 'loglogistic_lpdf', 'loglogistic_rng',
    'lognormal_cdf', 'lognormal_lccdf', 'lognormal_lcdf',
    'lognormal_lpdf', 'lognormal_lupdf', 'lognormal_rng',
    'machine_precision', 'map_rect', 'matrix_exp', 'matrix_exp_multiply',
    'matrix_power', 'max',
    'mdivide_left_spd', 'mdivide_left_tri_low', 'mdivide_right_spd',
    'mdivide_right_tri_low', 'mean', 'min',
    'modified_bessel_first_kind', 'modified_bessel_second_kind',
    'multi_gp_cholesky_lpdf', 'multi_gp_cholesky_lupdf',
    'multi_gp_lpdf', 'multi_gp_lupdf',
    'multi_normal_cholesky_lpdf', 'multi_normal_cholesky_lupdf',
    'multi_normal_cholesky_rng', 'multi_normal_lpdf',
    'multi_normal_lupdf', 'multi_normal_prec_lpdf',
    'multi_normal_prec_lupdf', 'multi_normal_rng',
    'multi_student_t_lpdf', 'multi_student_t_lupdf', 'multi_student_t_rng',
    'multinomial_logit_lpmf', 'multinomial_logit_lupmf',
    'multinomial_logit_rng', 'multinomial_lpmf', 'multinomial_lupmf',
    'multinomial_rng',
    'multiply_log', 'multiply_lower_tri_self_transpose',
    'neg_binomial_2_cdf', 'neg_binomial_2_lccdf', 'neg_binomial_2_lcdf',
    'neg_binomial_2_log_glm_lpmf', 'neg_binomial_2_log_glm_lupmf',
    'neg_binomial_2_log_lpmf', 'neg_binomial_2_log_lupmf',
    'neg_binomial_2_log_rng', 'neg_binomial_2_lpmf',
    'neg_binomial_2_lupmf', 'neg_binomial_2_rng',
    'neg_binomial_cdf', 'neg_binomial_lccdf', 'neg_binomial_lcdf',
    'neg_binomial_lpmf', 'neg_binomial_lupmf', 'neg_binomial_rng',
    'negative_infinity', 'norm', 'normal_cdf', 'normal_id_glm_lpdf',
    'normal_id_glm_lupdf', 'normal_lccdf', 'normal_lcdf', 'normal_lpdf',
    'normal_lupdf', 'normal_rng', 'not_a_number', 'num_elements',
    'ode_adams', 'ode_adams_tol', 'ode_adjoint_tol_ctl', 'ode_bdf',
    'ode_bdf_tol', 'ode_ckrk', 'ode_ckrk_tol', 'ode_rk45', 'ode_rk45_tol',
    'one_hot_array', 'one_hot_int_array', 'one_hot_row_vector',
    'one_hot_vector', 'ones_array', 'ones_int_array', 'ones_row_vector',
    'ones_vector',
    'ordered_logistic_glm_lpmf', 'ordered_logistic_glm_lupmf',
    'ordered_logistic_lpmf', 'ordered_logistic_lupmf',
    'ordered_logistic_rng', 'ordered_probit_lpmf', 'ordered_probit_lupmf',
    'ordered_probit_rng',
    'owens_t',
    'pareto_cdf', 'pareto_lccdf', 'pareto_lcdf', 'pareto_lpdf',
    'pareto_lupdf', 'pareto_rng',
    'pareto_type_2_cdf', 'pareto_type_2_lccdf', 'pareto_type_2_lcdf',
    'pareto_type_2_lpdf', 'pareto_type_2_lupdf', 'pareto_type_2_rng',
    'Phi', 'Phi_approx', 'pi',
    'poisson_cdf', 'poisson_lccdf', 'poisson_lcdf',
    'poisson_log_glm_lpmf', 'poisson_log_glm_lupmf', 'poisson_log_lpmf',
    'poisson_log_lupmf', 'poisson_log_rng', 'poisson_lpmf',
    'poisson_lupmf', 'poisson_rng',
    'polar', 'positive_infinity', 'pow', 'print', 'prod', 'proj',
    'qr_Q', 'qr_R', 'qr_thin_Q', 'qr_thin_R',
    'quad_form', 'quad_form_diag', 'quad_form_sym', 'quantile', 'rank',
    'rayleigh_cdf', 'rayleigh_lccdf', 'rayleigh_lcdf', 'rayleigh_lpdf',
    'rayleigh_lupdf', 'rayleigh_rng',
    'reduce_sum', 'reject', 'rep_array', 'rep_matrix', 'rep_row_vector',
    'rep_vector', 'reverse', 'rising_factorial',
    'round', 'row', 'rows', 'rows_dot_product', 'rows_dot_self',
    'scale_matrix_exp_multiply',
    'scaled_inv_chi_square_cdf', 'scaled_inv_chi_square_lccdf',
    'scaled_inv_chi_square_lcdf', 'scaled_inv_chi_square_lpdf',
    'scaled_inv_chi_square_lupdf', 'scaled_inv_chi_square_rng',
    'sd', 'segment', 'sin', 'singular_values', 'sinh', 'size',
    'skew_double_exponential_cdf', 'skew_double_exponential_lccdf',
    'skew_double_exponential_lcdf', 'skew_double_exponential_lpdf',
    'skew_double_exponential_lupdf', 'skew_double_exponential_rng',
    'skew_normal_cdf', 'skew_normal_lccdf', 'skew_normal_lcdf',
    'skew_normal_lpdf', 'skew_normal_lupdf', 'skew_normal_rng',
    'softmax', 'sort_asc', 'sort_desc', 'sort_indices_asc',
    'sort_indices_desc', 'sqrt', 'sqrt2', 'square', 'squared_distance',
    'std_normal_cdf', 'std_normal_lccdf', 'std_normal_lcdf',
    'std_normal_lpdf', 'std_normal_lupdf', 'std_normal_rng', 'step',
    'student_t_cdf', 'student_t_lccdf', 'student_t_lcdf',
    'student_t_lpdf', 'student_t_lupdf', 'student_t_rng',
    'sub_col', 'sub_row', 'sum', 'svd_U', 'svd_V',
    'symmetrize_from_lower_tri',
    'tail', 'tan', 'tanh', 'target', 'tcrossprod', 'tgamma',
    'to_array_1d', 'to_array_2d', 'to_complex', 'to_matrix',
    'to_row_vector', 'to_vector',
    'trace', 'trace_gen_quad_form', 'trace_quad_form', 'trigamma',
    'trunc',
    'uniform_cdf', 'uniform_lccdf', 'uniform_lcdf', 'uniform_lpdf',
    'uniform_lupdf', 'uniform_rng', 'uniform_simplex',
    'variance',
    'von_mises_cdf', 'von_mises_lccdf', 'von_mises_lcdf',
    'von_mises_lpdf', 'von_mises_lupdf', 'von_mises_rng',
    'weibull_cdf', 'weibull_lccdf', 'weibull_lcdf', 'weibull_lpdf',
    'weibull_lupdf', 'weibull_rng',
    'wiener_lpdf', 'wiener_lupdf', 'wishart_lpdf', 'wishart_lupdf',
    'wishart_rng',
    'zeros_array', 'zeros_int_array', 'zeros_row_vector',
)
# Distribution names usable with Stan's sampling-statement syntax
# (``y ~ dist(...)``), in the original listing order.
DISTRIBUTIONS = (
    'bernoulli', 'bernoulli_logit', 'bernoulli_logit_glm', 'beta',
    'beta_binomial', 'binomial', 'binomial_logit', 'categorical',
    'categorical_logit', 'categorical_logit_glm', 'cauchy', 'chi_square',
    'dirichlet', 'discrete_range', 'double_exponential', 'exp_mod_normal',
    'exponential', 'frechet', 'gamma', 'gaussian_dlm_obs', 'gumbel',
    'hypergeometric', 'inv_chi_square', 'inv_gamma', 'inv_wishart',
    'lkj_corr', 'lkj_corr_cholesky', 'logistic', 'loglogistic',
    'lognormal', 'multi_gp', 'multi_gp_cholesky', 'multi_normal',
    'multi_normal_cholesky', 'multi_normal_prec', 'multi_student_t',
    'multinomial', 'multinomial_logit', 'neg_binomial', 'neg_binomial_2',
    'neg_binomial_2_log', 'neg_binomial_2_log_glm', 'normal',
    'normal_id_glm', 'ordered_logistic', 'ordered_logistic_glm',
    'ordered_probit', 'pareto', 'pareto_type_2', 'poisson', 'poisson_log',
    'poisson_log_glm', 'rayleigh', 'scaled_inv_chi_square',
    'skew_double_exponential', 'skew_normal', 'std_normal', 'student_t',
    'uniform', 'von_mises', 'weibull', 'wiener', 'wishart',
)
# Identifiers reserved by Stan (not usable as variable names).
# Fix: the original tuple listed 'var' twice; the duplicate entry is removed.
RESERVED = (
    'repeat',
    'until',
    'then',
    'true',
    'false',
    'var',
    'struct',
    'typedef',
    'export',
    'auto',
    'extern',
    'static',
)

View File

@@ -0,0 +1,457 @@
"""
pygments.lexers._stata_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtins for Stata
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Stata keywords that receive special highlighting (command qualifiers
# plus ``gen``/``generate``).
builtins_special = (
    "if",
    "in",
    "using",
    "replace",
    "by",
    "gen",
    "generate",
)
# Names of Stata built-in commands (including common prefix abbreviations,
# e.g. "forv" … "forvalues", and internal helper commands); used by the
# Stata lexer to highlight command words.
# NOTE(review): list appears machine-generated — keep ordering as-is.
builtins_base = (
    "if", "else", "in", "foreach", "for", "forv", "forva",
    "forval", "forvalu", "forvalue", "forvalues", "by", "bys",
    "bysort", "quietly", "qui", "about", "ac",
    "ac_7", "acprplot", "acprplot_7", "adjust", "ado", "adopath",
    "adoupdate", "alpha", "ameans", "an", "ano", "anov", "anova",
    "anova_estat", "anova_terms", "anovadef", "aorder", "ap", "app",
    "appe", "appen", "append", "arch", "arch_dr", "arch_estat",
    "arch_p", "archlm", "areg", "areg_p", "args", "arima",
    "arima_dr", "arima_estat", "arima_p", "as", "asmprobit",
    "asmprobit_estat", "asmprobit_lf", "asmprobit_mfx__dlg",
    "asmprobit_p", "ass", "asse", "asser", "assert", "avplot",
    "avplot_7", "avplots", "avplots_7", "bcskew0", "bgodfrey",
    "binreg", "bip0_lf", "biplot", "bipp_lf", "bipr_lf",
    "bipr_p", "biprobit", "bitest", "bitesti", "bitowt", "blogit",
    "bmemsize", "boot", "bootsamp", "bootstrap", "bootstrap_8",
    "boxco_l", "boxco_p", "boxcox", "boxcox_6", "boxcox_p",
    "bprobit", "br", "break", "brier", "bro", "brow", "brows",
    "browse", "brr", "brrstat", "bs", "bs_7", "bsampl_w",
    "bsample", "bsample_7", "bsqreg", "bstat", "bstat_7", "bstat_8",
    "bstrap", "bstrap_7", "ca", "ca_estat", "ca_p", "cabiplot",
    "camat", "canon", "canon_8", "canon_8_p", "canon_estat",
    "canon_p", "cap", "caprojection", "capt", "captu", "captur",
    "capture", "cat", "cc", "cchart", "cchart_7", "cci",
    "cd", "censobs_table", "centile", "cf", "char", "chdir",
    "checkdlgfiles", "checkestimationsample", "checkhlpfiles",
    "checksum", "chelp", "ci", "cii", "cl", "class", "classutil",
    "clear", "cli", "clis", "clist", "clo", "clog", "clog_lf",
    "clog_p", "clogi", "clogi_sw", "clogit", "clogit_lf",
    "clogit_p", "clogitp", "clogl_sw", "cloglog", "clonevar",
    "clslistarray", "cluster", "cluster_measures", "cluster_stop",
    "cluster_tree", "cluster_tree_8", "clustermat", "cmdlog",
    "cnr", "cnre", "cnreg", "cnreg_p", "cnreg_sw", "cnsreg",
    "codebook", "collaps4", "collapse", "colormult_nb",
    "colormult_nw", "compare", "compress", "conf", "confi",
    "confir", "confirm", "conren", "cons", "const", "constr",
    "constra", "constrai", "constrain", "constraint", "continue",
    "contract", "copy", "copyright", "copysource", "cor", "corc",
    "corr", "corr2data", "corr_anti", "corr_kmo", "corr_smc",
    "corre", "correl", "correla", "correlat", "correlate",
    "corrgram", "cou", "coun", "count", "cox", "cox_p", "cox_sw",
    "coxbase", "coxhaz", "coxvar", "cprplot", "cprplot_7",
    "crc", "cret", "cretu", "cretur", "creturn", "cross", "cs",
    "cscript", "cscript_log", "csi", "ct", "ct_is", "ctset",
    "ctst_5", "ctst_st", "cttost", "cumsp", "cumsp_7", "cumul",
    "cusum", "cusum_7", "cutil", "d", "datasig", "datasign",
    "datasigna", "datasignat", "datasignatu", "datasignatur",
    "datasignature", "datetof", "db", "dbeta", "de", "dec",
    "deco", "decod", "decode", "deff", "des", "desc", "descr",
    "descri", "describ", "describe", "destring", "dfbeta",
    "dfgls", "dfuller", "di", "di_g", "dir", "dirstats", "dis",
    "discard", "disp", "disp_res", "disp_s", "displ", "displa",
    "display", "distinct", "do", "doe", "doed", "doedi",
    "doedit", "dotplot", "dotplot_7", "dprobit", "drawnorm",
    "drop", "ds", "ds_util", "dstdize", "duplicates", "durbina",
    "dwstat", "dydx", "e", "ed", "edi", "edit", "egen",
    "eivreg", "emdef", "end", "en", "enc", "enco", "encod", "encode",
    "eq", "erase", "ereg", "ereg_lf", "ereg_p", "ereg_sw",
    "ereghet", "ereghet_glf", "ereghet_glf_sh", "ereghet_gp",
    "ereghet_ilf", "ereghet_ilf_sh", "ereghet_ip", "eret",
    "eretu", "eretur", "ereturn", "err", "erro", "error", "est",
    "est_cfexist", "est_cfname", "est_clickable", "est_expand",
    "est_hold", "est_table", "est_unhold", "est_unholdok",
    "estat", "estat_default", "estat_summ", "estat_vce_only",
    "esti", "estimates", "etodow", "etof", "etomdy", "ex",
    "exi", "exit", "expand", "expandcl", "fac", "fact", "facto",
    "factor", "factor_estat", "factor_p", "factor_pca_rotated",
    "factor_rotate", "factormat", "fcast", "fcast_compute",
    "fcast_graph", "fdades", "fdadesc", "fdadescr", "fdadescri",
    "fdadescrib", "fdadescribe", "fdasav", "fdasave", "fdause",
    "fh_st", "open", "read", "close",
    "file", "filefilter", "fillin", "find_hlp_file", "findfile",
    "findit", "findit_7", "fit", "fl", "fli", "flis", "flist",
    "for5_0", "form", "forma", "format", "fpredict", "frac_154",
    "frac_adj", "frac_chk", "frac_cox", "frac_ddp", "frac_dis",
    "frac_dv", "frac_in", "frac_mun", "frac_pp", "frac_pq",
    "frac_pv", "frac_wgt", "frac_xo", "fracgen", "fracplot",
    "fracplot_7", "fracpoly", "fracpred", "fron_ex", "fron_hn",
    "fron_p", "fron_tn", "fron_tn2", "frontier", "ftodate", "ftoe",
    "ftomdy", "ftowdate", "g", "gamhet_glf", "gamhet_gp",
    "gamhet_ilf", "gamhet_ip", "gamma", "gamma_d2", "gamma_p",
    "gamma_sw", "gammahet", "gdi_hexagon", "gdi_spokes", "ge",
    "gen", "gene", "gener", "genera", "generat", "generate",
    "genrank", "genstd", "genvmean", "gettoken", "gl", "gladder",
    "gladder_7", "glim_l01", "glim_l02", "glim_l03", "glim_l04",
    "glim_l05", "glim_l06", "glim_l07", "glim_l08", "glim_l09",
    "glim_l10", "glim_l11", "glim_l12", "glim_lf", "glim_mu",
    "glim_nw1", "glim_nw2", "glim_nw3", "glim_p", "glim_v1",
    "glim_v2", "glim_v3", "glim_v4", "glim_v5", "glim_v6",
    "glim_v7", "glm", "glm_6", "glm_p", "glm_sw", "glmpred", "glo",
    "glob", "globa", "global", "glogit", "glogit_8", "glogit_p",
    "gmeans", "gnbre_lf", "gnbreg", "gnbreg_5", "gnbreg_p",
    "gomp_lf", "gompe_sw", "gomper_p", "gompertz", "gompertzhet",
    "gomphet_glf", "gomphet_glf_sh", "gomphet_gp", "gomphet_ilf",
    "gomphet_ilf_sh", "gomphet_ip", "gphdot", "gphpen",
    "gphprint", "gprefs", "gprobi_p", "gprobit", "gprobit_8", "gr",
    "gr7", "gr_copy", "gr_current", "gr_db", "gr_describe",
    "gr_dir", "gr_draw", "gr_draw_replay", "gr_drop", "gr_edit",
    "gr_editviewopts", "gr_example", "gr_example2", "gr_export",
    "gr_print", "gr_qscheme", "gr_query", "gr_read", "gr_rename",
    "gr_replay", "gr_save", "gr_set", "gr_setscheme", "gr_table",
    "gr_undo", "gr_use", "graph", "graph7", "grebar", "greigen",
    "greigen_7", "greigen_8", "grmeanby", "grmeanby_7",
    "gs_fileinfo", "gs_filetype", "gs_graphinfo", "gs_stat",
    "gsort", "gwood", "h", "hadimvo", "hareg", "hausman",
    "haver", "he", "heck_d2", "heckma_p", "heckman", "heckp_lf",
    "heckpr_p", "heckprob", "hel", "help", "hereg", "hetpr_lf",
    "hetpr_p", "hetprob", "hettest", "hexdump", "hilite",
    "hist", "hist_7", "histogram", "hlogit", "hlu", "hmeans",
    "hotel", "hotelling", "hprobit", "hreg", "hsearch", "icd9",
    "icd9_ff", "icd9p", "iis", "impute", "imtest", "inbase",
    "include", "inf", "infi", "infil", "infile", "infix", "inp",
    "inpu", "input", "ins", "insheet", "insp", "inspe",
    "inspec", "inspect", "integ", "inten", "intreg", "intreg_7",
    "intreg_p", "intrg2_ll", "intrg_ll", "intrg_ll2", "ipolate",
    "iqreg", "ir", "irf", "irf_create", "irfm", "iri", "is_svy",
    "is_svysum", "isid", "istdize", "ivprob_1_lf", "ivprob_lf",
    "ivprobit", "ivprobit_p", "ivreg", "ivreg_footnote",
    "ivtob_1_lf", "ivtob_lf", "ivtobit", "ivtobit_p", "jackknife",
    "jacknife", "jknife", "jknife_6", "jknife_8", "jkstat",
    "joinby", "kalarma1", "kap", "kap_3", "kapmeier", "kappa",
    "kapwgt", "kdensity", "kdensity_7", "keep", "ksm", "ksmirnov",
    "ktau", "kwallis", "l", "la", "lab", "labe", "label",
    "labelbook", "ladder", "levels", "levelsof", "leverage",
    "lfit", "lfit_p", "li", "lincom", "line", "linktest",
    "lis", "list", "lloghet_glf", "lloghet_glf_sh", "lloghet_gp",
    "lloghet_ilf", "lloghet_ilf_sh", "lloghet_ip", "llogi_sw",
    "llogis_p", "llogist", "llogistic", "llogistichet",
    "lnorm_lf", "lnorm_sw", "lnorma_p", "lnormal", "lnormalhet",
    "lnormhet_glf", "lnormhet_glf_sh", "lnormhet_gp",
    "lnormhet_ilf", "lnormhet_ilf_sh", "lnormhet_ip", "lnskew0",
    "loadingplot", "loc", "loca", "local", "log", "logi",
    "logis_lf", "logistic", "logistic_p", "logit", "logit_estat",
    "logit_p", "loglogs", "logrank", "loneway", "lookfor",
    "lookup", "lowess", "lowess_7", "lpredict", "lrecomp", "lroc",
    "lroc_7", "lrtest", "ls", "lsens", "lsens_7", "lsens_x",
    "lstat", "ltable", "ltable_7", "ltriang", "lv", "lvr2plot",
    "lvr2plot_7", "m", "ma", "mac", "macr", "macro", "makecns",
    "man", "manova", "manova_estat", "manova_p", "manovatest",
    "mantel", "mark", "markin", "markout", "marksample", "mat",
    "mat_capp", "mat_order", "mat_put_rr", "mat_rapp", "mata",
    "mata_clear", "mata_describe", "mata_drop", "mata_matdescribe",
    "mata_matsave", "mata_matuse", "mata_memory", "mata_mlib",
    "mata_mosave", "mata_rename", "mata_which", "matalabel",
    "matcproc", "matlist", "matname", "matr", "matri",
    "matrix", "matrix_input__dlg", "matstrik", "mcc", "mcci",
    "md0_", "md1_", "md1debug_", "md2_", "md2debug_", "mds",
    "mds_estat", "mds_p", "mdsconfig", "mdslong", "mdsmat",
    "mdsshepard", "mdytoe", "mdytof", "me_derd", "mean",
    "means", "median", "memory", "memsize", "meqparse", "mer",
    "merg", "merge", "mfp", "mfx", "mhelp", "mhodds", "minbound",
    "mixed_ll", "mixed_ll_reparm", "mkassert", "mkdir",
    "mkmat", "mkspline", "ml", "ml_5", "ml_adjs", "ml_bhhhs",
    "ml_c_d", "ml_check", "ml_clear", "ml_cnt", "ml_debug",
    "ml_defd", "ml_e0", "ml_e0_bfgs", "ml_e0_cycle", "ml_e0_dfp",
    "ml_e0i", "ml_e1", "ml_e1_bfgs", "ml_e1_bhhh", "ml_e1_cycle",
    "ml_e1_dfp", "ml_e2", "ml_e2_cycle", "ml_ebfg0", "ml_ebfr0",
    "ml_ebfr1", "ml_ebh0q", "ml_ebhh0", "ml_ebhr0", "ml_ebr0i",
    "ml_ecr0i", "ml_edfp0", "ml_edfr0", "ml_edfr1", "ml_edr0i",
    "ml_eds", "ml_eer0i", "ml_egr0i", "ml_elf", "ml_elf_bfgs",
    "ml_elf_bhhh", "ml_elf_cycle", "ml_elf_dfp", "ml_elfi",
    "ml_elfs", "ml_enr0i", "ml_enrr0", "ml_erdu0", "ml_erdu0_bfgs",
    "ml_erdu0_bhhh", "ml_erdu0_bhhhq", "ml_erdu0_cycle",
    "ml_erdu0_dfp", "ml_erdu0_nrbfgs", "ml_exde", "ml_footnote",
    "ml_geqnr", "ml_grad0", "ml_graph", "ml_hbhhh", "ml_hd0",
    "ml_hold", "ml_init", "ml_inv", "ml_log", "ml_max",
    "ml_mlout", "ml_mlout_8", "ml_model", "ml_nb0", "ml_opt",
    "ml_p", "ml_plot", "ml_query", "ml_rdgrd", "ml_repor",
    "ml_s_e", "ml_score", "ml_searc", "ml_technique", "ml_unhold",
    "mleval", "mlf_", "mlmatbysum", "mlmatsum", "mlog", "mlogi",
    "mlogit", "mlogit_footnote", "mlogit_p", "mlopts", "mlsum",
    "mlvecsum", "mnl0_", "mor", "more", "mov", "move", "mprobit",
    "mprobit_lf", "mprobit_p", "mrdu0_", "mrdu1_", "mvdecode",
    "mvencode", "mvreg", "mvreg_estat", "n", "nbreg",
    "nbreg_al", "nbreg_lf", "nbreg_p", "nbreg_sw", "nestreg", "net",
    "newey", "newey_7", "newey_p", "news", "nl", "nl_7", "nl_9",
    "nl_9_p", "nl_p", "nl_p_7", "nlcom", "nlcom_p", "nlexp2",
    "nlexp2_7", "nlexp2a", "nlexp2a_7", "nlexp3", "nlexp3_7",
    "nlgom3", "nlgom3_7", "nlgom4", "nlgom4_7", "nlinit", "nllog3",
    "nllog3_7", "nllog4", "nllog4_7", "nlog_rd", "nlogit",
    "nlogit_p", "nlogitgen", "nlogittree", "nlpred", "no",
    "nobreak", "noi", "nois", "noisi", "noisil", "noisily", "note",
    "notes", "notes_dlg", "nptrend", "numlabel", "numlist", "odbc",
    "old_ver", "olo", "olog", "ologi", "ologi_sw", "ologit",
    "ologit_p", "ologitp", "on", "one", "onew", "onewa", "oneway",
    "op_colnm", "op_comp", "op_diff", "op_inv", "op_str", "opr",
    "opro", "oprob", "oprob_sw", "oprobi", "oprobi_p", "oprobit",
    "oprobitp", "opts_exclusive", "order", "orthog", "orthpoly",
    "ou", "out", "outf", "outfi", "outfil", "outfile", "outs",
    "outsh", "outshe", "outshee", "outsheet", "ovtest", "pac",
    "pac_7", "palette", "parse", "parse_dissim", "pause", "pca",
    "pca_8", "pca_display", "pca_estat", "pca_p", "pca_rotate",
    "pcamat", "pchart", "pchart_7", "pchi", "pchi_7", "pcorr",
    "pctile", "pentium", "pergram", "pergram_7", "permute",
    "permute_8", "personal", "peto_st", "pkcollapse", "pkcross",
    "pkequiv", "pkexamine", "pkexamine_7", "pkshape", "pksumm",
    "pksumm_7", "pl", "plo", "plot", "plugin", "pnorm",
    "pnorm_7", "poisgof", "poiss_lf", "poiss_sw", "poisso_p",
    "poisson", "poisson_estat", "post", "postclose", "postfile",
    "postutil", "pperron", "pr", "prais", "prais_e", "prais_e2",
    "prais_p", "predict", "predictnl", "preserve", "print",
    "pro", "prob", "probi", "probit", "probit_estat", "probit_p",
    "proc_time", "procoverlay", "procrustes", "procrustes_estat",
    "procrustes_p", "profiler", "prog", "progr", "progra",
    "program", "prop", "proportion", "prtest", "prtesti", "pwcorr",
    "pwd", "q", "s", "qby", "qbys", "qchi", "qchi_7", "qladder",
    "qladder_7", "qnorm", "qnorm_7", "qqplot", "qqplot_7", "qreg",
    "qreg_c", "qreg_p", "qreg_sw", "qu", "quadchk", "quantile",
    "quantile_7", "que", "quer", "query", "range", "ranksum",
    "ratio", "rchart", "rchart_7", "rcof", "recast", "reclink",
    "recode", "reg", "reg3", "reg3_p", "regdw", "regr", "regre",
    "regre_p2", "regres", "regres_p", "regress", "regress_estat",
    "regriv_p", "remap", "ren", "rena", "renam", "rename",
    "renpfix", "repeat", "replace", "report", "reshape",
    "restore", "ret", "retu", "retur", "return", "rm", "rmdir",
    "robvar", "roccomp", "roccomp_7", "roccomp_8", "rocf_lf",
    "rocfit", "rocfit_8", "rocgold", "rocplot", "rocplot_7",
    "roctab", "roctab_7", "rolling", "rologit", "rologit_p",
    "rot", "rota", "rotat", "rotate", "rotatemat", "rreg",
    "rreg_p", "ru", "run", "runtest", "rvfplot", "rvfplot_7",
    "rvpplot", "rvpplot_7", "sa", "safesum", "sample",
    "sampsi", "sav", "save", "savedresults", "saveold", "sc",
    "sca", "scal", "scala", "scalar", "scatter", "scm_mine",
    "sco", "scob_lf", "scob_p", "scobi_sw", "scobit", "scor",
    "score", "scoreplot", "scoreplot_help", "scree", "screeplot",
    "screeplot_help", "sdtest", "sdtesti", "se", "search",
    "separate", "seperate", "serrbar", "serrbar_7", "serset", "set",
    "set_defaults", "sfrancia", "sh", "she", "shel", "shell",
    "shewhart", "shewhart_7", "signestimationsample", "signrank",
    "signtest", "simul", "simul_7", "simulate", "simulate_8",
    "sktest", "sleep", "slogit", "slogit_d2", "slogit_p", "smooth",
    "snapspan", "so", "sor", "sort", "spearman", "spikeplot",
    "spikeplot_7", "spikeplt", "spline_x", "split", "sqreg",
    "sqreg_p", "sret", "sretu", "sretur", "sreturn", "ssc", "st",
    "st_ct", "st_hc", "st_hcd", "st_hcd_sh", "st_is", "st_issys",
    "st_note", "st_promo", "st_set", "st_show", "st_smpl",
    "st_subid", "stack", "statsby", "statsby_8", "stbase", "stci",
    "stci_7", "stcox", "stcox_estat", "stcox_fr", "stcox_fr_ll",
    "stcox_p", "stcox_sw", "stcoxkm", "stcoxkm_7", "stcstat",
    "stcurv", "stcurve", "stcurve_7", "stdes", "stem", "stepwise",
    "stereg", "stfill", "stgen", "stir", "stjoin", "stmc", "stmh",
    "stphplot", "stphplot_7", "stphtest", "stphtest_7",
    "stptime", "strate", "strate_7", "streg", "streg_sw", "streset",
    "sts", "sts_7", "stset", "stsplit", "stsum", "sttocc",
    "sttoct", "stvary", "stweib", "su", "suest", "suest_8",
    "sum", "summ", "summa", "summar", "summari", "summariz",
    "summarize", "sunflower", "sureg", "survcurv", "survsum",
    "svar", "svar_p", "svmat", "svy", "svy_disp", "svy_dreg",
    "svy_est", "svy_est_7", "svy_estat", "svy_get", "svy_gnbreg_p",
    "svy_head", "svy_header", "svy_heckman_p", "svy_heckprob_p",
    "svy_intreg_p", "svy_ivreg_p", "svy_logistic_p", "svy_logit_p",
    "svy_mlogit_p", "svy_nbreg_p", "svy_ologit_p", "svy_oprobit_p",
    "svy_poisson_p", "svy_probit_p", "svy_regress_p", "svy_sub",
    "svy_sub_7", "svy_x", "svy_x_7", "svy_x_p", "svydes",
    "svydes_8", "svygen", "svygnbreg", "svyheckman", "svyheckprob",
    "svyintreg", "svyintreg_7", "svyintrg", "svyivreg", "svylc",
    "svylog_p", "svylogit", "svymarkout", "svymarkout_8",
    "svymean", "svymlog", "svymlogit", "svynbreg", "svyolog",
    "svyologit", "svyoprob", "svyoprobit", "svyopts",
    "svypois", "svypois_7", "svypoisson", "svyprobit", "svyprobt",
    "svyprop", "svyprop_7", "svyratio", "svyreg", "svyreg_p",
    "svyregress", "svyset", "svyset_7", "svyset_8", "svytab",
    "svytab_7", "svytest", "svytotal", "sw", "sw_8", "swcnreg",
    "swcox", "swereg", "swilk", "swlogis", "swlogit",
    "swologit", "swoprbt", "swpois", "swprobit", "swqreg",
    "swtobit", "swweib", "symmetry", "symmi", "symplot",
    "symplot_7", "syntax", "sysdescribe", "sysdir", "sysuse",
    "szroeter", "ta", "tab", "tab1", "tab2", "tab_or", "tabd",
    "tabdi", "tabdis", "tabdisp", "tabi", "table", "tabodds",
    "tabodds_7", "tabstat", "tabu", "tabul", "tabula", "tabulat",
    "tabulate", "te", "tempfile", "tempname", "tempvar", "tes",
    "test", "testnl", "testparm", "teststd", "tetrachoric",
    "time_it", "timer", "tis", "tob", "tobi", "tobit", "tobit_p",
    "tobit_sw", "token", "tokeni", "tokeniz", "tokenize",
    "tostring", "total", "translate", "translator", "transmap",
    "treat_ll", "treatr_p", "treatreg", "trim", "trnb_cons",
    "trnb_mean", "trpoiss_d2", "trunc_ll", "truncr_p", "truncreg",
    "tsappend", "tset", "tsfill", "tsline", "tsline_ex",
    "tsreport", "tsrevar", "tsrline", "tsset", "tssmooth",
    "tsunab", "ttest", "ttesti", "tut_chk", "tut_wait", "tutorial",
    "tw", "tware_st", "two", "twoway", "twoway__fpfit_serset",
    "twoway__function_gen", "twoway__histogram_gen",
    "twoway__ipoint_serset", "twoway__ipoints_serset",
    "twoway__kdensity_gen", "twoway__lfit_serset",
    "twoway__normgen_gen", "twoway__pci_serset",
    "twoway__qfit_serset", "twoway__scatteri_serset",
    "twoway__sunflower_gen", "twoway_ksm_serset", "ty", "typ",
    "type", "typeof", "u", "unab", "unabbrev", "unabcmd",
    "update", "us", "use", "uselabel", "var", "var_mkcompanion",
    "var_p", "varbasic", "varfcast", "vargranger", "varirf",
    "varirf_add", "varirf_cgraph", "varirf_create", "varirf_ctable",
    "varirf_describe", "varirf_dir", "varirf_drop", "varirf_erase",
    "varirf_graph", "varirf_ograph", "varirf_rename", "varirf_set",
    "varirf_table", "varlist", "varlmar", "varnorm", "varsoc",
    "varstable", "varstable_w", "varstable_w2", "varwle",
    "vce", "vec", "vec_fevd", "vec_mkphi", "vec_p", "vec_p_w",
    "vecirf_create", "veclmar", "veclmar_w", "vecnorm",
    "vecnorm_w", "vecrank", "vecstable", "verinst", "vers",
    "versi", "versio", "version", "view", "viewsource", "vif",
    "vwls", "wdatetof", "webdescribe", "webseek", "webuse",
    "weib1_lf", "weib2_lf", "weib_lf", "weib_lf0", "weibhet_glf",
    "weibhet_glf_sh", "weibhet_glfa", "weibhet_glfa_sh",
    "weibhet_gp", "weibhet_ilf", "weibhet_ilf_sh", "weibhet_ilfa",
    "weibhet_ilfa_sh", "weibhet_ip", "weibu_sw", "weibul_p",
    "weibull", "weibull_c", "weibull_s", "weibullhet",
    "wh", "whelp", "whi", "which", "whil", "while", "wilc_st",
    "wilcoxon", "win", "wind", "windo", "window", "winexec",
    "wntestb", "wntestb_7", "wntestq", "xchart", "xchart_7",
    "xcorr", "xcorr_7", "xi", "xi_6", "xmlsav", "xmlsave",
    "xmluse", "xpose", "xsh", "xshe", "xshel", "xshell",
    "xt_iis", "xt_tis", "xtab_p", "xtabond", "xtbin_p",
    "xtclog", "xtcloglog", "xtcloglog_8", "xtcloglog_d2",
    "xtcloglog_pa_p", "xtcloglog_re_p", "xtcnt_p", "xtcorr",
    "xtdata", "xtdes", "xtfront_p", "xtfrontier", "xtgee",
    "xtgee_elink", "xtgee_estat", "xtgee_makeivar", "xtgee_p",
    "xtgee_plink", "xtgls", "xtgls_p", "xthaus", "xthausman",
    "xtht_p", "xthtaylor", "xtile", "xtint_p", "xtintreg",
    "xtintreg_8", "xtintreg_d2", "xtintreg_p", "xtivp_1",
    "xtivp_2", "xtivreg", "xtline", "xtline_ex", "xtlogit",
    "xtlogit_8", "xtlogit_d2", "xtlogit_fe_p", "xtlogit_pa_p",
    "xtlogit_re_p", "xtmixed", "xtmixed_estat", "xtmixed_p",
    "xtnb_fe", "xtnb_lf", "xtnbreg", "xtnbreg_pa_p",
    "xtnbreg_refe_p", "xtpcse", "xtpcse_p", "xtpois", "xtpoisson",
    "xtpoisson_d2", "xtpoisson_pa_p", "xtpoisson_refe_p", "xtpred",
    "xtprobit", "xtprobit_8", "xtprobit_d2", "xtprobit_re_p",
    "xtps_fe", "xtps_lf", "xtps_ren", "xtps_ren_8", "xtrar_p",
    "xtrc", "xtrc_p", "xtrchh", "xtrefe_p", "xtreg", "xtreg_be",
    "xtreg_fe", "xtreg_ml", "xtreg_pa_p", "xtreg_re",
    "xtregar", "xtrere_p", "xtset", "xtsf_ll", "xtsf_llti",
    "xtsum", "xttab", "xttest0", "xttobit", "xttobit_8",
    "xttobit_p", "xttrans", "yx", "yxview__barlike_draw",
    "yxview_area_draw", "yxview_bar_draw", "yxview_dot_draw",
    "yxview_dropline_draw", "yxview_function_draw",
    "yxview_iarrow_draw", "yxview_ilabels_draw",
    "yxview_normal_draw", "yxview_pcarrow_draw",
    "yxview_pcbarrow_draw", "yxview_pccapsym_draw",
    "yxview_pcscatter_draw", "yxview_pcspike_draw",
    "yxview_rarea_draw", "yxview_rbar_draw", "yxview_rbarm_draw",
    "yxview_rcap_draw", "yxview_rcapsym_draw",
    "yxview_rconnected_draw", "yxview_rline_draw",
    "yxview_rscatter_draw", "yxview_rspike_draw",
    "yxview_spike_draw", "yxview_sunflower_draw", "zap_s", "zinb",
    "zinb_llf", "zinb_plf", "zip", "zip_llf", "zip_p", "zip_plf",
    "zt_ct_5", "zt_hc_5", "zt_hcd_5", "zt_is_5", "zt_iss_5",
    "zt_sho_5", "zt_smp_5", "ztbase_5", "ztcox_5", "ztdes_5",
    "ztereg_5", "ztfill_5", "ztgen_5", "ztir_5", "ztjoin_5", "ztnb",
    "ztnb_p", "ztp", "ztp_p", "zts_5", "ztset_5", "ztspli_5",
    "ztsum_5", "zttoct_5", "ztvary_5", "ztweib_5"
)
# Names of built-in functions, highlighted as Name.Function by the lexer.
# (Judging by entries such as "xtreg"-style commands above and "invttail",
# "ustrregexm" here, this appears to be Stata's function set — confirm
# against the module header, which is outside this chunk.)
builtins_functions = (
    "abbrev", "abs", "acos", "acosh", "asin", "asinh", "atan",
    "atan2", "atanh", "autocode", "betaden", "binomial",
    "binomialp", "binomialtail", "binormal", "bofd",
    "byteorder", "c", "_caller", "cauchy", "cauchyden",
    "cauchytail", "Cdhms", "ceil", "char", "chi2", "chi2den",
    "chi2tail", "Chms", "chop", "cholesky", "clip", "Clock",
    "clock", "cloglog", "Cmdyhms", "Cofc", "cofC", "Cofd", "cofd",
    "coleqnumb", "collatorlocale", "collatorversion",
    "colnfreeparms", "colnumb", "colsof", "comb", "cond", "corr",
    "cos", "cosh", "daily", "date", "day", "det", "dgammapda",
    "dgammapdada", "dgammapdadx", "dgammapdx", "dgammapdxdx",
    "dhms", "diag", "diag0cnt", "digamma", "dofb", "dofC", "dofc",
    "dofh", "dofm", "dofq", "dofw", "dofy", "dow", "doy",
    "dunnettprob", "e", "el", "esample", "epsdouble", "epsfloat",
    "exp", "expm1", "exponential", "exponentialden",
    "exponentialtail", "F", "Fden", "fileexists", "fileread",
    "filereaderror", "filewrite", "float", "floor", "fmtwidth",
    "frval", "_frval", "Ftail", "gammaden", "gammap", "gammaptail",
    "get", "hadamard", "halfyear", "halfyearly", "has_eprop", "hh",
    "hhC", "hms", "hofd", "hours", "hypergeometric",
    "hypergeometricp", "I", "ibeta", "ibetatail", "igaussian",
    "igaussianden", "igaussiantail", "indexnot", "inlist",
    "inrange", "int", "inv", "invbinomial", "invbinomialtail",
    "invcauchy", "invcauchytail", "invchi2", "invchi2tail",
    "invcloglog", "invdunnettprob", "invexponential",
    "invexponentialtail", "invF", "invFtail", "invgammap",
    "invgammaptail", "invibeta", "invibetatail", "invigaussian",
    "invigaussiantail", "invlaplace", "invlaplacetail",
    "invlogisticp", "invlogisticsp", "invlogisticmsp",
    "invlogistictailp", "invlogistictailsp", "invlogistictailmsp",
    "invlogit", "invnbinomial", "invnbinomialtail", "invnchi2",
    "invnchi2tail", "invnF", "invnFtail", "invnibeta",
    "invnormal", "invnt", "invnttail", "invpoisson",
    "invpoissontail", "invsym", "invt", "invttail", "invtukeyprob",
    "invweibullabp", "invweibullabgp", "invweibullphabp",
    "invweibullphabgp", "invweibullphtailabp",
    "invweibullphtailabgp", "invweibulltailabp",
    "invweibulltailabgp", "irecode", "issymmetric", "J", "laplace",
    "laplaceden", "laplacetail", "ln", "ln1m", "ln1p", "lncauchyden",
    "lnfactorial", "lngamma", "lnigammaden", "lnigaussianden",
    "lniwishartden", "lnlaplaceden", "lnmvnormalden", "lnnormal",
    "lnnormalden", "lnnormaldenxs", "lnnormaldenxms", "lnwishartden",
    "log", "log10", "log1m", "log1p", "logisticx", "logisticsx",
    "logisticmsx", "logisticdenx", "logisticdensx", "logisticdenmsx",
    "logistictailx", "logistictailsx", "logistictailmsx", "logit",
    "matmissing", "matrix", "matuniform", "max", "maxbyte",
    "maxdouble", "maxfloat", "maxint", "maxlong", "mdy", "mdyhms",
    "mi", "min", "minbyte", "mindouble", "minfloat", "minint",
    "minlong", "minutes", "missing", "mm", "mmC", "mod", "mofd",
    "month", "monthly", "mreldif", "msofhours", "msofminutes",
    "msofseconds", "nbetaden", "nbinomial", "nbinomialp",
    "nbinomialtail", "nchi2", "nchi2den", "nchi2tail", "nF",
    "nFden", "nFtail", "nibeta", "normal", "normalden",
    "normaldenxs", "normaldenxms", "npnchi2", "npnF", "npnt",
    "nt", "ntden", "nttail", "nullmat", "plural", "plurals1",
    "poisson", "poissonp", "poissontail", "qofd", "quarter",
    "quarterly", "r", "rbeta", "rbinomial", "rcauchy", "rchi2",
    "recode", "real", "regexm", "regexr", "regexs", "reldif",
    "replay", "return", "rexponential", "rgamma", "rhypergeometric",
    "rigaussian", "rlaplace", "rlogistic", "rlogistics",
    "rlogisticms", "rnbinomial", "rnormal", "rnormalm", "rnormalms",
    "round", "roweqnumb", "rownfreeparms", "rownumb", "rowsof",
    "rpoisson", "rt", "runiform", "runiformab", "runiformint",
    "rweibullab", "rweibullabg", "rweibullphab", "rweibullphabg",
    "s", "scalar", "seconds", "sign", "sin", "sinh",
    "smallestdouble", "soundex", "soundex_nara", "sqrt", "ss",
    "ssC", "strcat", "strdup", "string", "stringns", "stritrim",
    "strlen", "strlower", "strltrim", "strmatch", "strofreal",
    "strofrealns", "strpos", "strproper", "strreverse", "strrpos",
    "strrtrim", "strtoname", "strtrim", "strupper", "subinstr",
    "subinword", "substr", "sum", "sweep", "t", "tan", "tanh",
    "tC", "tc", "td", "tden", "th", "tin", "tm", "tobytes", "tq",
    "trace", "trigamma", "trunc", "ttail", "tukeyprob", "tw",
    "twithin", "uchar", "udstrlen", "udsubstr", "uisdigit",
    "uisletter", "ustrcompare", "ustrfix", "ustrfrom",
    "ustrinvalidcnt", "ustrleft", "ustrlen", "ustrlower",
    "ustrltrim", "ustrnormalize", "ustrpos", "ustrregexm",
    "ustrregexra", "ustrregexrf", "ustrregexs", "ustrreverse",
    "ustrright", "ustrrpos", "ustrrtrim", "ustrsortkey",
    "ustrtitle", "ustrto", "ustrtohex", "ustrtoname",
    "ustrtrim", "ustrunescape", "ustrupper", "ustrword",
    "ustrwordcount", "usubinstr", "usubstr", "vec", "vecdiag",
    "week", "weekly", "weibullabx", "weibullabgx", "weibulldenabx",
    "weibulldenabgx", "weibullphabx", "weibullphabgx",
    "weibullphdenabx", "weibullphdenabgx", "weibullphtailabx",
    "weibullphtailabgx", "weibulltailabx", "weibulltailabgx",
    "wofd", "word", "wordbreaklocale", "wordcount",
    "year", "yearly", "yh", "ym", "yofd", "yq", "yw"
)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,112 @@
"""
pygments.lexers._usd_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A collection of known USD-related keywords, attributes, and types.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Attribute names common to many USD prim types.
COMMON_ATTRIBUTES = ["extent", "xformOpOrder"]
# usda language keywords (prim specifiers, composition arcs, metadata).
KEYWORDS = [
    "class", "clips", "custom", "customData", "def", "dictionary",
    "inherits", "over", "payload", "references", "rel", "subLayers",
    "timeSamples", "uniform", "variantSet", "variantSets", "variants",
]
# List-editing operation keywords used in usda files.
OPERATORS = ["add", "append", "delete", "prepend", "reorder"]
# Well-known metadata field names that get special highlighting.
SPECIAL_NAMES = [
    "active", "apiSchemas", "defaultPrim", "elementSize", "endTimeCode",
    "hidden", "instanceable", "interpolation", "kind", "startTimeCode",
    "upAxis",
]
TYPES = [
"asset",
"bool",
"color3d",
"color3f",
"color3h",
"color4d",
"color4f",
"color4h",
"double",
"double2",
"double3",
"double4",
"float",
"float2",
"float3",
"float4",
"frame4d",
"half",
"half2",
"half3",
"half4",
"int",
"int2",
"int3",
"int4",
"keyword",
"matrix2d",
"matrix3d",
"matrix4d",
"normal3d",
"normal3f",
"normal3h",
"point3d",
"point3f",
"point3h",
"quatd",
"quatf",
"quath",
"string",
"syn",
"token",
"uchar",
"uchar2",
"uchar3",
"uchar4",
"uint",
"uint2",
"uint3",
"uint4",
"usdaType",
"vector3d",
"vector3f",
"vector3h",
]

View File

@@ -0,0 +1,279 @@
"""
pygments.lexers._vbscript_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These are manually translated lists from
http://www.indusoft.com/pdf/VBScript%20Reference.pdf.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# VBScript keywords recognised by the lexer.
# BUG FIX: 'function' was listed twice (once out of order between
# 'execute' and 'exit'); the duplicate is removed.  'dim', 'const' and
# 'option' are handled by dedicated lexer rules, as noted inline.
KEYWORDS = [
    'ByRef',
    'ByVal',
    # dim: special rule
    'call',
    'case',
    'class',
    # const: special rule
    'do',
    'each',
    'else',
    'elseif',
    'end',
    'erase',
    'execute',
    'exit',
    'for',
    'function',
    'GetRef',
    'global',
    'if',
    'let',
    'loop',
    'next',
    'new',
    # option: special rule
    'private',
    'public',
    'redim',
    'select',
    'set',
    'sub',
    'then',
    'wend',
    'while',
    'with',
]
# VBScript built-in function names, highlighted as builtins.
# NOTE(review): 'inStrRev' has a lower-case leading 'i' unlike the other
# entries — presumably intentional (kept from the reference PDF); confirm.
BUILTIN_FUNCTIONS = [
    'Abs',
    'Array',
    'Asc',
    'Atn',
    'CBool',
    'CByte',
    'CCur',
    'CDate',
    'CDbl',
    'Chr',
    'CInt',
    'CLng',
    'Cos',
    'CreateObject',
    'CSng',
    'CStr',
    'Date',
    'DateAdd',
    'DateDiff',
    'DatePart',
    'DateSerial',
    'DateValue',
    'Day',
    'Eval',
    'Exp',
    'Filter',
    'Fix',
    'FormatCurrency',
    'FormatDateTime',
    'FormatNumber',
    'FormatPercent',
    'GetObject',
    'GetLocale',
    'Hex',
    'Hour',
    'InStr',
    'inStrRev',
    'Int',
    'IsArray',
    'IsDate',
    'IsEmpty',
    'IsNull',
    'IsNumeric',
    'IsObject',
    'Join',
    'LBound',
    'LCase',
    'Left',
    'Len',
    'LoadPicture',
    'Log',
    'LTrim',
    'Mid',
    'Minute',
    'Month',
    'MonthName',
    'MsgBox',
    'Now',
    'Oct',
    'Randomize',
    'RegExp',
    'Replace',
    'RGB',
    'Right',
    'Rnd',
    'Round',
    'RTrim',
    'ScriptEngine',
    'ScriptEngineBuildVersion',
    'ScriptEngineMajorVersion',
    'ScriptEngineMinorVersion',
    'Second',
    'SetLocale',
    'Sgn',
    'Space',
    'Split',
    'Sqr',
    'StrComp',
    'String',
    'StrReverse',
    'Tan',
    'Time',
    'Timer',
    'TimeSerial',
    'TimeValue',
    'Trim',
    'TypeName',
    'UBound',
    'UCase',
    'VarType',
    'Weekday',
    'WeekdayName',
    'Year',
]
# Built-in object/class names available to VBScript programs.
BUILTIN_VARIABLES = [
    'Debug', 'Dictionary', 'Drive', 'Drives', 'Err', 'File', 'Files',
    'FileSystemObject', 'Folder', 'Folders', 'Match', 'Matches', 'RegExp',
    'Submatches', 'TextStream',
]
# Symbolic operators (word operators live in OPERATOR_WORDS).
OPERATORS = [
    '+', '-', '*', '/', '\\', '^', '|',
    '<', '<=', '>', '>=', '=', '<>', '&', '$',
]
# Operators spelled as words rather than symbols.
OPERATOR_WORDS = ['mod', 'and', 'or', 'xor', 'eqv', 'imp', 'is', 'not']
# VBScript built-in constant names.
# BUG FIX: 'vbUseSystem' was listed twice; the duplicate is removed.
# NOTE(review): 'vbBoole' looks like a truncation of 'vbBoolean' in the
# source reference — left unchanged pending confirmation.
BUILTIN_CONSTANTS = [
    'False',
    'True',
    'vbAbort',
    'vbAbortRetryIgnore',
    'vbApplicationModal',
    'vbArray',
    'vbBinaryCompare',
    'vbBlack',
    'vbBlue',
    'vbBoole',
    'vbByte',
    'vbCancel',
    'vbCr',
    'vbCritical',
    'vbCrLf',
    'vbCurrency',
    'vbCyan',
    'vbDataObject',
    'vbDate',
    'vbDefaultButton1',
    'vbDefaultButton2',
    'vbDefaultButton3',
    'vbDefaultButton4',
    'vbDouble',
    'vbEmpty',
    'vbError',
    'vbExclamation',
    'vbFalse',
    'vbFirstFullWeek',
    'vbFirstJan1',
    'vbFormFeed',
    'vbFriday',
    'vbGeneralDate',
    'vbGreen',
    'vbIgnore',
    'vbInformation',
    'vbInteger',
    'vbLf',
    'vbLong',
    'vbLongDate',
    'vbLongTime',
    'vbMagenta',
    'vbMonday',
    'vbMsgBoxHelpButton',
    'vbMsgBoxRight',
    'vbMsgBoxRtlReading',
    'vbMsgBoxSetForeground',
    'vbNewLine',
    'vbNo',
    'vbNull',
    'vbNullChar',
    'vbNullString',
    'vbObject',
    'vbObjectError',
    'vbOK',
    'vbOKCancel',
    'vbOKOnly',
    'vbQuestion',
    'vbRed',
    'vbRetry',
    'vbRetryCancel',
    'vbSaturday',
    'vbShortDate',
    'vbShortTime',
    'vbSingle',
    'vbString',
    'vbSunday',
    'vbSystemModal',
    'vbTab',
    'vbTextCompare',
    'vbThursday',
    'vbTrue',
    'vbTuesday',
    'vbUseDefault',
    'vbUseSystem',
    'vbVariant',
    'vbVerticalTab',
    'vbWednesday',
    'vbWhite',
    'vbYellow',
    'vbYes',
    'vbYesNo',
    'vbYesNoCancel',
]

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,243 @@
"""
pygments.lexers.actionscript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for ActionScript and MXML.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, using, this, words, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['ActionScriptLexer', 'ActionScript3Lexer', 'MxmlLexer']
class ActionScriptLexer(RegexLexer):
    """
    For ActionScript (1/2) source code.

    BUG FIX applied below: a comma was missing after
    'IDynamicPropertyOutput' in the builtin-class list, so Python's
    implicit string-literal concatenation merged it with the next entry
    into the bogus name 'IDynamicPropertyOutputIDynamicPropertyWriter',
    and neither interface was ever highlighted.
    """
    name = 'ActionScript'
    aliases = ['actionscript', 'as']
    filenames = ['*.as']
    mimetypes = ['application/x-actionscript', 'text/x-actionscript',
                 'text/actionscript']
    url = 'https://en.wikipedia.org/wiki/ActionScript'
    version_added = '0.9'

    flags = re.DOTALL
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            # regex literal with optional flags, e.g. /ab+c/gi
            (r'/(\\\\|\\[^\\]|[^/\\\n])*/[gim]*', String.Regex),
            (r'[~^*!%&<>|+=:;,/?\\-]+', Operator),
            (r'[{}\[\]();.]+', Punctuation),
            (words((
                'case', 'default', 'for', 'each', 'in', 'while', 'do', 'break',
                'return', 'continue', 'if', 'else', 'throw', 'try', 'catch',
                'var', 'with', 'new', 'typeof', 'arguments', 'instanceof', 'this',
                'switch'), suffix=r'\b'),
             Keyword),
            (words((
                'class', 'public', 'final', 'internal', 'native', 'override', 'private',
                'protected', 'static', 'import', 'extends', 'implements', 'interface',
                'intrinsic', 'return', 'super', 'dynamic', 'function', 'const', 'get',
                'namespace', 'package', 'set'), suffix=r'\b'),
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\b',
             Keyword.Constant),
            # Flash API class names.  Spellings such as 'ConvultionFilter',
            # 'DisplacmentMapFilter' and 'URLVariabeles' are kept as-is —
            # presumably copied from the original reference list; verify
            # against the Adobe docs before "fixing" them.
            (words((
                'Accessibility', 'AccessibilityProperties', 'ActionScriptVersion',
                'ActivityEvent', 'AntiAliasType', 'ApplicationDomain', 'AsBroadcaster', 'Array',
                'AsyncErrorEvent', 'AVM1Movie', 'BevelFilter', 'Bitmap', 'BitmapData',
                'BitmapDataChannel', 'BitmapFilter', 'BitmapFilterQuality', 'BitmapFilterType',
                'BlendMode', 'BlurFilter', 'Boolean', 'ByteArray', 'Camera', 'Capabilities', 'CapsStyle',
                'Class', 'Color', 'ColorMatrixFilter', 'ColorTransform', 'ContextMenu',
                'ContextMenuBuiltInItems', 'ContextMenuEvent', 'ContextMenuItem',
                'ConvultionFilter', 'CSMSettings', 'DataEvent', 'Date', 'DefinitionError',
                'DeleteObjectSample', 'Dictionary', 'DisplacmentMapFilter', 'DisplayObject',
                'DisplacmentMapFilterMode', 'DisplayObjectContainer', 'DropShadowFilter',
                'Endian', 'EOFError', 'Error', 'ErrorEvent', 'EvalError', 'Event', 'EventDispatcher',
                'EventPhase', 'ExternalInterface', 'FileFilter', 'FileReference',
                'FileReferenceList', 'FocusDirection', 'FocusEvent', 'Font', 'FontStyle', 'FontType',
                'FrameLabel', 'FullScreenEvent', 'Function', 'GlowFilter', 'GradientBevelFilter',
                'GradientGlowFilter', 'GradientType', 'Graphics', 'GridFitType', 'HTTPStatusEvent',
                # comma after 'IDynamicPropertyOutput' restored (see class docstring)
                'IBitmapDrawable', 'ID3Info', 'IDataInput', 'IDataOutput', 'IDynamicPropertyOutput',
                'IDynamicPropertyWriter', 'IEventDispatcher', 'IExternalizable',
                'IllegalOperationError', 'IME', 'IMEConversionMode', 'IMEEvent', 'int',
                'InteractiveObject', 'InterpolationMethod', 'InvalidSWFError', 'InvokeEvent',
                'IOError', 'IOErrorEvent', 'JointStyle', 'Key', 'Keyboard', 'KeyboardEvent', 'KeyLocation',
                'LineScaleMode', 'Loader', 'LoaderContext', 'LoaderInfo', 'LoadVars', 'LocalConnection',
                'Locale', 'Math', 'Matrix', 'MemoryError', 'Microphone', 'MorphShape', 'Mouse', 'MouseEvent',
                'MovieClip', 'MovieClipLoader', 'Namespace', 'NetConnection', 'NetStatusEvent',
                'NetStream', 'NewObjectSample', 'Number', 'Object', 'ObjectEncoding', 'PixelSnapping',
                'Point', 'PrintJob', 'PrintJobOptions', 'PrintJobOrientation', 'ProgressEvent', 'Proxy',
                'QName', 'RangeError', 'Rectangle', 'ReferenceError', 'RegExp', 'Responder', 'Sample',
                'Scene', 'ScriptTimeoutError', 'Security', 'SecurityDomain', 'SecurityError',
                'SecurityErrorEvent', 'SecurityPanel', 'Selection', 'Shape', 'SharedObject',
                'SharedObjectFlushStatus', 'SimpleButton', 'Socket', 'Sound', 'SoundChannel',
                'SoundLoaderContext', 'SoundMixer', 'SoundTransform', 'SpreadMethod', 'Sprite',
                'StackFrame', 'StackOverflowError', 'Stage', 'StageAlign', 'StageDisplayState',
                'StageQuality', 'StageScaleMode', 'StaticText', 'StatusEvent', 'String', 'StyleSheet',
                'SWFVersion', 'SyncEvent', 'SyntaxError', 'System', 'TextColorType', 'TextField',
                'TextFieldAutoSize', 'TextFieldType', 'TextFormat', 'TextFormatAlign',
                'TextLineMetrics', 'TextRenderer', 'TextSnapshot', 'Timer', 'TimerEvent', 'Transform',
                'TypeError', 'uint', 'URIError', 'URLLoader', 'URLLoaderDataFormat', 'URLRequest',
                'URLRequestHeader', 'URLRequestMethod', 'URLStream', 'URLVariabeles', 'VerifyError',
                'Video', 'XML', 'XMLDocument', 'XMLList', 'XMLNode', 'XMLNodeType', 'XMLSocket',
                'XMLUI'), suffix=r'\b'),
             Name.Builtin),
            # global functions
            (words((
                'decodeURI', 'decodeURIComponent', 'encodeURI', 'escape', 'eval', 'isFinite', 'isNaN',
                'isXMLName', 'clearInterval', 'fscommand', 'getTimer', 'getURL', 'getVersion',
                'parseFloat', 'parseInt', 'setInterval', 'trace', 'updateAfterEvent',
                'unescape'), suffix=r'\b'),
             Name.Function),
            (r'[$a-zA-Z_]\w*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
        ]
    }

    def analyse_text(text):
        """This is only used to disambiguate between ActionScript and
        ActionScript3. We return 0 here; the ActionScript3 lexer will match
        AS3 variable definitions and that will hopefully suffice."""
        return 0
class ActionScript3Lexer(RegexLexer):
    """
    For ActionScript 3 source code.
    """
    name = 'ActionScript 3'
    url = 'https://help.adobe.com/en_US/FlashPlatform/reference/actionscript/3/index.html'
    aliases = ['actionscript3', 'as3']
    filenames = ['*.as']
    mimetypes = ['application/x-actionscript3', 'text/x-actionscript3',
                 'text/actionscript3']
    version_added = '0.11'

    # Identifier pattern; typeidentifier additionally allows a generic
    # parameter as in Vector.<int>.
    identifier = r'[$a-zA-Z_]\w*'
    typeidentifier = identifier + r'(?:\.<\w+>)?'

    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            # function declaration: name, then switch to parameter list
            (r'(function\s+)(' + identifier + r')(\s*)(\()',
             bygroups(Keyword.Declaration, Name.Function, Text, Operator),
             'funcparams'),
            # typed variable/constant declaration: var x:Type
            (r'(var|const)(\s+)(' + identifier + r')(\s*)(:)(\s*)(' +
             typeidentifier + r')',
             bygroups(Keyword.Declaration, Whitespace, Name, Whitespace, Punctuation, Whitespace,
                      Keyword.Type)),
            (r'(import|package)(\s+)((?:' + identifier + r'|\.)+)(\s*)',
             bygroups(Keyword, Whitespace, Name.Namespace, Whitespace)),
            (r'(new)(\s+)(' + typeidentifier + r')(\s*)(\()',
             bygroups(Keyword, Whitespace, Keyword.Type, Whitespace, Operator)),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'/(\\\\|\\[^\\]|[^\\\n])*/[gisx]*', String.Regex),
            (r'(\.)(' + identifier + r')', bygroups(Operator, Name.Attribute)),
            (r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch|with|new|typeof|arguments|instanceof|this|'
             r'switch|import|include|as|is)\b',
             Keyword),
            (r'(class|public|final|internal|native|override|private|protected|'
             r'static|import|extends|implements|interface|intrinsic|return|super|'
             r'dynamic|function|const|get|namespace|package|set)\b',
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|void)\b',
             Keyword.Constant),
            (r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
             r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
             r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
             r'unescape)\b', Name.Function),
            (identifier, Name),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'[~^*!%&<>|+=:;,/?\\{}\[\]().-]+', Operator),
        ],
        # inside a function's parameter list; each parameter may carry a
        # type annotation and a default value
        'funcparams': [
            (r'\s+', Whitespace),
            (r'(\s*)(\.\.\.)?(' + identifier + r')(\s*)(:)(\s*)(' +
             typeidentifier + r'|\*)(\s*)',
             bygroups(Whitespace, Punctuation, Name, Whitespace, Operator, Whitespace,
                      Keyword.Type, Whitespace), 'defval'),
            (r'\)', Operator, 'type')
        ],
        # optional return-type annotation after the closing paren
        'type': [
            (r'(\s*)(:)(\s*)(' + typeidentifier + r'|\*)',
             bygroups(Whitespace, Operator, Whitespace, Keyword.Type), '#pop:2'),
            (r'\s+', Text, '#pop:2'),
            default('#pop:2')
        ],
        # optional "= default" for a parameter
        'defval': [
            (r'(=)(\s*)([^(),]+)(\s*)(,?)',
             bygroups(Operator, Whitespace, using(this), Whitespace, Operator), '#pop'),
            (r',', Operator, '#pop'),
            default('#pop')
        ]
    }

    def analyse_text(text):
        # A typed declaration like "x : int" is a weak AS3 signal.
        if re.match(r'\w+\s*:\s*\w', text):
            return 0.3
        return 0
class MxmlLexer(RegexLexer):
    """
    For MXML markup.
    Nested AS3 in <script> tags is highlighted by the appropriate lexer.
    """
    flags = re.MULTILINE | re.DOTALL
    name = 'MXML'
    aliases = ['mxml']
    filenames = ['*.mxml']
    url = 'https://en.wikipedia.org/wiki/MXML'
    version_added = '1.1'

    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            # CDATA sections are lexed as embedded ActionScript 3
            (r'(\<\!\[CDATA\[)(.*?)(\]\]\>)',
             bygroups(String, using(ActionScript3Lexer), String)),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            (r'<\s*[\w:.-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        # inside an opening tag: attributes until '>' or '/>'
        'tag': [
            (r'\s+', Whitespace),
            (r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        # attribute value: quoted or bare
        'attr': [
            (r'\s+', Whitespace),
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }

View File

@@ -0,0 +1,144 @@
"""
pygments.lexers.ada
~~~~~~~~~~~~~~~~~~~
Lexers for Ada family languages.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, words, using, this, \
default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.lexers._ada_builtins import KEYWORD_LIST, BUILTIN_LIST
__all__ = ['AdaLexer']
class AdaLexer(RegexLexer):
    """
    For Ada source code.

    Fixes applied:
    - restored the '?' in ``or(\\s+else)?`` so a bare ``or`` (without
      ``else``) is highlighted as a word operator again; previously only
      ``or else`` matched this rule and a lone ``or`` fell through to the
      generic name rule.
    - removed a duplicated whitespace rule in 'root' (the identical rule
      two lines earlier already matches).
    """
    name = 'Ada'
    aliases = ['ada', 'ada95', 'ada2005']
    filenames = ['*.adb', '*.ads', '*.ada']
    mimetypes = ['text/x-ada']
    url = 'https://www.adaic.org'
    version_added = '1.3'

    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'[^\S\n]+', Text),
            (r'--.*?\n', Comment.Single),
            (r'function|procedure|entry', Keyword.Declaration, 'subprogram'),
            (r'(subtype|type)(\s+)(\w+)',
             bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'),
            (r'task|protected', Keyword.Declaration),
            (r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)),
            (r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'),
            (r'(pragma)(\s+)(\w+)', bygroups(Keyword.Reserved, Text,
                                             Comment.Preproc)),
            (r'(true|false|null)\b', Keyword.Constant),
            # builtin types
            (words(BUILTIN_LIST, suffix=r'\b'), Keyword.Type),
            # word operators; 'then'/'else' after and/or are optional
            (r'(and(\s+then)?|in|mod|not|or(\s+else)?|rem)\b', Operator.Word),
            (r'generic|private', Keyword.Declaration),
            (r'package', Keyword.Declaration, 'package'),
            (r'array\b', Keyword.Reserved, 'array_def'),
            (r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
            (r'(\w+)(\s*)(:)(\s*)(constant)',
             bygroups(Name.Constant, Text, Punctuation, Text,
                      Keyword.Reserved)),
            (r'<<\w+>>', Name.Label),
            (r'(\w+)(\s*)(:)(\s*)(declare|begin|loop|for|while)',
             bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)),
            # keywords
            (words(KEYWORD_LIST, prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            (r'"[^"]*"', String),
            include('attribute'),
            include('numbers'),
            (r"'[^']'", String.Character),
            (r'(\w+)(\s*|[(,])', bygroups(Name, using(this))),
            (r"(<>|=>|:=|@|[\[\]]|[()|:;,.'])", Punctuation),
            (r'[*<>+=/&-]', Operator),
            (r'\n+', Text),
        ],
        'numbers': [
            (r'[0-9_]+#[0-9a-f_\.]+#', Number.Hex),
            (r'[0-9_]+\.[0-9_]*', Number.Float),
            (r'[0-9_]+', Number.Integer),
        ],
        # attribute reference such as X'Length
        'attribute': [
            (r"(')(\w+)", bygroups(Punctuation, Name.Attribute)),
        ],
        'subprogram': [
            (r'\(', Punctuation, ('#pop', 'formal_part')),
            (r';', Punctuation, '#pop'),
            (r'is\b', Keyword.Reserved, '#pop'),
            (r'"[^"]+"|\w+', Name.Function),
            include('root'),
        ],
        # after 'end': the closing construct keyword or the ending name
        'end': [
            ('(if|case|record|loop|select)', Keyword.Reserved),
            (r'"[^"]+"|[\w.]+', Name.Function),
            (r'\s+', Text),
            (';', Punctuation, '#pop'),
        ],
        'type_def': [
            (r';', Punctuation, '#pop'),
            (r'\(', Punctuation, 'formal_part'),
            (r'\[', Punctuation, 'formal_part'),
            (r'with|and|use', Keyword.Reserved),
            (r'array\b', Keyword.Reserved, ('#pop', 'array_def')),
            (r'record\b', Keyword.Reserved, ('record_def')),
            (r'(null record)(;)', bygroups(Keyword.Reserved, Punctuation), '#pop'),
            include('root'),
        ],
        'array_def': [
            (r';', Punctuation, '#pop'),
            (r'(\w+)(\s+)(range)', bygroups(Keyword.Type, Text, Keyword.Reserved)),
            include('root'),
        ],
        'record_def': [
            (r'end record', Keyword.Reserved, '#pop'),
            include('root'),
        ],
        'import': [
            # TODO: use Name.Namespace if appropriate. This needs
            # work to distinguish imports from aspects.
            (r'[\w.]+', Name, '#pop'),
            default('#pop'),
        ],
        # parenthesised parameter / discriminant list
        'formal_part': [
            (r'\)', Punctuation, '#pop'),
            (r'\]', Punctuation, '#pop'),
            (r'\w+', Name.Variable),
            (r',|:[^=]', Punctuation),
            (r'(in|not|null|out|access)\b', Keyword.Reserved),
            include('root'),
        ],
        'package': [
            ('body', Keyword.Declaration),
            (r'is\s+new|renames', Keyword.Reserved),
            ('is', Keyword.Reserved, '#pop'),
            (';', Punctuation, '#pop'),
            (r'\(', Punctuation, 'package_instantiation'),
            (r'([\w.]+)', Name.Class),
            include('root'),
        ],
        'package_instantiation': [
            (r'("[^"]+"|\w+)(\s+)(=>)', bygroups(Name.Variable, Text, Punctuation)),
            (r'[\w.\'"]', Text),
            (r'\)', Punctuation, '#pop'),
            include('root'),
        ],
    }

View File

@@ -0,0 +1,25 @@
"""
pygments.lexers.agile
~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# ruff: noqa: F401
from pygments.lexers.lisp import SchemeLexer
from pygments.lexers.jvm import IokeLexer, ClojureLexer
from pygments.lexers.python import PythonLexer, PythonConsoleLexer, \
PythonTracebackLexer, Python3Lexer, Python3TracebackLexer, DgLexer
from pygments.lexers.ruby import RubyLexer, RubyConsoleLexer, FancyLexer
from pygments.lexers.perl import PerlLexer, Perl6Lexer
from pygments.lexers.d import CrocLexer, MiniDLexer
from pygments.lexers.iolang import IoLexer
from pygments.lexers.tcl import TclLexer
from pygments.lexers.factor import FactorLexer
from pygments.lexers.scripting import LuaLexer, MoonScriptLexer
__all__ = []

View File

@@ -0,0 +1,299 @@
"""
pygments.lexers.algebra
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for computer algebra systems.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Whitespace
__all__ = ['GAPLexer', 'GAPConsoleLexer', 'MathematicaLexer', 'MuPADLexer',
'BCLexer']
class GAPLexer(RegexLexer):
    """
    For GAP source code.
    """
    name = 'GAP'
    url = 'https://www.gap-system.org'
    aliases = ['gap']
    filenames = ['*.g', '*.gd', '*.gi', '*.gap']
    version_added = '2.0'

    tokens = {
        'root': [
            (r'#.*$', Comment.Single),
            (r'"(?:[^"\\]|\\.)*"', String),
            (r'\(|\)|\[|\]|\{|\}', Punctuation),
            (r'''(?x)\b(?:
                if|then|elif|else|fi|
                for|while|do|od|
                repeat|until|
                break|continue|
                function|local|return|end|
                rec|
                quit|QUIT|
                IsBound|Unbind|
                TryNextMethod|
                Info|Assert
               )\b''', Keyword),
            (r'''(?x)\b(?:
                true|false|fail|infinity
               )\b''',
             Name.Constant),
            # declaration/installation helpers, e.g. DeclareOperation,
            # InstallMethod, BindGlobal
            (r'''(?x)\b(?:
                (Declare|Install)([A-Z][A-Za-z]+)|
                BindGlobal|BIND_GLOBAL
               )\b''',
             Name.Builtin),
            (r'\.|,|:=|;|=|\+|-|\*|/|\^|>|<', Operator),
            (r'''(?x)\b(?:
                and|or|not|mod|in
               )\b''',
             Operator.Word),
            # identifiers, possibly backquoted or namespaced with '::'
            (r'''(?x)
              (?:\w+|`[^`]*`)
              (?:::\w+|`[^`]*`)*''', Name.Variable),
            (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
            (r'\.[0-9]+(?:e[0-9]+)?', Number),
            (r'.', Text)
        ],
    }

    def analyse_text(text):
        # Score on GAP library idioms; capped at 1.0.
        score = 0.0

        # Declaration part
        if re.search(
            r"(InstallTrueMethod|Declare(Attribute|Category|Filter|Operation" +
            r"|GlobalFunction|Synonym|SynonymAttr|Property))", text
        ):
            score += 0.7

        # Implementation part
        if re.search(
            r"(DeclareRepresentation|Install(GlobalFunction|Method|" +
            r"ImmediateMethod|OtherMethod)|New(Family|Type)|Objectify)", text
        ):
            score += 0.7

        return min(score, 1.0)
class GAPConsoleLexer(Lexer):
    """
    For GAP console sessions. Modeled after JuliaConsoleLexer.
    """
    name = 'GAP session'
    aliases = ['gap-console', 'gap-repl']
    filenames = ['*.tst']
    url = 'https://www.gap-system.org'
    version_added = '2.14'
    _example = "gap-repl/euclidean.tst"

    def get_tokens_unprocessed(self, text):
        # Accumulate prompted input lines into `curcode` and lex them with
        # the GAP source lexer; everything else is emitted as output or
        # error text.  Prompt strings are re-inserted via do_insertions.
        gaplexer = GAPLexer(**self.options)
        start = 0
        curcode = ''
        insertions = []
        output = False
        error = False
        for line in text.splitlines(keepends=True):
            if line.startswith('gap> ') or line.startswith('brk> '):
                # main prompt ('gap> ') or break-loop prompt ('brk> ')
                insertions.append((len(curcode), [(0, Generic.Prompt, line[:5])]))
                curcode += line[5:]
                output = False
                error = False
            elif not output and line.startswith('> '):
                # continuation prompt of a multi-line input
                insertions.append((len(curcode), [(0, Generic.Prompt, line[:2])]))
                curcode += line[2:]
            else:
                # output/error line: flush pending input first
                if curcode:
                    yield from do_insertions(
                        insertions, gaplexer.get_tokens_unprocessed(curcode))
                    curcode = ''
                    insertions = []
                if line.startswith('Error, ') or error:
                    # an error report may continue over several lines
                    yield start, Generic.Error, line
                    error = True
                else:
                    yield start, Generic.Output, line
                    output = True
            start += len(line)

        if curcode:
            yield from do_insertions(
                insertions, gaplexer.get_tokens_unprocessed(curcode))

    # the following is needed to distinguish Scilab and GAP .tst files
    def analyse_text(text):
        # GAP prompts are a dead giveaway, although hypothetically a
        # file in another language could be trying to compare a variable
        # "gap" as in "gap> 0.1". But that this should happen at the
        # start of a line seems unlikely...
        if re.search(r"^gap> ", text):
            return 0.9
        else:
            return 0.0
class MathematicaLexer(RegexLexer):
    """
    Lexer for Mathematica source code.

    BUG FIX applied below: a comma was missing between ``"!="`` and
    ``"=="`` in the ``operators`` tuple, so implicit string concatenation
    produced the single bogus entry ``"!==="`` and neither ``!=`` nor
    ``==`` was listed as an operator.
    """
    name = 'Mathematica'
    url = 'http://www.wolfram.com/mathematica/'
    aliases = ['mathematica', 'mma', 'nb']
    filenames = ['*.nb', '*.cdf', '*.nbp', '*.ma']
    mimetypes = ['application/mathematica',
                 'application/vnd.wolfram.mathematica',
                 'application/vnd.wolfram.mathematica.package',
                 'application/vnd.wolfram.cdf']
    version_added = '2.0'

    # http://reference.wolfram.com/mathematica/guide/Syntax.html
    operators = (
        ";;", "=", "=.", "!=", "==", ":=", "->", ":>", "/.", "+", "-", "*", "/",
        "^", "&&", "||", "!", "<>", "|", "/;", "?", "@", "//", "/@", "@@",
        "@@@", "~~", "===", "&", "<", ">", "<=", ">=",
    )

    punctuation = (",", ";", "(", ")", "[", "]", "{", "}")

    def _multi_escape(entries):
        # helper kept for parity with the original; builds an alternation
        # of escaped literals (currently unused — words() is used instead)
        return '({})'.format('|'.join(re.escape(entry) for entry in entries))

    tokens = {
        'root': [
            (r'(?s)\(\*.*?\*\)', Comment),
            # context (namespace) names end with a backquote
            (r'([a-zA-Z]+[A-Za-z0-9]*`)', Name.Namespace),
            # patterns such as x_, _Integer
            (r'([A-Za-z0-9]*_+[A-Za-z0-9]*)', Name.Variable),
            # slot arguments #, #1, ...
            (r'#\d*', Name.Variable),
            (r'([a-zA-Z]+[a-zA-Z0-9]*)', Name),
            (r'-?\d+\.\d*', Number.Float),
            (r'-?\d*\.\d+', Number.Float),
            (r'-?\d+', Number.Integer),
            (words(operators), Operator),
            (words(punctuation), Punctuation),
            (r'".*?"', String),
            (r'\s+', Text.Whitespace),
        ],
    }
class MuPADLexer(RegexLexer):
    """
    A MuPAD lexer.
    Contributed by Christopher Creutzig <christopher@creutzig.de>.
    """
    name = 'MuPAD'
    url = 'http://www.mupad.com'
    aliases = ['mupad']
    filenames = ['*.mu']
    version_added = '0.8'

    tokens = {
        'root': [
            (r'//.*?$', Comment.Single),
            # block comments nest — handled by the 'comment' state
            (r'/\*', Comment.Multiline, 'comment'),
            (r'"(?:[^"\\]|\\.)*"', String),
            (r'\(|\)|\[|\]|\{|\}', Punctuation),
            (r'''(?x)\b(?:
                next|break|end|
                axiom|end_axiom|category|end_category|domain|end_domain|inherits|
                if|%if|then|elif|else|end_if|
                case|of|do|otherwise|end_case|
                while|end_while|
                repeat|until|end_repeat|
                for|from|to|downto|step|end_for|
                proc|local|option|save|begin|end_proc|
                delete|frame
              )\b''', Keyword),
            # built-in domain types
            (r'''(?x)\b(?:
                DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR|
                DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT|
                DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC|
                DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR
              )\b''', Name.Class),
            (r'''(?x)\b(?:
                PI|EULER|E|CATALAN|
                NIL|FAIL|undefined|infinity|
                TRUE|FALSE|UNKNOWN
              )\b''',
             Name.Constant),
            (r'\b(?:dom|procname)\b', Name.Builtin.Pseudo),
            (r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator),
            (r'''(?x)\b(?:
                and|or|not|xor|
                assuming|
                div|mod|
                union|minus|intersect|in|subset
              )\b''',
             Operator.Word),
            (r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number),
            # (r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin),
            # function call: identifier (possibly '::'-qualified or
            # backquoted) immediately followed by '('
            (r'''(?x)
              ((?:[a-zA-Z_#][\w#]*|`[^`]*`)
              (?:::[a-zA-Z_#][\w#]*|`[^`]*`)*)(\s*)([(])''',
             bygroups(Name.Function, Text, Punctuation)),
            (r'''(?x)
              (?:[a-zA-Z_#][\w#]*|`[^`]*`)
              (?:::[a-zA-Z_#][\w#]*|`[^`]*`)*''', Name.Variable),
            (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
            (r'\.[0-9]+(?:e[0-9]+)?', Number),
            (r'\s+', Whitespace),
            (r'.', Text)
        ],
        # nested /* ... */ comments: '#push' on each opener
        'comment': [
            (r'[^/*]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
    }
class BCLexer(RegexLexer):
    """
    A BC lexer.
    """
    name = 'BC'
    url = 'https://www.gnu.org/software/bc/'
    aliases = ['bc']
    filenames = ['*.bc']
    version_added = '2.1'

    tokens = {
        'root': [
            (r'/\*', Comment.Multiline, 'comment'),
            (r'"(?:[^"\\]|\\.)*"', String),
            (r'[{}();,]', Punctuation),
            (words(('if', 'else', 'while', 'for', 'break', 'continue',
                    'halt', 'return', 'define', 'auto', 'print', 'read',
                    'length', 'scale', 'sqrt', 'limits', 'quit',
                    'warranty'), suffix=r'\b'), Keyword),
            # ++/--/||/&& first, then single operators with optional '='
            (r'\+\+|--|\|\||&&|'
             r'([-<>+*%\^/!=])=?', Operator),
            # bc doesn't support exponential
            (r'[0-9]+(\.[0-9]*)?', Number),
            (r'\.[0-9]+', Number),
            (r'.', Text)
        ],
        'comment': [
            (r'[^*/]+', Comment.Multiline),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
    }

View File

@@ -0,0 +1,75 @@
"""
pygments.lexers.ambient
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for AmbientTalk language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words, bygroups
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['AmbientTalkLexer']
class AmbientTalkLexer(RegexLexer):
    """
    Lexer for AmbientTalk source code.

    Covers AmbientTalk/2 syntax: keyworded-message builtins (``if:then:``
    style selectors), symbols, annotations (``@`` / ``@[...]``), table
    argument lists between ``|`` bars, and ``lobby``-rooted namespace
    paths.
    """
    name = 'AmbientTalk'
    url = 'https://code.google.com/p/ambienttalk'
    filenames = ['*.at']
    aliases = ['ambienttalk', 'ambienttalk/2', 'at']
    mimetypes = ['text/x-ambienttalk']
    version_added = '2.0'
    flags = re.MULTILINE | re.DOTALL
    # Keyworded-message selectors treated as language builtins.
    builtin = words(('if:', 'then:', 'else:', 'when:', 'whenever:', 'discovered:',
                     'disconnected:', 'reconnected:', 'takenOffline:', 'becomes:',
                     'export:', 'as:', 'object:', 'actor:', 'mirror:', 'taggedAs:',
                     'mirroredBy:', 'is:'))
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'(def|deftype|import|alias|exclude)\b', Keyword),
            (builtin, Name.Builtin),
            (r'(true|false|nil)\b', Keyword.Constant),
            # Namespace roots; the trailing dot starts a dotted path.
            (r'(~|lobby|jlobby|/)\.', Keyword.Constant, 'namespace'),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            # Opening bar of a block/table argument list.
            (r'\|', Punctuation, 'arglist'),
            (r'<:|[*^!%&<>+=,./?-]|:=', Operator),
            # Backquoted symbol, e.g. `foo.
            (r"`[a-zA-Z_]\w*", String.Symbol),
            # Keyword-message part, e.g. foo:.
            (r"[a-zA-Z_]\w*:", Name.Function),
            (r"[{}()\[\];`]", Punctuation),
            (r'(self|super)\b', Name.Variable.Instance),
            (r"[a-zA-Z_]\w*", Name.Variable),
            # Type-tag annotation, e.g. @Tag.
            (r"@[a-zA-Z_]\w*", Name.Class),
            # Annotation list, e.g. @[Tag1, Tag2]; handled in 'annotations'.
            (r"@\[", Name.Class, 'annotations'),
            include('numbers'),
        ],
        'numbers': [
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+', Number.Integer)
        ],
        'namespace': [
            # Intermediate path segment (ends with a dot) stays in this state.
            (r'[a-zA-Z_]\w*\.', Name.Namespace),
            # Final segment (selector or plain name) pops back to 'root'.
            (r'[a-zA-Z_]\w*:', Name.Function, '#pop'),
            (r'[a-zA-Z_]\w*(?!\.)', Name.Function, '#pop')
        ],
        'annotations': [
            (r"(.*?)\]", Name.Class, '#pop')
        ],
        'arglist': [
            # Closing bar ends the argument list.
            (r'\|', Punctuation, '#pop'),
            (r'(\s*)(,)(\s*)', bygroups(Whitespace, Punctuation, Whitespace)),
            (r'[a-zA-Z_]\w*', Name.Variable),
        ],
    }

View File

@@ -0,0 +1,54 @@
"""
pygments.lexers.amdgpu
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the AMDGPU ISA assembly.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words
from pygments.token import Name, Text, Keyword, Whitespace, Number, Comment
import re
__all__ = ['AMDGPULexer']
class AMDGPULexer(RegexLexer):
    """
    For AMD GPU assembly.

    Highlights AMDGPU ISA disassembly: scalar (``s_*``) and vector
    (``v_*``) instructions, memory instructions, register operands
    (``s``/``v`` registers, ``vcc``, ``exec``), wait-count attributes and
    labels.  Matching is case-insensitive.
    """
    name = 'AMDGPU'
    aliases = ['amdgpu']
    filenames = ['*.isa']
    url = 'https://gpuopen.com/amd-isa-documentation'
    version_added = '2.8'
    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'[\r\n]+', Text),
            # name:name pairs (either side may be empty), e.g. operand tags.
            (r'(([a-z_0-9])*:([a-z_0-9])*)', Name.Attribute),
            (r'(\[|\]|\(|\)|,|\:|\&)', Text),
            (r'([;#]|//).*?\n', Comment.Single),
            # Memory instructions (scratch/ds/buffer/flat/image families).
            (r'((s_)?(scratch|ds|buffer|flat|image)_[a-z0-9_]+)', Keyword.Reserved),
            (r'(_lo|_hi)', Name.Variable),
            # Wait-count fields of s_waitcnt.
            (r'(vmcnt|lgkmcnt|expcnt)', Name.Attribute),
            # Interpolation attribute, e.g. attr0.x.
            # NOTE(review): the '.' is unescaped, so it matches any
            # character between the digit and the letter — confirm intent.
            (r'(attr[0-9].[a-z])', Name.Attribute),
            (words((
                'op', 'vaddr', 'vdata', 'off', 'soffset', 'srsrc', 'format',
                'offset', 'offen', 'idxen', 'glc', 'dlc', 'slc', 'tfe', 'lds',
                'lit', 'unorm'), suffix=r'\b'), Name.Attribute),
            (r'(label_[a-z0-9]+)', Keyword),
            (r'(_L[0-9]*)', Name.Variable),
            # Scalar/vector instruction mnemonics, e.g. v_add_f32.
            (r'(s|v)_[a-z0-9_]+', Keyword),
            # Vector registers and special registers.
            (r'(v[0-9.]+|vcc|exec|v)', Name.Variable),
            # Scalar registers.
            (r's[0-9.]+|s', Name.Variable),
            (r'[0-9]+\.[^0-9]+', Number.Float),
            (r'(0[xX][a-z0-9]+)|([0-9]+)', Number.Integer)
        ]
    }

View File

@@ -0,0 +1,87 @@
"""
pygments.lexers.ampl
~~~~~~~~~~~~~~~~~~~~
Lexers for the AMPL language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, using, this, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['AmplLexer']
class AmplLexer(RegexLexer):
    """
    For AMPL source code.

    Highlights AMPL model/run files: command and declaration keywords,
    entity declarations (``set``/``param``/``var``/...), dotted suffixes
    on variables (``x.lb``, ``x.val``, ...), builtin functions, and
    set/logical operators.
    """
    name = 'Ampl'
    url = 'http://ampl.com/'
    aliases = ['ampl']
    filenames = ['*.run']
    version_added = '2.2'
    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Whitespace),
            (r'#.*?\n', Comment.Single),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            # Command and statement keywords.
            (words((
                'call', 'cd', 'close', 'commands', 'data', 'delete', 'display',
                'drop', 'end', 'environ', 'exit', 'expand', 'include', 'load',
                'model', 'objective', 'option', 'problem', 'purge', 'quit',
                'redeclare', 'reload', 'remove', 'reset', 'restore', 'shell',
                'show', 'solexpand', 'solution', 'solve', 'update', 'unload',
                'xref', 'coeff', 'coef', 'cover', 'obj', 'interval', 'default',
                'from', 'to', 'to_come', 'net_in', 'net_out', 'dimen',
                'dimension', 'check', 'complements', 'write', 'function',
                'pipe', 'format', 'if', 'then', 'else', 'in', 'while', 'repeat',
                'for'), suffix=r'\b'), Keyword.Reserved),
            (r'(integer|binary|symbolic|ordered|circular|reversed|INOUT|IN|OUT|LOCAL)',
             Keyword.Type),
            (r'\".*?\"', String.Double),
            (r'\'.*?\'', String.Single),
            (r'[()\[\]{},;:]+', Punctuation),
            # Dotted suffixes on model entities, e.g. x.lb, c.slack.
            (r'\b(\w+)(\.)(astatus|init0|init|lb0|lb1|lb2|lb|lrc|'
             r'lslack|rc|relax|slack|sstatus|status|ub0|ub1|ub2|'
             r'ub|urc|uslack|val)',
             bygroups(Name.Variable, Punctuation, Keyword.Reserved)),
            # Entity declarations: "set S", "var x", "subject to c", ...
            (r'(set|param|var|arc|minimize|maximize|subject to|s\.t\.|subj to|'
             r'node|table|suffix|read table|write table)(\s+)(\w+)',
             bygroups(Keyword.Declaration, Whitespace, Name.Variable)),
            # Tabular param declaration: "param : S : p ...".
            (r'(param)(\s*)(:)(\s*)(\w+)(\s*)(:)(\s*)((\w|\s)+)',
             bygroups(Keyword.Declaration, Whitespace, Punctuation, Whitespace,
                      Name.Variable, Whitespace, Punctuation, Whitespace, Name.Variable)),
            # let/fix/unfix with an optional indexing expression in braces.
            (r'(let|fix|unfix)(\s*)((?:\{.*\})?)(\s*)(\w+)',
             bygroups(Keyword.Declaration, Whitespace, using(this), Whitespace,
                      Name.Variable)),
            # Builtin mathematical, string and random-distribution functions.
            (words((
                'abs', 'acos', 'acosh', 'alias', 'asin', 'asinh', 'atan', 'atan2',
                'atanh', 'ceil', 'ctime', 'cos', 'exp', 'floor', 'log', 'log10',
                'max', 'min', 'precision', 'round', 'sin', 'sinh', 'sqrt', 'tan',
                'tanh', 'time', 'trunc', 'Beta', 'Cauchy', 'Exponential', 'Gamma',
                'Irand224', 'Normal', 'Normal01', 'Poisson', 'Uniform', 'Uniform01',
                'num', 'num0', 'ichar', 'char', 'length', 'substr', 'sprintf',
                'match', 'sub', 'gsub', 'print', 'printf', 'next', 'nextw', 'prev',
                'prevw', 'first', 'last', 'ord', 'ord0', 'card', 'arity',
                'indexarity'), prefix=r'\b', suffix=r'\b'), Name.Builtin),
            (r'(\+|\-|\*|/|\*\*|=|<=|>=|==|\||\^|<|>|\!|\.\.|:=|\&|\!=|<<|>>)',
             Operator),
            # Word-form set/logical operators.
            (words((
                'or', 'exists', 'forall', 'and', 'in', 'not', 'within', 'union',
                'diff', 'difference', 'symdiff', 'inter', 'intersect',
                'intersection', 'cross', 'setof', 'by', 'less', 'sum', 'prod',
                'product', 'div', 'mod'), suffix=r'\b'),
             Keyword.Reserved),  # Operator.Name but not enough emphasized with that
            (r'(\d+\.(?!\.)\d*|\.(?!.)\d+)([eE][+-]?\d+)?', Number.Float),
            (r'\d+([eE][+-]?\d+)?', Number.Integer),
            (r'[+-]?Infinity', Number.Integer),
            (r'(\w+|(\.(?!\.)))', Text)
        ]
    }

View File

@@ -0,0 +1,593 @@
"""
pygments.lexers.apdlexer
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for ANSYS Parametric Design Language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words, default
from pygments.token import Comment, Keyword, Name, Number, Operator, \
String, Generic, Punctuation, Whitespace, Escape
__all__ = ['apdlexer']
class apdlexer(RegexLexer):
    """
    For APDL source code.

    Highlights ANSYS Parametric Design Language: the (very large) sets of
    APDL commands, builtin ``()`` functions, element type names, ``!``
    comments, ``%...%`` parameter substitution, and numeric literals.
    Matching is case-insensitive.

    NOTE(review): the command tables below (``elafunb`` .. ``elafunh``)
    appear to be split arbitrarily for manageability; their union is what
    matters — verify additions against the ANSYS command reference.
    """
    name = 'ANSYS parametric design language'
    aliases = ['ansys', 'apdl']
    filenames = ['*.ans']
    url = 'https://www.ansys.com'
    version_added = '2.9'
    flags = re.IGNORECASE

    # list of elements
    elafunb = ("SURF152", "SURF153", "SURF154", "SURF156", "SHELL157",
               "SURF159", "LINK160", "BEAM161", "PLANE162",
               "SHELL163", "SOLID164", "COMBI165", "MASS166",
               "LINK167", "SOLID168", "TARGE169", "TARGE170",
               "CONTA171", "CONTA172", "CONTA173", "CONTA174",
               "CONTA175", "CONTA176", "CONTA177", "CONTA178",
               "PRETS179", "LINK180", "SHELL181", "PLANE182",
               "PLANE183", "MPC184", "SOLID185", "SOLID186",
               "SOLID187", "BEAM188", "BEAM189", "SOLSH190",
               "INTER192", "INTER193", "INTER194", "INTER195",
               "MESH200", "FOLLW201", "INTER202", "INTER203",
               "INTER204", "INTER205", "SHELL208", "SHELL209",
               "CPT212", "CPT213", "COMBI214", "CPT215", "CPT216",
               "CPT217", "FLUID220", "FLUID221", "PLANE223",
               "SOLID226", "SOLID227", "PLANE230", "SOLID231",
               "SOLID232", "PLANE233", "SOLID236", "SOLID237",
               "PLANE238", "SOLID239", "SOLID240", "HSFLD241",
               "HSFLD242", "SURF251", "SURF252", "REINF263",
               "REINF264", "REINF265", "SOLID272", "SOLID273",
               "SOLID278", "SOLID279", "SHELL281", "SOLID285",
               "PIPE288", "PIPE289", "ELBOW290", "USER300", "BEAM3",
               "BEAM4", "BEAM23", "BEAM24", "BEAM44", "BEAM54",
               "COMBIN7", "FLUID79", "FLUID80", "FLUID81", "FLUID141",
               "FLUID142", "INFIN9", "INFIN47", "PLANE13", "PLANE25",
               "PLANE42", "PLANE53", "PLANE67", "PLANE82", "PLANE83",
               "PLANE145", "PLANE146", "CONTAC12", "CONTAC52",
               "LINK1", "LINK8", "LINK10", "LINK32", "PIPE16",
               "PIPE17", "PIPE18", "PIPE20", "PIPE59", "PIPE60",
               "SHELL41", "SHELL43", "SHELL57", "SHELL63", "SHELL91",
               "SHELL93", "SHELL99", "SHELL150", "SOLID5", "SOLID45",
               "SOLID46", "SOLID65", "SOLID69", "SOLID92", "SOLID95",
               "SOLID117", "SOLID127", "SOLID128", "SOLID147",
               "SOLID148", "SOLID191", "VISCO88", "VISCO89",
               "VISCO106", "VISCO107", "VISCO108", "TRANS109")
    # Undocumented / legacy FLOTRAN, high-frequency and VT commands.
    elafunc = ("PGRAPH", "/VT", "VTIN", "VTRFIL", "VTTEMP", "PGRSET",
               "VTCLR", "VTMETH", "VTRSLT", "VTVMOD", "PGSELE",
               "VTDISC", "VTMP", "VTSEC", "PGWRITE", "VTEVAL", "VTOP",
               "VTSFE", "POUTRES", "VTFREQ", "VTPOST", "VTSL",
               "FLDATA1-40", "HFPCSWP", "MSDATA", "MSVARY", "QFACT",
               "FLOCHECK", "HFPOWER", "MSMASS", "PERI", "SPADP",
               "FLREAD", "HFPORT", "MSMETH", "PLFSS", "SPARM",
               "FLOTRAN", "HFSCAT", "MSMIR", "PLSCH", "SPFSS",
               "HFADP", "ICE", "MSNOMF", "PLSYZ", "SPICE", "HFARRAY",
               "ICEDELE", "MSPROP", "PLTD", "SPSCAN", "HFDEEM",
               "ICELIST", "MSQUAD", "PLTLINE", "SPSWP", "HFEIGOPT",
               "ICVFRC", "MSRELAX", "PLVFRC", "HFEREFINE", "LPRT",
               "MSSOLU", "/PICE", "HFMODPRT", "MSADV", "MSSPEC",
               "PLWAVE", "HFPA", "MSCAP", "MSTERM", "PRSYZ")
    # General APDL commands (V..W..X ranges and legacy optimization/probing).
    elafund = ("*VOPER", "VOVLAP", "*VPLOT", "VPLOT", "VPTN", "*VPUT",
               "VPUT", "*VREAD", "VROTAT", "VSBA", "VSBV", "VSBW",
               "/VSCALE", "*VSCFUN", "VSEL", "VSLA", "*VSTAT", "VSUM",
               "VSWEEP", "VSYMM", "VTRAN", "VTYPE", "/VUP", "*VWRITE",
               "/WAIT", "WAVES", "WERASE", "WFRONT", "/WINDOW",
               "WMID", "WMORE", "WPAVE", "WPCSYS", "WPLANE", "WPOFFS",
               "WPROTA", "WPSTYL", "WRFULL", "WRITE", "WRITEMAP",
               "*WRK", "WSORT", "WSPRINGS", "WSTART", "WTBCREATE",
               "XFDATA", "XFENRICH", "XFLIST", "/XFRM", "/XRANGE",
               "XVAR", "/YRANGE", "/ZOOM", "/WB", "XMLO", "/XML",
               "CNTR", "EBLOCK", "CMBLOCK", "NBLOCK", "/TRACK",
               "CWZPLOT", "~EUI", "NELE", "EALL", "NALL", "FLITEM",
               "LSLN", "PSOLVE", "ASLN", "/VERIFY", "/SSS", "~CFIN",
               "*EVAL", "*MOONEY", "/RUNSTAT", "ALPFILL",
               "ARCOLLAPSE", "ARDETACH", "ARFILL", "ARMERGE",
               "ARSPLIT", "FIPLOT", "GAPFINISH", "GAPLIST",
               "GAPMERGE", "GAPOPT", "GAPPLOT", "LNCOLLAPSE",
               "LNDETACH", "LNFILL", "LNMERGE", "LNSPLIT", "PCONV",
               "PLCONV", "PEMOPTS", "PEXCLUDE", "PINCLUDE", "PMETH",
               "/PMETH", "PMOPTS", "PPLOT", "PPRANGE", "PRCONV",
               "PRECISION", "RALL", "RFILSZ", "RITER", "RMEMRY",
               "RSPEED", "RSTAT", "RTIMST", "/RUNST", "RWFRNT",
               "SARPLOT", "SHSD", "SLPPLOT", "SLSPLOT", "VCVFILL",
               "/OPT", "OPEQN", "OPFACT", "OPFRST", "OPGRAD",
               "OPKEEP", "OPLOOP", "OPPRNT", "OPRAND", "OPSUBP",
               "OPSWEEP", "OPTYPE", "OPUSER", "OPVAR", "OPADD",
               "OPCLR", "OPDEL", "OPMAKE", "OPSEL", "OPANL", "OPDATA",
               "OPRESU", "OPSAVE", "OPEXE", "OPLFA", "OPLGR",
               "OPLIST", "OPLSW", "OPRFA", "OPRGR", "OPRSW",
               "PILECALC", "PILEDISPSET", "PILEGEN", "PILELOAD",
               "PILEMASS", "PILERUN", "PILESEL", "PILESTIF",
               "PLVAROPT", "PRVAROPT", "TOCOMP", "TODEF", "TOFREQ",
               "TOTYPE", "TOVAR", "TOEXE", "TOLOOP", "TOGRAPH",
               "TOLIST", "TOPLOT", "TOPRINT", "TOSTAT", "TZAMESH",
               "TZDELE", "TZEGEN", "XVAROPT", "PGSAVE", "SOLCONTROL",
               "TOTAL", "VTGEOM", "VTREAL", "VTSTAT")
    # General APDL commands (A..S ranges).
    elafune = ("/ANUM", "AOFFST", "AOVLAP", "APLOT", "APPEND", "APTN",
               "ARCLEN", "ARCTRM", "AREAS", "AREFINE", "AREMESH",
               "AREVERSE", "AROTAT", "ARSCALE", "ARSYM", "ASBA",
               "ASBL", "ASBV", "ASBW", "ASCRES", "ASEL", "ASIFILE",
               "*ASK", "ASKIN", "ASLL", "ASLV", "ASOL", "/ASSIGN",
               "ASUB", "ASUM", "ATAN", "ATRAN", "ATYPE", "/AUTO",
               "AUTOTS", "/AUX2", "/AUX3", "/AUX12", "/AUX15",
               "AVPRIN", "AVRES", "AWAVE", "/AXLAB", "*AXPY",
               "/BATCH", "BCSOPTION", "BETAD", "BF", "BFA", "BFADELE",
               "BFALIST", "BFCUM", "BFDELE", "BFE", "BFECUM",
               "BFEDELE", "BFELIST", "BFESCAL", "BFINT", "BFK",
               "BFKDELE", "BFKLIST", "BFL", "BFLDELE", "BFLIST",
               "BFLLIST", "BFSCALE", "BFTRAN", "BFUNIF", "BFV",
               "BFVDELE", "BFVLIST", "BIOOPT", "BIOT", "BLC4", "BLC5",
               "BLOCK", "BOOL", "BOPTN", "BSAX", "BSMD", "BSM1",
               "BSM2", "BSPLIN", "BSS1", "BSS2", "BSTE", "BSTQ",
               "BTOL", "BUCOPT", "C", "CALC", "CAMPBELL", "CBDOF",
               "CBMD", "CBMX", "CBTE", "CBTMP", "CDOPT", "CDREAD",
               "CDWRITE", "CE", "CECHECK", "CECMOD", "CECYC",
               "CEDELE", "CEINTF", "CELIST", "CENTER", "CEQN",
               "CERIG", "CESGEN", "CFACT", "*CFCLOS", "*CFOPEN",
               "*CFWRITE", "/CFORMAT", "CGLOC", "CGOMGA", "CGROW",
               "CHECK", "CHKMSH", "CINT", "CIRCLE", "CISOL",
               "/CLABEL", "/CLEAR", "CLOCAL", "CLOG", "/CLOG",
               "CLRMSHLN", "CM", "CMACEL", "/CMAP", "CMATRIX",
               "CMDELE", "CMDOMEGA", "CMEDIT", "CMGRP", "CMLIST",
               "CMMOD", "CMOMEGA", "CMPLOT", "CMROTATE", "CMSEL",
               "CMSFILE", "CMSOPT", "CMWRITE", "CNCHECK", "CNKMOD",
               "CNTR", "CNVTOL", "/COLOR", "*COMP", "COMBINE",
               "COMPRESS", "CON4", "CONE", "/CONFIG", "CONJUG",
               "/CONTOUR", "/COPY", "CORIOLIS", "COUPLE", "COVAL",
               "CP", "CPCYC", "CPDELE", "CPINTF", "/CPLANE", "CPLGEN",
               "CPLIST", "CPMERGE", "CPNGEN", "CPSGEN", "CQC",
               "*CREATE", "CRPLIM", "CS", "CSCIR", "CSDELE", "CSKP",
               "CSLIST", "CSWPLA", "CSYS", "/CTYPE", "CURR2D",
               "CUTCONTROL", "/CVAL", "CVAR", "/CWD", "CYCCALC",
               "/CYCEXPAND", "CYCFILES", "CYCFREQ", "*CYCLE",
               "CYCLIC", "CYCOPT", "CYCPHASE", "CYCSPEC", "CYL4",
               "CYL5", "CYLIND", "CZDEL", "CZMESH", "D", "DA",
               "DADELE", "DALIST", "DAMORPH", "DATA", "DATADEF",
               "DCGOMG", "DCUM", "DCVSWP", "DDASPEC", "DDELE",
               "DDOPTION", "DEACT", "DEFINE", "*DEL", "DELETE",
               "/DELETE", "DELTIM", "DELTIME", "DEMORPH", "DERIV", "DESIZE",
               "DESOL", "DETAB", "/DEVDISP", "/DEVICE", "/DFLAB",
               "DFLX", "DFSWAVE", "DIG", "DIGIT", "*DIM",
               "/DIRECTORY", "DISPLAY", "/DIST", "DJ", "DJDELE",
               "DJLIST", "DK", "DKDELE", "DKLIST", "DL", "DLDELE",
               "DLIST", "DLLIST", "*DMAT", "DMOVE", "DMPEXT",
               "DMPOPTION", "DMPRAT", "DMPSTR", "DNSOL", "*DO", "DOF",
               "DOFSEL", "DOMEGA", "*DOT", "*DOWHILE", "DSCALE",
               "/DSCALE", "DSET", "DSPOPTION", "DSUM", "DSURF",
               "DSYM", "DSYS", "DTRAN", "DUMP", "/DV3D", "DVAL",
               "DVMORPH", "DYNOPT", "E", "EALIVE", "EDADAPT", "EDALE",
               "EDASMP", "EDBOUND", "EDBX", "EDBVIS", "EDCADAPT",
               "EDCGEN", "EDCLIST", "EDCMORE", "EDCNSTR", "EDCONTACT",
               "EDCPU", "EDCRB", "EDCSC", "EDCTS", "EDCURVE",
               "EDDAMP", "EDDBL", "EDDC", "EDDRELAX", "EDDUMP",
               "EDELE", "EDENERGY", "EDFPLOT", "EDGCALE", "/EDGE",
               "EDHGLS", "EDHIST", "EDHTIME", "EDINT", "EDIPART",
               "EDIS", "EDLCS", "EDLOAD", "EDMP", "EDNB", "EDNDTSD",
               "EDNROT", "EDOPT", "EDOUT", "EDPART", "EDPC", "EDPL",
               "EDPVEL", "EDRC", "EDRD", "EDREAD", "EDRI", "EDRST",
               "EDRUN", "EDSHELL", "EDSOLV", "EDSP", "EDSTART",
               "EDTERM", "EDTP", "EDVEL", "EDWELD", "EDWRITE",
               "EEXTRUDE", "/EFACET", "EGEN", "*EIGEN", "EINFIN",
               "EINTF", "EKILL", "ELBOW", "ELEM", "ELIST", "*ELSE",
               "*ELSEIF", "EMAGERR", "EMATWRITE", "EMF", "EMFT",
               "EMID", "EMIS", "EMODIF", "EMORE", "EMSYM", "EMTGEN",
               "EMUNIT", "EN", "*END", "*ENDDO", "*ENDIF",
               "ENDRELEASE", "ENERSOL", "ENGEN", "ENORM", "ENSYM",
               "EORIENT", "EPLOT", "EQSLV", "ERASE", "/ERASE",
               "EREAD", "EREFINE", "EREINF", "ERESX", "ERNORM",
               "ERRANG", "ESCHECK", "ESEL", "/ESHAPE", "ESIZE",
               "ESLA", "ESLL", "ESLN", "ESLV", "ESOL", "ESORT",
               "ESSOLV", "ESTIF", "ESURF", "ESYM", "ESYS", "ET",
               "ETABLE", "ETCHG", "ETCONTROL", "ETDELE", "ETLIST",
               "ETYPE", "EUSORT", "EWRITE", "*EXIT", "/EXIT", "EXP",
               "EXPAND", "/EXPAND", "EXPASS", "*EXPORT", "EXPROFILE",
               "EXPSOL", "EXTOPT", "EXTREM", "EXUNIT", "F", "/FACET",
               "FATIGUE", "FC", "FCCHECK", "FCDELE", "FCLIST", "FCUM",
               "FCTYP", "FDELE", "/FDELE", "FE", "FEBODY", "FECONS",
               "FEFOR", "FELIST", "FESURF", "*FFT", "FILE",
               "FILEAUX2", "FILEAUX3", "FILEDISP", "FILL", "FILLDATA",
               "/FILNAME", "FINISH", "FITEM", "FJ", "FJDELE",
               "FJLIST", "FK", "FKDELE", "FKLIST", "FL", "FLIST",
               "FLLIST", "FLST", "FLUXV", "FLUREAD", "FMAGBC",
               "FMAGSUM", "/FOCUS", "FOR2D", "FORCE", "FORM",
               "/FORMAT", "FP", "FPLIST", "*FREE", "FREQ", "FRQSCL",
               "FS", "FSCALE", "FSDELE", "FSLIST", "FSNODE", "FSPLOT",
               "FSSECT", "FSSPARM", "FSUM", "FTCALC", "FTRAN",
               "FTSIZE", "FTWRITE", "FTYPE", "FVMESH", "GAP", "GAPF",
               "GAUGE", "GCDEF", "GCGEN", "/GCMD", "/GCOLUMN",
               "GENOPT", "GEOM", "GEOMETRY", "*GET", "/GFILE",
               "/GFORMAT", "/GLINE", "/GMARKER", "GMATRIX", "GMFACE",
               "*GO", "/GO", "/GOLIST", "/GOPR", "GP", "GPDELE",
               "GPLIST", "GPLOT", "/GRAPHICS", "/GRESUME", "/GRID",
               "/GROPT", "GRP", "/GRTYP", "/GSAVE", "GSBDATA",
               "GSGDATA", "GSLIST", "GSSOL", "/GST", "GSUM", "/GTHK",
               "/GTYPE", "HARFRQ", "/HBC", "HBMAT", "/HEADER", "HELP",
               "HELPDISP", "HEMIOPT", "HFANG", "HFSYM", "HMAGSOLV",
               "HPGL", "HPTCREATE", "HPTDELETE", "HRCPLX", "HREXP",
               "HROPT", "HROCEAN", "HROUT", "IC", "ICDELE", "ICLIST",
               "/ICLWID", "/ICSCALE", "*IF", "IGESIN", "IGESOUT",
               "/IMAGE", "IMAGIN", "IMESH", "IMMED", "IMPD",
               "INISTATE", "*INIT", "/INPUT", "/INQUIRE", "INRES",
               "INRTIA", "INT1", "INTSRF", "IOPTN", "IRLF", "IRLIST",
               "*ITENGINE", "JPEG", "JSOL", "K", "KATT", "KBC",
               "KBETW", "KCALC", "KCENTER", "KCLEAR", "KDELE",
               "KDIST", "KEEP", "KESIZE", "KEYOPT", "KEYPTS", "KEYW",
               "KFILL", "KGEN", "KL", "KLIST", "KMESH", "KMODIF",
               "KMOVE", "KNODE", "KPLOT", "KPSCALE", "KREFINE",
               "KSCALE", "KSCON", "KSEL", "KSLL", "KSLN", "KSUM",
               "KSYMM", "KTRAN", "KUSE", "KWPAVE", "KWPLAN", "L",
               "L2ANG", "L2TAN", "LANG", "LARC", "/LARC", "LAREA",
               "LARGE", "LATT", "LAYER", "LAYERP26", "LAYLIST",
               "LAYPLOT", "LCABS", "LCASE", "LCCALC", "LCCAT",
               "LCDEF", "LCFACT", "LCFILE", "LCLEAR", "LCOMB",
               "LCOPER", "LCSEL", "LCSL", "LCSUM", "LCWRITE",
               "LCZERO", "LDELE", "LDIV", "LDRAG", "LDREAD", "LESIZE",
               "LEXTND", "LFILLT", "LFSURF", "LGEN", "LGLUE",
               "LGWRITE", "/LIGHT", "LINA", "LINE", "/LINE", "LINES",
               "LINL", "LINP", "LINV", "LIST", "*LIST", "LLIST",
               "LMATRIX", "LMESH", "LNSRCH", "LOCAL", "LOVLAP",
               "LPLOT", "LPTN", "LREFINE", "LREVERSE", "LROTAT",
               "LSBA", "*LSBAC", "LSBL", "LSBV", "LSBW", "LSCLEAR",
               "LSDELE", "*LSDUMP", "LSEL", "*LSENGINE", "*LSFACTOR",
               "LSLA", "LSLK", "LSOPER", "/LSPEC", "LSREAD",
               "*LSRESTORE", "LSSCALE", "LSSOLVE", "LSTR", "LSUM",
               "LSWRITE", "/LSYMBOL", "LSYMM", "LTAN", "LTRAN",
               "LUMPM", "LVSCALE", "LWPLAN", "M", "MADAPT", "MAGOPT",
               "MAGSOLV", "/MAIL", "MAP", "/MAP", "MAP2DTO3D",
               "MAPSOLVE", "MAPVAR", "MASTER", "MAT", "MATER",
               "MCHECK", "MDAMP", "MDELE", "MDPLOT", "MEMM", "/MENU",
               "MESHING", "MFANALYSIS", "MFBUCKET", "MFCALC", "MFCI",
               "MFCLEAR", "MFCMMAND", "MFCONV", "MFDTIME", "MFELEM",
               "MFEM", "MFEXTER", "MFFNAME", "MFFR", "MFIMPORT",
               "MFINTER", "MFITER", "MFLCOMM", "MFLIST", "MFMAP",
               "MFORDER", "MFOUTPUT", "*MFOURI", "MFPSIMUL", "MFRC",
               "MFRELAX", "MFRSTART", "MFSORDER", "MFSURFACE",
               "MFTIME", "MFTOL", "*MFUN", "MFVOLUME", "MFWRITE",
               "MGEN", "MIDTOL", "/MKDIR", "MLIST", "MMASS", "MMF",
               "MODCONT", "MODE", "MODIFY", "MODMSH", "MODSELOPTION",
               "MODOPT", "MONITOR", "*MOPER", "MOPT", "MORPH", "MOVE",
               "MP", "MPAMOD", "MPCHG", "MPCOPY", "MPDATA", "MPDELE",
               "MPDRES", "/MPLIB", "MPLIST", "MPPLOT", "MPREAD",
               "MPRINT", "MPTEMP", "MPTGEN", "MPTRES", "MPWRITE",
               "/MREP", "MSAVE", "*MSG", "MSHAPE", "MSHCOPY",
               "MSHKEY", "MSHMID", "MSHPATTERN", "MSOLVE", "/MSTART",
               "MSTOLE", "*MULT", "*MWRITE", "MXPAND", "N", "NANG",
               "NAXIS", "NCNV", "NDELE", "NDIST", "NDSURF", "NEQIT",
               "/NERR", "NFORCE", "NGEN", "NKPT", "NLADAPTIVE",
               "NLDIAG", "NLDPOST", "NLGEOM", "NLHIST", "NLIST",
               "NLMESH", "NLOG", "NLOPT", "NMODIF", "NOCOLOR",
               "NODES", "/NOERASE", "/NOLIST", "NOOFFSET", "NOORDER",
               "/NOPR", "NORA", "NORL", "/NORMAL", "NPLOT", "NPRINT",
               "NREAD", "NREFINE", "NRLSUM", "*NRM", "NROPT",
               "NROTAT", "NRRANG", "NSCALE", "NSEL", "NSLA", "NSLE",
               "NSLK", "NSLL", "NSLV", "NSMOOTH", "NSOL", "NSORT",
               "NSTORE", "NSUBST", "NSVR", "NSYM", "/NUMBER",
               "NUMCMP", "NUMEXP", "NUMMRG", "NUMOFF", "NUMSTR",
               "NUMVAR", "NUSORT", "NWPAVE", "NWPLAN", "NWRITE",
               "OCDATA", "OCDELETE", "OCLIST", "OCREAD", "OCTABLE",
               "OCTYPE", "OCZONE", "OMEGA", "OPERATE", "OPNCONTROL",
               "OUTAERO", "OUTOPT", "OUTPR", "/OUTPUT", "OUTRES",
               "OVCHECK", "PADELE", "/PAGE", "PAGET", "PAPUT",
               "PARESU", "PARTSEL", "PARRES", "PARSAV", "PASAVE",
               "PATH", "PAUSE", "/PBC", "/PBF", "PCALC", "PCGOPT",
               "PCIRC", "/PCIRCLE", "/PCOPY", "PCROSS", "PDANL",
               "PDCDF", "PDCFLD", "PDCLR", "PDCMAT", "PDCORR",
               "PDDMCS", "PDDOEL", "PDEF", "PDEXE", "PDHIST",
               "PDINQR", "PDLHS", "PDMETH", "PDOT", "PDPINV",
               "PDPLOT", "PDPROB", "PDRESU", "PDROPT", "/PDS",
               "PDSAVE", "PDSCAT", "PDSENS", "PDSHIS", "PDUSER",
               "PDVAR", "PDWRITE", "PERBC2D", "PERTURB", "PFACT",
               "PHYSICS", "PIVCHECK", "PLCAMP", "PLCFREQ", "PLCHIST",
               "PLCINT", "PLCPLX", "PLCRACK", "PLDISP", "PLESOL",
               "PLETAB", "PLFAR", "PLF2D", "PLGEOM", "PLLS", "PLMAP",
               "PLMC", "PLNEAR", "PLNSOL", "/PLOPTS", "PLORB", "PLOT",
               "PLOTTING", "PLPAGM", "PLPATH", "PLSECT", "PLST",
               "PLTIME", "PLTRAC", "PLVAR", "PLVECT", "PLZZ",
               "/PMACRO", "PMAP", "PMGTRAN", "PMLOPT", "PMLSIZE",
               "/PMORE", "PNGR", "/PNUM", "POINT", "POLY", "/POLYGON",
               "/POST1", "/POST26", "POWERH", "PPATH", "PRANGE",
               "PRAS", "PRCAMP", "PRCINT", "PRCPLX", "PRED",
               "PRENERGY", "/PREP7", "PRERR", "PRESOL", "PRETAB",
               "PRFAR", "PRI2", "PRIM", "PRINT", "*PRINT", "PRISM",
               "PRITER", "PRJSOL", "PRNEAR", "PRNLD", "PRNSOL",
               "PROD", "PRORB", "PRPATH", "PRRFOR", "PRRSOL",
               "PRSCONTROL", "PRSECT", "PRTIME", "PRVAR", "PRVECT",
               "PSCONTROL", "PSCR", "PSDCOM", "PSDFRQ", "PSDGRAPH",
               "PSDRES", "PSDSPL", "PSDUNIT", "PSDVAL", "PSDWAV",
               "/PSEARCH", "PSEL", "/PSF", "PSMAT", "PSMESH",
               "/PSPEC", "/PSTATUS", "PSTRES", "/PSYMB", "PTR",
               "PTXY", "PVECT", "/PWEDGE", "QDVAL", "QRDOPT", "QSOPT",
               "QUAD", "/QUIT", "QUOT", "R", "RACE", "RADOPT",
               "RAPPND", "RATE", "/RATIO", "RBE3", "RCON", "RCYC",
               "RDEC", "RDELE", "READ", "REAL", "REALVAR", "RECTNG",
               "REMESH", "/RENAME", "REORDER", "*REPEAT", "/REPLOT",
               "RESCOMBINE", "RESCONTROL", "RESET", "/RESET", "RESP",
               "RESUME", "RESVEC", "RESWRITE", "*RETURN", "REXPORT",
               "REZONE", "RFORCE", "/RGB", "RIGID", "RIGRESP",
               "RIMPORT", "RLIST", "RMALIST", "RMANL", "RMASTER",
               "RMCAP", "RMCLIST", "/RMDIR", "RMFLVEC", "RMLVSCALE",
               "RMMLIST", "RMMRANGE", "RMMSELECT", "RMNDISP",
               "RMNEVEC", "RMODIF", "RMORE", "RMPORDER", "RMRESUME",
               "RMRGENERATE", "RMROPTIONS", "RMRPLOT", "RMRSTATUS",
               "RMSAVE", "RMSMPLE", "RMUSE", "RMXPORT", "ROCK",
               "ROSE", "RPOLY", "RPR4", "RPRISM", "RPSD", "RSFIT",
               "RSOPT", "RSPLIT", "RSPLOT", "RSPRNT", "RSSIMS",
               "RSTMAC", "RSTOFF", "RSURF", "RSYMM", "RSYS", "RTHICK",
               "SABS", "SADD", "SALLOW", "SAVE", "SBCLIST", "SBCTRAN",
               "SDELETE", "SE", "SECCONTROL", "SECDATA",
               "SECFUNCTION", "SECJOINT", "/SECLIB", "SECLOCK",
               "SECMODIF", "SECNUM", "SECOFFSET", "SECPLOT",
               "SECREAD", "SECSTOP", "SECTYPE", "SECWRITE", "SED",
               "SEDLIST", "SEEXP", "/SEG", "SEGEN", "SELIST", "SELM",
               "SELTOL", "SENERGY", "SEOPT", "SESYMM", "*SET", "SET",
               "SETFGAP", "SETRAN", "SEXP", "SF", "SFA", "SFACT",
               "SFADELE", "SFALIST", "SFBEAM", "SFCALC", "SFCUM",
               "SFDELE", "SFE", "SFEDELE", "SFELIST", "SFFUN",
               "SFGRAD", "SFL", "SFLDELE", "SFLEX", "SFLIST",
               "SFLLIST", "SFSCALE", "SFTRAN", "/SHADE", "SHELL",
               "/SHOW", "/SHOWDISP", "SHPP", "/SHRINK", "SLIST",
               "SLOAD", "SMALL", "*SMAT", "SMAX", "/SMBC", "SMBODY",
               "SMCONS", "SMFOR", "SMIN", "SMOOTH", "SMRTSIZE",
               "SMSURF", "SMULT", "SNOPTION", "SOLU", "/SOLU",
               "SOLUOPT", "SOLVE", "SORT", "SOURCE", "SPACE",
               "SPCNOD", "SPCTEMP", "SPDAMP", "SPEC", "SPFREQ",
               "SPGRAPH", "SPH4", "SPH5", "SPHERE", "SPLINE", "SPLOT",
               "SPMWRITE", "SPOINT", "SPOPT", "SPREAD", "SPTOPT",
               "SPOWER", "SPUNIT", "SPVAL", "SQRT", "*SREAD", "SRSS",
               "SSBT", "/SSCALE", "SSLN", "SSMT", "SSPA", "SSPB",
               "SSPD", "SSPE", "SSPM", "SSUM", "SSTATE", "STABILIZE",
               "STAOPT", "STAT", "*STATUS", "/STATUS", "STEF",
               "STORE", "SUBOPT", "SUBSET", "SUCALC",
               "SUCR", "SUDEL", "SUEVAL", "SUGET", "SUMAP", "SUMTYPE",
               "SUPL", "SUPR", "SURESU", "SUSAVE", "SUSEL", "SUVECT",
               "SV", "SVPLOT", "SVTYP", "SWADD", "SWDEL", "SWGEN",
               "SWLIST", "SYNCHRO", "/SYP", "/SYS", "TALLOW",
               "TARGET", "*TAXIS", "TB", "TBCOPY", "TBDATA", "TBDELE",
               "TBEO", "TBIN", "TBFIELD", "TBFT", "TBLE", "TBLIST",
               "TBMODIF", "TBPLOT", "TBPT", "TBTEMP", "TCHG", "/TEE",
               "TERM", "THEXPAND", "THOPT", "TIFF", "TIME",
               "TIMERANGE", "TIMINT", "TIMP", "TINTP",
               "/TLABEL", "TOFFST", "*TOPER", "TORQ2D", "TORQC2D",
               "TORQSUM", "TORUS", "TRANS", "TRANSFER", "*TREAD",
               "TREF", "/TRIAD", "/TRLCY", "TRNOPT", "TRPDEL",
               "TRPLIS", "TRPOIN", "TRTIME", "TSHAP", "/TSPEC",
               "TSRES", "TUNIF", "TVAR", "/TXTRE", "/TYPE", "TYPE",
               "/UCMD", "/UDOC", "/UI", "UIMP", "/UIS", "*ULIB", "/UPF",
               "UNDELETE", "UNDO", "/UNITS", "UNPAUSE", "UPCOORD",
               "UPGEOM", "*USE", "/USER", "USRCAL", "USRDOF",
               "USRELEM", "V", "V2DOPT", "VA", "*VABS", "VADD",
               "VARDEL", "VARNAM", "VATT", "VCLEAR", "*VCOL",
               "/VCONE", "VCROSS", "*VCUM", "VDDAM", "VDELE", "VDGL",
               "VDOT", "VDRAG", "*VEC", "*VEDIT", "VEORIENT", "VEXT",
               "*VFACT", "*VFILL", "VFOPT", "VFQUERY", "VFSM",
               "*VFUN", "VGEN", "*VGET", "VGET", "VGLUE", "/VIEW",
               "VIMP", "VINP", "VINV", "*VITRP", "*VLEN", "VLIST",
               "VLSCALE", "*VMASK", "VMESH", "VOFFST", "VOLUMES")

    # list of in-built () functions
    elafunf = ("NX()", "NY()", "NZ()", "KX()", "KY()", "KZ()", "LX()",
               "LY()", "LZ()", "LSX()", "LSY()", "LSZ()", "NODE()",
               "KP()", "DISTND()", "DISTKP()", "DISTEN()", "ANGLEN()",
               "ANGLEK()", "NNEAR()", "KNEAR()", "ENEARN()",
               "AREAND()", "AREAKP()", "ARNODE()", "NORMNX()",
               "NORMNY()", "NORMNZ()", "NORMKX()", "NORMKY()",
               "NORMKZ()", "ENEXTN()", "NELEM()", "NODEDOF()",
               "ELADJ()", "NDFACE()", "NMFACE()", "ARFACE()", "UX()",
               "UY()", "UZ()", "ROTX()", "ROTY()", "ROTZ()", "TEMP()",
               "PRES()", "VX()", "VY()", "VZ()", "ENKE()", "ENDS()",
               "VOLT()", "MAG()", "AX()", "AY()", "AZ()",
               "VIRTINQR()", "KWGET()", "VALCHR()", "VALHEX()",
               "CHRHEX()", "STRFILL()", "STRCOMP()", "STRPOS()",
               "STRLENG()", "UPCASE()", "LWCASE()", "JOIN()",
               "SPLIT()", "ABS()", "SIGN()", "CXABS()", "EXP()",
               "LOG()", "LOG10()", "SQRT()", "NINT()", "MOD()",
               "RAND()", "GDIS()", "SIN()", "COS()", "TAN()",
               "SINH()", "COSH()", "TANH()", "ASIN()", "ACOS()",
               "ATAN()", "ATAN2()")
    # Entity-selection and next-entity () functions.
    elafung = ("NSEL()", "ESEL()", "KSEL()", "LSEL()", "ASEL()",
               "VSEL()", "NDNEXT()", "ELNEXT()", "KPNEXT()",
               "LSNEXT()", "ARNEXT()", "VLNEXT()", "CENTRX()",
               "CENTRY()", "CENTRZ()")
    # CAD-import (~...IN) and remaining A.. commands.
    elafunh = ("~CAT5IN", "~CATIAIN", "~PARAIN", "~PROEIN", "~SATIN",
               "~UGIN", "A", "AADD", "AATT", "ABEXTRACT", "*ABBR",
               "ABBRES", "ABBSAV", "ABS", "ACCAT", "ACCOPTION",
               "ACEL", "ACLEAR", "ADAMS", "ADAPT", "ADD", "ADDAM",
               "ADELE", "ADGL", "ADRAG", "AESIZE", "AFILLT", "AFLIST",
               "AFSURF", "*AFUN", "AGEN", "AGLUE", "AINA", "AINP",
               "AINV", "AL", "ALIST", "ALLSEL", "ALPHAD", "AMAP",
               "AMESH", "/AN3D", "ANCNTR", "ANCUT", "ANCYC", "ANDATA",
               "ANDSCL", "ANDYNA", "/ANFILE", "ANFLOW", "/ANGLE",
               "ANHARM", "ANIM", "ANISOS", "ANMODE", "ANMRES",
               "/ANNOT", "ANORM", "ANPRES", "ANSOL", "ANSTOAQWA",
               "ANSTOASAS", "ANTIME", "ANTYPE")
    # Commands whose argument is free text (comments/titles).
    special = ("/COM", "/TITLE", "STITLE")
    # Element type names highlighted as Name.Property.
    elements = ("SOLID5",
                "LINK11",
                "PLANE13",
                "COMBIN14",
                "MASS2",
                "PLANE25",
                "MATRIX27",
                "FLUID29",
                "FLUID30",
                "LINK31",
                "LINK33",
                "LINK34",
                "PLANE35",
                "SOURC36",
                "COMBIN37",
                "FLUID38",
                "COMBIN39",
                "COMBIN40",
                "INFIN47",
                "MATRIX50",
                "PLANE55",
                "SHELL61",
                "LINK68",
                "SOLID70",
                "MASS71",
                "PLANE75",
                "PLANE77",
                "PLANE78",
                "PLANE83",
                "SOLID87",
                "SOLID90",
                "CIRCU94",
                "SOLID96",
                "SOLID98",
                "INFIN110",
                "INFIN111",
                "FLUID116",
                "PLANE121",
                "SOLID122",
                "SOLID123",
                "CIRCU124",
                "CIRCU125",
                "TRANS126",
                "FLUID129",
                "FLUID130",
                "SHELL131",
                "SHELL132",
                "FLUID136",
                "FLUID138",
                "FLUID139",
                "SURF151",
                "SURF152",
                "SURF153",
                "SURF154",
                "SURF155",
                "SURF156",
                "SHELL157",
                "SURF159",
                "TARGE169",
                "TARGE170",
                "CONTA172",
                "CONTA174",
                "CONTA175",
                "CONTA177",
                "CONTA178",
                "PRETS179",
                "LINK180",
                "SHELL181",
                "PLANE182",
                "PLANE183",
                "MPC184",
                "SOLID185",
                "SOLID186",
                "SOLID187",
                "BEAM188",
                "BEAM189",
                "SOLSH190",
                "INTER192",
                "INTER193",
                "INTER194",
                "INTER195",
                "MESH200",
                "FOLLW201",
                "INTER202",
                "INTER203",
                "INTER204",
                "INTER205",
                "SHELL208",
                "SHELL209",
                "CPT212",
                "CPT213",
                "COMBI214",
                "CPT215",
                "CPT216",
                "CPT217",
                "FLUID218",
                "FLUID220",
                "FLUID221",
                "PLANE222",
                "PLANE223",
                "SOLID225",
                "SOLID226",
                "SOLID227",
                "PLANE230",
                "SOLID231",
                "SOLID232",
                "PLANE233",
                "SOLID236",
                "SOLID237",
                "PLANE238",
                "SOLID239",
                "SOLID240",
                "HSFLD241",
                "HSFLD242",
                "COMBI250",
                "SURF251",
                "SURF252",
                "INFIN257",
                "REINF263",
                "REINF264",
                "REINF265",
                "SOLID272",
                "SOLID273",
                "SOLID278",
                "SOLID279",
                "CABLE280",
                "SHELL281",
                "SOLID285",
                "PIPE288",
                "PIPE289",
                "ELBOW290",
                "SOLID291",
                "PLANE292",
                "PLANE293",
                "USER300")
    tokens = {
        'root': [
            (r'[^\S\n]+', Whitespace),
            # A command keyword at the start of a statement; the rest of the
            # line is lexed in the 'non-keyword' state.
            (words((elafunb+elafunc+elafund+elafune+elafunh+special), suffix=r'\b'), Keyword, 'non-keyword'),
            default('non-keyword'),
        ],
        'non-keyword': [
            # '!' comment runs to end of line and ends the statement.
            (r'!.*\n', Comment, '#pop'),
            # %param% substitution.
            (r'%.*?%', Escape),
            include('strings'),
            include('nums'),
            (words((elafunf+elafung), suffix=r'\b'), Name.Builtin),
            (words((elements), suffix=r'\b'), Name.Property),
            include('core'),
            # ARn argument-passing parameters.
            (r'AR[0-9]+', Name.Variable.Instance),
            (r'[a-z_][a-z0-9_]*', Name.Variable),
            # End of line ends the statement; pop back to 'root'.
            (r'\n+', Whitespace, '#pop'),
            (r'[^\S\n]+', Whitespace),
        ],
        'core': [
            # Operators
            (r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=|\(|\))', Operator),
            (r'/EOF', Generic.Emph),
            (r'[\.(),:&;]', Punctuation),
        ],
        'strings': [
            (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
            (r'[$%]', String.Symbol),
        ],
        'nums': [
            (r'[+-]?\d*\.\d+([efEF][-+]?\d+)?', Number.Float),  # with dot
            (r'([+-]?\d+([efEF][-+]?\d+))', Number.Float),  # With scientific notation
            (r'\b\d+(?![.ef])', Number.Integer),  # integer simple
        ]
    }

View File

@@ -0,0 +1,103 @@
"""
pygments.lexers.apl
~~~~~~~~~~~~~~~~~~~
Lexers for APL.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['APLLexer']
class APLLexer(RegexLexer):
    """
    A simple APL lexer.

    Handles comments (traditional ``⍝`` and GNU/NGN ``#``), single- and
    double-quoted strings, distinguished ``⎕``-names, numbers (including
    complex ``J`` notation and high-minus ``¯``), APL primitive functions
    and operators, arrows, and dfn syntax.
    """
    name = 'APL'
    url = 'https://en.m.wikipedia.org/wiki/APL_(programming_language)'
    aliases = ['apl']
    filenames = [
        '*.apl', '*.aplf', '*.aplo', '*.apln',
        '*.aplc', '*.apli', '*.dyalog',
    ]
    version_added = '2.0'

    tokens = {
        'root': [
            # Whitespace
            # ==========
            (r'\s+', Whitespace),
            #
            # Comment
            # =======
            # '⍝' is traditional; '#' is supported by GNU APL and NGN (but not Dyalog)
            (r'[⍝#].*$', Comment.Single),
            #
            # Strings
            # =======
            # Quotes are escaped by doubling them.
            (r'\'((\'\')|[^\'])*\'', String.Single),
            (r'"(("")|[^"])*"', String.Double),  # supported by NGN APL
            #
            # Punctuation
            # ===========
            # This token type is used for diamond and parenthesis
            # but not for bracket and ; (see below)
            (r'[⋄◇()]', Punctuation),
            #
            # Array indexing
            # ==============
            # Since this token type is very important in APL, it is not included in
            # the punctuation token type but rather in the following one
            (r'[\[\];]', String.Regex),
            #
            # Distinguished names
            # ===================
            # following IBM APL2 standard
            (r'⎕[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Function),
            #
            # Labels
            # ======
            # following IBM APL2 standard
            # (r'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*:', Name.Label),
            #
            # Variables
            # =========
            # following IBM APL2 standard (with a leading _ ok for GNU APL and Dyalog)
            (r'[A-Za-zΔ∆⍙_][A-Za-zΔ∆⍙_¯0-9]*', Name.Variable),
            #
            # Numbers
            # =======
            # High-minus ¯ for negatives, optional J-suffixed imaginary part.
            (r'¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞)'
             r'([Jj]¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞))?',
             Number),
            #
            # Operators
            # ==========
            (r'[\.\\\/⌿⍀¨⍣⍨⍠⍤∘⌸&⌶@⌺⍥⍛⍢]', Name.Attribute),  # closest token type
            (r'[+\-×÷⌈⌊∣|?*⍟○!⌹<≤=>≥≠≡≢∊⍷∪∩~∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⌷⍋⍒⊤⊥⍕⍎⊣⊢⍁⍂≈⌸⍯↗⊆⊇⍸√⌾…⍮]',
             Operator),
            #
            # Constant
            # ========
            # Zilde (⍬), the empty numeric vector.  The pattern was
            # previously empty (r''), which is a zero-width match: it
            # consumes no input and can stall the RegexLexer.
            (r'⍬', Name.Constant),
            #
            # Quad symbol
            # ===========
            (r'[⎕⍞]', Name.Variable.Global),
            #
            # Arrows left/right
            # =================
            (r'[←→]', Keyword.Declaration),
            #
            # D-Fn
            # ====
            (r'[⍺⍵⍶⍹∇:]', Name.Builtin.Pseudo),
            (r'[{}]', Keyword.Type),
        ],
    }

View File

@@ -0,0 +1,315 @@
"""
pygments.lexers.archetype
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Archetype-related syntaxes, including ODIN, ADL and cADL.
For uses of this syntax, see the openEHR archetypes <http://www.openEHR.org/ckm>
Contributed by Thomas Beale <https://github.com/wolandscat>,
<https://bitbucket.org/thomas_beale>.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, using, default
from pygments.token import Text, Comment, Name, Literal, Number, String, \
Punctuation, Keyword, Operator, Generic, Whitespace
__all__ = ['OdinLexer', 'CadlLexer', 'AdlLexer']
class AtomsLexer(RegexLexer):
    """
    Lexer for Values used in ADL and ODIN.

    Base lexer: it only defines shared pseudo-states (value literals,
    dates/durations, strings, URIs, term codes, intervals) that the
    ODIN, cADL and ADL lexers below pull in via ``include``.

    .. versionadded:: 2.1
    """
    tokens = {
        # ----- pseudo-states for inclusion -----
        'whitespace': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            # ADL/ODIN line comments start with '--'
            (r'([ \t]*)(--.*)$', bygroups(Whitespace, Comment)),
        ],
        'archetype_id': [
            # openEHR archetype HRID, e.g. openEHR-EHR-OBSERVATION.xxx.v1
            (r'([ \t]*)(([a-zA-Z]\w+(\.[a-zA-Z]\w+)*::)?[a-zA-Z]\w+(-[a-zA-Z]\w+){2}'
             r'\.\w+[\w-]*\.v\d+(\.\d+){,2}((-[a-z]+)(\.\d+)?)?)',
             bygroups(Whitespace, Name.Decorator)),
        ],
        'date_constraints': [
            # ISO 8601-based date/time constraints
            (r'[Xx?YyMmDdHhSs\d]{2,4}([:-][Xx?YyMmDdHhSs\d]{2}){2}', Literal.Date),
            # ISO 8601-based duration constraints + optional trailing slash
            (r'(P[YyMmWwDd]+(T[HhMmSs]+)?|PT[HhMmSs]+)/?', Literal.Date),
        ],
        'ordered_values': [
            # ISO 8601 date with optional 'T' ligature
            (r'\d{4}-\d{2}-\d{2}T?', Literal.Date),
            # ISO 8601 time
            (r'\d{2}:\d{2}:\d{2}(\.\d+)?([+-]\d{4}|Z)?', Literal.Date),
            # ISO 8601 duration
            (r'P((\d*(\.\d+)?[YyMmWwDd]){1,3}(T(\d*(\.\d+)?[HhMmSs]){,3})?|'
             r'T(\d*(\.\d+)?[HhMmSs]){,3})', Literal.Date),
            # numbers: scientific, float/percentage, hex, integer (in this order)
            (r'[+-]?(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
            (r'[+-]?\d*\.\d+%?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[+-]?\d+%?', Number.Integer),
        ],
        'values': [
            include('ordered_values'),
            (r'([Tt]rue|[Ff]alse)', Literal),
            (r'"', String, 'string'),
            (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            # URI scheme prefix hands off to the 'uri' state
            (r'[a-z][a-z0-9+.-]*:', Literal, 'uri'),
            # term code
            (r'(\[)(\w[\w-]*(?:\([^)\n]+\))?)(::)(\w[\w-]*)(\])',
             bygroups(Punctuation, Name.Decorator, Punctuation, Name.Decorator,
                      Punctuation)),
            (r'\|', Punctuation, 'interval'),
            # list continuation
            (r'\.\.\.', Punctuation),
        ],
        'constraint_values': [
            (r'(\[)(\w[\w-]*(?:\([^)\n]+\))?)(::)',
             bygroups(Punctuation, Name.Decorator, Punctuation), 'adl14_code_constraint'),
            # ADL 1.4 ordinal constraint
            (r'(\d*)(\|)(\[\w[\w-]*::\w[\w-]*\])((?:[,;])?)',
             bygroups(Number, Punctuation, Name.Decorator, Punctuation)),
            include('date_constraints'),
            include('values'),
        ],
        # ----- real states -----
        'string': [
            ('"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            # all other characters
            (r'[^\\"]+', String),
            # stray backslash
            (r'\\', String),
        ],
        'uri': [
            # effective URI terminators
            (r'[,>\s]', Punctuation, '#pop'),
            (r'[^>\s,]+', Literal),
        ],
        'interval': [
            (r'\|', Punctuation, '#pop'),
            include('ordered_values'),
            (r'\.\.', Punctuation),
            (r'[<>=] *', Punctuation),
            # handle +/-
            (r'\+/-', Punctuation),
            (r'\s+', Whitespace),
        ],
        'any_code': [
            include('archetype_id'),
            # if it is a code
            (r'[a-z_]\w*[0-9.]+(@[^\]]+)?', Name.Decorator),
            # if it is tuple with attribute names
            (r'[a-z_]\w*', Name.Class),
            # if it is an integer, i.e. Xpath child index
            (r'[0-9]+', Text),
            (r'\|', Punctuation, 'code_rubric'),
            (r'\]', Punctuation, '#pop'),
            # handle use_archetype statement
            (r'(\s*)(,)(\s*)', bygroups(Whitespace, Punctuation, Whitespace)),
        ],
        'code_rubric': [
            (r'\|', Punctuation, '#pop'),
            (r'[^|]+', String),
        ],
        'adl14_code_constraint': [
            (r'\]', Punctuation, '#pop'),
            (r'\|', Punctuation, 'code_rubric'),
            (r'(\w[\w-]*)([;,]?)', bygroups(Name.Decorator, Punctuation)),
            include('whitespace'),
        ],
    }
class OdinLexer(AtomsLexer):
    """
    Lexer for ODIN syntax.

    ODIN is the object-data notation used in openEHR archetype metadata
    sections; inherits all value-literal states from AtomsLexer.
    """
    name = 'ODIN'
    aliases = ['odin']
    filenames = ['*.odin']
    mimetypes = ['text/odin']
    url = 'https://github.com/openEHR/odin'
    version_added = '2.1'
    tokens = {
        'path': [
            (r'>', Punctuation, '#pop'),
            # attribute name
            (r'[a-z_]\w*', Name.Class),
            (r'/', Punctuation),
            (r'\[', Punctuation, 'key'),
            (r'(\s*)(,)(\s*)', bygroups(Whitespace, Punctuation, Whitespace), '#pop'),
            (r'\s+', Whitespace, '#pop'),
        ],
        'key': [
            include('values'),
            (r'\]', Punctuation, '#pop'),
        ],
        'type_cast': [
            (r'\)', Punctuation, '#pop'),
            (r'[^)]+', Name.Class),
        ],
        'root': [
            include('whitespace'),
            # NOTE(review): booleans are matched here although include('values')
            # below also covers them — the earlier rule wins; kept for clarity.
            (r'([Tt]rue|[Ff]alse)', Literal),
            include('values'),
            # x-ref path
            (r'/', Punctuation, 'path'),
            # x-ref path starting with key
            (r'\[', Punctuation, 'key'),
            # attribute name
            (r'[a-z_]\w*', Name.Class),
            (r'=', Operator),
            (r'\(', Punctuation, 'type_cast'),
            (r',', Punctuation),
            (r'<', Punctuation),
            (r'>', Punctuation),
            (r';', Punctuation),
        ],
    }
class CadlLexer(AtomsLexer):
    """
    Lexer for cADL syntax.

    cADL is the constraint sub-language of ADL used in the ``definition``
    section of an archetype; inherits value states from AtomsLexer.
    """
    name = 'cADL'
    aliases = ['cadl']
    filenames = ['*.cadl']
    url = 'https://specifications.openehr.org/releases/AM/latest/ADL2.html#_cadl_constraint_adl'
    version_added = '2.1'
    tokens = {
        'path': [
            # attribute name
            (r'[a-z_]\w*', Name.Class),
            (r'/', Punctuation),
            (r'\[', Punctuation, 'any_code'),
            (r'\s+', Punctuation, '#pop'),
        ],
        'root': [
            include('whitespace'),
            (r'(cardinality|existence|occurrences|group|include|exclude|'
             r'allow_archetype|use_archetype|use_node)\W', Keyword.Type),
            (r'(and|or|not|there_exists|xor|implies|for_all)\W', Keyword.Type),
            (r'(after|before|closed)\W', Keyword.Type),
            (r'(not)\W', Operator),
            (r'(matches|is_in)\W', Operator),
            # is_in / not is_in char (∈ / ∉)
            ('(\u2208|\u2209)', Operator),
            # there_exists / not there_exists / for_all / and / or / xor / tilde
            # Fixed: the last alternative read '\223C' (an octal escape plus
            # a literal 'C'), a typo for the TILDE OPERATOR '\u223C'.
            ('(\u2203|\u2204|\u2200|\u2227|\u2228|\u22BB|\u223C)',
             Operator),
            # regex in slot or as string constraint
            (r'(\{)(\s*)(/[^}]+/)(\s*)(\})',
             bygroups(Punctuation, Whitespace, String.Regex, Whitespace, Punctuation)),
            # regex in slot or as string constraint
            (r'(\{)(\s*)(\^[^}]+\^)(\s*)(\})',
             bygroups(Punctuation, Whitespace, String.Regex, Whitespace, Punctuation)),
            (r'/', Punctuation, 'path'),
            # for cardinality etc
            (r'(\{)((?:\d+\.\.)?(?:\d+|\*))'
             r'((?:\s*;\s*(?:ordered|unordered|unique)){,2})(\})',
             bygroups(Punctuation, Number, Number, Punctuation)),
            # [{ is start of a tuple value
            (r'\[\{', Punctuation),
            (r'\}\]', Punctuation),
            (r'\{', Punctuation),
            (r'\}', Punctuation),
            include('constraint_values'),
            # type name
            (r'[A-Z]\w+(<[A-Z]\w+([A-Za-z_<>]*)>)?', Name.Class),
            # attribute name
            (r'[a-z_]\w*', Name.Class),
            (r'\[', Punctuation, 'any_code'),
            # NOTE(review): the trailing ']?' makes a literal ']' optional at
            # the end of this operator pattern; looks accidental — confirm
            # against the upstream grammar before changing.
            (r'(~|//|\\\\|\+|-|/|\*|\^|!=|=|<=|>=|<|>]?)', Operator),
            (r'\(', Punctuation),
            (r'\)', Punctuation),
            # for lists of values
            (r',', Punctuation),
            (r'"', String, 'string'),
            # for assumed value
            (r';', Punctuation),
        ],
    }
class AdlLexer(AtomsLexer):
    """
    Lexer for ADL syntax.

    Top-level archetype lexer: section headers are highlighted here, while
    section bodies are delegated to OdinLexer / CadlLexer via ``using``.
    """
    name = 'ADL'
    aliases = ['adl']
    filenames = ['*.adl', '*.adls', '*.adlf', '*.adlx']
    url = 'https://specifications.openehr.org/releases/AM/latest/ADL2.html'
    version_added = '2.1'
    tokens = {
        'whitespace': [
            # blank line ends
            (r'\s*\n', Whitespace),
            # comment-only line
            (r'^([ \t]*)(--.*)$', bygroups(Whitespace, Comment)),
        ],
        'odin_section': [
            # repeating the following two rules from the root state enable multi-line
            # strings that start in the first column to be dealt with
            (r'^(language|description|ontology|terminology|annotations|'
             r'component_terminologies|revision_history)([ \t]*\n)',
             bygroups(Generic.Heading, Whitespace)),
            (r'^(definition)([ \t]*\n)', bygroups(Generic.Heading, Whitespace), 'cadl_section'),
            # indented (or blank) lines are ODIN content
            (r'^([ \t]*|[ \t]+.*)\n', using(OdinLexer)),
            (r'^([^"]*")(>[ \t]*\n)', bygroups(String, Punctuation)),
            # template overlay delimiter
            (r'^----------*\n', Text, '#pop'),
            (r'^.*\n', String),
            default('#pop'),
        ],
        'cadl_section': [
            (r'^([ \t]*|[ \t]+.*)\n', using(CadlLexer)),
            default('#pop'),
        ],
        'rules_section': [
            (r'^[ \t]+.*\n', using(CadlLexer)),
            default('#pop'),
        ],
        'metadata': [
            (r'\)', Punctuation, '#pop'),
            (r';', Punctuation),
            (r'([Tt]rue|[Ff]alse)', Literal),
            # numbers and version ids
            (r'\d+(\.\d+)*', Literal),
            # Guids
            (r'(\d|[a-fA-F])+(-(\d|[a-fA-F])+){3,}', Literal),
            (r'\w+', Name.Class),
            (r'"', String, 'string'),
            (r'=', Operator),
            (r'[ \t]+', Whitespace),
            default('#pop'),
        ],
        'root': [
            (r'^(archetype|template_overlay|operational_template|template|'
             r'speciali[sz]e)', Generic.Heading),
            (r'^(language|description|ontology|terminology|annotations|'
             r'component_terminologies|revision_history)[ \t]*\n',
             Generic.Heading, 'odin_section'),
            (r'^(definition)[ \t]*\n', Generic.Heading, 'cadl_section'),
            (r'^(rules)[ \t]*\n', Generic.Heading, 'rules_section'),
            include('archetype_id'),
            # parenthesised metadata after the archetype id
            (r'([ \t]*)(\()', bygroups(Whitespace, Punctuation), 'metadata'),
            include('whitespace'),
        ],
    }

View File

@@ -0,0 +1,116 @@
"""
pygments.lexers.arrow
~~~~~~~~~~~~~~~~~~~~~
Lexer for Arrow.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, default, include
from pygments.token import Text, Operator, Keyword, Punctuation, Name, \
String, Number, Whitespace
__all__ = ['ArrowLexer']
TYPES = r'\b(int|bool|char)((?:\[\])*)(?=\s+)'
IDENT = r'([a-zA-Z_][a-zA-Z0-9_]*)'
DECL = TYPES + r'(\s+)' + IDENT
class ArrowLexer(RegexLexer):
    """
    Lexer for Arrow

    Arrow programs draw their control flow as ASCII-art arrows; the leading
    ``|`` rails of each line are treated as punctuation.
    """
    name = 'Arrow'
    url = 'https://pypi.org/project/py-arrow-lang/'
    aliases = ['arrow']
    filenames = ['*.arw']
    version_added = '2.7'
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            # leading arrow "rails" at the start of a line
            (r'^[|\s]+', Punctuation),
            include('blocks'),
            include('statements'),
            include('expressions'),
        ],
        'blocks': [
            # function header: keyword, arrow, return-type declaration, '('
            (r'(function)(\n+)(/-->)(\s*)' +
             DECL +  # 4 groups
             r'(\()', bygroups(
                 Keyword.Reserved, Whitespace, Punctuation,
                 Whitespace, Keyword.Type, Punctuation, Whitespace,
                 Name.Function, Punctuation
             ), 'fparams'),
            # branch/loop arrow glyphs
            (r'/-->$|\\-->$|/--<|\\--<|\^', Punctuation),
        ],
        'statements': [
            (DECL, bygroups(Keyword.Type, Punctuation, Text, Name.Variable)),
            (r'\[', Punctuation, 'index'),
            (r'=', Operator),
            (r'require|main', Keyword.Reserved),
            (r'print', Keyword.Reserved, 'print'),
        ],
        'expressions': [
            (r'\s+', Whitespace),
            (r'[0-9]+', Number.Integer),
            (r'true|false', Keyword.Constant),
            (r"'", String.Char, 'char'),
            (r'"', String.Double, 'string'),
            (r'\{', Punctuation, 'array'),
            (r'==|!=|<|>|\+|-|\*|/|%', Operator),
            (r'and|or|not|length', Operator.Word),
            (r'(input)(\s+)(int|char\[\])', bygroups(
                Keyword.Reserved, Whitespace, Keyword.Type
            )),
            # function call: identifier followed by '('
            (IDENT + r'(\()', bygroups(
                Name.Function, Punctuation
            ), 'fargs'),
            (IDENT, Name.Variable),
            (r'\[', Punctuation, 'index'),
            (r'\(', Punctuation, 'expressions'),
            (r'\)', Punctuation, '#pop'),
        ],
        'print': [
            include('expressions'),
            (r',', Punctuation),
            default('#pop'),
        ],
        'fparams': [
            (DECL, bygroups(Keyword.Type, Punctuation, Whitespace, Name.Variable)),
            (r',', Punctuation),
            (r'\)', Punctuation, '#pop'),
        ],
        'escape': [
            (r'\\(["\\/abfnrtv]|[0-9]{1,3}|x[0-9a-fA-F]{2}|u[0-9a-fA-F]{4})',
             String.Escape),
        ],
        'char': [
            (r"'", String.Char, '#pop'),
            include('escape'),
            (r"[^'\\]", String.Char),
        ],
        'string': [
            (r'"', String.Double, '#pop'),
            include('escape'),
            (r'[^"\\]+', String.Double),
        ],
        'array': [
            include('expressions'),
            (r'\}', Punctuation, '#pop'),
            (r',', Punctuation),
        ],
        'fargs': [
            include('expressions'),
            (r'\)', Punctuation, '#pop'),
            (r',', Punctuation),
        ],
        'index': [
            include('expressions'),
            (r'\]', Punctuation, '#pop'),
        ],
    }

View File

@@ -0,0 +1,249 @@
"""
pygments.lexers.arturo
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Arturo language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, do_insertions, include, \
this, using, words
from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \
Punctuation, String, Text
from pygments.util import ClassNotFound, get_bool_opt
__all__ = ['ArturoLexer']
class ArturoLexer(RegexLexer):
    """
    For Arturo source code.

    See `Arturo's Github <https://github.com/arturo-lang/arturo>`_
    and `Arturo's Website <https://arturo-lang.io/>`_.
    """
    name = 'Arturo'
    aliases = ['arturo', 'art']
    filenames = ['*.art']
    url = 'https://arturo-lang.io/'
    version_added = '2.14'

    def __init__(self, **options):
        # When true, annotated strings ({!lang ...}) are sub-lexed with the
        # named language's lexer.
        self.handle_annotateds = get_bool_opt(options, 'handle_annotateds',
                                              True)
        RegexLexer.__init__(self, **options)

    def handle_annotated_strings(self, match):
        """Adds syntax from another languages inside annotated strings

        match args:
        1:open_string,
        2:exclamation_mark,
        3:lang_name,
        4:space_or_newline,
        5:code,
        6:close_string
        """
        from pygments.lexers import get_lexer_by_name

        # Header's section
        yield match.start(1), String.Double, match.group(1)
        yield match.start(2), String.Interpol, match.group(2)
        yield match.start(3), String.Interpol, match.group(3)
        yield match.start(4), Text.Whitespace, match.group(4)

        lexer = None
        if self.handle_annotateds:
            try:
                lexer = get_lexer_by_name(match.group(3).strip())
            except ClassNotFound:
                pass
        code = match.group(5)
        if lexer is None:
            # Fixed: the position must be yielded (match.start(5)), not the
            # matched text itself (match.group(5)).
            yield match.start(5), String, code
        else:
            yield from do_insertions([], lexer.get_tokens_unprocessed(code))
        yield match.start(6), String.Double, match.group(6)

    tokens = {
        'root': [
            (r';.*?$', Comment.Single),
            (r'^((\s#!)|(#!)).*?$', Comment.Hashbang),

            # Constants
            (words(('false', 'true', 'maybe'),  # boolean
                   suffix=r'\b'), Name.Constant),
            (words(('this', 'init'),  # class related keywords
                   prefix=r'\b', suffix=r'\b\??:?'), Name.Builtin.Pseudo),
            (r'`.`', String.Char),  # character
            (r'\\\w+\b\??:?', Name.Property),  # array index
            (r'#\w+', Name.Constant),  # color
            (r'\b[0-9]+\.[0-9]+', Number.Float),  # float
            (r'\b[0-9]+', Number.Integer),  # integer
            (r'\w+\b\??:', Name.Label),  # label

            # Note: Literals can be labeled too
            (r'\'(?:\w+\b\??:?)', Keyword.Declaration),  # literal
            (r'\:\w+', Keyword.Type),  # type
            # Note: Attributes can be labeled too
            (r'\.\w+\??:?', Name.Attribute),  # attributes

            # Switch structure
            (r'(\()(.*?)(\)\?)',
             bygroups(Punctuation, using(this), Punctuation)),

            # Single Line Strings
            (r'"', String.Double, 'inside-simple-string'),
            (r'»', String.Single, 'inside-smart-string'),
            (r'«««', String.Double, 'inside-safe-string'),
            (r'\{\/', String.Single, 'inside-regex-string'),

            # Multi Line Strings
            (r'\{\:', String.Double, 'inside-curly-verb-string'),
            (r'(\{)(\!)(\w+)(\s|\n)([\w\W]*?)(^\})', handle_annotated_strings),
            (r'\{', String.Single, 'inside-curly-string'),
            (r'\-{3,}', String.Single, 'inside-eof-string'),

            include('builtin-functions'),

            # Operators
            (r'[()[\],]', Punctuation),
            (words(('->', '==>', '|', '::', '@', '#',  # sugar syntax
                    '$', '&', '!', '!!', './')), Name.Decorator),
            # Fixed: the second symbol in the pair below was an empty string,
            # which creates a zero-width alternative; restored the empty-set
            # glyph '∅' that pairs with 'ø'.
            (words(('<:', ':>', ':<', '>:', '<\\', '<>', '<', '>',
                    'ø', '∅',
                    '+', '-', '*', '~', '=', '^', '%', '/', '//',
                    '==>', '<=>', '<==>',
                    '=>>', '<<=>>', '<<==>>',
                    '-->', '<->', '<-->',
                    '=|', '|=', '-:', ':-',
                    '_', '.', '..', '\\')), Operator),

            (r'\b\w+', Name),
            (r'\s+', Text.Whitespace),
            (r'.+$', Error),
        ],

        'inside-interpol': [
            (r'\|', String.Interpol, '#pop'),
            (r'[^|]+', using(this)),
        ],
        'inside-template': [
            (r'\|\|\>', String.Interpol, '#pop'),
            (r'[^|]+', using(this)),
        ],
        'string-escape': [
            (words(('\\\\', '\\n', '\\t', '\\"')), String.Escape),
        ],

        'inside-simple-string': [
            include('string-escape'),
            (r'\|', String.Interpol, 'inside-interpol'),  # Interpolation
            (r'\<\|\|', String.Interpol, 'inside-template'),  # Templates
            (r'"', String.Double, '#pop'),  # Closing Quote
            (r'[^|"]+', String)  # String Content
        ],
        'inside-smart-string': [
            include('string-escape'),
            (r'\|', String.Interpol, 'inside-interpol'),  # Interpolation
            (r'\<\|\|', String.Interpol, 'inside-template'),  # Templates
            (r'\n', String.Single, '#pop'),  # Closing Quote
            (r'[^|\n]+', String)  # String Content
        ],
        'inside-safe-string': [
            include('string-escape'),
            (r'\|', String.Interpol, 'inside-interpol'),  # Interpolation
            (r'\<\|\|', String.Interpol, 'inside-template'),  # Templates
            (r'»»»', String.Double, '#pop'),  # Closing Quote
            (r'[^|»]+', String)  # String Content
        ],
        'inside-regex-string': [
            (r'\\[sSwWdDbBZApPxucItnvfr0]+', String.Escape),
            (r'\|', String.Interpol, 'inside-interpol'),  # Interpolation
            (r'\<\|\|', String.Interpol, 'inside-template'),  # Templates
            (r'\/\}', String.Single, '#pop'),  # Closing Quote
            (r'[^|\/]+', String.Regex),  # String Content
        ],
        'inside-curly-verb-string': [
            include('string-escape'),
            (r'\|', String.Interpol, 'inside-interpol'),  # Interpolation
            (r'\<\|\|', String.Interpol, 'inside-template'),  # Templates
            (r'\:\}', String.Double, '#pop'),  # Closing Quote
            (r'[^|<:]+', String),  # String Content
        ],
        'inside-curly-string': [
            include('string-escape'),
            (r'\|', String.Interpol, 'inside-interpol'),  # Interpolation
            (r'\<\|\|', String.Interpol, 'inside-template'),  # Templates
            (r'\}', String.Single, '#pop'),  # Closing Quote
            (r'[^|<}]+', String),  # String Content
        ],
        'inside-eof-string': [
            include('string-escape'),
            (r'\|', String.Interpol, 'inside-interpol'),  # Interpolation
            (r'\<\|\|', String.Interpol, 'inside-template'),  # Templates
            (r'\Z', String.Single, '#pop'),  # Closing Quote
            (r'[^|<]+', String),  # String Content
        ],

        'builtin-functions': [
            # Predicate builtins (callable with a trailing '?').
            # Fixed three typos in this tuple: "'block' 'char'" was an
            # accidental string concatenation ('blockchar'), and 'floatin' /
            # 'ymbol' were truncations of 'floating' / 'symbol'.
            (words((
                'all', 'and', 'any', 'ascii', 'attr', 'attribute',
                'attributeLabel', 'binary', 'block', 'char', 'contains',
                'database', 'date', 'dictionary', 'empty', 'equal', 'even',
                'every', 'exists', 'false', 'floating', 'function', 'greater',
                'greaterOrEqual', 'if', 'in', 'inline', 'integer', 'is',
                'key', 'label', 'leap', 'less', 'lessOrEqual', 'literal',
                'logical', 'lower', 'nand', 'negative', 'nor', 'not',
                'notEqual', 'null', 'numeric', 'odd', 'or', 'path',
                'pathLabel', 'positive', 'prefix', 'prime', 'set', 'some',
                'sorted', 'standalone', 'string', 'subset', 'suffix',
                'superset', 'symbol', 'true', 'try', 'type', 'unless', 'upper',
                'when', 'whitespace', 'word', 'xnor', 'xor', 'zero',
            ), prefix=r'\b', suffix=r'\b\?'), Name.Builtin),
            (words((
                'abs', 'acos', 'acosh', 'acsec', 'acsech', 'actan', 'actanh',
                'add', 'after', 'alphabet', 'and', 'angle', 'append', 'arg',
                'args', 'arity', 'array', 'as', 'asec', 'asech', 'asin',
                'asinh', 'atan', 'atan2', 'atanh', 'attr', 'attrs', 'average',
                'before', 'benchmark', 'blend', 'break', 'builtins1',
                'builtins2', 'call', 'capitalize', 'case', 'ceil', 'chop',
                'chunk', 'clear', 'close', 'cluster', 'color', 'combine',
                'conj', 'continue', 'copy', 'cos', 'cosh', 'couple', 'csec',
                'csech', 'ctan', 'ctanh', 'cursor', 'darken', 'dec', 'decode',
                'decouple', 'define', 'delete', 'desaturate', 'deviation',
                'dictionary', 'difference', 'digest', 'digits', 'div', 'do',
                'download', 'drop', 'dup', 'e', 'else', 'empty', 'encode',
                'ensure', 'env', 'epsilon', 'escape', 'execute', 'exit', 'exp',
                'extend', 'extract', 'factors', 'false', 'fdiv', 'filter',
                'first', 'flatten', 'floor', 'fold', 'from', 'function',
                'gamma', 'gcd', 'get', 'goto', 'hash', 'help', 'hypot', 'if',
                'in', 'inc', 'indent', 'index', 'infinity', 'info', 'input',
                'insert', 'inspect', 'intersection', 'invert', 'join', 'keys',
                'kurtosis', 'last', 'let', 'levenshtein', 'lighten', 'list',
                'ln', 'log', 'loop', 'lower', 'mail', 'map', 'match', 'max',
                'maybe', 'median', 'min', 'mod', 'module', 'mul', 'nand',
                'neg', 'new', 'nor', 'normalize', 'not', 'now', 'null', 'open',
                'or', 'outdent', 'pad', 'panic', 'path', 'pause',
                'permissions', 'permutate', 'pi', 'pop', 'pow', 'powerset',
                'powmod', 'prefix', 'print', 'prints', 'process', 'product',
                'query', 'random', 'range', 'read', 'relative', 'remove',
                'rename', 'render', 'repeat', 'replace', 'request', 'return',
                'reverse', 'round', 'sample', 'saturate', 'script', 'sec',
                'sech', 'select', 'serve', 'set', 'shl', 'shr', 'shuffle',
                'sin', 'sinh', 'size', 'skewness', 'slice', 'sort', 'split',
                'sqrt', 'squeeze', 'stack', 'strip', 'sub', 'suffix', 'sum',
                'switch', 'symbols', 'symlink', 'sys', 'take', 'tan', 'tanh',
                'terminal', 'to', 'true', 'truncate', 'try', 'type', 'union',
                'unique', 'unless', 'until', 'unzip', 'upper', 'values', 'var',
                'variance', 'volume', 'webview', 'while', 'with', 'wordwrap',
                'write', 'xnor', 'xor', 'zip'
            ), prefix=r'\b', suffix=r'\b'), Name.Builtin)
        ],
    }

View File

@@ -0,0 +1,55 @@
"""
pygments.lexers.asc
~~~~~~~~~~~~~~~~~~~
Lexer for various ASCII armored files.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Comment, Generic, Name, Operator, String, Whitespace
__all__ = ['AscLexer']
class AscLexer(RegexLexer):
    """
    Lexer for ASCII armored files, containing `-----BEGIN/END ...-----` wrapped
    base64 data.
    """
    name = 'ASCII armored'
    aliases = ['asc', 'pem']
    filenames = [
        '*.asc',  # PGP; *.gpg, *.pgp, and *.sig too, but those can be binary
        '*.pem',  # X.509; *.cer, *.crt, *.csr, and key etc too, but those can be binary
        'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk',
        'id_rsa',  # SSH private keys
    ]
    mimetypes = ['application/pgp-keys', 'application/pgp-encrypted',
                 'application/pgp-signature', 'application/pem-certificate-chain']
    url = 'https://www.openpgp.org'
    version_added = '2.10'
    flags = re.MULTILINE
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            # armor header line starts the 'data' state
            (r'^-----BEGIN [^\n]+-----$', Generic.Heading, 'data'),
            (r'\S+', Comment),
        ],
        'data': [
            (r'\s+', Whitespace),
            # "Key: value" armor headers (e.g. Version:, Comment:)
            (r'^([^:]+)(:)([ \t]+)(.*)',
             bygroups(Name.Attribute, Operator, Whitespace, String)),
            (r'^-----END [^\n]+-----$', Generic.Heading, 'root'),
            (r'\S+', String),
        ],
    }

    def analyse_text(text):
        # Returns a truthy score (1.0) when the armor header is present;
        # falls through to None (treated as 0) otherwise.
        if re.search(r'^-----BEGIN [^\n]+-----\r?\n', text):
            return True

File diff suppressed because it is too large. [Load Diff]

View File

@@ -0,0 +1,178 @@
"""
pygments.lexers.asn1
~~~~~~~~~~~~~~~~~~~~
Pygments lexers for ASN.1.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
from pygments.lexer import RegexLexer, words, bygroups
__all__ = ['Asn1Lexer']
# Bare keywords of the ASN.1 module language (highlighted as Keyword).
SINGLE_WORD_KEYWORDS = [
    "ENCODED",
    "ABSTRACT-SYNTAX",
    "END",
    "APPLICATION",
    "EXPLICIT",
    "IMPLICIT",
    "AUTOMATIC",
    "TAGS",
    "BEGIN",
    "EXTENSIBILITY",
    "BY",
    "FROM",
    "COMPONENT",
    "UNIVERSAL",
    "COMPONENTS",
    "CONSTRAINED",
    "IMPLIED",
    "DEFINITIONS",
    "INCLUDES",
    "PRIVATE",
    "WITH",
    "OF",
]
# Set-arithmetic words used inside constraints (highlighted as Operator.Word).
OPERATOR_WORDS = [
    "EXCEPT",
    "UNION",
    "INTERSECTION",
]
# Words that open an import/export section (highlighted as namespace keywords).
SINGLE_WORD_NAMESPACE_KEYWORDS = [
    "EXPORTS",
    "IMPORTS",
]
# Multi-word declaration phrases; spaces may be any run of whitespace
# (see word_sequences below).
MULTI_WORDS_DECLARATIONS = [
    "SEQUENCE OF",
    "SET OF",
    "INSTANCE OF",
    "WITH SYNTAX",
]
# Single-word declaration keywords (highlighted as Keyword.Declaration).
SINGLE_WORDS_DECLARATIONS = [
    "SIZE",
    "SEQUENCE",
    "SET",
    "CLASS",
    "UNIQUE",
    "DEFAULT",
    "CHOICE",
    "PATTERN",
    "OPTIONAL",
    "PRESENT",
    "ABSENT",
    "CONTAINING",
    "ENUMERATED",
    "ALL",
]
# Two-word builtin type names (highlighted as Keyword.Type).
TWO_WORDS_TYPES = [
    "OBJECT IDENTIFIER",
    "BIT STRING",
    "OCTET STRING",
    "CHARACTER STRING",
    "EMBEDDED PDV",
]
# Single-word builtin type names (highlighted as Keyword.Type).
SINGLE_WORD_TYPES = [
    "RELATIVE-OID",
    "TYPE-IDENTIFIER",
    "ObjectDescriptor",
    "IA5String",
    "INTEGER",
    "ISO646String",
    "T61String",
    "BMPString",
    "NumericString",
    "TeletexString",
    "GeneralizedTime",
    "REAL",
    "BOOLEAN",
    "GeneralString",
    "GraphicString",
    "UniversalString",
    "UTCTime",
    "VisibleString",
    "UTF8String",
    "PrintableString",
    "VideotexString",
    "EXTERNAL",
]
def word_sequences(tokens):
    """Build a regex alternation matching any phrase in *tokens*.

    Each space inside a phrase is widened to ``\\s+`` so the words may be
    separated by arbitrary whitespace; the whole group is anchored with a
    trailing word boundary.
    """
    alternatives = [phrase.replace(' ', r'\s+') for phrase in tokens]
    return '({})\\b'.format('|'.join(alternatives))
class Asn1Lexer(RegexLexer):
    """
    Lexer for ASN.1 module definition
    """
    flags = re.MULTILINE
    name = 'ASN.1'
    aliases = ['asn1']
    filenames = ["*.asn1"]
    url = "https://www.itu.int/ITU-T/studygroups/com17/languages/X.680-0207.pdf"
    version_added = '2.16'
    tokens = {
        'root': [
            # Whitespace:
            (r'\s+', Whitespace),
            # Comments:
            (r'--.*$', Comment.Single),
            (r'/\*', Comment.Multiline, 'comment'),
            # Numbers:
            (r'\d+\.\d*([eE][-+]?\d+)?', Number.Float),
            (r'\d+', Number.Integer),
            # Identifier:
            (r"&?[a-z][-a-zA-Z0-9]*[a-zA-Z0-9]\b", Name.Variable),
            # Constants:
            (words(("TRUE", "FALSE", "NULL", "MINUS-INFINITY", "PLUS-INFINITY", "MIN", "MAX"), suffix=r'\b'), Keyword.Constant),
            # Builtin types:
            (word_sequences(TWO_WORDS_TYPES), Keyword.Type),
            (words(SINGLE_WORD_TYPES, suffix=r'\b'), Keyword.Type),
            # Other keywords:
            (r"EXPORTS\s+ALL\b", Keyword.Namespace),
            (words(SINGLE_WORD_NAMESPACE_KEYWORDS, suffix=r'\b'), Operator.Namespace),
            (word_sequences(MULTI_WORDS_DECLARATIONS), Keyword.Declaration),
            (words(SINGLE_WORDS_DECLARATIONS, suffix=r'\b'), Keyword.Declaration),
            (words(OPERATOR_WORDS, suffix=r'\b'), Operator.Word),
            # NOTE(review): no suffix=r'\b' here, unlike the rules above —
            # looks unintentional but preserved as-is; confirm upstream.
            (words(SINGLE_WORD_KEYWORDS), Keyword),
            # Type identifier:
            (r"&?[A-Z][-a-zA-Z0-9]*[a-zA-Z0-9]\b", Name.Type),
            # Operators:
            (r"(::=|\.\.\.|\.\.|\[\[|\]\]|\||\^)", Operator),
            # Punctuation:
            (r"(\.|,|\{|\}|\(|\)|\[|\])", Punctuation),
            # String:
            (r'"', String, 'string'),
            # Binary string:
            (r"('[01 ]*')(B)\b", bygroups(String, String.Affix)),
            (r"('[0-9A-F ]*')(H)\b",bygroups(String, String.Affix)),
        ],
        'comment': [
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        'string': [
            # doubled quote is an escaped quote inside an ASN.1 string
            (r'""', String),
            (r'"', String, "#pop"),
            (r'[^"]', String),
        ]
    }

View File

@@ -0,0 +1,379 @@
"""
pygments.lexers.automation
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for automation scripting languages.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, combined
from pygments.token import Text, Comment, Operator, Name, String, \
Number, Punctuation, Generic
__all__ = ['AutohotkeyLexer', 'AutoItLexer']
class AutohotkeyLexer(RegexLexer):
    """
    For autohotkey source code.

    Command, function and variable names are matched from large literal
    alternations below; rule order in 'root' is significant.
    """
    name = 'autohotkey'
    url = 'http://www.autohotkey.com/'
    aliases = ['autohotkey', 'ahk']
    filenames = ['*.ahk', '*.ahkl']
    mimetypes = ['text/x-autohotkey']
    version_added = '1.4'
    tokens = {
        'root': [
            (r'^(\s*)(/\*)', bygroups(Text, Comment.Multiline), 'incomment'),
            (r'^(\s*)(\()', bygroups(Text, Generic), 'incontinuation'),
            (r'\s+;.*?$', Comment.Single),
            (r'^;.*?$', Comment.Single),
            (r'[]{}(),;[]', Punctuation),
            (r'(in|is|and|or|not)\b', Operator.Word),
            # %variable% dereference
            (r'\%[a-zA-Z_#@$][\w#@$]*\%', Name.Variable),
            (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator),
            include('commands'),
            include('labels'),
            include('builtInFunctions'),
            include('builtInVariables'),
            (r'"', String, combined('stringescape', 'dqs')),
            include('numbers'),
            (r'[a-zA-Z_#@$][\w#@$]*', Name),
            (r'\\|\'', Text),
            # backtick escapes
            (r'\`([,%`abfnrtv\-+;])', String.Escape),
            include('garbage'),
        ],
        'incomment': [
            (r'^\s*\*/', Comment.Multiline, '#pop'),
            (r'[^*]+', Comment.Multiline),
            (r'\*', Comment.Multiline)
        ],
        # parenthesised line-continuation section
        'incontinuation': [
            (r'^\s*\)', Generic, '#pop'),
            (r'[^)]', Generic),
            (r'[)]', Generic),
        ],
        'commands': [
            # AutoHotkey commands and directives (case-insensitive)
            (r'(?i)^(\s*)(global|local|static|'
             r'#AllowSameLineComments|#ClipboardTimeout|#CommentFlag|'
             r'#ErrorStdOut|#EscapeChar|#HotkeyInterval|#HotkeyModifierTimeout|'
             r'#Hotstring|#IfWinActive|#IfWinExist|#IfWinNotActive|'
             r'#IfWinNotExist|#IncludeAgain|#Include|#InstallKeybdHook|'
             r'#InstallMouseHook|#KeyHistory|#LTrim|#MaxHotkeysPerInterval|'
             r'#MaxMem|#MaxThreads|#MaxThreadsBuffer|#MaxThreadsPerHotkey|'
             r'#NoEnv|#NoTrayIcon|#Persistent|#SingleInstance|#UseHook|'
             r'#WinActivateForce|AutoTrim|BlockInput|Break|Click|ClipWait|'
             r'Continue|Control|ControlClick|ControlFocus|ControlGetFocus|'
             r'ControlGetPos|ControlGetText|ControlGet|ControlMove|ControlSend|'
             r'ControlSendRaw|ControlSetText|CoordMode|Critical|'
             r'DetectHiddenText|DetectHiddenWindows|Drive|DriveGet|'
             r'DriveSpaceFree|Edit|Else|EnvAdd|EnvDiv|EnvGet|EnvMult|EnvSet|'
             r'EnvSub|EnvUpdate|Exit|ExitApp|FileAppend|'
             r'FileCopy|FileCopyDir|FileCreateDir|FileCreateShortcut|'
             r'FileDelete|FileGetAttrib|FileGetShortcut|FileGetSize|'
             r'FileGetTime|FileGetVersion|FileInstall|FileMove|FileMoveDir|'
             r'FileRead|FileReadLine|FileRecycle|FileRecycleEmpty|'
             r'FileRemoveDir|FileSelectFile|FileSelectFolder|FileSetAttrib|'
             r'FileSetTime|FormatTime|GetKeyState|Gosub|Goto|GroupActivate|'
             r'GroupAdd|GroupClose|GroupDeactivate|Gui|GuiControl|'
             r'GuiControlGet|Hotkey|IfEqual|IfExist|IfGreaterOrEqual|IfGreater|'
             r'IfInString|IfLess|IfLessOrEqual|IfMsgBox|IfNotEqual|IfNotExist|'
             r'IfNotInString|IfWinActive|IfWinExist|IfWinNotActive|'
             r'IfWinNotExist|If |ImageSearch|IniDelete|IniRead|IniWrite|'
             r'InputBox|Input|KeyHistory|KeyWait|ListHotkeys|ListLines|'
             r'ListVars|Loop|Menu|MouseClickDrag|MouseClick|MouseGetPos|'
             r'MouseMove|MsgBox|OnExit|OutputDebug|Pause|PixelGetColor|'
             r'PixelSearch|PostMessage|Process|Progress|Random|RegDelete|'
             r'RegRead|RegWrite|Reload|Repeat|Return|RunAs|RunWait|Run|'
             r'SendEvent|SendInput|SendMessage|SendMode|SendPlay|SendRaw|Send|'
             r'SetBatchLines|SetCapslockState|SetControlDelay|'
             r'SetDefaultMouseSpeed|SetEnv|SetFormat|SetKeyDelay|'
             r'SetMouseDelay|SetNumlockState|SetScrollLockState|'
             r'SetStoreCapslockMode|SetTimer|SetTitleMatchMode|'
             r'SetWinDelay|SetWorkingDir|Shutdown|Sleep|Sort|SoundBeep|'
             r'SoundGet|SoundGetWaveVolume|SoundPlay|SoundSet|'
             r'SoundSetWaveVolume|SplashImage|SplashTextOff|SplashTextOn|'
             r'SplitPath|StatusBarGetText|StatusBarWait|StringCaseSense|'
             r'StringGetPos|StringLeft|StringLen|StringLower|StringMid|'
             r'StringReplace|StringRight|StringSplit|StringTrimLeft|'
             r'StringTrimRight|StringUpper|Suspend|SysGet|Thread|ToolTip|'
             r'Transform|TrayTip|URLDownloadToFile|While|WinActivate|'
             r'WinActivateBottom|WinClose|WinGetActiveStats|WinGetActiveTitle|'
             r'WinGetClass|WinGetPos|WinGetText|WinGetTitle|WinGet|WinHide|'
             r'WinKill|WinMaximize|WinMenuSelectItem|WinMinimizeAllUndo|'
             r'WinMinimizeAll|WinMinimize|WinMove|WinRestore|WinSetTitle|'
             r'WinSet|WinShow|WinWaitActive|WinWaitClose|WinWaitNotActive|'
             r'WinWait)\b', bygroups(Text, Name.Builtin)),
        ],
        'builtInFunctions': [
            # built-in function names (case-insensitive)
            (r'(?i)(Abs|ACos|Asc|ASin|ATan|Ceil|Chr|Cos|DllCall|Exp|FileExist|'
             r'Floor|GetKeyState|IL_Add|IL_Create|IL_Destroy|InStr|IsFunc|'
             r'IsLabel|Ln|Log|LV_Add|LV_Delete|LV_DeleteCol|LV_GetCount|'
             r'LV_GetNext|LV_GetText|LV_Insert|LV_InsertCol|LV_Modify|'
             r'LV_ModifyCol|LV_SetImageList|Mod|NumGet|NumPut|OnMessage|'
             r'RegExMatch|RegExReplace|RegisterCallback|Round|SB_SetIcon|'
             r'SB_SetParts|SB_SetText|Sin|Sqrt|StrLen|SubStr|Tan|TV_Add|'
             r'TV_Delete|TV_GetChild|TV_GetCount|TV_GetNext|TV_Get|'
             r'TV_GetParent|TV_GetPrev|TV_GetSelection|TV_GetText|TV_Modify|'
             r'VarSetCapacity|WinActive|WinExist|Object|ComObjActive|'
             r'ComObjArray|ComObjEnwrap|ComObjUnwrap|ComObjParameter|'
             r'ComObjType|ComObjConnect|ComObjCreate|ComObjGet|ComObjError|'
             r'ComObjValue|Insert|MinIndex|MaxIndex|Remove|SetCapacity|'
             r'GetCapacity|GetAddress|_NewEnum|FileOpen|Read|Write|ReadLine|'
             r'WriteLine|ReadNumType|WriteNumType|RawRead|RawWrite|Seek|Tell|'
             r'Close|Next|IsObject|StrPut|StrGet|Trim|LTrim|RTrim)\b',
             Name.Function),
        ],
        'builtInVariables': [
            # built-in A_* variables and other predefined names
            (r'(?i)(A_AhkPath|A_AhkVersion|A_AppData|A_AppDataCommon|'
             r'A_AutoTrim|A_BatchLines|A_CaretX|A_CaretY|A_ComputerName|'
             r'A_ControlDelay|A_Cursor|A_DDDD|A_DDD|A_DD|A_DefaultMouseSpeed|'
             r'A_Desktop|A_DesktopCommon|A_DetectHiddenText|'
             r'A_DetectHiddenWindows|A_EndChar|A_EventInfo|A_ExitReason|'
             r'A_FormatFloat|A_FormatInteger|A_Gui|A_GuiEvent|A_GuiControl|'
             r'A_GuiControlEvent|A_GuiHeight|A_GuiWidth|A_GuiX|A_GuiY|A_Hour|'
             r'A_IconFile|A_IconHidden|A_IconNumber|A_IconTip|A_Index|'
             r'A_IPAddress1|A_IPAddress2|A_IPAddress3|A_IPAddress4|A_ISAdmin|'
             r'A_IsCompiled|A_IsCritical|A_IsPaused|A_IsSuspended|A_KeyDelay|'
             r'A_Language|A_LastError|A_LineFile|A_LineNumber|A_LoopField|'
             r'A_LoopFileAttrib|A_LoopFileDir|A_LoopFileExt|A_LoopFileFullPath|'
             r'A_LoopFileLongPath|A_LoopFileName|A_LoopFileShortName|'
             r'A_LoopFileShortPath|A_LoopFileSize|A_LoopFileSizeKB|'
             r'A_LoopFileSizeMB|A_LoopFileTimeAccessed|A_LoopFileTimeCreated|'
             r'A_LoopFileTimeModified|A_LoopReadLine|A_LoopRegKey|'
             r'A_LoopRegName|A_LoopRegSubkey|A_LoopRegTimeModified|'
             r'A_LoopRegType|A_MDAY|A_Min|A_MM|A_MMM|A_MMMM|A_Mon|A_MouseDelay|'
             r'A_MSec|A_MyDocuments|A_Now|A_NowUTC|A_NumBatchLines|A_OSType|'
             r'A_OSVersion|A_PriorHotkey|A_ProgramFiles|A_Programs|'
             r'A_ProgramsCommon|A_ScreenHeight|A_ScreenWidth|A_ScriptDir|'
             r'A_ScriptFullPath|A_ScriptName|A_Sec|A_Space|A_StartMenu|'
             r'A_StartMenuCommon|A_Startup|A_StartupCommon|A_StringCaseSense|'
             r'A_Tab|A_Temp|A_ThisFunc|A_ThisHotkey|A_ThisLabel|A_ThisMenu|'
             r'A_ThisMenuItem|A_ThisMenuItemPos|A_TickCount|A_TimeIdle|'
             r'A_TimeIdlePhysical|A_TimeSincePriorHotkey|A_TimeSinceThisHotkey|'
             r'A_TitleMatchMode|A_TitleMatchModeSpeed|A_UserName|A_WDay|'
             r'A_WinDelay|A_WinDir|A_WorkingDir|A_YDay|A_YEAR|A_YWeek|A_YYYY|'
             r'Clipboard|ClipboardAll|ComSpec|ErrorLevel|ProgramFiles|True|'
             r'False|A_IsUnicode|A_FileEncoding|A_OSVersion|A_PtrSize)\b',
             Name.Variable),
        ],
        'labels': [
            # hotkeys and labels
            # technically, hotkey names are limited to named keys and buttons
            (r'(^\s*)([^:\s("]+?:{1,2})', bygroups(Text, Name.Label)),
            (r'(^\s*)(::[^:\s]+?::)', bygroups(Text, Name.Label)),
        ],
        'numbers': [
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+', Number.Float),
            (r'0\d+', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer)
        ],
        'stringescape': [
            (r'\"\"|\`([,%`abfnrtv])', String.Escape),
        ],
        'strings': [
            (r'[^"\n]+', String),
        ],
        'dqs': [
            (r'"', String, '#pop'),
            include('strings')
        ],
        'garbage': [
            (r'[^\S\n]', Text),
            # (r'.', Text),  # no cheating
        ],
    }
class AutoItLexer(RegexLexer):
    """
    For AutoIt files.

    AutoIt is a freeware BASIC-like scripting language
    designed for automating the Windows GUI and general scripting.
    """
    name = 'AutoIt'
    url = 'http://www.autoitscript.com/site/autoit/'
    aliases = ['autoit']
    filenames = ['*.au3']
    mimetypes = ['text/x-autoit']
    version_added = '1.6'

    # Keywords, functions, macros from au3.keywords.properties
    # which can be found in AutoIt installed directory, e.g.
    # c:\Program Files (x86)\AutoIt3\SciTE\au3.keywords.properties
    keywords = """\
#include-once #include #endregion #forcedef #forceref #region
and byref case continueloop dim do else elseif endfunc endif
endselect exit exitloop for func global
if local next not or return select step
then to until wend while exit""".split()
    functions = """\
abs acos adlibregister adlibunregister asc ascw asin assign atan
autoitsetoption autoitwingettitle autoitwinsettitle beep binary binarylen
binarymid binarytostring bitand bitnot bitor bitrotate bitshift bitxor
blockinput break call cdtray ceiling chr chrw clipget clipput consoleread
consolewrite consolewriteerror controlclick controlcommand controldisable
controlenable controlfocus controlgetfocus controlgethandle controlgetpos
controlgettext controlhide controllistview controlmove controlsend
controlsettext controlshow controltreeview cos dec dircopy dircreate
dirgetsize dirmove dirremove dllcall dllcalladdress dllcallbackfree
dllcallbackgetptr dllcallbackregister dllclose dllopen dllstructcreate
dllstructgetdata dllstructgetptr dllstructgetsize dllstructsetdata
drivegetdrive drivegetfilesystem drivegetlabel drivegetserial drivegettype
drivemapadd drivemapdel drivemapget drivesetlabel drivespacefree
drivespacetotal drivestatus envget envset envupdate eval execute exp
filechangedir fileclose filecopy filecreatentfslink filecreateshortcut
filedelete fileexists filefindfirstfile filefindnextfile fileflush
filegetattrib filegetencoding filegetlongname filegetpos filegetshortcut
filegetshortname filegetsize filegettime filegetversion fileinstall filemove
fileopen fileopendialog fileread filereadline filerecycle filerecycleempty
filesavedialog fileselectfolder filesetattrib filesetpos filesettime
filewrite filewriteline floor ftpsetproxy guicreate guictrlcreateavi
guictrlcreatebutton guictrlcreatecheckbox guictrlcreatecombo
guictrlcreatecontextmenu guictrlcreatedate guictrlcreatedummy
guictrlcreateedit guictrlcreategraphic guictrlcreategroup guictrlcreateicon
guictrlcreateinput guictrlcreatelabel guictrlcreatelist
guictrlcreatelistview guictrlcreatelistviewitem guictrlcreatemenu
guictrlcreatemenuitem guictrlcreatemonthcal guictrlcreateobj
guictrlcreatepic guictrlcreateprogress guictrlcreateradio
guictrlcreateslider guictrlcreatetab guictrlcreatetabitem
guictrlcreatetreeview guictrlcreatetreeviewitem guictrlcreateupdown
guictrldelete guictrlgethandle guictrlgetstate guictrlread guictrlrecvmsg
guictrlregisterlistviewsort guictrlsendmsg guictrlsendtodummy
guictrlsetbkcolor guictrlsetcolor guictrlsetcursor guictrlsetdata
guictrlsetdefbkcolor guictrlsetdefcolor guictrlsetfont guictrlsetgraphic
guictrlsetimage guictrlsetlimit guictrlsetonevent guictrlsetpos
guictrlsetresizing guictrlsetstate guictrlsetstyle guictrlsettip guidelete
guigetcursorinfo guigetmsg guigetstyle guiregistermsg guisetaccelerators
guisetbkcolor guisetcoord guisetcursor guisetfont guisethelp guiseticon
guisetonevent guisetstate guisetstyle guistartgroup guiswitch hex hotkeyset
httpsetproxy httpsetuseragent hwnd inetclose inetget inetgetinfo inetgetsize
inetread inidelete iniread inireadsection inireadsectionnames
inirenamesection iniwrite iniwritesection inputbox int isadmin isarray
isbinary isbool isdeclared isdllstruct isfloat ishwnd isint iskeyword
isnumber isobj isptr isstring log memgetstats mod mouseclick mouseclickdrag
mousedown mousegetcursor mousegetpos mousemove mouseup mousewheel msgbox
number objcreate objcreateinterface objevent objevent objget objname
onautoitexitregister onautoitexitunregister opt ping pixelchecksum
pixelgetcolor pixelsearch pluginclose pluginopen processclose processexists
processgetstats processlist processsetpriority processwait processwaitclose
progressoff progresson progressset ptr random regdelete regenumkey
regenumval regread regwrite round run runas runaswait runwait send
sendkeepactive seterror setextended shellexecute shellexecutewait shutdown
sin sleep soundplay soundsetwavevolume splashimageon splashoff splashtexton
sqrt srandom statusbargettext stderrread stdinwrite stdioclose stdoutread
string stringaddcr stringcompare stringformat stringfromasciiarray
stringinstr stringisalnum stringisalpha stringisascii stringisdigit
stringisfloat stringisint stringislower stringisspace stringisupper
stringisxdigit stringleft stringlen stringlower stringmid stringregexp
stringregexpreplace stringreplace stringright stringsplit stringstripcr
stringstripws stringtoasciiarray stringtobinary stringtrimleft
stringtrimright stringupper tan tcpaccept tcpclosesocket tcpconnect
tcplisten tcpnametoip tcprecv tcpsend tcpshutdown tcpstartup timerdiff
timerinit tooltip traycreateitem traycreatemenu traygetmsg trayitemdelete
trayitemgethandle trayitemgetstate trayitemgettext trayitemsetonevent
trayitemsetstate trayitemsettext traysetclick trayseticon traysetonevent
traysetpauseicon traysetstate traysettooltip traytip ubound udpbind
udpclosesocket udpopen udprecv udpsend udpshutdown udpstartup vargettype
winactivate winactive winclose winexists winflash wingetcaretpos
wingetclasslist wingetclientsize wingethandle wingetpos wingetprocess
wingetstate wingettext wingettitle winkill winlist winmenuselectitem
winminimizeall winminimizeallundo winmove winsetontop winsetstate
winsettitle winsettrans winwait winwaitactive winwaitclose
winwaitnotactive""".split()
    macros = """\
@appdatacommondir @appdatadir @autoitexe @autoitpid @autoitversion
@autoitx64 @com_eventobj @commonfilesdir @compiled @computername @comspec
@cpuarch @cr @crlf @desktopcommondir @desktopdepth @desktopdir
@desktopheight @desktoprefresh @desktopwidth @documentscommondir @error
@exitcode @exitmethod @extended @favoritescommondir @favoritesdir
@gui_ctrlhandle @gui_ctrlid @gui_dragfile @gui_dragid @gui_dropid
@gui_winhandle @homedrive @homepath @homeshare @hotkeypressed @hour
@ipaddress1 @ipaddress2 @ipaddress3 @ipaddress4 @kblayout @lf
@logondnsdomain @logondomain @logonserver @mday @min @mon @msec @muilang
@mydocumentsdir @numparams @osarch @osbuild @oslang @osservicepack @ostype
@osversion @programfilesdir @programscommondir @programsdir @scriptdir
@scriptfullpath @scriptlinenumber @scriptname @sec @startmenucommondir
@startmenudir @startupcommondir @startupdir @sw_disable @sw_enable @sw_hide
@sw_lock @sw_maximize @sw_minimize @sw_restore @sw_show @sw_showdefault
@sw_showmaximized @sw_showminimized @sw_showminnoactive @sw_showna
@sw_shownoactivate @sw_shownormal @sw_unlock @systemdir @tab @tempdir
@tray_id @trayiconflashing @trayiconvisible @username @userprofiledir @wday
@windowsdir @workingdir @yday @year""".split()

    # Rules are tried top to bottom within a state; first match wins.
    tokens = {
        'root': [
            (r';.*\n', Comment.Single),
            (r'(#comments-start|#cs)(.|\n)*?(#comments-end|#ce)',
             Comment.Multiline),
            (r'[\[\]{}(),;]', Punctuation),
            (r'(and|or|not)\b', Operator.Word),
            # NOTE(review): the class [$|@] also matches a literal '|' as a
            # variable sigil; AutoIt variables start with '$' and macros with
            # '@', so the '|' looks unintended -- confirm before changing.
            (r'[$|@][a-zA-Z_]\w*', Name.Variable),
            (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator),
            include('commands'),
            include('labels'),
            include('builtInFunctions'),
            include('builtInMacros'),
            (r'"', String, combined('stringescape', 'dqs')),
            (r"'", String, 'sqs'),
            include('numbers'),
            (r'[a-zA-Z_#@$][\w#@$]*', Name),
            (r'\\|\'', Text),
            (r'\`([,%`abfnrtv\-+;])', String.Escape),
            (r'_\n', Text),  # Line continuation
            include('garbage'),
        ],
        'commands': [
            (r'(?i)(\s*)({})\b'.format('|'.join(keywords)),
             bygroups(Text, Name.Builtin)),
        ],
        'builtInFunctions': [
            (r'(?i)({})\b'.format('|'.join(functions)),
             Name.Function),
        ],
        # State was historically misspelled 'builtInMarcros'; renamed
        # consistently here (internal state name, no external impact).
        'builtInMacros': [
            (r'(?i)({})\b'.format('|'.join(macros)),
             Name.Variable.Global),
        ],
        'labels': [
            # sendkeys
            (r'(^\s*)(\{\S+?\})', bygroups(Text, Name.Label)),
        ],
        'numbers': [
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+', Number.Float),
            (r'0\d+', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer)
        ],
        'stringescape': [
            # "" is AutoIt's embedded double quote; ` escapes special chars.
            (r'\"\"|\`([,%`abfnrtv])', String.Escape),
        ],
        'strings': [
            (r'[^"\n]+', String),
        ],
        'dqs': [
            (r'"', String, '#pop'),
            include('strings')
        ],
        'sqs': [
            (r'\'\'|\`([,%`abfnrtv])', String.Escape),
            (r"'", String, '#pop'),
            (r"[^'\n]+", String)
        ],
        'garbage': [
            # Swallow stray non-newline whitespace so lexing never stalls.
            (r'[^\S\n]', Text),
        ],
    }

View File

@@ -0,0 +1,101 @@
"""
pygments.lexers.bare
~~~~~~~~~~~~~~~~~~~~
Lexer for the BARE schema.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, bygroups
from pygments.token import Text, Comment, Keyword, Name, Literal, Whitespace
__all__ = ['BareLexer']
class BareLexer(RegexLexer):
    """
    For BARE schema source.
    """
    name = 'BARE'
    url = 'https://baremessages.org'
    filenames = ['*.bare']
    aliases = ['bare']
    version_added = '2.7'

    # Primitive/built-in type names of the BARE schema language.
    keywords = [
        'type',
        'enum',
        'u8',
        'u16',
        'u32',
        'u64',
        'uint',
        'i8',
        'i16',
        'i32',
        'i64',
        'int',
        'f32',
        'f64',
        'bool',
        'void',
        'data',
        'string',
        'optional',
        'map',
    ]

    # Rules are tried top to bottom; the three 'type' rules dispatch to the
    # struct / union / plain-typedef sub-states depending on what follows
    # the type name.
    # NOTE(review): the classes [a-z-A-Z0-9] below also admit a literal '-';
    # presumably [a-zA-Z0-9] was intended -- confirm before changing.
    tokens = {
        'root': [
            (r'(type)(\s+)([A-Z][a-zA-Z0-9]+)(\s+)(\{)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace, Text), 'struct'),
            (r'(type)(\s+)([A-Z][a-zA-Z0-9]+)(\s+)(\()',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace, Text), 'union'),
            (r'(type)(\s+)([A-Z][a-zA-Z0-9]+)(\s+)',
             bygroups(Keyword, Whitespace, Name, Whitespace), 'typedef'),
            (r'(enum)(\s+)([A-Z][a-zA-Z0-9]+)(\s+\{)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace), 'enum'),
            (r'#.*?$', Comment),
            (r'\s+', Whitespace),
        ],
        'struct': [
            (r'\{', Text, '#push'),  # nested struct bodies
            (r'\}', Text, '#pop'),
            (r'([a-zA-Z0-9]+)(:)(\s*)',
             bygroups(Name.Attribute, Text, Whitespace), 'typedef'),
            (r'\s+', Whitespace),
        ],
        'union': [
            (r'\)', Text, '#pop'),
            (r'(\s*)(\|)(\s*)', bygroups(Whitespace, Text, Whitespace)),
            (r'[A-Z][a-zA-Z0-9]+', Name.Class),
            (words(keywords), Keyword),
            (r'\s+', Whitespace),
        ],
        'typedef': [
            (r'\[\]', Text),                       # unsized array marker
            (r'#.*?$', Comment, '#pop'),
            (r'(\[)(\d+)(\])', bygroups(Text, Literal, Text)),  # fixed-size array
            (r'<|>', Text),
            (r'\(', Text, 'union'),
            (r'(\[)([a-z][a-z-A-Z0-9]+)(\])', bygroups(Text, Keyword, Text)),
            (r'(\[)([A-Z][a-z-A-Z0-9]+)(\])', bygroups(Text, Name.Class, Text)),
            (r'([A-Z][a-z-A-Z0-9]+)', Name.Class),
            (words(keywords), Keyword),
            (r'\n', Text, '#pop'),  # typedef ends at end of line
            (r'\{', Text, 'struct'),
            (r'\s+', Whitespace),
            (r'\d+', Literal),
        ],
        'enum': [
            (r'\{', Text, '#push'),
            (r'\}', Text, '#pop'),
            (r'([A-Z][A-Z0-9_]*)(\s*=\s*)(\d+)',
             bygroups(Name.Attribute, Text, Literal)),   # explicit value
            (r'([A-Z][A-Z0-9_]*)', bygroups(Name.Attribute)),  # implicit value
            (r'#.*?$', Comment),
            (r'\s+', Whitespace),
        ],
    }

View File

@@ -0,0 +1,656 @@
"""
pygments.lexers.basic
~~~~~~~~~~~~~~~~~~~~~
Lexers for BASIC like languages (other than VB.net).
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default, words, include
from pygments.token import Comment, Error, Keyword, Name, Number, \
Punctuation, Operator, String, Text, Whitespace
from pygments.lexers import _vbscript_builtins
__all__ = ['BlitzBasicLexer', 'BlitzMaxLexer', 'MonkeyLexer', 'CbmBasicV2Lexer',
'QBasicLexer', 'VBScriptLexer', 'BBCBasicLexer']
class BlitzMaxLexer(RegexLexer):
    """
    For BlitzMax source code.
    """
    name = 'BlitzMax'
    url = 'http://blitzbasic.com'
    aliases = ['blitzmax', 'bmax']
    filenames = ['*.bmx']
    mimetypes = ['text/x-bmx']
    version_added = '1.4'

    # Regex fragments composed into the variable/function patterns below:
    # word operators, shorthand type sigils (@, @@, !, #, $, %), long-hand
    # type keywords, and the identifier shape (case-insensitive via flags).
    bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b'
    bmax_sktypes = r'@{1,2}|[!#$%]'
    bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b'
    bmax_name = r'[a-z_]\w*'
    # A variable: name plus optional type annotation (sigil or ':Type'),
    # optionally followed by 'Ptr'.
    bmax_var = (rf'({bmax_name})(?:(?:([ \t]*)({bmax_sktypes})|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)'
                rf'|([ \t]*)(:)([ \t]*)(?:{bmax_lktypes}|({bmax_name})))(?:([ \t]*)(Ptr))?)')
    # A function reference is a variable pattern followed by '(' (allowing
    # '..' line continuations in between).
    bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])'
    flags = re.MULTILINE | re.IGNORECASE

    # Rules are tried top to bottom within a state; first match wins.
    tokens = {
        'root': [
            # Text
            (r'\s+', Whitespace),
            (r'(\.\.)(\n)', bygroups(Text, Whitespace)),  # Line continuation
            # Comments
            (r"'.*?\n", Comment.Single),
            (r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline),
            # Data types
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]*(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-f]+', Number.Hex),
            (r'\%[10]+', Number.Bin),
            # Other
            (rf'(?:(?:(:)?([ \t]*)(:?{bmax_vopwords}|([+\-*/&|~]))|Or|And|Not|[=<>^]))', Operator),
            (r'[(),.:\[\]]', Punctuation),
            (r'(?:#[\w \t]*)', Name.Label),
            (r'(?:\?[\w \t]*)', Comment.Preproc),
            # Identifiers
            (rf'\b(New)\b([ \t]?)([(]?)({bmax_name})',
             bygroups(Keyword.Reserved, Whitespace, Punctuation, Name.Class)),
            (rf'\b(Import|Framework|Module)([ \t]+)({bmax_name}\.{bmax_name})',
             bygroups(Keyword.Reserved, Whitespace, Keyword.Namespace)),
            # bmax_func must precede bmax_var so 'name(' lexes as a function.
            (bmax_func, bygroups(Name.Function, Whitespace, Keyword.Type,
                                 Operator, Whitespace, Punctuation, Whitespace,
                                 Keyword.Type, Name.Class, Whitespace,
                                 Keyword.Type, Whitespace, Punctuation)),
            (bmax_var, bygroups(Name.Variable, Whitespace, Keyword.Type, Operator,
                                Whitespace, Punctuation, Whitespace, Keyword.Type,
                                Name.Class, Whitespace, Keyword.Type)),
            (rf'\b(Type|Extends)([ \t]+)({bmax_name})',
             bygroups(Keyword.Reserved, Whitespace, Name.Class)),
            # Keywords
            (r'\b(Ptr)\b', Keyword.Type),
            (r'\b(Pi|True|False|Null|Self|Super)\b', Keyword.Constant),
            (r'\b(Local|Global|Const|Field)\b', Keyword.Declaration),
            (words((
                'TNullMethodException', 'TNullFunctionException',
                'TNullObjectException', 'TArrayBoundsException',
                'TRuntimeException'), prefix=r'\b', suffix=r'\b'), Name.Exception),
            (words((
                'Strict', 'SuperStrict', 'Module', 'ModuleInfo',
                'End', 'Return', 'Continue', 'Exit', 'Public', 'Private',
                'Var', 'VarPtr', 'Chr', 'Len', 'Asc', 'SizeOf', 'Sgn', 'Abs', 'Min', 'Max',
                'New', 'Release', 'Delete', 'Incbin', 'IncbinPtr', 'IncbinLen',
                'Framework', 'Include', 'Import', 'Extern', 'EndExtern',
                'Function', 'EndFunction', 'Type', 'EndType', 'Extends', 'Method', 'EndMethod',
                'Abstract', 'Final', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
                'For', 'To', 'Next', 'Step', 'EachIn', 'While', 'Wend', 'EndWhile',
                'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default', 'EndSelect',
                'Try', 'Catch', 'EndTry', 'Throw', 'Assert', 'Goto', 'DefData', 'ReadData',
                'RestoreData'), prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            # Final resolve (for variable names and such)
            (rf'({bmax_name})', Name.Variable),
        ],
        'string': [
            (r'""', String.Double),        # embedded double quote
            (r'"C?', String.Double, '#pop'),  # closing quote, optional C suffix
            (r'[^"]+', String.Double),
        ],
    }
class BlitzBasicLexer(RegexLexer):
    """
    For BlitzBasic source code.
    """
    name = 'BlitzBasic'
    url = 'http://blitzbasic.com'
    aliases = ['blitzbasic', 'b3d', 'bplus']
    filenames = ['*.bb', '*.decls']
    mimetypes = ['text/x-bb']
    version_added = '2.0'

    # Type sigils (@, @@, #, $, %), identifier shape (case-insensitive via
    # flags), and the combined name-with-optional-type pattern.
    bb_sktypes = r'@{1,2}|[#$%]'
    bb_name = r'[a-z]\w*'
    bb_var = (rf'({bb_name})(?:([ \t]*)({bb_sktypes})|([ \t]*)([.])([ \t]*)(?:({bb_name})))?')
    flags = re.MULTILINE | re.IGNORECASE

    # Rules are tried top to bottom within a state; first match wins.
    tokens = {
        'root': [
            # Text
            (r'\s+', Whitespace),
            # Comments
            (r";.*?\n", Comment.Single),
            # Data types
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]+(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-f]+', Number.Hex),
            (r'\%[10]+', Number.Bin),
            # Other
            (words(('Shl', 'Shr', 'Sar', 'Mod', 'Or', 'And', 'Not',
                    'Abs', 'Sgn', 'Handle', 'Int', 'Float', 'Str',
                    'First', 'Last', 'Before', 'After'),
                   prefix=r'\b', suffix=r'\b'),
             Operator),
            (r'([+\-*/~=<>^])', Operator),
            (r'[(),:\[\]\\]', Punctuation),
            (rf'\.([ \t]*)({bb_name})', Name.Label),
            # Identifiers
            (rf'\b(New)\b([ \t]+)({bb_name})',
             bygroups(Keyword.Reserved, Whitespace, Name.Class)),
            (rf'\b(Gosub|Goto)\b([ \t]+)({bb_name})',
             bygroups(Keyword.Reserved, Whitespace, Name.Label)),
            (rf'\b(Object)\b([ \t]*)([.])([ \t]*)({bb_name})\b',
             bygroups(Operator, Whitespace, Punctuation, Whitespace, Name.Class)),
            # A var pattern followed by '(' lexes as a function reference.
            (rf'\b{bb_var}\b([ \t]*)(\()',
             bygroups(Name.Function, Whitespace, Keyword.Type, Whitespace, Punctuation,
                      Whitespace, Name.Class, Whitespace, Punctuation)),
            (rf'\b(Function)\b([ \t]+){bb_var}',
             bygroups(Keyword.Reserved, Whitespace, Name.Function, Whitespace, Keyword.Type,
                      Whitespace, Punctuation, Whitespace, Name.Class)),
            (rf'\b(Type)([ \t]+)({bb_name})',
             bygroups(Keyword.Reserved, Whitespace, Name.Class)),
            # Keywords
            (r'\b(Pi|True|False|Null)\b', Keyword.Constant),
            (r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration),
            (words((
                'End', 'Return', 'Exit', 'Chr', 'Len', 'Asc', 'New', 'Delete', 'Insert',
                'Include', 'Function', 'Type', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
                'For', 'To', 'Next', 'Step', 'Each', 'While', 'Wend',
                'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default',
                'Goto', 'Gosub', 'Data', 'Read', 'Restore'), prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            # Final resolve (for variable names and such)
            # (r'(%s)' % (bb_name), Name.Variable),
            (bb_var, bygroups(Name.Variable, Whitespace, Keyword.Type,
                              Whitespace, Punctuation, Whitespace, Name.Class)),
        ],
        'string': [
            (r'""', String.Double),           # embedded double quote
            (r'"C?', String.Double, '#pop'),  # closing quote, optional C suffix
            (r'[^"\n]+', String.Double),
        ],
    }
class MonkeyLexer(RegexLexer):
    """
    For Monkey source code.
    """
    name = 'Monkey'
    aliases = ['monkey']
    filenames = ['*.monkey']
    mimetypes = ['text/x-monkey']
    url = 'https://blitzresearch.itch.io/monkeyx'
    version_added = '1.6'

    # Identifier shapes used to classify bare names by their casing.
    name_variable = r'[a-z_]\w*'
    name_function = r'[A-Z]\w*'
    name_constant = r'[A-Z_][A-Z0-9_]*'
    name_class = r'[A-Z]\w*'
    name_module = r'[a-z0-9_]*'
    keyword_type = r'(?:Int|Float|String|Bool|Object|Array|Void)'
    # ? == Bool // % == Int // # == Float // $ == String
    keyword_type_special = r'[?%#$]'
    flags = re.MULTILINE

    # Rules are tried top to bottom within a state; first match wins.
    tokens = {
        'root': [
            # Text
            (r'\s+', Whitespace),
            # Comments
            (r"'.*", Comment),
            (r'(?i)^#rem\b', Comment.Multiline, 'comment'),
            # preprocessor directives
            (r'(?i)^(?:#If|#ElseIf|#Else|#EndIf|#End|#Print|#Error)\b', Comment.Preproc),
            # preprocessor variable (any line starting with '#' that is not a directive)
            (r'^#', Comment.Preproc, 'variables'),
            # String
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]+(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-fA-Z]+', Number.Hex),
            (r'\%[10]+', Number.Bin),
            # Native data types
            (rf'\b{keyword_type}\b', Keyword.Type),
            # Exception handling
            (r'(?i)\b(?:Try|Catch|Throw)\b', Keyword.Reserved),
            (r'Throwable', Name.Exception),
            # Builtins
            (r'(?i)\b(?:Null|True|False)\b', Name.Builtin),
            (r'(?i)\b(?:Self|Super)\b', Name.Builtin.Pseudo),
            (r'\b(?:HOST|LANG|TARGET|CONFIG)\b', Name.Constant),
            # Keywords
            (r'(?i)^(Import)(\s+)(.*)(\n)',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace, Whitespace)),
            (r'(?i)^Strict\b.*\n', Keyword.Reserved),
            (r'(?i)(Const|Local|Global|Field)(\s+)',
             bygroups(Keyword.Declaration, Whitespace), 'variables'),
            (r'(?i)(New|Class|Interface|Extends|Implements)(\s+)',
             bygroups(Keyword.Reserved, Whitespace), 'classname'),
            (r'(?i)(Function|Method)(\s+)',
             bygroups(Keyword.Reserved, Whitespace), 'funcname'),
            (r'(?i)(?:End|Return|Public|Private|Extern|Property|'
             r'Final|Abstract)\b', Keyword.Reserved),
            # Flow Control stuff
            (r'(?i)(?:If|Then|Else|ElseIf|EndIf|'
             r'Select|Case|Default|'
             r'While|Wend|'
             r'Repeat|Until|Forever|'
             r'For|To|Until|Step|EachIn|Next|'
             r'Exit|Continue)(?=\s)', Keyword.Reserved),
            # not used yet
            (r'(?i)\b(?:Module|Inline)\b', Keyword.Reserved),
            # Array
            (r'[\[\]]', Punctuation),
            # Other
            (r'<=|>=|<>|\*=|/=|\+=|-=|&=|~=|\|=|[-&*/^+=<>|~]', Operator),
            (r'(?i)(?:Not|Mod|Shl|Shr|And|Or)', Operator.Word),
            (r'[(){}!#,.:]', Punctuation),
            # catch the rest -- classification by casing convention
            (rf'{name_constant}\b', Name.Constant),
            (rf'{name_function}\b', Name.Function),
            (rf'{name_variable}\b', Name.Variable),
        ],
        'funcname': [
            (rf'(?i){name_function}\b', Name.Function),
            (r':', Punctuation, 'classname'),   # return type annotation
            (r'\s+', Whitespace),
            (r'\(', Punctuation, 'variables'),  # parameter list
            (r'\)', Punctuation, '#pop')
        ],
        'classname': [
            (rf'{name_module}\.', Name.Namespace),
            (rf'{keyword_type}\b', Keyword.Type),
            (rf'{name_class}\b', Name.Class),
            # array (of given size)
            (r'(\[)(\s*)(\d*)(\s*)(\])',
             bygroups(Punctuation, Whitespace, Number.Integer, Whitespace, Punctuation)),
            # generics
            (r'\s+(?!<)', Whitespace, '#pop'),
            (r'<', Punctuation, '#push'),
            (r'>', Punctuation, '#pop'),
            (r'\n', Whitespace, '#pop'),
            default('#pop')
        ],
        'variables': [
            (rf'{name_constant}\b', Name.Constant),
            (rf'{name_variable}\b', Name.Variable),
            (rf'{keyword_type_special}', Keyword.Type),
            (r'\s+', Whitespace),
            (r':', Punctuation, 'classname'),
            (r',', Punctuation, '#push'),  # one state level per declared name
            default('#pop')
        ],
        'string': [
            (r'[^"~]+', String.Double),
            (r'~q|~n|~r|~t|~z|~~', String.Escape),
            (r'"', String.Double, '#pop'),
        ],
        'comment': [
            # #rem blocks nest, hence push/pop on the markers.
            (r'(?i)^#rem.*?', Comment.Multiline, "#push"),
            (r'(?i)^#end.*?', Comment.Multiline, "#pop"),
            (r'\n', Comment.Multiline),
            (r'.+', Comment.Multiline),
        ],
    }
class CbmBasicV2Lexer(RegexLexer):
    """
    For CBM BASIC V2 sources.
    """
    name = 'CBM BASIC V2'
    aliases = ['cbmbas']
    filenames = ['*.bas']
    url = 'https://en.wikipedia.org/wiki/Commodore_BASIC'
    version_added = '1.6'
    flags = re.IGNORECASE

    # Rules are tried top to bottom; first match wins.
    tokens = {
        'root': [
            (r'rem.*\n', Comment.Single),
            (r'\s+', Whitespace),
            (r'new|run|end|for|to|next|step|go(to|sub)?|on|return|stop|cont'
             r'|if|then|input#?|read|wait|load|save|verify|poke|sys|print#?'
             r'|list|clr|cmd|open|close|get#?', Keyword.Reserved),
            (r'data|restore|dim|let|def|fn', Keyword.Declaration),
            (r'tab|spc|sgn|int|abs|usr|fre|pos|sqr|rnd|log|exp|cos|sin|tan|atn'
             r'|peek|len|val|asc|(str|chr|left|right|mid)\$', Name.Builtin),
            (r'[-+*/^<>=]', Operator),
            (r'not|and|or', Operator.Word),
            # String runs to the closing quote or end of line (final '.'
            # consumes the closing character).
            (r'"[^"\n]*.', String),
            (r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float),
            (r'[(),:;]', Punctuation),
            (r'\w+[$%]?', Name),
        ]
    }

    def analyse_text(text):
        """Weakly claim files that start with a line number.

        If it starts with a line number, it shouldn't be a "modern" Basic
        like VB.net; returns 0.2 in that case, implicitly None otherwise.
        """
        if re.match(r'^\d+', text):
            return 0.2
class QBasicLexer(RegexLexer):
    """
    For QBasic source code.
    """
    name = 'QBasic'
    aliases = ['qbasic', 'basic']
    filenames = ['*.BAS', '*.bas']
    mimetypes = ['text/basic']
    url = 'https://en.wikipedia.org/wiki/QBasic'
    version_added = '2.0'

    # Word lists joined into alternation regexes in the states below.
    declarations = ('DATA', 'LET')
    functions = (
        'ABS', 'ASC', 'ATN', 'CDBL', 'CHR$', 'CINT', 'CLNG',
        'COMMAND$', 'COS', 'CSNG', 'CSRLIN', 'CVD', 'CVDMBF', 'CVI',
        'CVL', 'CVS', 'CVSMBF', 'DATE$', 'ENVIRON$', 'EOF', 'ERDEV',
        'ERDEV$', 'ERL', 'ERR', 'EXP', 'FILEATTR', 'FIX', 'FRE',
        'FREEFILE', 'HEX$', 'INKEY$', 'INP', 'INPUT$', 'INSTR', 'INT',
        'IOCTL$', 'LBOUND', 'LCASE$', 'LEFT$', 'LEN', 'LOC', 'LOF',
        'LOG', 'LPOS', 'LTRIM$', 'MID$', 'MKD$', 'MKDMBF$', 'MKI$',
        'MKL$', 'MKS$', 'MKSMBF$', 'OCT$', 'PEEK', 'PEN', 'PLAY',
        'PMAP', 'POINT', 'POS', 'RIGHT$', 'RND', 'RTRIM$', 'SADD',
        'SCREEN', 'SEEK', 'SETMEM', 'SGN', 'SIN', 'SPACE$', 'SPC',
        'SQR', 'STICK', 'STR$', 'STRIG', 'STRING$', 'TAB', 'TAN',
        'TIME$', 'TIMER', 'UBOUND', 'UCASE$', 'VAL', 'VARPTR',
        'VARPTR$', 'VARSEG'
    )
    metacommands = ('$DYNAMIC', '$INCLUDE', '$STATIC')
    operators = ('AND', 'EQV', 'IMP', 'NOT', 'OR', 'XOR')
    # NOTE(review): 'KEY' and 'PUT' appear twice below; harmless for the
    # generated alternation but presumably accidental.
    statements = (
        'BEEP', 'BLOAD', 'BSAVE', 'CALL', 'CALL ABSOLUTE',
        'CALL INTERRUPT', 'CALLS', 'CHAIN', 'CHDIR', 'CIRCLE', 'CLEAR',
        'CLOSE', 'CLS', 'COLOR', 'COM', 'COMMON', 'CONST', 'DATA',
        'DATE$', 'DECLARE', 'DEF FN', 'DEF SEG', 'DEFDBL', 'DEFINT',
        'DEFLNG', 'DEFSNG', 'DEFSTR', 'DEF', 'DIM', 'DO', 'LOOP',
        'DRAW', 'END', 'ENVIRON', 'ERASE', 'ERROR', 'EXIT', 'FIELD',
        'FILES', 'FOR', 'NEXT', 'FUNCTION', 'GET', 'GOSUB', 'GOTO',
        'IF', 'THEN', 'INPUT', 'INPUT #', 'IOCTL', 'KEY', 'KEY',
        'KILL', 'LET', 'LINE', 'LINE INPUT', 'LINE INPUT #', 'LOCATE',
        'LOCK', 'UNLOCK', 'LPRINT', 'LSET', 'MID$', 'MKDIR', 'NAME',
        'ON COM', 'ON ERROR', 'ON KEY', 'ON PEN', 'ON PLAY',
        'ON STRIG', 'ON TIMER', 'ON UEVENT', 'ON', 'OPEN', 'OPEN COM',
        'OPTION BASE', 'OUT', 'PAINT', 'PALETTE', 'PCOPY', 'PEN',
        'PLAY', 'POKE', 'PRESET', 'PRINT', 'PRINT #', 'PRINT USING',
        'PSET', 'PUT', 'PUT', 'RANDOMIZE', 'READ', 'REDIM', 'REM',
        'RESET', 'RESTORE', 'RESUME', 'RETURN', 'RMDIR', 'RSET', 'RUN',
        'SCREEN', 'SEEK', 'SELECT CASE', 'SHARED', 'SHELL', 'SLEEP',
        'SOUND', 'STATIC', 'STOP', 'STRIG', 'SUB', 'SWAP', 'SYSTEM',
        'TIME$', 'TIMER', 'TROFF', 'TRON', 'TYPE', 'UEVENT', 'UNLOCK',
        'VIEW', 'WAIT', 'WHILE', 'WEND', 'WIDTH', 'WINDOW', 'WRITE'
    )
    keywords = (
        'ACCESS', 'ALIAS', 'ANY', 'APPEND', 'AS', 'BASE', 'BINARY',
        'BYVAL', 'CASE', 'CDECL', 'DOUBLE', 'ELSE', 'ELSEIF', 'ENDIF',
        'INTEGER', 'IS', 'LIST', 'LOCAL', 'LONG', 'LOOP', 'MOD',
        'NEXT', 'OFF', 'ON', 'OUTPUT', 'RANDOM', 'SIGNAL', 'SINGLE',
        'STEP', 'STRING', 'THEN', 'TO', 'UNTIL', 'USING', 'WEND'
    )

    # Rules are tried top to bottom; first match wins.
    tokens = {
        'root': [
            (r'\n+', Text),
            (r'\s+', Text.Whitespace),
            # Numbered line whose statement is a REM comment.
            (r'^(\s*)(\d*)(\s*)(REM .*)$',
             bygroups(Text.Whitespace, Name.Label, Text.Whitespace,
                      Comment.Single)),
            # Leading line number acts as a label.
            (r'^(\s*)(\d+)(\s*)',
             bygroups(Text.Whitespace, Name.Label, Text.Whitespace)),
            (r'(?=[\s]*)(\w+)(?=[\s]*=)', Name.Variable.Global),
            (r'(?=[^"]*)\'.*$', Comment.Single),
            (r'"[^\n"]*"', String.Double),
            (r'(END)(\s+)(FUNCTION|IF|SELECT|SUB)',
             bygroups(Keyword.Reserved, Text.Whitespace, Keyword.Reserved)),
            (r'(DECLARE)(\s+)([A-Z]+)(\s+)(\S+)',
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
                      Text.Whitespace, Name)),
            (r'(DIM)(\s+)(SHARED)(\s+)([^\s(]+)',
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
                      Text.Whitespace, Name.Variable.Global)),
            (r'(DIM)(\s+)([^\s(]+)',
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable.Global)),
            (r'^(\s*)([a-zA-Z_]+)(\s*)(\=)',
             bygroups(Text.Whitespace, Name.Variable.Global, Text.Whitespace,
                      Operator)),
            (r'(GOTO|GOSUB)(\s+)(\w+\:?)',
             bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
            (r'(SUB)(\s+)(\w+\:?)',
             bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
            include('declarations'),
            include('functions'),
            include('metacommands'),
            include('operators'),
            include('statements'),
            include('keywords'),
            (r'[a-zA-Z_]\w*[$@#&!]', Name.Variable.Global),
            (r'[a-zA-Z_]\w*\:', Name.Label),
            (r'\-?\d*\.\d+[@|#]?', Number.Float),
            (r'\-?\d+[@|#]', Number.Float),
            (r'\-?\d+#?', Number.Integer.Long),
            # NOTE(review): identical pattern to the previous rule, so this
            # Number.Integer rule is unreachable.
            (r'\-?\d+#?', Number.Integer),
            (r'!=|==|:=|\.=|<<|>>|[-~+/\\*%=<>&^|?:!.]', Operator),
            (r'[\[\]{}(),;]', Punctuation),
            (r'[\w]+', Name.Variable.Global),
        ],
        # can't use regular \b because of X$()
        # XXX: use words() here
        'declarations': [
            (r'\b({})(?=\(|\b)'.format('|'.join(map(re.escape, declarations))),
             Keyword.Declaration),
        ],
        'functions': [
            (r'\b({})(?=\(|\b)'.format('|'.join(map(re.escape, functions))),
             Keyword.Reserved),
        ],
        'metacommands': [
            (r'\b({})(?=\(|\b)'.format('|'.join(map(re.escape, metacommands))),
             Keyword.Constant),
        ],
        'operators': [
            (r'\b({})(?=\(|\b)'.format('|'.join(map(re.escape, operators))), Operator.Word),
        ],
        'statements': [
            (r'\b({})\b'.format('|'.join(map(re.escape, statements))),
             Keyword.Reserved),
        ],
        'keywords': [
            (r'\b({})\b'.format('|'.join(keywords)), Keyword),
        ],
    }

    def analyse_text(text):
        """Strongly claim files containing QBasic metacommands.

        Returns 0.9 when '$DYNAMIC' or '$STATIC' occurs in the text,
        implicitly None otherwise.
        """
        if '$DYNAMIC' in text or '$STATIC' in text:
            return 0.9
class VBScriptLexer(RegexLexer):
    """
    VBScript is scripting language that is modeled on Visual Basic.
    """
    name = 'VBScript'
    aliases = ['vbscript']
    filenames = ['*.vbs', '*.VBS']
    url = 'https://learn.microsoft.com/en-us/previous-versions/t0aew7h6(v=vs.85)'
    version_added = '2.4'
    flags = re.IGNORECASE

    # Rules are tried top to bottom; first match wins.
    tokens = {
        'root': [
            (r"'[^\n]*", Comment.Single),
            (r'\s+', Whitespace),
            ('"', String.Double, 'string'),
            ('&h[0-9a-f]+', Number.Hex),
            # Float variant 1, for example: 1., 1.e2, 1.2e3
            (r'[0-9]+\.[0-9]*(e[+-]?[0-9]+)?', Number.Float),
            (r'\.[0-9]+(e[+-]?[0-9]+)?', Number.Float),  # Float variant 2, for example: .1, .1e2
            (r'[0-9]+e[+-]?[0-9]+', Number.Float),  # Float variant 3, for example: 123e45
            (r'[0-9]+', Number.Integer),
            ('#.+#', String),  # date or time value
            (r'(dim)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Name.Variable), 'dim_more'),
            (r'(function|sub)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Name.Function)),
            (r'(class)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Name.Class)),
            (r'(const)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Name.Constant)),
            (r'(end)(\s+)(class|function|if|property|sub|with)',
             bygroups(Keyword, Whitespace, Keyword)),
            (r'(on)(\s+)(error)(\s+)(goto)(\s+)(0)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Number.Integer)),
            (r'(on)(\s+)(error)(\s+)(resume)(\s+)(next)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Keyword)),
            (r'(option)(\s+)(explicit)', bygroups(Keyword, Whitespace, Keyword)),
            (r'(property)(\s+)(get|let|set)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Whitespace, Name.Property)),
            (r'rem\s.*[^\n]*', Comment.Single),
            (words(_vbscript_builtins.KEYWORDS, suffix=r'\b'), Keyword),
            (words(_vbscript_builtins.OPERATORS), Operator),
            (words(_vbscript_builtins.OPERATOR_WORDS, suffix=r'\b'), Operator.Word),
            (words(_vbscript_builtins.BUILTIN_CONSTANTS, suffix=r'\b'), Name.Constant),
            (words(_vbscript_builtins.BUILTIN_FUNCTIONS, suffix=r'\b'), Name.Builtin),
            (words(_vbscript_builtins.BUILTIN_VARIABLES, suffix=r'\b'), Name.Builtin),
            (r'[a-z_][a-z0-9_]*', Name),
            (r'\b_\n', Operator),  # line continuation
            # NOTE(review): words() is given a plain string here, so each
            # character of '(),.:' becomes one alternative -- works, but
            # presumably a tuple was intended; confirm before changing.
            (words(r'(),.:'), Punctuation),
            (r'.+(\n)?', Error)  # anything unmatched is an error
        ],
        'dim_more': [
            # Additional comma-separated names after 'dim first'.
            (r'(\s*)(,)(\s*)([a-z_][a-z0-9]*)',
             bygroups(Whitespace, Punctuation, Whitespace, Name.Variable)),
            default('#pop'),
        ],
        'string': [
            (r'[^"\n]+', String.Double),
            (r'\"\"', String.Double),  # embedded double quote
            (r'"', String.Double, '#pop'),
            (r'\n', Error, '#pop'),  # Unterminated string
        ],
    }
class BBCBasicLexer(RegexLexer):
"""
BBC Basic was supplied on the BBC Micro, and later Acorn RISC OS.
It is also used by BBC Basic For Windows.
"""
base_keywords = ['OTHERWISE', 'AND', 'DIV', 'EOR', 'MOD', 'OR', 'ERROR',
'LINE', 'OFF', 'STEP', 'SPC', 'TAB', 'ELSE', 'THEN',
'OPENIN', 'PTR', 'PAGE', 'TIME', 'LOMEM', 'HIMEM', 'ABS',
'ACS', 'ADVAL', 'ASC', 'ASN', 'ATN', 'BGET', 'COS', 'COUNT',
'DEG', 'ERL', 'ERR', 'EVAL', 'EXP', 'EXT', 'FALSE', 'FN',
'GET', 'INKEY', 'INSTR', 'INT', 'LEN', 'LN', 'LOG', 'NOT',
'OPENUP', 'OPENOUT', 'PI', 'POINT', 'POS', 'RAD', 'RND',
'SGN', 'SIN', 'SQR', 'TAN', 'TO', 'TRUE', 'USR', 'VAL',
'VPOS', 'CHR$', 'GET$', 'INKEY$', 'LEFT$', 'MID$',
'RIGHT$', 'STR$', 'STRING$', 'EOF', 'PTR', 'PAGE', 'TIME',
'LOMEM', 'HIMEM', 'SOUND', 'BPUT', 'CALL', 'CHAIN', 'CLEAR',
'CLOSE', 'CLG', 'CLS', 'DATA', 'DEF', 'DIM', 'DRAW', 'END',
'ENDPROC', 'ENVELOPE', 'FOR', 'GOSUB', 'GOTO', 'GCOL', 'IF',
'INPUT', 'LET', 'LOCAL', 'MODE', 'MOVE', 'NEXT', 'ON',
'VDU', 'PLOT', 'PRINT', 'PROC', 'READ', 'REM', 'REPEAT',
'REPORT', 'RESTORE', 'RETURN', 'RUN', 'STOP', 'COLOUR',
'TRACE', 'UNTIL', 'WIDTH', 'OSCLI']
basic5_keywords = ['WHEN', 'OF', 'ENDCASE', 'ENDIF', 'ENDWHILE', 'CASE',
'CIRCLE', 'FILL', 'ORIGIN', 'POINT', 'RECTANGLE', 'SWAP',
'WHILE', 'WAIT', 'MOUSE', 'QUIT', 'SYS', 'INSTALL',
'LIBRARY', 'TINT', 'ELLIPSE', 'BEATS', 'TEMPO', 'VOICES',
'VOICE', 'STEREO', 'OVERLAY', 'APPEND', 'AUTO', 'CRUNCH',
'DELETE', 'EDIT', 'HELP', 'LIST', 'LOAD', 'LVAR', 'NEW',
'OLD', 'RENUMBER', 'SAVE', 'TEXTLOAD', 'TEXTSAVE',
'TWIN', 'TWINO', 'INSTALL', 'SUM', 'BEAT']
name = 'BBC Basic'
aliases = ['bbcbasic']
filenames = ['*.bbc']
url = 'https://www.bbcbasic.co.uk/bbcbasic.html'
version_added = '2.4'
tokens = {
'root': [
(r"[0-9]+", Name.Label),
(r"(\*)([^\n]*)",
bygroups(Keyword.Pseudo, Comment.Special)),
default('code'),
],
'code': [
(r"(REM)([^\n]*)",
bygroups(Keyword.Declaration, Comment.Single)),
(r'\n', Whitespace, 'root'),
(r'\s+', Whitespace),
(r':', Comment.Preproc),
# Some special cases to make functions come out nicer
(r'(DEF)(\s*)(FN|PROC)([A-Za-z_@][\w@]*)',
bygroups(Keyword.Declaration, Whitespace,
Keyword.Declaration, Name.Function)),
(r'(FN|PROC)([A-Za-z_@][\w@]*)',
bygroups(Keyword, Name.Function)),
(r'(GOTO|GOSUB|THEN|RESTORE)(\s*)(\d+)',
bygroups(Keyword, Whitespace, Name.Label)),
(r'(TRUE|FALSE)', Keyword.Constant),
(r'(PAGE|LOMEM|HIMEM|TIME|WIDTH|ERL|ERR|REPORT\$|POS|VPOS|VOICES)',
Keyword.Pseudo),
(words(base_keywords), Keyword),
(words(basic5_keywords), Keyword),
('"', String.Double, 'string'),
('%[01]{1,32}', Number.Bin),
('&[0-9a-f]{1,8}', Number.Hex),
(r'[+-]?[0-9]+\.[0-9]*(E[+-]?[0-9]+)?', Number.Float),
(r'[+-]?\.[0-9]+(E[+-]?[0-9]+)?', Number.Float),
(r'[+-]?[0-9]+E[+-]?[0-9]+', Number.Float),
(r'[+-]?\d+', Number.Integer),
(r'([A-Za-z_@][\w@]*[%$]?)', Name.Variable),
(r'([+\-]=|[$!|?+\-*/%^=><();]|>=|<=|<>|<<|>>|>>>|,)', Operator),
],
'string': [
(r'[^"\n]+', String.Double),
(r'"', String.Double, '#pop'),
(r'\n', Error, 'root'), # Unterminated string
],
}
def analyse_text(text):
    """Guessing heuristic for pygments' lexer auto-detection.

    BBC Basic listings conventionally begin with a ``REM >filename``
    header line, optionally preceded by the line number ``10``.  Return a
    high confidence (0.9) when that header is present, otherwise ``None``
    (implicitly) so other lexers can claim the text.
    """
    # str.startswith accepts a tuple of prefixes — one call instead of
    # two separate calls joined with `or`.
    if text.startswith(('10REM >', 'REM >')):
        return 0.9

View File

@@ -0,0 +1,57 @@
"""
pygments.lexers.bdd
~~~~~~~~~~~~~~~~~~~
Lexer for BDD(Behavior-driven development).
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include
from pygments.token import Comment, Keyword, Name, String, Number, Text, \
Punctuation, Whitespace
__all__ = ['BddLexer']
class BddLexer(RegexLexer):
    """
    Lexer for BDD(Behavior-driven development), which highlights not only
    keywords, but also comments, punctuations, strings, numbers, and variables.
    """

    name = 'Bdd'
    aliases = ['bdd']
    filenames = ['*.feature']
    mimetypes = ['text/x-bdd']
    url = 'https://en.wikipedia.org/wiki/Behavior-driven_development'
    version_added = '2.11'

    # Gherkin-style step keywords.  Longer alternatives ('Scenario Outline')
    # must come before their prefixes ('Scenario') so the regex alternation
    # prefers the longer match.
    step_keywords = (r'Given|When|Then|Add|And|Feature|Scenario Outline|'
                     r'Scenario|Background|Examples|But')

    tokens = {
        'comments': [
            # '#' comments — only when '#' is the first non-blank character.
            (r'^\s*#.*$', Comment),
        ],
        'miscellaneous': [
            (r'(<|>|\[|\]|=|\||:|\(|\)|\{|\}|,|\.|;|-|_|\$)', Punctuation),
            # Placeholder name between angle brackets, e.g. <username>.
            (r'((?<=\<)[^\\>]+(?=\>))', Name.Variable),
            (r'"([^\"]*)"', String),
            # Tags such as @smoke at line start.
            (r'^@\S+', Name.Label),
        ],
        'numbers': [
            # Integer or decimal literal with optional exponent.
            (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number),
        ],
        'root': [
            (r'\n|\s+', Whitespace),
            (step_keywords, Keyword),
            include('comments'),
            include('miscellaneous'),
            include('numbers'),
            # Anything else is plain text.
            (r'\S+', Text),
        ]
    }

    def analyse_text(self, text):
        # Deliberately neutral: always returns None, so detection relies on
        # the '*.feature' filename pattern alone.
        # NOTE(review): unlike most lexers, this takes `self`; pygments
        # lexers usually define `analyse_text(text)` — confirm intent.
        return

View File

@@ -0,0 +1,99 @@
"""
pygments.lexers.berry
~~~~~~~~~~~~~~~~~~~~~
Lexer for Berry.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, include, bygroups
from pygments.token import Comment, Whitespace, Operator, Keyword, Name, \
String, Number, Punctuation
__all__ = ['BerryLexer']
class BerryLexer(RegexLexer):
    """
    For Berry source code.
    """

    name = 'Berry'
    aliases = ['berry', 'be']
    filenames = ['*.be']
    mimetypes = ['text/x-berry', 'application/x-berry']
    url = 'https://berry-lang.github.io'
    version_added = '2.12'

    # Identifier pattern: a word that does not start with a digit.
    _name = r'\b[^\W\d]\w*'

    tokens = {
        'root': [
            include('whitespace'),
            include('numbers'),
            include('keywords'),
            # Declarations bind the following identifier as a function,
            # class or namespace name respectively.
            (rf'(def)(\s+)({_name})',
             bygroups(Keyword.Declaration, Whitespace, Name.Function)),
            (rf'\b(class)(\s+)({_name})',
             bygroups(Keyword.Declaration, Whitespace, Name.Class)),
            (rf'\b(import)(\s+)({_name})',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
            include('expr')
        ],
        'expr': [
            # Horizontal whitespace only (newlines handled in 'whitespace').
            (r'[^\S\n]+', Whitespace),
            (r'\.\.|[~!%^&*+=|?:<>/-]', Operator),
            (r'[(){}\[\],.;]', Punctuation),
            include('controls'),
            include('builtins'),
            include('funccall'),
            include('member'),
            include('name'),
            include('strings')
        ],
        'whitespace': [
            (r'\s+', Whitespace),
            # Berry block comments are '#- ... -#'; line comments use '#'.
            (r'#-(.|\n)*?-#', Comment.Multiline),
            (r'#.*?$', Comment.Single)
        ],
        'keywords': [
            (words((
                'as', 'break', 'continue', 'import', 'static', 'self', 'super'),
                suffix=r'\b'), Keyword.Reserved),
            (r'(true|false|nil)\b', Keyword.Constant),
            (r'(var|def)\b', Keyword.Declaration)
        ],
        'controls': [
            (words((
                'if', 'elif', 'else', 'for', 'while', 'do', 'end', 'break',
                'continue', 'return', 'try', 'except', 'raise'),
                suffix=r'\b'), Keyword)
        ],
        'builtins': [
            (words((
                'assert', 'bool', 'input', 'classname', 'classof', 'number', 'real',
                'bytes', 'compile', 'map', 'list', 'int', 'isinstance', 'print',
                'range', 'str', 'super', 'module', 'size', 'issubclass', 'open',
                'file', 'type', 'call'),
                suffix=r'\b'), Name.Builtin)
        ],
        'numbers': [
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            # NOTE(review): the integer rule precedes the float rule, so for
            # a literal like '1.5' the '1' matches as Integer first — confirm
            # this ordering is intended.
            (r'-?\d+', Number.Integer),
            (r'(-?\d+\.?|\.\d)\d*([eE][+-]?\d+)?', Number.Float)
        ],
        'name': [
            (_name, Name)
        ],
        'funccall': [
            # Identifier directly followed by '(' is a call; '#pop' returns
            # from the 'expr' inclusion.
            (rf'{_name}(?=\s*\()', Name.Function, '#pop')
        ],
        'member': [
            # Attribute access: identifier after '.' that is not a call.
            (rf'(?<=\.){_name}\b(?!\()', Name.Attribute, '#pop')
        ],
        'strings': [
            (r'"([^\\]|\\.)*?"', String.Double, '#pop'),
            (r'\'([^\\]|\\.)*?\'', String.Single, '#pop')
        ]
    }

View File

@@ -0,0 +1,159 @@
"""
pygments.lexers.bibtex
~~~~~~~~~~~~~~~~~~~~~~
Lexers for BibTeX bibliography data and styles
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, default, \
words
from pygments.token import Name, Comment, String, Error, Number, Keyword, \
Punctuation, Whitespace
__all__ = ['BibTeXLexer', 'BSTLexer']
class BibTeXLexer(ExtendedRegexLexer):
    """
    A lexer for BibTeX bibliography data format.
    """

    name = 'BibTeX'
    aliases = ['bibtex', 'bib']
    filenames = ['*.bib']
    mimetypes = ["text/x-bibtex"]
    version_added = '2.2'
    flags = re.IGNORECASE
    url = 'https://texfaq.org/FAQ-BibTeXing'

    # Punctuation characters BibTeX permits inside identifiers,
    # in addition to word characters.
    ALLOWED_CHARS = r'@!$&*+\-./:;<>?\[\\\]^`|~'
    IDENTIFIER = '[{}][{}]*'.format('a-z_' + ALLOWED_CHARS, r'\w' + ALLOWED_CHARS)

    def open_brace_callback(self, match, ctx):
        """Record which brace ('{' or '(') opened the entry on the lexer
        context, so the closing delimiter can be validated against it."""
        opening_brace = match.group()
        ctx.opening_brace = opening_brace
        yield match.start(), Punctuation, opening_brace
        ctx.pos = match.end()

    def close_brace_callback(self, match, ctx):
        """Emit Error when the closing delimiter does not pair with the
        recorded opening one ('{'…')' or '('…'}'), else Punctuation."""
        closing_brace = match.group()
        if (
            ctx.opening_brace == '{' and closing_brace != '}' or
            ctx.opening_brace == '(' and closing_brace != ')'
        ):
            yield match.start(), Error, closing_brace
        else:
            yield match.start(), Punctuation, closing_brace
        # Reset so the next entry records its own opening brace.
        del ctx.opening_brace
        ctx.pos = match.end()

    tokens = {
        'root': [
            include('whitespace'),
            # '@comment' (but not '@commentary') is skipped entirely.
            (r'@comment(?!ary)', Comment),
            ('@preamble', Name.Class, ('closing-brace', 'value', 'opening-brace')),
            ('@string', Name.Class, ('closing-brace', 'field', 'opening-brace')),
            # Any other '@entrytype' starts a regular entry.
            ('@' + IDENTIFIER, Name.Class,
             ('closing-brace', 'command-body', 'opening-brace')),
            # Text outside entries is treated as commentary.
            ('.+', Comment),
        ],
        'opening-brace': [
            include('whitespace'),
            (r'[{(]', open_brace_callback, '#pop'),
        ],
        'closing-brace': [
            include('whitespace'),
            (r'[})]', close_brace_callback, '#pop'),
        ],
        'command-body': [
            include('whitespace'),
            # The citation key, followed by the entry's field list.
            (r'[^\s\,\}]+', Name.Label, ('#pop', 'fields')),
        ],
        'fields': [
            include('whitespace'),
            (',', Punctuation, 'field'),
            default('#pop'),
        ],
        'field': [
            include('whitespace'),
            (IDENTIFIER, Name.Attribute, ('value', '=')),
            default('#pop'),
        ],
        '=': [
            include('whitespace'),
            ('=', Punctuation, '#pop'),
        ],
        'value': [
            include('whitespace'),
            # Bare identifier: reference to an @string macro.
            (IDENTIFIER, Name.Variable),
            ('"', String, 'quoted-string'),
            (r'\{', String, 'braced-string'),
            (r'[\d]+', Number),
            # '#' concatenates value parts.
            ('#', Punctuation),
            default('#pop'),
        ],
        'quoted-string': [
            # Braces may nest inside quoted strings.
            (r'\{', String, 'braced-string'),
            ('"', String, '#pop'),
            (r'[^\{\"]+', String),
        ],
        'braced-string': [
            # '#push' handles arbitrarily nested braces.
            (r'\{', String, '#push'),
            (r'\}', String, '#pop'),
            (r'[^\{\}]+', String),
        ],
        'whitespace': [
            (r'\s+', Whitespace),
        ],
    }
class BSTLexer(RegexLexer):
    """
    A lexer for BibTeX bibliography styles.
    """

    name = 'BST'
    aliases = ['bst', 'bst-pybtex']
    filenames = ['*.bst']
    version_added = '2.2'
    flags = re.IGNORECASE | re.MULTILINE
    url = 'https://texfaq.org/FAQ-BibTeXing'

    tokens = {
        'root': [
            include('whitespace'),
            (words(['read', 'sort']), Keyword),
            # Commands take one, two or three brace groups respectively;
            # each 'group' state consumes one '{...}' group.
            (words(['execute', 'integers', 'iterate', 'reverse', 'strings']),
             Keyword, ('group')),
            (words(['function', 'macro']), Keyword, ('group', 'group')),
            (words(['entry']), Keyword, ('group', 'group', 'group')),
        ],
        'group': [
            include('whitespace'),
            (r'\{', Punctuation, ('#pop', 'group-end', 'body')),
        ],
        'group-end': [
            include('whitespace'),
            (r'\}', Punctuation, '#pop'),
        ],
        'body': [
            include('whitespace'),
            # Quoted (deferred) function reference, e.g. 'name.
            (r"\'[^#\"\{\}\s]+", Name.Function),
            # Built-in functions end with '$', e.g. add.period$.
            (r'[^#\"\{\}\s]+\$', Name.Builtin),
            (r'[^#\"\{\}\s]+', Name.Variable),
            (r'"[^\"]*"', String),
            # Integer literals are prefixed with '#'.
            (r'#-?\d+', Number),
            (r'\{', Punctuation, ('group-end', 'body')),
            default('#pop'),
        ],
        'whitespace': [
            (r'\s+', Whitespace),
            # '%' starts a line comment.
            ('%.*?$', Comment.Single),
        ],
    }

View File

@@ -0,0 +1,173 @@
"""
pygments.lexers.blueprint
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Blueprint UI markup language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, words
from pygments.token import (
Comment,
Operator,
Keyword,
Name,
String,
Number,
Punctuation,
Whitespace,
)
__all__ = ["BlueprintLexer"]
class BlueprintLexer(RegexLexer):
    """
    For Blueprint UI markup.
    """

    name = "Blueprint"
    aliases = ["blueprint"]
    filenames = ["*.blp"]
    mimetypes = ["text/x-blueprint"]
    url = "https://gitlab.gnome.org/jwestman/blueprint-compiler"
    version_added = '2.16'

    flags = re.IGNORECASE

    tokens = {
        "root": [
            include("block-content"),
        ],
        "type": [
            # '$'-prefixed types refer to classes defined in application code.
            (r"\$\s*[a-z_][a-z0-9_\-]*", Name.Class),
            # Optionally namespace-qualified type, e.g. Gtk.Button.
            (r"(?:([a-z_][a-z0-9_\-]*)(\s*)(\.)(\s*))?([a-z_][a-z0-9_\-]*)",
             bygroups(Name.Namespace, Whitespace, Punctuation, Whitespace, Name.Class)),
        ],
        "whitespace": [
            (r"\s+", Whitespace),
            (r"//.*?\n", Comment.Single),
            (r"/\*", Comment.Multiline, "comment-multiline"),
        ],
        "comment-multiline": [
            (r"\*/", Comment.Multiline, "#pop"),
            (r"[^*]+", Comment.Multiline),
            (r"\*", Comment.Multiline),
        ],
        "value": [
            (r"(typeof)(\s*)(<)", bygroups(Keyword, Whitespace, Punctuation), "typeof"),
            (words(("true", "false", "null")), Keyword.Constant),
            (r"[a-z_][a-z0-9_\-]*", Name.Variable),
            (r"\|", Operator),
            (r'".*?"', String.Double),
            (r"\'.*?\'", String.Single),
            (r"0x[\d_]*", Number.Hex),
            (r"[0-9_]+", Number.Integer),
            (r"\d[\d\.a-z_]*", Number),
        ],
        "typeof": [
            include("whitespace"),
            include("type"),
            (r">", Punctuation, "#pop"),
        ],
        "content": [
            include("whitespace"),
            # Keywords
            (words(("after", "bidirectional", "bind-property", "bind", "default",
                    "destructive", "disabled", "inverted", "no-sync-create",
                    "suggested", "swapped", "sync-create", "template")),
             Keyword),
            # Translated strings: _("...") and C_("ctx", "...").
            (r"(C?_)(\s*)(\()",
             bygroups(Name.Function.Builtin, Whitespace, Punctuation),
             "paren-content"),
            # Cast expressions
            (r"(as)(\s*)(<)", bygroups(Keyword, Whitespace, Punctuation), "typeof"),
            # Closures
            (r"(\$?[a-z_][a-z0-9_\-]*)(\s*)(\()",
             bygroups(Name.Function, Whitespace, Punctuation),
             "paren-content"),
            # Objects: a type (custom or namespaced) with an optional id,
            # followed by a '{...}' block.
            (r"(?:(\$\s*[a-z_][a-z0-9_\-]+)|(?:([a-z_][a-z0-9_\-]*)(\s*)(\.)(\s*))?([a-z_][a-z0-9_\-]*))(?:(\s+)([a-z_][a-z0-9_\-]*))?(\s*)(\{)",
             bygroups(Name.Class, Name.Namespace, Whitespace, Punctuation, Whitespace,
                      Name.Class, Whitespace, Name.Variable, Whitespace, Punctuation),
             "brace-block"),
            # Misc
            include("value"),
            (r",|\.", Punctuation),
        ],
        "block-content": [
            # Import statements
            (r"(using)(\s+)([a-z_][a-z0-9_\-]*)(\s+)(\d[\d\.]*)(;)",
             bygroups(Keyword, Whitespace, Name.Namespace, Whitespace,
                      Name.Namespace, Punctuation)),
            # Menus
            (r"(menu|section|submenu)(?:(\s+)([a-z_][a-z0-9_\-]*))?(\s*)(\{)",
             bygroups(Keyword, Whitespace, Name.Variable, Whitespace, Punctuation),
             "brace-block"),
            (r"(item)(\s*)(\{)",
             bygroups(Keyword, Whitespace, Punctuation),
             "brace-block"),
            (r"(item)(\s*)(\()",
             bygroups(Keyword, Whitespace, Punctuation),
             "paren-block"),
            # Templates
            (r"template", Keyword.Declaration, "template"),
            # Nested blocks. When extensions are added, this is where they go.
            (r"(responses|items|mime-types|patterns|suffixes|marks|widgets|strings|styles)(\s*)(\[)",
             bygroups(Keyword, Whitespace, Punctuation),
             "bracket-block"),
            (r"(accessibility|setters|layout|item)(\s*)(\{)",
             bygroups(Keyword, Whitespace, Punctuation),
             "brace-block"),
            (r"(condition|mark|item)(\s*)(\()",
             bygroups(Keyword, Whitespace, Punctuation),
             "paren-content"),
            (r"\[", Punctuation, "child-type"),
            # Properties and signals, e.g. 'label:' or 'clicked =>'.
            (r"([a-z_][a-z0-9_\-]*(?:::[a-z0-9_]+)?)(\s*)(:|=>)",
             bygroups(Name.Property, Whitespace, Punctuation),
             "statement"),
            include("content"),
        ],
        # Each *-block state consumes nested content until its closing
        # delimiter, then pops.
        "paren-block": [
            include("block-content"),
            (r"\)", Punctuation, "#pop"),
        ],
        "paren-content": [
            include("content"),
            (r"\)", Punctuation, "#pop"),
        ],
        "bracket-block": [
            include("block-content"),
            (r"\]", Punctuation, "#pop"),
        ],
        "brace-block": [
            include("block-content"),
            (r"\}", Punctuation, "#pop"),
        ],
        "statement": [
            include("content"),
            (r";", Punctuation, "#pop"),
        ],
        "child-type": [
            include("whitespace"),
            # e.g. [action response=ok]
            (r"(action)(\s+)(response)(\s*)(=)(\s*)",
             bygroups(Keyword, Whitespace, Name.Attribute, Whitespace,
                      Punctuation, Whitespace)),
            (words(("default", "internal-child", "response")), Keyword),
            (r"[a-z_][a-z0-9_\-]*", Name.Decorator),
            include("value"),
            (r"=", Punctuation),
            (r"\]", Punctuation, "#pop"),
        ],
        "template": [
            include("whitespace"),
            include("type"),
            (r":", Punctuation),
            (r"\{", Punctuation, ("#pop", "brace-block")),
        ],
    }

View File

@@ -0,0 +1,97 @@
"""
pygments.lexers.boa
~~~~~~~~~~~~~~~~~~~
Lexers for the Boa language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words
from pygments.token import String, Comment, Keyword, Name, Number, Operator, \
Punctuation, Whitespace
__all__ = ['BoaLexer']
class BoaLexer(RegexLexer):
    """
    Lexer for the Boa language.
    """

    name = 'Boa'
    aliases = ['boa']
    filenames = ['*.boa']
    url = 'https://boa.cs.iastate.edu/docs'
    version_added = '2.4'

    # Statement/control keywords.
    reserved = words(
        ('input', 'output', 'of', 'weight', 'before', 'after', 'stop',
         'ifall', 'foreach', 'exists', 'function', 'break', 'switch', 'case',
         'visitor', 'default', 'return', 'visit', 'while', 'if', 'else'),
        suffix=r'\b', prefix=r'\b')
    # Type and aggregator keywords.
    keywords = words(
        ('bottom', 'collection', 'maximum', 'mean', 'minimum', 'set', 'sum',
         'top', 'string', 'int', 'bool', 'float', 'time', 'false', 'true',
         'array', 'map', 'stack', 'enum', 'type'), suffix=r'\b', prefix=r'\b')
    # Domain-specific (mining) types provided by Boa.
    classes = words(
        ('Project', 'ForgeKind', 'CodeRepository', 'Revision', 'RepositoryKind',
         'ChangedFile', 'FileKind', 'ASTRoot', 'Namespace', 'Declaration', 'Type',
         'Method', 'Variable', 'Statement', 'Expression', 'Modifier',
         'StatementKind', 'ExpressionKind', 'ModifierKind', 'Visibility',
         'TypeKind', 'Person', 'ChangeKind'),
        suffix=r'\b', prefix=r'\b')
    operators = ('->', ':=', ':', '=', '<<', '!', '++', '||',
                 '&&', '+', '-', '*', ">", "<")
    string_sep = ('`', '\"')
    # Matched only when immediately followed by '(' (see suffix below).
    built_in_functions = words(
        (
            # Array functions
            'new', 'sort',
            # Date & Time functions
            'yearof', 'dayofyear', 'hourof', 'minuteof', 'secondof', 'now',
            'addday', 'addmonth', 'addweek', 'addyear', 'dayofmonth', 'dayofweek',
            'dayofyear', 'formattime', 'trunctoday', 'trunctohour', 'trunctominute',
            'trunctomonth', 'trunctosecond', 'trunctoyear',
            # Map functions
            'clear', 'haskey', 'keys', 'lookup', 'remove', 'values',
            # Math functions
            'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh',
            'ceil', 'cos', 'cosh', 'exp', 'floor', 'highbit', 'isfinite', 'isinf',
            'isnan', 'isnormal', 'log', 'log10', 'max', 'min', 'nrand', 'pow',
            'rand', 'round', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc',
            # Other functions
            'def', 'hash', 'len',
            # Set functions
            'add', 'contains', 'remove',
            # String functions
            'format', 'lowercase', 'match', 'matchposns', 'matchstrs', 'regex',
            'split', 'splitall', 'splitn', 'strfind', 'strreplace', 'strrfind',
            'substring', 'trim', 'uppercase',
            # Type Conversion functions
            'bool', 'float', 'int', 'string', 'time',
            # Domain-Specific functions
            'getast', 'getsnapshot', 'hasfiletype', 'isfixingrevision', 'iskind',
            'isliteral',
        ),
        prefix=r'\b',
        suffix=r'\(')

    tokens = {
        'root': [
            (r'#.*?$', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (reserved, Keyword.Reserved),
            (built_in_functions, Name.Function),
            (keywords, Keyword.Type),
            # NOTE(review): Name.Classes is not one of pygments' standard
            # token types (Name.Class is); presumably it is auto-created on
            # attribute access, so styles may not color it — confirm intent.
            (classes, Name.Classes),
            (words(operators), Operator),
            (r'[][(),;{}\\.]', Punctuation),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"`(\\\\|\\[^\\]|[^`\\])*`", String.Backtick),
            (words(string_sep), String.Delimiter),
            (r'[a-zA-Z_]+', Name.Variable),
            (r'[0-9]+', Number.Integer),
            (r'\s+', Whitespace),  # Whitespace
        ]
    }

View File

@@ -0,0 +1,112 @@
"""
pygments.lexers.bqn
~~~~~~~~~~~~~~~~~~~
Lexer for BQN.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['BQNLexer']
class BQNLexer(RegexLexer):
    """
    A simple BQN lexer.
    """

    name = 'BQN'
    url = 'https://mlochbaum.github.io/BQN/index.html'
    aliases = ['bqn']
    filenames = ['*.bqn']
    mimetypes = []
    version_added = '2.16'

    # An inter_word_char. Necessary because \w matches all alphanumeric
    # Unicode characters, including ones (e.g., 𝕊) that BQN treats special.
    _iwc = r'((?=[^𝕎𝕏𝔽𝔾𝕊𝕨𝕩𝕗𝕘𝕤𝕣])\w)'

    tokens = {
        'root': [
            # Whitespace
            # ==========
            (r'\s+', Whitespace),
            #
            # Comment
            # =======
            # '#' is a comment that continues to the end of the line
            (r'#.*$', Comment.Single),
            #
            # Strings
            # =======
            # Quote characters are escaped by doubling them.
            (r'\'((\'\')|[^\'])*\'', String.Single),
            (r'"(("")|[^"])*"', String.Double),
            #
            # Null Character
            # ==============
            # Literal representation of the null character
            (r'@', String.Symbol),
            #
            # Punctuation
            # ===========
            # This token type is used for diamond, commas
            # and array and list brackets and strand syntax
            (r'[\.⋄,\[\]⟨⟩‿]', Punctuation),
            #
            # Expression Grouping
            # ===================
            # Since this token type is important in BQN, it is not included in
            # the punctuation token type but rather in the following one
            (r'[\(\)]', String.Regex),
            #
            # Numbers
            # =======
            # Includes the numeric literals and the Nothing character
            (r'¯?[0-9](([0-9]|_)*\.?([0-9]|_)+|([0-9]|_)*)([Ee][¯]?([0-9]|_)+)?|¯|∞|π|·', Number),
            #
            # Variables
            # =========
            # Lowercase-initial identifiers are subject (value) names.
            (r'[a-z]' + _iwc + r'*', Name.Variable),
            #
            # 2-Modifiers
            # ===========
            # Needs to come before the 1-modifiers due to _𝕣 and _𝕣_
            (r'[∘○⊸⟜⌾⊘◶⎉⚇⍟⎊]', Name.Property),
            (r'_(𝕣|[a-zA-Z0-9]+)_', Name.Property),
            #
            # 1-Modifiers
            # ===========
            (r'[˙˜˘¨⌜⁼´˝`𝕣]', Name.Attribute),
            (r'_(𝕣|[a-zA-Z0-9]+)', Name.Attribute),
            #
            # Functions
            # =========
            # The monadic or dyadic function primitives and function
            # operands and arguments, along with function self-reference
            (r'[+\-×÷\⋆√⌊⌈∧∨¬|≤<>≥=≠≡≢⊣⊢⥊∾≍⋈↑↓↕«»⌽⍉/⍋⍒⊏⊑⊐⊒∊⍷⊔!𝕎𝕏𝔽𝔾𝕊]',
             Operator),
            # Uppercase-initial identifiers and •system names act as functions.
            (r'[A-Z]' + _iwc + r'*|•' + _iwc + r'+', Operator),
            #
            # Constant
            # ========
            (r'˙', Name.Constant),
            #
            # Define/Export/Change
            # ====================
            (r'[←↩⇐]', Keyword.Declaration),
            #
            # Blocks
            # ======
            (r'[{}]', Keyword.Type),
            #
            # Extra characters
            # ================
            (r'[;:?𝕨𝕩𝕗𝕘𝕤]', Name.Entity),
            #
        ],
    }

View File

@@ -0,0 +1,625 @@
"""
pygments.lexers.business
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for "business-oriented" languages.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Whitespace
from pygments.lexers._openedge_builtins import OPENEDGEKEYWORDS
__all__ = ['CobolLexer', 'CobolFreeformatLexer', 'ABAPLexer', 'OpenEdgeLexer',
'GoodDataCLLexer', 'MaqlLexer']
class CobolLexer(RegexLexer):
    """
    Lexer for OpenCOBOL code.

    Handles fixed-format source: the 'comment' state treats the first six
    columns (sequence area) and indicator-column '*' / '/' lines as comments.
    """

    name = 'COBOL'
    aliases = ['cobol']
    filenames = ['*.cob', '*.COB', '*.cpy', '*.CPY']
    mimetypes = ['text/x-cobol']
    url = 'https://en.wikipedia.org/wiki/COBOL'
    version_added = '1.6'
    flags = re.IGNORECASE | re.MULTILINE

    # Data Types: by PICTURE and USAGE
    # Operators: **, *, +, -, /, <, >, <=, >=, =, <>
    # Logical (?): NOT, AND, OR
    # Reserved words:
    # http://opencobol.add1tocobol.com/#reserved-words
    # Intrinsics:
    # http://opencobol.add1tocobol.com/#does-opencobol-implement-any-intrinsic-functions

    tokens = {
        'root': [
            include('comment'),
            include('strings'),
            include('core'),
            include('nums'),
            (r'[a-z0-9]([\w\-]*[a-z0-9]+)?', Name.Variable),
            # (r'[\s]+', Text),
            (r'[ \t]+', Whitespace),
        ],
        'comment': [
            # Sequence area (first 6 columns), '*'/'/' indicator lines,
            # and free-form '*>' inline comments.
            (r'(^.{6}[*/].*\n|^.{6}|\*>.*\n)', Comment),
        ],
        'core': [
            # Figurative constants
            (r'(^|(?<=[^\w\-]))(ALL\s+)?'
             r'((ZEROES)|(HIGH-VALUE|LOW-VALUE|QUOTE|SPACE|ZERO)(S)?)'
             r'\s*($|(?=[^\w\-]))',
             Name.Constant),
            # Reserved words STATEMENTS and other bolds
            (words((
                'ACCEPT', 'ADD', 'ALLOCATE', 'CALL', 'CANCEL', 'CLOSE', 'COMPUTE',
                'CONFIGURATION', 'CONTINUE', 'DATA', 'DELETE', 'DISPLAY', 'DIVIDE',
                'DIVISION', 'ELSE', 'END', 'END-ACCEPT',
                'END-ADD', 'END-CALL', 'END-COMPUTE', 'END-DELETE', 'END-DISPLAY',
                'END-DIVIDE', 'END-EVALUATE', 'END-IF', 'END-MULTIPLY', 'END-OF-PAGE',
                'END-PERFORM', 'END-READ', 'END-RETURN', 'END-REWRITE', 'END-SEARCH',
                'END-START', 'END-STRING', 'END-SUBTRACT', 'END-UNSTRING', 'END-WRITE',
                'ENVIRONMENT', 'EVALUATE', 'EXIT', 'FD', 'FILE', 'FILE-CONTROL', 'FOREVER',
                'FREE', 'GENERATE', 'GO', 'GOBACK', 'IDENTIFICATION', 'IF', 'INITIALIZE',
                'INITIATE', 'INPUT-OUTPUT', 'INSPECT', 'INVOKE', 'I-O-CONTROL', 'LINKAGE',
                'LOCAL-STORAGE', 'MERGE', 'MOVE', 'MULTIPLY', 'OPEN', 'PERFORM',
                'PROCEDURE', 'PROGRAM-ID', 'RAISE', 'READ', 'RELEASE', 'RESUME',
                'RETURN', 'REWRITE', 'SCREEN', 'SD', 'SEARCH', 'SECTION', 'SET',
                'SORT', 'START', 'STOP', 'STRING', 'SUBTRACT', 'SUPPRESS',
                'TERMINATE', 'THEN', 'UNLOCK', 'UNSTRING', 'USE', 'VALIDATE',
                'WORKING-STORAGE', 'WRITE'), prefix=r'(^|(?<=[^\w\-]))',
                suffix=r'\s*($|(?=[^\w\-]))'),
             Keyword.Reserved),
            # Reserved words
            (words((
                'ACCESS', 'ADDRESS', 'ADVANCING', 'AFTER', 'ALL',
                'ALPHABET', 'ALPHABETIC', 'ALPHABETIC-LOWER', 'ALPHABETIC-UPPER',
                # FIX: added the comma after 'ALTERNATE'.  Previously the
                # missing comma made Python concatenate the adjacent string
                # literals into the bogus keyword 'ALTERNATEANY', so neither
                # ALTERNATE nor ANY was ever highlighted.
                'ALPHANUMERIC', 'ALPHANUMERIC-EDITED', 'ALSO', 'ALTER', 'ALTERNATE',
                'ANY', 'ARE', 'AREA', 'AREAS', 'ARGUMENT-NUMBER', 'ARGUMENT-VALUE', 'AS',
                'ASCENDING', 'ASSIGN', 'AT', 'AUTO', 'AUTO-SKIP', 'AUTOMATIC',
                'AUTOTERMINATE', 'BACKGROUND-COLOR', 'BASED', 'BEEP', 'BEFORE', 'BELL',
                'BLANK', 'BLINK', 'BLOCK', 'BOTTOM', 'BY', 'BYTE-LENGTH', 'CHAINING',
                'CHARACTER', 'CHARACTERS', 'CLASS', 'CODE', 'CODE-SET', 'COL',
                'COLLATING', 'COLS', 'COLUMN', 'COLUMNS', 'COMMA', 'COMMAND-LINE',
                'COMMIT', 'COMMON', 'CONSTANT', 'CONTAINS', 'CONTENT', 'CONTROL',
                'CONTROLS', 'CONVERTING', 'COPY', 'CORR', 'CORRESPONDING', 'COUNT', 'CRT',
                'CURRENCY', 'CURSOR', 'CYCLE', 'DATE', 'DAY', 'DAY-OF-WEEK', 'DE',
                'DEBUGGING', 'DECIMAL-POINT', 'DECLARATIVES', 'DEFAULT', 'DELIMITED',
                'DELIMITER', 'DEPENDING', 'DESCENDING', 'DETAIL', 'DISK',
                'DOWN', 'DUPLICATES', 'DYNAMIC', 'EBCDIC',
                'ENTRY', 'ENVIRONMENT-NAME', 'ENVIRONMENT-VALUE', 'EOL', 'EOP',
                'EOS', 'ERASE', 'ERROR', 'ESCAPE', 'EXCEPTION',
                'EXCLUSIVE', 'EXTEND', 'EXTERNAL', 'FILE-ID', 'FILLER', 'FINAL',
                'FIRST', 'FIXED', 'FLOAT-LONG', 'FLOAT-SHORT',
                'FOOTING', 'FOR', 'FOREGROUND-COLOR', 'FORMAT', 'FROM', 'FULL',
                'FUNCTION', 'FUNCTION-ID', 'GIVING', 'GLOBAL', 'GROUP',
                'HEADING', 'HIGHLIGHT', 'I-O', 'ID',
                'IGNORE', 'IGNORING', 'IN', 'INDEX', 'INDEXED', 'INDICATE',
                'INITIAL', 'INITIALIZED', 'INPUT', 'INTO', 'INTRINSIC', 'INVALID',
                'IS', 'JUST', 'JUSTIFIED', 'KEY', 'LABEL',
                'LAST', 'LEADING', 'LEFT', 'LENGTH', 'LIMIT', 'LIMITS', 'LINAGE',
                'LINAGE-COUNTER', 'LINE', 'LINES', 'LOCALE', 'LOCK',
                'LOWLIGHT', 'MANUAL', 'MEMORY', 'MINUS', 'MODE', 'MULTIPLE',
                'NATIONAL', 'NATIONAL-EDITED', 'NATIVE', 'NEGATIVE', 'NEXT', 'NO',
                'NULL', 'NULLS', 'NUMBER', 'NUMBERS', 'NUMERIC', 'NUMERIC-EDITED',
                'OBJECT-COMPUTER', 'OCCURS', 'OF', 'OFF', 'OMITTED', 'ON', 'ONLY',
                'OPTIONAL', 'ORDER', 'ORGANIZATION', 'OTHER', 'OUTPUT', 'OVERFLOW',
                'OVERLINE', 'PACKED-DECIMAL', 'PADDING', 'PAGE', 'PARAGRAPH',
                'PLUS', 'POINTER', 'POSITION', 'POSITIVE', 'PRESENT', 'PREVIOUS',
                'PRINTER', 'PRINTING', 'PROCEDURE-POINTER', 'PROCEDURES',
                'PROCEED', 'PROGRAM', 'PROGRAM-POINTER', 'PROMPT', 'QUOTE',
                'QUOTES', 'RANDOM', 'RD', 'RECORD', 'RECORDING', 'RECORDS', 'RECURSIVE',
                'REDEFINES', 'REEL', 'REFERENCE', 'RELATIVE', 'REMAINDER', 'REMOVAL',
                'RENAMES', 'REPLACING', 'REPORT', 'REPORTING', 'REPORTS', 'REPOSITORY',
                'REQUIRED', 'RESERVE', 'RETURNING', 'REVERSE-VIDEO', 'REWIND',
                'RIGHT', 'ROLLBACK', 'ROUNDED', 'RUN', 'SAME', 'SCROLL',
                'SECURE', 'SEGMENT-LIMIT', 'SELECT', 'SENTENCE', 'SEPARATE',
                'SEQUENCE', 'SEQUENTIAL', 'SHARING', 'SIGN', 'SIGNED', 'SIGNED-INT',
                'SIGNED-LONG', 'SIGNED-SHORT', 'SIZE', 'SORT-MERGE', 'SOURCE',
                'SOURCE-COMPUTER', 'SPECIAL-NAMES', 'STANDARD',
                'STANDARD-1', 'STANDARD-2', 'STATUS', 'SUBKEY', 'SUM',
                'SYMBOLIC', 'SYNC', 'SYNCHRONIZED', 'TALLYING', 'TAPE',
                'TEST', 'THROUGH', 'THRU', 'TIME', 'TIMES', 'TO', 'TOP', 'TRAILING',
                'TRANSFORM', 'TYPE', 'UNDERLINE', 'UNIT', 'UNSIGNED',
                'UNSIGNED-INT', 'UNSIGNED-LONG', 'UNSIGNED-SHORT', 'UNTIL', 'UP',
                'UPDATE', 'UPON', 'USAGE', 'USING', 'VALUE', 'VALUES', 'VARYING',
                'WAIT', 'WHEN', 'WITH', 'WORDS', 'YYYYDDD', 'YYYYMMDD'),
                prefix=r'(^|(?<=[^\w\-]))', suffix=r'\s*($|(?=[^\w\-]))'),
             Keyword.Pseudo),
            # inactive reserved words
            (words((
                'ACTIVE-CLASS', 'ALIGNED', 'ANYCASE', 'ARITHMETIC', 'ATTRIBUTE',
                'B-AND', 'B-NOT', 'B-OR', 'B-XOR', 'BIT', 'BOOLEAN', 'CD', 'CENTER',
                'CF', 'CH', 'CHAIN', 'CLASS-ID', 'CLASSIFICATION', 'COMMUNICATION',
                'CONDITION', 'DATA-POINTER', 'DESTINATION', 'DISABLE', 'EC', 'EGI',
                'EMI', 'ENABLE', 'END-RECEIVE', 'ENTRY-CONVENTION', 'EO', 'ESI',
                'EXCEPTION-OBJECT', 'EXPANDS', 'FACTORY', 'FLOAT-BINARY-16',
                'FLOAT-BINARY-34', 'FLOAT-BINARY-7', 'FLOAT-DECIMAL-16',
                'FLOAT-DECIMAL-34', 'FLOAT-EXTENDED', 'FORMAT', 'FUNCTION-POINTER',
                'GET', 'GROUP-USAGE', 'IMPLEMENTS', 'INFINITY', 'INHERITS',
                'INTERFACE', 'INTERFACE-ID', 'INVOKE', 'LC_ALL', 'LC_COLLATE',
                'LC_CTYPE', 'LC_MESSAGES', 'LC_MONETARY', 'LC_NUMERIC', 'LC_TIME',
                'LINE-COUNTER', 'MESSAGE', 'METHOD', 'METHOD-ID', 'NESTED', 'NONE',
                'NORMAL', 'OBJECT', 'OBJECT-REFERENCE', 'OPTIONS', 'OVERRIDE',
                'PAGE-COUNTER', 'PF', 'PH', 'PROPERTY', 'PROTOTYPE', 'PURGE',
                'QUEUE', 'RAISE', 'RAISING', 'RECEIVE', 'RELATION', 'REPLACE',
                'REPRESENTS-NOT-A-NUMBER', 'RESET', 'RESUME', 'RETRY', 'RF', 'RH',
                'SECONDS', 'SEGMENT', 'SELF', 'SEND', 'SOURCES', 'STATEMENT',
                'STEP', 'STRONG', 'SUB-QUEUE-1', 'SUB-QUEUE-2', 'SUB-QUEUE-3',
                'SUPER', 'SYMBOL', 'SYSTEM-DEFAULT', 'TABLE', 'TERMINAL', 'TEXT',
                'TYPEDEF', 'UCS-4', 'UNIVERSAL', 'USER-DEFAULT', 'UTF-16', 'UTF-8',
                'VAL-STATUS', 'VALID', 'VALIDATE', 'VALIDATE-STATUS'),
                prefix=r'(^|(?<=[^\w\-]))', suffix=r'\s*($|(?=[^\w\-]))'),
             Error),
            # Data Types
            (r'(^|(?<=[^\w\-]))'
             r'(PIC\s+.+?(?=(\s|\.\s))|PICTURE\s+.+?(?=(\s|\.\s))|'
             r'(COMPUTATIONAL)(-[1-5X])?|(COMP)(-[1-5X])?|'
             r'BINARY-C-LONG|'
             r'BINARY-CHAR|BINARY-DOUBLE|BINARY-LONG|BINARY-SHORT|'
             r'BINARY)\s*($|(?=[^\w\-]))', Keyword.Type),
            # Operators
            (r'(\*\*|\*|\+|-|/|<=|>=|<|>|==|/=|=)', Operator),
            # (r'(::)', Keyword.Declaration),
            (r'([(),;:&%.])', Punctuation),
            # Intrinsics
            (r'(^|(?<=[^\w\-]))(ABS|ACOS|ANNUITY|ASIN|ATAN|BYTE-LENGTH|'
             r'CHAR|COMBINED-DATETIME|CONCATENATE|COS|CURRENT-DATE|'
             r'DATE-OF-INTEGER|DATE-TO-YYYYMMDD|DAY-OF-INTEGER|DAY-TO-YYYYDDD|'
             r'EXCEPTION-(?:FILE|LOCATION|STATEMENT|STATUS)|EXP10|EXP|E|'
             r'FACTORIAL|FRACTION-PART|INTEGER-OF-(?:DATE|DAY|PART)|INTEGER|'
             r'LENGTH|LOCALE-(?:DATE|TIME(?:-FROM-SECONDS)?)|LOG(?:10)?|'
             r'LOWER-CASE|MAX|MEAN|MEDIAN|MIDRANGE|MIN|MOD|NUMVAL(?:-C)?|'
             r'ORD(?:-MAX|-MIN)?|PI|PRESENT-VALUE|RANDOM|RANGE|REM|REVERSE|'
             r'SECONDS-FROM-FORMATTED-TIME|SECONDS-PAST-MIDNIGHT|SIGN|SIN|SQRT|'
             r'STANDARD-DEVIATION|STORED-CHAR-LENGTH|SUBSTITUTE(?:-CASE)?|'
             r'SUM|TAN|TEST-DATE-YYYYMMDD|TEST-DAY-YYYYDDD|TRIM|'
             r'UPPER-CASE|VARIANCE|WHEN-COMPILED|YEAR-TO-YYYY)\s*'
             r'($|(?=[^\w\-]))', Name.Function),
            # Booleans
            (r'(^|(?<=[^\w\-]))(true|false)\s*($|(?=[^\w\-]))', Name.Builtin),
            # Comparing Operators
            (r'(^|(?<=[^\w\-]))(equal|equals|ne|lt|le|gt|ge|'
             r'greater|less|than|not|and|or)\s*($|(?=[^\w\-]))', Operator.Word),
        ],

        # \"[^\"\n]*\"|\'[^\'\n]*\'
        'strings': [
            # apparently strings can be delimited by EOL if they are continued
            # in the next line
            (r'"[^"\n]*("|\n)', String.Double),
            (r"'[^'\n]*('|\n)", String.Single),
        ],

        'nums': [
            (r'\d+(\s*|\.$|$)', Number.Integer),
            (r'[+-]?\d*\.\d+(E[-+]?\d+)?', Number.Float),
            (r'[+-]?\d+\.\d*(E[-+]?\d+)?', Number.Float),
        ],
    }
class CobolFreeformatLexer(CobolLexer):
    """
    Lexer for Free format OpenCOBOL code.
    """

    name = 'COBOLFree'
    aliases = ['cobolfree']
    filenames = ['*.cbl', '*.CBL']
    mimetypes = []
    url = 'https://opencobol.add1tocobol.com'
    version_added = '1.6'
    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        # Only the comment rules change for free format — there is no fixed
        # six-column sequence area.  Every other token state is inherited
        # from CobolLexer.
        'comment': [
            (r'(\*>.*\n|^\w*\*.*$)', Comment),
        ],
    }
class ABAPLexer(RegexLexer):
    """
    Lexer for ABAP, SAP's integrated language.
    """

    name = 'ABAP'
    aliases = ['abap']
    filenames = ['*.abap', '*.ABAP']
    mimetypes = ['text/x-abap']
    url = 'https://community.sap.com/topics/abap'
    version_added = '1.1'
    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        'common': [
            (r'\s+', Whitespace),
            # Full-line '*' comments and inline '"' comments.
            (r'^\*.*$', Comment.Single),
            (r'\".*?\n', Comment.Single),
            # Pragmas such as ##NO_TEXT.
            (r'##\w+', Comment.Special),
        ],
        'variable-names': [
            # Field symbols like <fs_line>.
            (r'<\S+>', Name.Variable),
            (r'\w[\w~]*(?:(\[\])|->\*)?', Name.Variable),
        ],
        'root': [
            include('common'),
            # function calls
            (r'CALL\s+(?:BADI|CUSTOMER-FUNCTION|FUNCTION)',
             Keyword),
            (r'(CALL\s+(?:DIALOG|SCREEN|SUBSCREEN|SELECTION-SCREEN|'
             r'TRANSACTION|TRANSFORMATION))\b',
             Keyword),
            (r'(FORM|PERFORM)(\s+)(\w+)',
             bygroups(Keyword, Whitespace, Name.Function)),
            (r'(PERFORM)(\s+)(\()(\w+)(\))',
             bygroups(Keyword, Whitespace, Punctuation, Name.Variable, Punctuation)),
            (r'(MODULE)(\s+)(\S+)(\s+)(INPUT|OUTPUT)',
             bygroups(Keyword, Whitespace, Name.Function, Whitespace, Keyword)),

            # method implementation
            (r'(METHOD)(\s+)([\w~]+)',
             bygroups(Keyword, Whitespace, Name.Function)),
            # method calls
            (r'(\s+)([\w\-]+)([=\-]>)([\w\-~]+)',
             bygroups(Whitespace, Name.Variable, Operator, Name.Function)),
            # call methodnames returning style
            (r'(?<=(=|-)>)([\w\-~]+)(?=\()', Name.Function),

            # text elements
            (r'(TEXT)(-)(\d{3})',
             bygroups(Keyword, Punctuation, Number.Integer)),
            (r'(TEXT)(-)(\w{3})',
             bygroups(Keyword, Punctuation, Name.Variable)),

            # keywords with dashes in them.
            # these need to be first, because for instance the -ID part
            # of MESSAGE-ID wouldn't get highlighted if MESSAGE was
            # first in the list of keywords.
            (r'(ADD-CORRESPONDING|AUTHORITY-CHECK|'
             r'CLASS-DATA|CLASS-EVENTS|CLASS-METHODS|CLASS-POOL|'
             r'DELETE-ADJACENT|DIVIDE-CORRESPONDING|'
             r'EDITOR-CALL|ENHANCEMENT-POINT|ENHANCEMENT-SECTION|EXIT-COMMAND|'
             r'FIELD-GROUPS|FIELD-SYMBOLS|FIELD-SYMBOL|FUNCTION-POOL|'
             r'INTERFACE-POOL|INVERTED-DATE|'
             r'LOAD-OF-PROGRAM|LOG-POINT|'
             r'MESSAGE-ID|MOVE-CORRESPONDING|MULTIPLY-CORRESPONDING|'
             r'NEW-LINE|NEW-PAGE|NEW-SECTION|NO-EXTENSION|'
             r'OUTPUT-LENGTH|PRINT-CONTROL|'
             r'SELECT-OPTIONS|START-OF-SELECTION|SUBTRACT-CORRESPONDING|'
             r'SYNTAX-CHECK|SYSTEM-EXCEPTIONS|'
             r'TYPE-POOL|TYPE-POOLS|NO-DISPLAY'
             r')\b', Keyword),

            # keyword kombinations
            (r'(?<![-\>])(CREATE\s+(PUBLIC|PRIVATE|DATA|OBJECT)|'
             r'(PUBLIC|PRIVATE|PROTECTED)\s+SECTION|'
             r'(TYPE|LIKE)\s+((LINE\s+OF|REF\s+TO|'
             r'(SORTED|STANDARD|HASHED)\s+TABLE\s+OF))?|'
             r'FROM\s+(DATABASE|MEMORY)|CALL\s+METHOD|'
             r'(GROUP|ORDER) BY|HAVING|SEPARATED BY|'
             r'GET\s+(BADI|BIT|CURSOR|DATASET|LOCALE|PARAMETER|'
             r'PF-STATUS|(PROPERTY|REFERENCE)\s+OF|'
             r'RUN\s+TIME|TIME\s+(STAMP)?)?|'
             # FIX: 'UPADTE' was a typo for 'UPDATE' — the ABAP statement is
             # SET UPDATE TASK LOCAL, so the misspelled alternative could
             # never match real code.
             r'SET\s+(BIT|BLANK\s+LINES|COUNTRY|CURSOR|DATASET|EXTENDED\s+CHECK|'
             r'HANDLER|HOLD\s+DATA|LANGUAGE|LEFT\s+SCROLL-BOUNDARY|'
             r'LOCALE|MARGIN|PARAMETER|PF-STATUS|PROPERTY\s+OF|'
             r'RUN\s+TIME\s+(ANALYZER|CLOCK\s+RESOLUTION)|SCREEN|'
             r'TITLEBAR|UPDATE\s+TASK\s+LOCAL|USER-COMMAND)|'
             r'CONVERT\s+((INVERTED-)?DATE|TIME|TIME\s+STAMP|TEXT)|'
             r'(CLOSE|OPEN)\s+(DATASET|CURSOR)|'
             r'(TO|FROM)\s+(DATA BUFFER|INTERNAL TABLE|MEMORY ID|'
             r'DATABASE|SHARED\s+(MEMORY|BUFFER))|'
             r'DESCRIBE\s+(DISTANCE\s+BETWEEN|FIELD|LIST|TABLE)|'
             r'FREE\s(MEMORY|OBJECT)?|'
             r'PROCESS\s+(BEFORE\s+OUTPUT|AFTER\s+INPUT|'
             r'ON\s+(VALUE-REQUEST|HELP-REQUEST))|'
             r'AT\s+(LINE-SELECTION|USER-COMMAND|END\s+OF|NEW)|'
             r'AT\s+SELECTION-SCREEN(\s+(ON(\s+(BLOCK|(HELP|VALUE)-REQUEST\s+FOR|'
             r'END\s+OF|RADIOBUTTON\s+GROUP))?|OUTPUT))?|'
             r'SELECTION-SCREEN:?\s+((BEGIN|END)\s+OF\s+((TABBED\s+)?BLOCK|LINE|'
             r'SCREEN)|COMMENT|FUNCTION\s+KEY|'
             r'INCLUDE\s+BLOCKS|POSITION|PUSHBUTTON|'
             r'SKIP|ULINE)|'
             r'LEAVE\s+(LIST-PROCESSING|PROGRAM|SCREEN|'
             r'TO LIST-PROCESSING|TO TRANSACTION)'
             r'(ENDING|STARTING)\s+AT|'
             r'FORMAT\s+(COLOR|INTENSIFIED|INVERSE|HOTSPOT|INPUT|FRAMES|RESET)|'
             r'AS\s+(CHECKBOX|SUBSCREEN|WINDOW)|'
             r'WITH\s+(((NON-)?UNIQUE)?\s+KEY|FRAME)|'
             r'(BEGIN|END)\s+OF|'
             r'DELETE(\s+ADJACENT\s+DUPLICATES\sFROM)?|'
             r'COMPARING(\s+ALL\s+FIELDS)?|'
             r'(INSERT|APPEND)(\s+INITIAL\s+LINE\s+(IN)?TO|\s+LINES\s+OF)?|'
             r'IN\s+((BYTE|CHARACTER)\s+MODE|PROGRAM)|'
             r'END-OF-(DEFINITION|PAGE|SELECTION)|'
             r'WITH\s+FRAME(\s+TITLE)|'
             r'(REPLACE|FIND)\s+((FIRST|ALL)\s+OCCURRENCES?\s+OF\s+)?(SUBSTRING|REGEX)?|'
             r'MATCH\s+(LENGTH|COUNT|LINE|OFFSET)|'
             r'(RESPECTING|IGNORING)\s+CASE|'
             r'IN\s+UPDATE\s+TASK|'
             r'(SOURCE|RESULT)\s+(XML)?|'
             r'REFERENCE\s+INTO|'

             # simple kombinations
             r'AND\s+(MARK|RETURN)|CLIENT\s+SPECIFIED|CORRESPONDING\s+FIELDS\s+OF|'
             r'IF\s+FOUND|FOR\s+EVENT|INHERITING\s+FROM|LEAVE\s+TO\s+SCREEN|'
             r'LOOP\s+AT\s+(SCREEN)?|LOWER\s+CASE|MATCHCODE\s+OBJECT|MODIF\s+ID|'
             r'MODIFY\s+SCREEN|NESTING\s+LEVEL|NO\s+INTERVALS|OF\s+STRUCTURE|'
             r'RADIOBUTTON\s+GROUP|RANGE\s+OF|REF\s+TO|SUPPRESS DIALOG|'
             r'TABLE\s+OF|UPPER\s+CASE|TRANSPORTING\s+NO\s+FIELDS|'
             r'VALUE\s+CHECK|VISIBLE\s+LENGTH|HEADER\s+LINE|COMMON\s+PART)\b', Keyword),

            # single word keywords.
            (r'(^|(?<=(\s|\.)))(ABBREVIATED|ABSTRACT|ADD|ALIASES|ALIGN|ALPHA|'
             r'ASSERT|AS|ASSIGN(ING)?|AT(\s+FIRST)?|'
             r'BACK|BLOCK|BREAK-POINT|'
             r'CASE|CAST|CATCH|CHANGING|CHECK|CLASS|CLEAR|COLLECT|COLOR|COMMIT|COND|CONV|'
             r'CREATE|COMMUNICATION|COMPONENTS?|COMPUTE|CONCATENATE|CONDENSE|'
             r'CONSTANTS|CONTEXTS|CONTINUE|CONTROLS|COUNTRY|CURRENCY|'
             r'DATA|DATE|DECIMALS|DEFAULT|DEFINE|DEFINITION|DEFERRED|DEMAND|'
             r'DETAIL|DIRECTORY|DIVIDE|DO|DUMMY|'
             r'ELSE(IF)?|ENDAT|ENDCASE|ENDCATCH|ENDCLASS|ENDDO|ENDFORM|ENDFUNCTION|'
             r'ENDIF|ENDINTERFACE|ENDLOOP|ENDMETHOD|ENDMODULE|ENDSELECT|ENDTRY|ENDWHILE|'
             r'ENHANCEMENT|EVENTS|EXACT|EXCEPTIONS?|EXIT|EXPONENT|EXPORT|EXPORTING|EXTRACT|'
             r'FETCH|FIELDS?|FOR|FORM|FORMAT|FREE|FROM|FUNCTION|'
             r'HIDE|'
             r'ID|IF|IMPORT|IMPLEMENTATION|IMPORTING|IN|INCLUDE|INCLUDING|'
             r'INDEX|INFOTYPES|INITIALIZATION|INTERFACE|INTERFACES|INTO|'
             r'LANGUAGE|LEAVE|LENGTH|LINES|LOAD|LOCAL|'
             r'JOIN|'
             r'KEY|'
             r'NEW|NEXT|'
             r'MAXIMUM|MESSAGE|METHOD[S]?|MINIMUM|MODULE|MODIFIER|MODIFY|MOVE|MULTIPLY|'
             r'NODES|NUMBER|'
             r'OBLIGATORY|OBJECT|OF|OFF|ON|OTHERS|OVERLAY|'
             r'PACK|PAD|PARAMETERS|PERCENTAGE|POSITION|PROGRAM|PROVIDE|PUBLIC|PUT|PF\d\d|'
             r'RAISE|RAISING|RANGES?|READ|RECEIVE|REDEFINITION|REFRESH|REJECT|REPORT|RESERVE|'
             r'REF|RESUME|RETRY|RETURN|RETURNING|RIGHT|ROLLBACK|REPLACE|'
             r'SCROLL|SEARCH|SELECT|SHIFT|SIGN|SINGLE|SIZE|SKIP|SORT|SPLIT|STATICS|STOP|'
             r'STYLE|SUBMATCHES|SUBMIT|SUBTRACT|SUM(?!\()|SUMMARY|SUMMING|SUPPLY|SWITCH|'
             r'TABLE|TABLES|TIMESTAMP|TIMES?|TIMEZONE|TITLE|\??TO|'
             r'TOP-OF-PAGE|TRANSFER|TRANSLATE|TRY|TYPES|'
             r'ULINE|UNDER|UNPACK|UPDATE|USING|'
             r'VALUE|VALUES|VIA|VARYING|VARY|'
             r'WAIT|WHEN|WHERE|WIDTH|WHILE|WITH|WINDOW|WRITE|XSD|ZERO)\b', Keyword),

            # builtins
            (r'(abs|acos|asin|atan|'
             r'boolc|boolx|bit_set|'
             r'char_off|charlen|ceil|cmax|cmin|condense|contains|'
             r'contains_any_of|contains_any_not_of|concat_lines_of|cos|cosh|'
             r'count|count_any_of|count_any_not_of|'
             r'dbmaxlen|distance|'
             r'escape|exp|'
             r'find|find_end|find_any_of|find_any_not_of|floor|frac|from_mixed|'
             r'insert|'
             r'lines|log|log10|'
             r'match|matches|'
             r'nmax|nmin|numofchar|'
             r'repeat|replace|rescale|reverse|round|'
             r'segment|shift_left|shift_right|sign|sin|sinh|sqrt|strlen|'
             r'substring|substring_after|substring_from|substring_before|substring_to|'
             r'tan|tanh|to_upper|to_lower|to_mixed|translate|trunc|'
             r'xstrlen)(\()\b', bygroups(Name.Builtin, Punctuation)),

            # Numbered placeholders (&1 .. &9) and integer literals.
            (r'&[0-9]', Name),
            (r'[0-9]+', Number.Integer),

            # operators which look like variable names before
            # parsing variable names.
            (r'(?<=(\s|.))(AND|OR|EQ|NE|GT|LT|GE|LE|CO|CN|CA|NA|CS|NOT|NS|CP|NP|'
             r'BYTE-CO|BYTE-CN|BYTE-CA|BYTE-NA|BYTE-CS|BYTE-NS|'
             r'IS\s+(NOT\s+)?(INITIAL|ASSIGNED|REQUESTED|BOUND))\b', Operator.Word),

            include('variable-names'),

            # standard operators after variable names,
            # because < and > are part of field symbols.
            (r'[?*<>=\-+&]', Operator),
            (r"'(''|[^'])*'", String.Single),
            (r"`([^`])*`", String.Single),
            # String template text between | or } and { or |.
            (r"([|}])([^{}|]*?)([|{])",
             bygroups(Punctuation, String.Single, Punctuation)),
            (r'[/;:()\[\],.]', Punctuation),
            (r'(!)(\w+)', bygroups(Operator, Name)),
        ],
    }
class OpenEdgeLexer(RegexLexer):
    """
    Lexer for OpenEdge ABL (formerly Progress) source code.

    Keywords and type names are matched case-insensitively and must be
    delimited by non-word characters (ABL identifiers may contain ``-``,
    hence the ``[^\\w\\-]`` boundaries instead of ``\\b``).
    """
    name = 'OpenEdge ABL'
    aliases = ['openedge', 'abl', 'progress']
    filenames = ['*.p', '*.cls']
    mimetypes = ['text/x-openedge', 'application/x-openedge']
    url = 'https://www.progress.com/openedge/features/abl'
    version_added = '1.5'

    # Built-in data types, including the ABL-style abbreviated spellings
    # (e.g. CHAR, CHARA, ... for CHARACTER).
    types = (r'(?i)(^|(?<=[^\w\-]))(CHARACTER|CHAR|CHARA|CHARAC|CHARACT|CHARACTE|'
             r'COM-HANDLE|DATE|DATETIME|DATETIME-TZ|'
             r'DECIMAL|DEC|DECI|DECIM|DECIMA|HANDLE|'
             r'INT64|INTEGER|INT|INTE|INTEG|INTEGE|'
             r'LOGICAL|LONGCHAR|MEMPTR|RAW|RECID|ROWID)\s*($|(?=[^\w\-]))')

    # Keyword list is shared via OPENEDGEKEYWORDS (defined elsewhere in the
    # package); same word-boundary convention as `types`.
    keywords = words(OPENEDGEKEYWORDS,
                     prefix=r'(?i)(^|(?<=[^\w\-]))',
                     suffix=r'\s*($|(?=[^\w\-]))')

    tokens = {
        'root': [
            (r'/\*', Comment.Multiline, 'comment'),
            # `{...}` is the ABL include/preprocessor reference syntax.
            (r'\{', Comment.Preproc, 'preprocessor'),
            (r'\s*&.*', Comment.Preproc),
            (r'0[xX][0-9a-fA-F]+[LlUu]*', Number.Hex),
            # DEFINE and its abbreviations.
            (r'(?i)(DEFINE|DEF|DEFI|DEFIN)\b', Keyword.Declaration),
            (types, Keyword.Type),
            (keywords, Name.Builtin),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\s+', Whitespace),
            (r'[+*/=-]', Operator),
            (r'[.:()]', Punctuation),
            (r'.', Name.Variable),  # Lazy catch-all
        ],
        'comment': [
            # ABL block comments nest; push/pop on the delimiters.
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        'preprocessor': [
            # Include references may nest as well.
            (r'[^{}]', Comment.Preproc),
            (r'\{', Comment.Preproc, '#push'),
            (r'\}', Comment.Preproc, '#pop'),
        ],
    }

    def analyse_text(text):
        """Try to identify OpenEdge ABL based on a few common constructs."""
        result = 0
        if 'END.' in text:
            result += 0.05
        if 'END PROCEDURE.' in text:
            result += 0.05
        if 'ELSE DO:' in text:
            result += 0.05
        return result
class GoodDataCLLexer(RegexLexer):
    """
    Lexer for GoodData-CL script files.

    A script is a sequence of ``FunctionName(arg=value, ...);`` calls plus
    ``#`` line comments; the states below mirror that structure.
    """
    name = 'GoodData-CL'
    aliases = ['gooddata-cl']
    filenames = ['*.gdc']
    mimetypes = ['text/x-gooddata-cl']
    url = 'https://github.com/gooddata/GoodData-CL'
    version_added = '1.4'

    flags = re.IGNORECASE

    # Syntax:
    # https://github.com/gooddata/GoodData-CL/raw/master/cli/src/main/resources/com/gooddata/processor/COMMANDS.txt
    tokens = {
        'root': [
            # Comments
            (r'#.*', Comment.Single),
            # Function call
            (r'[a-z]\w*', Name.Function),
            # Argument list
            (r'\(', Punctuation, 'args-list'),
            # Punctuation
            (r';', Punctuation),
            # Space is not significant
            (r'\s+', Text)
        ],
        'args-list': [
            (r'\)', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'[a-z]\w*', Name.Variable),
            (r'=', Operator),
            (r'"', String, 'string-literal'),
            # Integer/decimal with optional exponent.
            (r'[0-9]+(?:\.[0-9]+)?(?:e[+-]?[0-9]{1,3})?', Number),
            # Space is not significant
            (r'\s', Whitespace)
        ],
        'string-literal': [
            (r'\\[tnrfbae"\\]', String.Escape),
            (r'"', String, '#pop'),
            (r'[^\\"]+', String)
        ]
    }
class MaqlLexer(RegexLexer):
    """
    Lexer for GoodData MAQL scripts.

    MAQL mixes SQL-like keywords with ``{identifier}`` variable references
    and ``[object]`` metadata references; rule order below matters because
    keywords must win over the generic function-name rule.
    """
    name = 'MAQL'
    aliases = ['maql']
    filenames = ['*.maql']
    mimetypes = ['text/x-gooddata-maql', 'application/x-gooddata-maql']
    url = 'https://help.gooddata.com/doc/enterprise/en/dashboards-and-insights/maql-analytical-query-language'
    version_added = '1.4'

    flags = re.IGNORECASE

    tokens = {
        'root': [
            # IDENTITY
            (r'IDENTIFIER\b', Name.Builtin),
            # IDENTIFIER
            (r'\{[^}]+\}', Name.Variable),
            # NUMBER
            (r'[0-9]+(?:\.[0-9]+)?(?:e[+-]?[0-9]{1,3})?', Number),
            # STRING
            (r'"', String, 'string-literal'),
            # RELATION operators; <> and != before the single-char forms.
            (r'\<\>|\!\=', Operator),
            (r'\=|\>\=|\>|\<\=|\<', Operator),
            # := assignment
            (r'\:\=', Operator),
            # OBJECT reference
            (r'\[[^]]+\]', Name.Variable.Class),
            # keywords
            (words((
                'DIMENSION', 'DIMENSIONS', 'BOTTOM', 'METRIC', 'COUNT', 'OTHER',
                'FACT', 'WITH', 'TOP', 'OR', 'ATTRIBUTE', 'CREATE', 'PARENT',
                'FALSE', 'ROW', 'ROWS', 'FROM', 'ALL', 'AS', 'PF', 'COLUMN',
                'COLUMNS', 'DEFINE', 'REPORT', 'LIMIT', 'TABLE', 'LIKE', 'AND',
                'BY', 'BETWEEN', 'EXCEPT', 'SELECT', 'MATCH', 'WHERE', 'TRUE',
                'FOR', 'IN', 'WITHOUT', 'FILTER', 'ALIAS', 'WHEN', 'NOT', 'ON',
                'KEYS', 'KEY', 'FULLSET', 'PRIMARY', 'LABELS', 'LABEL',
                'VISUAL', 'TITLE', 'DESCRIPTION', 'FOLDER', 'ALTER', 'DROP',
                'ADD', 'DATASET', 'DATATYPE', 'INT', 'BIGINT', 'DOUBLE', 'DATE',
                'VARCHAR', 'DECIMAL', 'SYNCHRONIZE', 'TYPE', 'DEFAULT', 'ORDER',
                'ASC', 'DESC', 'HYPERLINK', 'INCLUDE', 'TEMPLATE', 'MODIFY'),
                suffix=r'\b'),
             Keyword),
            # FUNCNAME — anything word-like that was not a keyword.
            (r'[a-z]\w*\b', Name.Function),
            # Comments
            (r'#.*', Comment.Single),
            # Punctuation
            (r'[,;()]', Punctuation),
            # Space is not significant
            (r'\s+', Whitespace)
        ],
        'string-literal': [
            (r'\\[tnrfbae"\\]', String.Escape),
            (r'"', String, '#pop'),
            (r'[^\\"]+', String)
        ],
    }

View File

@@ -0,0 +1,414 @@
"""
pygments.lexers.c_cpp
~~~~~~~~~~~~~~~~~~~~~
Lexers for C/C++ languages.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, \
this, inherit, default, words
from pygments.util import get_bool_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['CLexer', 'CppLexer']
class CFamilyLexer(RegexLexer):
    """
    For C family source code. This is used as a base class to avoid repetitious
    definitions.

    Subclasses (CLexer, CppLexer, and the c_like module's lexers) extend the
    ``tokens`` states via ``inherit``; rule order within each state is
    significant, so changes here must preserve ordering.
    """

    # The trailing ?, rather than *, avoids a geometric performance drop here.
    #: only one /* */ style comment
    _ws1 = r'\s*(?:/[*].*?[*]/\s*)?'

    # Hexadecimal part in an hexadecimal integer/floating-point literal.
    # This includes decimal separators matching.
    _hexpart = r'[0-9a-fA-F](\'?[0-9a-fA-F])*'
    # Decimal part in an decimal integer/floating-point literal.
    # This includes decimal separators matching.
    _decpart = r'\d(\'?\d)*'
    # Integer literal suffix (e.g. 'ull' or 'll').
    _intsuffix = r'(([uU][lL]{0,2})|[lL]{1,2}[uU]?)?'

    # Identifier regex with C and C++ Universal Character Name (UCN) support.
    _ident = r'(?!\d)(?:[\w$]|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8})+'
    # Same, but also allowing :: for namespace-qualified names.
    _namespaced_ident = r'(?!\d)(?:[\w$]|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|::)+'

    # Single and multiline comment regexes
    # Beware not to use *? for the inner content! When these regexes
    # are embedded in larger regexes, that can cause the stuff*? to
    # match more than it would have if the regex had been used in
    # a standalone way ...
    _comment_single = r'//(?:.|(?<=\\)\n)*\n'
    _comment_multiline = r'/(?:\\\n)?[*](?:[^*]|[*](?!(?:\\\n)?/))*[*](?:\\\n)?/'

    # Regex to match optional comments
    _possible_comments = rf'\s*(?:(?:(?:{_comment_single})|(?:{_comment_multiline}))\s*)*'

    tokens = {
        'whitespace': [
            # preprocessor directives: without whitespace
            (r'^#if\s+0', Comment.Preproc, 'if0'),
            ('^#', Comment.Preproc, 'macro'),
            # or with whitespace
            ('^(' + _ws1 + r')(#if\s+0)',
             bygroups(using(this), Comment.Preproc), 'if0'),
            ('^(' + _ws1 + ')(#)',
             bygroups(using(this), Comment.Preproc), 'macro'),
            # Labels:
            # Line start and possible indentation.
            (r'(^[ \t]*)'
             # Not followed by keywords which can be mistaken as labels.
             r'(?!(?:public|private|protected|default)\b)'
             # Actual label, followed by a single colon.
             r'(' + _ident + r')(\s*)(:)(?!:)',
             bygroups(Whitespace, Name.Label, Whitespace, Punctuation)),
            (r'\n', Whitespace),
            (r'[^\S\n]+', Whitespace),
            (r'\\\n', Text),  # line continuation
            (_comment_single, Comment.Single),
            (_comment_multiline, Comment.Multiline),
            # Open until EOF, so no ending delimiter
            (r'/(\\\n)?[*][\w\W]*', Comment.Multiline),
        ],
        'statements': [
            include('keywords'),
            include('types'),
            # String and character literals with optional encoding prefix.
            (r'([LuU]|u8)?(")', bygroups(String.Affix, String), 'string'),
            (r"([LuU]|u8)?(')(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])(')",
             bygroups(String.Affix, String.Char, String.Char, String.Char)),
            # Hexadecimal floating-point literals (C11, C++17)
            (r'0[xX](' + _hexpart + r'\.' + _hexpart + r'|\.' + _hexpart +
             r'|' + _hexpart + r')[pP][+-]?' + _hexpart + r'[lL]?', Number.Float),
            (r'(-)?(' + _decpart + r'\.' + _decpart + r'|\.' + _decpart + r'|' +
             _decpart + r')[eE][+-]?' + _decpart + r'[fFlL]?', Number.Float),
            (r'(-)?((' + _decpart + r'\.(' + _decpart + r')?|\.' +
             _decpart + r')[fFlL]?)|(' + _decpart + r'[fFlL])', Number.Float),
            (r'(-)?0[xX]' + _hexpart + _intsuffix, Number.Hex),
            (r'(-)?0[bB][01](\'?[01])*' + _intsuffix, Number.Bin),
            (r'(-)?0(\'?[0-7])+' + _intsuffix, Number.Oct),
            (r'(-)?' + _decpart + _intsuffix, Number.Integer),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.]', Punctuation),
            (r'(true|false|NULL)\b', Name.Builtin),
            (_ident, Name)
        ],
        'types': [
            # MSVC-style sized types (__int8 etc.) and other reserved type names.
            (words(('int8', 'int16', 'int32', 'int64', 'wchar_t'), prefix=r'__',
                   suffix=r'\b'), Keyword.Reserved),
            (words(('bool', 'int', 'long', 'float', 'short', 'double', 'char',
                    'unsigned', 'signed', 'void', '_BitInt',
                    '__int128'), suffix=r'\b'), Keyword.Type)
        ],
        'keywords': [
            (r'(struct|union)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
            # `case` enters a sub-state so the case value can be highlighted
            # as a constant (see 'case-value').
            (r'case\b', Keyword, 'case-value'),
            (words(('asm', 'auto', 'break', 'const', 'continue', 'default',
                    'do', 'else', 'enum', 'extern', 'for', 'goto', 'if',
                    'register', 'restricted', 'return', 'sizeof', 'struct',
                    'static', 'switch', 'typedef', 'volatile', 'while', 'union',
                    'thread_local', 'alignas', 'alignof', 'static_assert', '_Pragma'),
                   suffix=r'\b'), Keyword),
            (words(('inline', '_inline', '__inline', 'naked', 'restrict',
                    'thread'), suffix=r'\b'), Keyword.Reserved),
            # Vector intrinsics
            (r'(__m(128i|128d|128|64))\b', Keyword.Reserved),
            # Microsoft-isms
            (words((
                'asm', 'based', 'except', 'stdcall', 'cdecl',
                'fastcall', 'declspec', 'finally', 'try',
                'leave', 'w64', 'unaligned', 'raise', 'noop',
                'identifier', 'forceinline', 'assume'),
                prefix=r'__', suffix=r'\b'), Keyword.Reserved)
        ],
        'root': [
            include('whitespace'),
            include('keywords'),
            # functions (definition: signature followed by `{`)
            (r'(' + _namespaced_ident + r'(?:[&*\s])+)'  # return arguments
             r'(' + _possible_comments + r')'
             r'(' + _namespaced_ident + r')'  # method name
             r'(' + _possible_comments + r')'
             r'(\([^;"\')]*?\))'  # signature
             r'(' + _possible_comments + r')'
             r'([^;{/"\']*)(\{)',
             bygroups(using(this), using(this, state='whitespace'),
                      Name.Function, using(this, state='whitespace'),
                      using(this), using(this, state='whitespace'),
                      using(this), Punctuation),
             'function'),
            # function declarations (signature followed by `;`)
            (r'(' + _namespaced_ident + r'(?:[&*\s])+)'  # return arguments
             r'(' + _possible_comments + r')'
             r'(' + _namespaced_ident + r')'  # method name
             r'(' + _possible_comments + r')'
             r'(\([^;"\')]*?\))'  # signature
             r'(' + _possible_comments + r')'
             r'([^;/"\']*)(;)',
             bygroups(using(this), using(this, state='whitespace'),
                      Name.Function, using(this, state='whitespace'),
                      using(this), using(this, state='whitespace'),
                      using(this), Punctuation)),
            include('types'),
            default('statement'),
        ],
        'statement': [
            include('whitespace'),
            include('statements'),
            (r'\}', Punctuation),
            (r'[{;]', Punctuation, '#pop'),
        ],
        'function': [
            include('whitespace'),
            include('statements'),
            (';', Punctuation),
            # Braces nest inside a function body.
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        'macro': [
            # #include with "..." or <...> gets its filename highlighted.
            (r'('+_ws1+r')(include)('+_ws1+r')("[^"]+")([^\n]*)',
             bygroups(using(this), Comment.Preproc, using(this),
                      Comment.PreprocFile, Comment.Single)),
            (r'('+_ws1+r')(include)('+_ws1+r')(<[^>]+>)([^\n]*)',
             bygroups(using(this), Comment.Preproc, using(this),
                      Comment.PreprocFile, Comment.Single)),
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            # A backslash-continued line keeps us in the macro state.
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            # Nested #if inside an `#if 0` block pushes another if0 level.
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ],
        'classname': [
            (_ident, Name.Class, '#pop'),
            # template specification
            (r'\s*(?=>)', Text, '#pop'),
            default('#pop')
        ],
        # Mark identifiers preceded by `case` keyword as constants.
        'case-value': [
            (r'(?<!:)(:)(?!:)', Punctuation, '#pop'),
            (_ident, Name.Constant),
            include('whitespace'),
            include('statements'),
        ]
    }

    # Type-name sets consulted in get_tokens_unprocessed; each can be
    # toggled by the corresponding *highlighting option.
    stdlib_types = {
        'size_t', 'ssize_t', 'off_t', 'wchar_t', 'ptrdiff_t', 'sig_atomic_t', 'fpos_t',
        'clock_t', 'time_t', 'va_list', 'jmp_buf', 'FILE', 'DIR', 'div_t', 'ldiv_t',
        'mbstate_t', 'wctrans_t', 'wint_t', 'wctype_t'}
    c99_types = {
        'int8_t', 'int16_t', 'int32_t', 'int64_t', 'uint8_t',
        'uint16_t', 'uint32_t', 'uint64_t', 'int_least8_t', 'int_least16_t',
        'int_least32_t', 'int_least64_t', 'uint_least8_t', 'uint_least16_t',
        'uint_least32_t', 'uint_least64_t', 'int_fast8_t', 'int_fast16_t', 'int_fast32_t',
        'int_fast64_t', 'uint_fast8_t', 'uint_fast16_t', 'uint_fast32_t', 'uint_fast64_t',
        'intptr_t', 'uintptr_t', 'intmax_t', 'uintmax_t'}
    linux_types = {
        'clockid_t', 'cpu_set_t', 'cpumask_t', 'dev_t', 'gid_t', 'id_t', 'ino_t', 'key_t',
        'mode_t', 'nfds_t', 'pid_t', 'rlim_t', 'sig_t', 'sighandler_t', 'siginfo_t',
        'sigset_t', 'sigval_t', 'socklen_t', 'timer_t', 'uid_t'}
    c11_atomic_types = {
        'atomic_bool', 'atomic_char', 'atomic_schar', 'atomic_uchar', 'atomic_short',
        'atomic_ushort', 'atomic_int', 'atomic_uint', 'atomic_long', 'atomic_ulong',
        'atomic_llong', 'atomic_ullong', 'atomic_char16_t', 'atomic_char32_t', 'atomic_wchar_t',
        'atomic_int_least8_t', 'atomic_uint_least8_t', 'atomic_int_least16_t',
        'atomic_uint_least16_t', 'atomic_int_least32_t', 'atomic_uint_least32_t',
        'atomic_int_least64_t', 'atomic_uint_least64_t', 'atomic_int_fast8_t',
        'atomic_uint_fast8_t', 'atomic_int_fast16_t', 'atomic_uint_fast16_t',
        'atomic_int_fast32_t', 'atomic_uint_fast32_t', 'atomic_int_fast64_t',
        'atomic_uint_fast64_t', 'atomic_intptr_t', 'atomic_uintptr_t', 'atomic_size_t',
        'atomic_ptrdiff_t', 'atomic_intmax_t', 'atomic_uintmax_t'}

    def __init__(self, **options):
        """Read the four *highlighting boolean options (all default True)."""
        self.stdlibhighlighting = get_bool_opt(options, 'stdlibhighlighting', True)
        self.c99highlighting = get_bool_opt(options, 'c99highlighting', True)
        self.c11highlighting = get_bool_opt(options, 'c11highlighting', True)
        self.platformhighlighting = get_bool_opt(options, 'platformhighlighting', True)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """Post-process Name tokens, promoting well-known type names to
        Keyword.Type according to the enabled options."""
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name:
                if self.stdlibhighlighting and value in self.stdlib_types:
                    token = Keyword.Type
                elif self.c99highlighting and value in self.c99_types:
                    token = Keyword.Type
                elif self.c11highlighting and value in self.c11_atomic_types:
                    token = Keyword.Type
                elif self.platformhighlighting and value in self.linux_types:
                    token = Keyword.Type
            yield index, token, value
class CLexer(CFamilyLexer):
    """
    For C source code with preprocessor directives.

    Additional options accepted:

    `stdlibhighlighting`
        Highlight common types found in the C/C++ standard library (e.g. `size_t`).
        (default: ``True``).

    `c99highlighting`
        Highlight common types found in the C99 standard library (e.g. `int8_t`).
        Actually, this includes all fixed-width integer types.
        (default: ``True``).

    `c11highlighting`
        Highlight atomic types found in the C11 standard library (e.g. `atomic_bool`).
        (default: ``True``).

    `platformhighlighting`
        Highlight common types found in the platform SDK headers (e.g. `clockid_t` on Linux).
        (default: ``True``).
    """
    name = 'C'
    aliases = ['c']
    filenames = ['*.c', '*.h', '*.idc', '*.x[bp]m']
    mimetypes = ['text/x-chdr', 'text/x-csrc', 'image/x-xbitmap', 'image/x-xpixmap']
    url = 'https://en.wikipedia.org/wiki/C_(programming_language)'
    version_added = ''
    priority = 0.1

    tokens = {
        # C-only keywords/types are prepended; `inherit` keeps the
        # CFamilyLexer rules after them.
        'keywords': [
            (words((
                '_Alignas', '_Alignof', '_Noreturn', '_Generic', '_Thread_local',
                '_Static_assert', '_Imaginary', 'noreturn', 'imaginary', 'complex'),
                suffix=r'\b'), Keyword),
            inherit
        ],
        'types': [
            (words(('_Bool', '_Complex', '_Atomic'), suffix=r'\b'), Keyword.Type),
            inherit
        ]
    }

    def analyse_text(text):
        # Preprocessor directives at line start are a reasonable C hint;
        # falls through (implicit None) when neither pattern matches.
        if re.search(r'^\s*#include [<"]', text, re.MULTILINE):
            return 0.1
        if re.search(r'^\s*#ifn?def ', text, re.MULTILINE):
            return 0.1
class CppLexer(CFamilyLexer):
    """
    For C++ source code with preprocessor directives.

    Additional options accepted:

    `stdlibhighlighting`
        Highlight common types found in the C/C++ standard library (e.g. `size_t`).
        (default: ``True``).

    `c99highlighting`
        Highlight common types found in the C99 standard library (e.g. `int8_t`).
        Actually, this includes all fixed-width integer types.
        (default: ``True``).

    `c11highlighting`
        Highlight atomic types found in the C11 standard library (e.g. `atomic_bool`).
        (default: ``True``).

    `platformhighlighting`
        Highlight common types found in the platform SDK headers (e.g. `clockid_t` on Linux).
        (default: ``True``).
    """
    name = 'C++'
    url = 'https://isocpp.org/'
    aliases = ['cpp', 'c++']
    filenames = ['*.cpp', '*.hpp', '*.c++', '*.h++',
                 '*.cc', '*.hh', '*.cxx', '*.hxx',
                 '*.C', '*.H', '*.cp', '*.CPP', '*.tpp']
    mimetypes = ['text/x-c++hdr', 'text/x-c++src']
    version_added = ''
    priority = 0.1

    tokens = {
        'statements': [
            # C++11 raw strings: the delimiter (group 3) is back-referenced
            # by \3 in the closing paren group.
            (r'((?:[LuU]|u8)?R)(")([^\\()\s]{,16})(\()((?:.|\n)*?)(\)\3)(")',
             bygroups(String.Affix, String, String.Delimiter, String.Delimiter,
                      String, String.Delimiter, String)),
            inherit,
        ],
        'root': [
            inherit,
            # C++ Microsoft-isms
            (words(('virtual_inheritance', 'uuidof', 'super', 'single_inheritance',
                    'multiple_inheritance', 'interface', 'event'),
                   prefix=r'__', suffix=r'\b'), Keyword.Reserved),
            # Offload C++ extensions, http://offload.codeplay.com/
            (r'__(offload|blockingoffload|outer)\b', Keyword.Pseudo),
        ],
        'enumname': [
            include('whitespace'),
            # 'enum class' and 'enum struct' C++11 support
            (words(('class', 'struct'), suffix=r'\b'), Keyword),
            (CFamilyLexer._ident, Name.Class, '#pop'),
            # template specification
            (r'\s*(?=>)', Text, '#pop'),
            default('#pop')
        ],
        'keywords': [
            (r'(class|concept|typename)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
            (words((
                'catch', 'const_cast', 'delete', 'dynamic_cast', 'explicit',
                'export', 'friend', 'mutable', 'new', 'operator',
                'private', 'protected', 'public', 'reinterpret_cast', 'class',
                '__restrict', 'static_cast', 'template', 'this', 'throw', 'throws',
                'try', 'typeid', 'using', 'virtual', 'constexpr', 'nullptr', 'concept',
                'decltype', 'noexcept', 'override', 'final', 'constinit', 'consteval',
                'co_await', 'co_return', 'co_yield', 'requires', 'import', 'module',
                'typename', 'and', 'and_eq', 'bitand', 'bitor', 'compl', 'not',
                'not_eq', 'or', 'or_eq', 'xor', 'xor_eq'),
                suffix=r'\b'), Keyword),
            (r'namespace\b', Keyword, 'namespace'),
            (r'(enum)(\s+)', bygroups(Keyword, Whitespace), 'enumname'),
            inherit
        ],
        'types': [
            (r'char(16_t|32_t|8_t)\b', Keyword.Type),
            inherit
        ],
        'namespace': [
            # `{` opens a namespace body; `;` ends a namespace alias.
            (r'[;{]', Punctuation, ('#pop', 'root')),
            (r'inline\b', Keyword.Reserved),
            (CFamilyLexer._ident, Name.Namespace),
            include('statement')
        ]
    }

    def analyse_text(text):
        # Heuristics only; falls through (implicit None) when neither matches.
        if re.search('#include <[a-z_]+>', text):
            return 0.2
        if re.search('using namespace ', text):
            return 0.4

View File

@@ -0,0 +1,738 @@
"""
pygments.lexers.c_like
~~~~~~~~~~~~~~~~~~~~~~
Lexers for other C-like languages.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, inherit, words, \
default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
from pygments.lexers.c_cpp import CLexer, CppLexer
from pygments.lexers import _mql_builtins
__all__ = ['PikeLexer', 'NesCLexer', 'ClayLexer', 'ECLexer', 'ValaLexer',
'CudaLexer', 'SwigLexer', 'MqlLexer', 'ArduinoLexer', 'CharmciLexer',
'OmgIdlLexer', 'PromelaLexer']
class PikeLexer(CppLexer):
    """
    For `Pike <http://pike.lysator.liu.se/>`_ source code.

    Extends CppLexer; Pike-specific keywords and types are prepended to the
    inherited 'statements' rules.
    """
    name = 'Pike'
    aliases = ['pike']
    filenames = ['*.pike', '*.pmod']
    mimetypes = ['text/x-pike']
    version_added = '2.0'

    tokens = {
        'statements': [
            (words((
                'catch', 'new', 'private', 'protected', 'public', 'gauge',
                'throw', 'throws', 'class', 'interface', 'implement', 'abstract',
                'extends', 'from', 'this', 'super', 'constant', 'final', 'static',
                'import', 'use', 'extern', 'inline', 'proto', 'break', 'continue',
                'if', 'else', 'for', 'while', 'do', 'switch', 'case', 'as', 'in',
                'version', 'return', 'true', 'false', 'null',
                '__VERSION__', '__MAJOR__', '__MINOR__', '__BUILD__', '__REAL_VERSION__',
                '__REAL_MAJOR__', '__REAL_MINOR__', '__REAL_BUILD__', '__DATE__', '__TIME__',
                '__FILE__', '__DIR__', '__LINE__', '__AUTO_BIGNUM__', '__NT__', '__PIKE__',
                '__amigaos__', '_Pragma', 'static_assert', 'defined', 'sscanf'), suffix=r'\b'),
             Keyword),
            # Built-in types plus the `*_t` naming convention.
            (r'(bool|int|long|float|short|double|char|string|object|void|mapping|'
             r'array|multiset|program|function|lambda|mixed|'
             r'[a-z_][a-z0-9_]*_t)\b',
             Keyword.Type),
            (r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
            # Pike also allows `@` as an operator character.
            (r'[~!%^&*+=|?:<>/@-]', Operator),
            inherit,
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            # template specification
            (r'\s*(?=>)', Whitespace, '#pop'),
        ],
    }
class NesCLexer(CLexer):
    """
    For `nesC <https://github.com/tinyos/nesc>`_ source code with preprocessor
    directives.

    Extends CLexer with nesC keywords and the `nx_*` network types.
    """
    name = 'nesC'
    aliases = ['nesc']
    filenames = ['*.nc']
    mimetypes = ['text/x-nescsrc']
    version_added = '2.0'

    tokens = {
        'statements': [
            (words((
                'abstract', 'as', 'async', 'atomic', 'call', 'command', 'component',
                'components', 'configuration', 'event', 'extends', 'generic',
                'implementation', 'includes', 'interface', 'module', 'new', 'norace',
                'post', 'provides', 'signal', 'task', 'uses'), suffix=r'\b'),
             Keyword),
            (words(('nx_struct', 'nx_union', 'nx_int8_t', 'nx_int16_t', 'nx_int32_t',
                    'nx_int64_t', 'nx_uint8_t', 'nx_uint16_t', 'nx_uint32_t',
                    'nx_uint64_t'), suffix=r'\b'),
             Keyword.Type),
            inherit,
        ],
    }
class ClayLexer(RegexLexer):
    """
    For Clay source.

    Standalone RegexLexer (not C-family based); strings support both plain
    ``"..."`` and triple-quoted ``\"\"\"...\"\"\"`` forms.
    """
    name = 'Clay'
    filenames = ['*.clay']
    aliases = ['clay']
    mimetypes = ['text/x-clay']
    url = 'http://claylabs.com/clay'
    version_added = '2.0'

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'//.*?$', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'\b(public|private|import|as|record|variant|instance'
             r'|define|overload|default|external|alias'
             r'|rvalue|ref|forward|inline|noinline|forceinline'
             r'|enum|var|and|or|not|if|else|goto|return|while'
             r'|switch|case|break|continue|for|in|true|false|try|catch|throw'
             r'|finally|onerror|staticassert|eval|when|newtype'
             r'|__FILE__|__LINE__|__COLUMN__|__ARG__'
             r')\b', Keyword),
            (r'[~!%^&*+=|:<>/-]', Operator),
            (r'[#(){}\[\],;.]', Punctuation),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'\d+[LlUu]*', Number.Integer),
            (r'\b(true|false)\b', Name.Builtin),
            # Identifiers may contain `?`.
            (r'(?i)[a-z_?][\w?]*', Name),
            (r'"""', String, 'tdqs'),
            (r'"', String, 'dqs'),
        ],
        # Shared string-content rules for both quote styles.
        'strings': [
            (r'(?i)\\(x[0-9a-f]{2}|.)', String.Escape),
            (r'[^\\"]+', String),
        ],
        # Newlines are only legal inside triple-quoted strings.
        'nl': [
            (r'\n', String),
        ],
        'dqs': [
            (r'"', String, '#pop'),
            include('strings'),
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl'),
        ],
    }
class ECLexer(CLexer):
    """
    For eC source code with preprocessor directives.

    Extends CLexer with eC keywords, extra integer types, and the
    null/value/this builtins.
    """
    name = 'eC'
    aliases = ['ec']
    filenames = ['*.ec', '*.eh']
    mimetypes = ['text/x-echdr', 'text/x-ecsrc']
    url = 'https://ec-lang.org'
    version_added = '1.5'

    tokens = {
        'statements': [
            (words((
                'virtual', 'class', 'private', 'public', 'property', 'import',
                'delete', 'new', 'new0', 'renew', 'renew0', 'define', 'get',
                'set', 'remote', 'dllexport', 'dllimport', 'stdcall', 'subclass',
                '__on_register_module', 'namespace', 'using', 'typed_object',
                'any_object', 'incref', 'register', 'watch', 'stopwatching', 'firewatchers',
                'watchable', 'class_designer', 'class_fixed', 'class_no_expansion', 'isset',
                'class_default_property', 'property_category', 'class_data',
                'class_property', 'thisclass', 'dbtable', 'dbindex',
                'database_open', 'dbfield'), suffix=r'\b'), Keyword),
            (words(('uint', 'uint16', 'uint32', 'uint64', 'bool', 'byte',
                    'unichar', 'int64'), suffix=r'\b'),
             Keyword.Type),
            (r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
            (r'(null|value|this)\b', Name.Builtin),
            inherit,
        ]
    }
class ValaLexer(RegexLexer):
    """
    For Vala source code with preprocessor directives.

    Standalone RegexLexer with C-like states plus Vala-specific code
    attributes (``[CCode ...]`` etc.) and verbatim ``\"\"\"...\"\"\"`` strings.
    """
    name = 'Vala'
    aliases = ['vala', 'vapi']
    filenames = ['*.vala', '*.vapi']
    mimetypes = ['text/x-vala']
    url = 'https://vala.dev'
    version_added = '1.1'

    tokens = {
        'whitespace': [
            (r'^\s*#if\s+0', Comment.Preproc, 'if0'),
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],
        'statements': [
            # `@"..."` is a Vala string template; `L"..."` a wide string.
            (r'[L@]?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'(?s)""".*?"""', String),  # verbatim strings
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
            (r'0[0-7]+[Ll]?', Number.Oct),
            (r'\d+[Ll]?', Number.Integer),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            # Simple (argument-less) code attributes.
            (r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])',
             bygroups(Punctuation, Name.Decorator, Punctuation)),
            # TODO: "correctly" parse complex code attributes
            (r'(\[)(CCode|(?:Integer|Floating)Type)',
             bygroups(Punctuation, Name.Decorator)),
            (r'[()\[\],.]', Punctuation),
            (words((
                'as', 'base', 'break', 'case', 'catch', 'construct', 'continue',
                'default', 'delete', 'do', 'else', 'enum', 'finally', 'for',
                'foreach', 'get', 'if', 'in', 'is', 'lock', 'new', 'out', 'params',
                'return', 'set', 'sizeof', 'switch', 'this', 'throw', 'try',
                'typeof', 'while', 'yield'), suffix=r'\b'),
             Keyword),
            (words((
                'abstract', 'const', 'delegate', 'dynamic', 'ensures', 'extern',
                'inline', 'internal', 'override', 'owned', 'private', 'protected',
                'public', 'ref', 'requires', 'signal', 'static', 'throws', 'unowned',
                'var', 'virtual', 'volatile', 'weak', 'yields'), suffix=r'\b'),
             Keyword.Declaration),
            (r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Whitespace),
             'namespace'),
            (r'(class|errordomain|interface|struct)(\s+)',
             bygroups(Keyword.Declaration, Whitespace), 'class'),
            (r'(\.)([a-zA-Z_]\w*)',
             bygroups(Operator, Name.Attribute)),
            # void is an actual keyword, others are in glib-2.0.vapi
            (words((
                'void', 'bool', 'char', 'double', 'float', 'int', 'int8', 'int16',
                'int32', 'int64', 'long', 'short', 'size_t', 'ssize_t', 'string',
                'time_t', 'uchar', 'uint', 'uint8', 'uint16', 'uint32', 'uint64',
                'ulong', 'unichar', 'ushort'), suffix=r'\b'),
             Keyword.Type),
            (r'(true|false|null)\b', Name.Builtin),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'root': [
            include('whitespace'),
            default('statement'),
        ],
        'statement': [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        'if0': [
            # Nested #if within `#if 0` pushes another level.
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ],
        'class': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        'namespace': [
            # Dotted namespace paths are allowed.
            (r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
        ],
    }
class CudaLexer(CLexer):
    """
    For NVIDIA CUDA™ source.

    Extends CLexer purely by re-tagging Name tokens in
    get_tokens_unprocessed; no new regex rules are added.
    """
    name = 'CUDA'
    filenames = ['*.cu', '*.cuh']
    aliases = ['cuda', 'cu']
    mimetypes = ['text/x-cuda']
    url = 'https://developer.nvidia.com/category/zone/cuda-zone'
    version_added = '1.6'

    # CUDA function-space qualifiers (note: __device__ appears in both
    # function and variable sets; the variable set is checked first below).
    function_qualifiers = {'__device__', '__global__', '__host__',
                           '__noinline__', '__forceinline__'}
    variable_qualifiers = {'__device__', '__constant__', '__shared__',
                           '__restrict__'}
    vector_types = {'char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3',
                    'char4', 'uchar4', 'short1', 'ushort1', 'short2', 'ushort2',
                    'short3', 'ushort3', 'short4', 'ushort4', 'int1', 'uint1',
                    'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1',
                    'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4',
                    'ulong4', 'longlong1', 'ulonglong1', 'longlong2',
                    'ulonglong2', 'float1', 'float2', 'float3', 'float4',
                    'double1', 'double2', 'dim3'}
    # Built-in device variables.
    variables = {'gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize'}
    # Built-in synchronization/fence intrinsics.
    functions = {'__threadfence_block', '__threadfence', '__threadfence_system',
                 '__syncthreads', '__syncthreads_count', '__syncthreads_and',
                 '__syncthreads_or'}
    # NOTE(review): '<<<'/'>>>' are lexed character-wise as Operator by the
    # base lexer, so this set likely never matches a Name token — confirm.
    execution_confs = {'<<<', '>>>'}

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """Re-tag Name tokens that are CUDA qualifiers, types, built-in
        variables or intrinsics."""
        for index, token, value in CLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name:
                if value in self.variable_qualifiers:
                    token = Keyword.Type
                elif value in self.vector_types:
                    token = Keyword.Type
                elif value in self.variables:
                    token = Name.Builtin
                elif value in self.execution_confs:
                    token = Keyword.Pseudo
                elif value in self.function_qualifiers:
                    token = Keyword.Reserved
                elif value in self.functions:
                    token = Name.Function
            yield index, token, value
class SwigLexer(CppLexer):
    """
    For `SWIG <http://www.swig.org/>`_ source code.

    Extends CppLexer with ``%directive`` and ``$variable`` rules; the
    analyse_text heuristic checks matched directives against a known list.
    """
    name = 'SWIG'
    aliases = ['swig']
    filenames = ['*.swg', '*.i']
    mimetypes = ['text/swig']
    version_added = '2.0'
    priority = 0.04  # Lower than C/C++ and Objective C/C++

    tokens = {
        'root': [
            # Match it here so it won't be matched as a function in the rest of root
            (r'\$\**\&?\w+', Name),
            inherit
        ],
        'statements': [
            # SWIG directives
            (r'(%[a-z_][a-z0-9_]*)', Name.Function),
            # Special variables
            (r'\$\**\&?\w+', Name),
            # Stringification / additional preprocessor directives
            (r'##*[a-zA-Z_]\w*', Comment.Preproc),
            inherit,
        ],
    }

    # This is a far from complete set of SWIG directives
    swig_directives = {
        # Most common directives
        '%apply', '%define', '%director', '%enddef', '%exception', '%extend',
        '%feature', '%fragment', '%ignore', '%immutable', '%import', '%include',
        '%inline', '%insert', '%module', '%newobject', '%nspace', '%pragma',
        '%rename', '%shared_ptr', '%template', '%typecheck', '%typemap',
        # Less common directives
        '%arg', '%attribute', '%bang', '%begin', '%callback', '%catches', '%clear',
        '%constant', '%copyctor', '%csconst', '%csconstvalue', '%csenum',
        '%csmethodmodifiers', '%csnothrowexception', '%default', '%defaultctor',
        '%defaultdtor', '%defined', '%delete', '%delobject', '%descriptor',
        '%exceptionclass', '%exceptionvar', '%extend_smart_pointer', '%fragments',
        '%header', '%ifcplusplus', '%ignorewarn', '%implicit', '%implicitconv',
        '%init', '%javaconst', '%javaconstvalue', '%javaenum', '%javaexception',
        '%javamethodmodifiers', '%kwargs', '%luacode', '%mutable', '%naturalvar',
        '%nestedworkaround', '%perlcode', '%pythonabc', '%pythonappend',
        '%pythoncallback', '%pythoncode', '%pythondynamic', '%pythonmaybecall',
        '%pythonnondynamic', '%pythonprepend', '%refobject', '%shadow', '%sizeof',
        '%trackobjects', '%types', '%unrefobject', '%varargs', '%warn',
        '%warnfilter'}

    def analyse_text(text):
        rv = 0
        # Search for SWIG directives, which are conventionally at the beginning of
        # a line. The probability of them being within a line is low, so let another
        # lexer win in this case.
        matches = re.findall(r'^\s*(%[a-z_][a-z0-9_]*)', text, re.M)
        for m in matches:
            if m in SwigLexer.swig_directives:
                rv = 0.98
                break
        else:
            rv = 0.91  # Fraction higher than MatlabLexer
        return rv
class MqlLexer(CppLexer):
    """
    For `MQL4 <http://docs.mql4.com/>`_ and
    `MQL5 <http://www.mql5.com/en/docs>`_ source code.

    Extends CppLexer; word lists come from the shared _mql_builtins module.
    """
    name = 'MQL'
    aliases = ['mql', 'mq4', 'mq5', 'mql4', 'mql5']
    filenames = ['*.mq4', '*.mq5', '*.mqh']
    mimetypes = ['text/x-mql']
    version_added = '2.0'

    tokens = {
        'statements': [
            (words(_mql_builtins.keywords, suffix=r'\b'), Keyword),
            (words(_mql_builtins.c_types, suffix=r'\b'), Keyword.Type),
            (words(_mql_builtins.types, suffix=r'\b'), Name.Function),
            (words(_mql_builtins.constants, suffix=r'\b'), Name.Constant),
            # Color constants may appear with or without the `clr` prefix.
            (words(_mql_builtins.colors, prefix='(clr)?', suffix=r'\b'),
             Name.Constant),
            inherit,
        ],
    }
class ArduinoLexer(CppLexer):
    """
    For `Arduino(tm) <https://arduino.cc/>`_ source.
    This is an extension of the CppLexer, as the Arduino® Language is a superset
    of C++
    """
    name = 'Arduino'
    aliases = ['arduino']
    filenames = ['*.ino']
    mimetypes = ['text/x-arduino']
    version_added = '2.1'
    # Language sketch main structure functions
    structure = {'setup', 'loop'}
    # Language operators
    operators = {'not', 'or', 'and', 'xor'}
    # Language 'variables'
    variables = {
        'DIGITAL_MESSAGE', 'FIRMATA_STRING', 'ANALOG_MESSAGE', 'REPORT_DIGITAL',
        'REPORT_ANALOG', 'INPUT_PULLUP', 'SET_PIN_MODE', 'INTERNAL2V56', 'SYSTEM_RESET',
        'LED_BUILTIN', 'INTERNAL1V1', 'SYSEX_START', 'INTERNAL', 'EXTERNAL', 'HIGH',
        'LOW', 'INPUT', 'OUTPUT', 'INPUT_PULLUP', 'LED_BUILTIN', 'true', 'false',
        'void', 'boolean', 'char', 'unsigned char', 'byte', 'int', 'unsigned int',
        'word', 'long', 'unsigned long', 'short', 'float', 'double', 'string', 'String',
        'array', 'static', 'volatile', 'const', 'boolean', 'byte', 'word', 'string',
        'String', 'array', 'int', 'float', 'private', 'char', 'virtual', 'operator',
        'sizeof', 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int8_t', 'int16_t',
        'int32_t', 'int64_t', 'dynamic_cast', 'typedef', 'const_cast', 'const',
        'struct', 'static_cast', 'union', 'unsigned', 'long', 'volatile', 'static',
        'protected', 'bool', 'public', 'friend', 'auto', 'void', 'enum', 'extern',
        'class', 'short', 'reinterpret_cast', 'double', 'register', 'explicit',
        'signed', 'inline', 'delete', '_Bool', 'complex', '_Complex', '_Imaginary',
        'atomic_bool', 'atomic_char', 'atomic_schar', 'atomic_uchar', 'atomic_short',
        'atomic_ushort', 'atomic_int', 'atomic_uint', 'atomic_long', 'atomic_ulong',
        'atomic_llong', 'atomic_ullong', 'PROGMEM'}
    # Language shipped functions and class ( )
    functions = {
        'KeyboardController', 'MouseController', 'SoftwareSerial', 'EthernetServer',
        'EthernetClient', 'LiquidCrystal', 'RobotControl', 'GSMVoiceCall',
        'EthernetUDP', 'EsploraTFT', 'HttpClient', 'RobotMotor', 'WiFiClient',
        'GSMScanner', 'FileSystem', 'Scheduler', 'GSMServer', 'YunClient', 'YunServer',
        'IPAddress', 'GSMClient', 'GSMModem', 'Keyboard', 'Ethernet', 'Console',
        'GSMBand', 'Esplora', 'Stepper', 'Process', 'WiFiUDP', 'GSM_SMS', 'Mailbox',
        'USBHost', 'Firmata', 'PImage', 'Client', 'Server', 'GSMPIN', 'FileIO',
        'Bridge', 'Serial', 'EEPROM', 'Stream', 'Mouse', 'Audio', 'Servo', 'File',
        'Task', 'GPRS', 'WiFi', 'Wire', 'TFT', 'GSM', 'SPI', 'SD',
        'runShellCommandAsynchronously', 'analogWriteResolution',
        'retrieveCallingNumber', 'printFirmwareVersion', 'analogReadResolution',
        'sendDigitalPortPair', 'noListenOnLocalhost', 'readJoystickButton',
        'setFirmwareVersion', 'readJoystickSwitch', 'scrollDisplayRight',
        'getVoiceCallStatus', 'scrollDisplayLeft', 'writeMicroseconds',
        'delayMicroseconds', 'beginTransmission', 'getSignalStrength',
        'runAsynchronously', 'getAsynchronously', 'listenOnLocalhost',
        'getCurrentCarrier', 'readAccelerometer', 'messageAvailable',
        'sendDigitalPorts', 'lineFollowConfig', 'countryNameWrite', 'runShellCommand',
        'readStringUntil', 'rewindDirectory', 'readTemperature', 'setClockDivider',
        'readLightSensor', 'endTransmission', 'analogReference', 'detachInterrupt',
        'countryNameRead', 'attachInterrupt', 'encryptionType', 'readBytesUntil',
        'robotNameWrite', 'readMicrophone', 'robotNameRead', 'cityNameWrite',
        'userNameWrite', 'readJoystickY', 'readJoystickX', 'mouseReleased',
        'openNextFile', 'scanNetworks', 'noInterrupts', 'digitalWrite', 'beginSpeaker',
        'mousePressed', 'isActionDone', 'mouseDragged', 'displayLogos', 'noAutoscroll',
        'addParameter', 'remoteNumber', 'getModifiers', 'keyboardRead', 'userNameRead',
        'waitContinue', 'processInput', 'parseCommand', 'printVersion', 'readNetworks',
        'writeMessage', 'blinkVersion', 'cityNameRead', 'readMessage', 'setDataMode',
        'parsePacket', 'isListening', 'setBitOrder', 'beginPacket', 'isDirectory',
        'motorsWrite', 'drawCompass', 'digitalRead', 'clearScreen', 'serialEvent',
        'rightToLeft', 'setTextSize', 'leftToRight', 'requestFrom', 'keyReleased',
        'compassRead', 'analogWrite', 'interrupts', 'WiFiServer', 'disconnect',
        'playMelody', 'parseFloat', 'autoscroll', 'getPINUsed', 'setPINUsed',
        'setTimeout', 'sendAnalog', 'readSlider', 'analogRead', 'beginWrite',
        'createChar', 'motorsStop', 'keyPressed', 'tempoWrite', 'readButton',
        'subnetMask', 'debugPrint', 'macAddress', 'writeGreen', 'randomSeed',
        'attachGPRS', 'readString', 'sendString', 'remotePort', 'releaseAll',
        'mouseMoved', 'background', 'getXChange', 'getYChange', 'answerCall',
        'getResult', 'voiceCall', 'endPacket', 'constrain', 'getSocket', 'writeJSON',
        'getButton', 'available', 'connected', 'findUntil', 'readBytes', 'exitValue',
        'readGreen', 'writeBlue', 'startLoop', 'IPAddress', 'isPressed', 'sendSysex',
        'pauseMode', 'gatewayIP', 'setCursor', 'getOemKey', 'tuneWrite', 'noDisplay',
        'loadImage', 'switchPIN', 'onRequest', 'onReceive', 'changePIN', 'playFile',
        'noBuffer', 'parseInt', 'overflow', 'checkPIN', 'knobRead', 'beginTFT',
        'bitClear', 'updateIR', 'bitWrite', 'position', 'writeRGB', 'highByte',
        'writeRed', 'setSpeed', 'readBlue', 'noStroke', 'remoteIP', 'transfer',
        'shutdown', 'hangCall', 'beginSMS', 'endWrite', 'attached', 'maintain',
        'noCursor', 'checkReg', 'checkPUK', 'shiftOut', 'isValid', 'shiftIn', 'pulseIn',
        'connect', 'println', 'localIP', 'pinMode', 'getIMEI', 'display', 'noBlink',
        'process', 'getBand', 'running', 'beginSD', 'drawBMP', 'lowByte', 'setBand',
        'release', 'bitRead', 'prepare', 'pointTo', 'readRed', 'setMode', 'noFill',
        'remove', 'listen', 'stroke', 'detach', 'attach', 'noTone', 'exists', 'buffer',
        'height', 'bitSet', 'circle', 'config', 'cursor', 'random', 'IRread', 'setDNS',
        'endSMS', 'getKey', 'micros', 'millis', 'begin', 'print', 'write', 'ready',
        'flush', 'width', 'isPIN', 'blink', 'clear', 'press', 'mkdir', 'rmdir', 'close',
        'point', 'yield', 'image', 'BSSID', 'click', 'delay', 'read', 'text', 'move',
        'peek', 'beep', 'rect', 'line', 'open', 'seek', 'fill', 'size', 'turn', 'stop',
        'home', 'find', 'step', 'tone', 'sqrt', 'RSSI', 'SSID', 'end', 'bit', 'tan',
        'cos', 'sin', 'pow', 'map', 'abs', 'max', 'min', 'get', 'run', 'put',
        'isAlphaNumeric', 'isAlpha', 'isAscii', 'isWhitespace', 'isControl', 'isDigit',
        'isGraph', 'isLowerCase', 'isPrintable', 'isPunct', 'isSpace', 'isUpperCase',
        'isHexadecimalDigit'}
    # do not highlight
    suppress_highlight = {
        'namespace', 'template', 'mutable', 'using', 'asm', 'typeid',
        'typename', 'this', 'alignof', 'constexpr', 'decltype', 'noexcept',
        'static_assert', 'thread_local', 'restrict'}
    def get_tokens_unprocessed(self, text, stack=('root',)):
        """Re-tag the C++ token stream with Arduino-specific categories.

        Tokens whose text matches one of the class-level word sets above are
        re-emitted with an Arduino-appropriate token type; everything else is
        passed through unchanged. Set membership order below determines
        precedence when a word appears in more than one set.
        """
        for index, token, value in CppLexer.get_tokens_unprocessed(self, text, stack):
            if value in self.structure:
                # setup()/loop() sketch entry points.
                yield index, Name.Builtin, value
            elif value in self.operators:
                # Word operators (not/or/and/xor).
                yield index, Operator, value
            elif value in self.variables:
                yield index, Keyword.Reserved, value
            elif value in self.suppress_highlight:
                # C++ keywords deliberately demoted to plain names.
                yield index, Name, value
            elif value in self.functions:
                yield index, Name.Function, value
            else:
                yield index, token, value
class CharmciLexer(CppLexer):
    """
    For `Charm++ <https://charm.cs.illinois.edu>`_ interface files (.ci).

    Extends the C++ lexer with the Charm++ interface-file keywords.
    """
    name = 'Charmci'
    aliases = ['charmci']
    filenames = ['*.ci']
    version_added = '2.4'
    mimetypes = []
    tokens = {
        'keywords': [
            # 'module Foo' — highlight the module name as a class name.
            (r'(module)(\s+)', bygroups(Keyword, Text), 'classname'),
            # Chare/collection/message declarations.
            (words(('mainmodule', 'mainchare', 'chare', 'array', 'group',
                    'nodegroup', 'message', 'conditional')), Keyword),
            # Entry-method and attribute keywords.
            (words(('entry', 'aggregate', 'threaded', 'sync', 'exclusive',
                    'nokeep', 'notrace', 'immediate', 'expedited', 'inline',
                    'local', 'python', 'accel', 'readwrite', 'writeonly',
                    'accelblock', 'memcritical', 'packed', 'varsize',
                    'initproc', 'initnode', 'initcall', 'stacksize',
                    'createhere', 'createhome', 'reductiontarget', 'iget',
                    'nocopy', 'mutable', 'migratable', 'readonly')), Keyword),
            # Fall back to the inherited C++ keyword rules.
            inherit,
        ],
    }
class OmgIdlLexer(CLexer):
    """
    Lexer for Object Management Group Interface Definition Language.
    """
    name = 'OMG Interface Definition Language'
    url = 'https://www.omg.org/spec/IDL/About-IDL/'
    aliases = ['omg-idl']
    filenames = ['*.idl', '*.pidl']
    mimetypes = []
    version_added = '2.9'
    # A possibly ::-qualified identifier, e.g. ``A::B::c``.
    scoped_name = r'((::)?\w+)+'
    tokens = {
        # Literals, operators and names shared by several states.
        'values': [
            (words(('true', 'false'), prefix=r'(?i)', suffix=r'\b'), Number),
            (r'([Ll]?)(")', bygroups(String.Affix, String.Double), 'string'),
            (r'([Ll]?)(\')(\\[^\']+)(\')',
             bygroups(String.Affix, String.Char, String.Escape, String.Char)),
            (r'([Ll]?)(\')(\\\')(\')',
             bygroups(String.Affix, String.Char, String.Escape, String.Char)),
            (r'([Ll]?)(\'.\')', bygroups(String.Affix, String.Char)),
            (r'[+-]?\d+(\.\d*)?[Ee][+-]?\d+', Number.Float),
            (r'[+-]?(\d+\.\d*)|(\d*\.\d+)([Ee][+-]?\d+)?', Number.Float),
            (r'(?i)[+-]?0x[0-9a-f]+', Number.Hex),
            (r'[+-]?[1-9]\d*', Number.Integer),
            (r'[+-]?0[0-7]*', Number.Oct),
            (r'[\+\-\*\/%^&\|~]', Operator),
            (words(('<<', '>>')), Operator),
            (scoped_name, Name),
            (r'[{};:,<>\[\]]', Punctuation),
        ],
        # Inside the parenthesized argument list of an annotation; nested
        # parentheses re-enter this state via '#push'.
        'annotation_params': [
            include('whitespace'),
            (r'\(', Punctuation, '#push'),
            include('values'),
            (r'=', Punctuation),
            (r'\)', Punctuation, '#pop'),
        ],
        # After '@name': the parameter list is optional.
        'annotation_params_maybe': [
            (r'\(', Punctuation, 'annotation_params'),
            include('whitespace'),
            default('#pop'),
        ],
        'annotation_appl': [
            (r'@' + scoped_name, Name.Decorator, 'annotation_params_maybe'),
        ],
        # Body of an 'enum'/'bitmask' declaration.
        'enum': [
            include('whitespace'),
            (r'[{,]', Punctuation),
            (r'\w+', Name.Constant),
            include('annotation_appl'),
            (r'\}', Punctuation, '#pop'),
        ],
        'root': [
            include('whitespace'),
            (words((
                'typedef', 'const',
                'in', 'out', 'inout', 'local',
            ), prefix=r'(?i)', suffix=r'\b'), Keyword.Declaration),
            (words((
                'void', 'any', 'native', 'bitfield',
                'unsigned', 'boolean', 'char', 'wchar', 'octet', 'short', 'long',
                'int8', 'uint8', 'int16', 'int32', 'int64', 'uint16', 'uint32', 'uint64',
                'float', 'double', 'fixed',
                'sequence', 'string', 'wstring', 'map',
            ), prefix=r'(?i)', suffix=r'\b'), Keyword.Type),
            # Type-introducing keywords followed by the declared name.
            (words((
                '@annotation', 'struct', 'union', 'bitset', 'interface',
                'exception', 'valuetype', 'eventtype', 'component',
            ), prefix=r'(?i)', suffix=r'(\s+)(\w+)'), bygroups(Keyword, Whitespace, Name.Class)),
            (words((
                'abstract', 'alias', 'attribute', 'case', 'connector',
                'consumes', 'context', 'custom', 'default', 'emits', 'factory',
                'finder', 'getraises', 'home', 'import', 'manages', 'mirrorport',
                'multiple', 'Object', 'oneway', 'primarykey', 'private', 'port',
                'porttype', 'provides', 'public', 'publishes', 'raises',
                'readonly', 'setraises', 'supports', 'switch', 'truncatable',
                'typeid', 'typename', 'typeprefix', 'uses', 'ValueBase',
            ), prefix=r'(?i)', suffix=r'\b'), Keyword),
            # Enum/bitmask bodies get their own state for member constants.
            (r'(?i)(enum|bitmask)(\s+)(\w+)',
             bygroups(Keyword, Whitespace, Name.Class), 'enum'),
            (r'(?i)(module)(\s+)(\w+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
            (r'(\w+)(\s*)(=)', bygroups(Name.Constant, Whitespace, Operator)),
            (r'[\(\)]', Punctuation),
            include('values'),
            include('annotation_appl'),
        ],
    }
class PromelaLexer(CLexer):
    """
    For the Promela language used with SPIN.

    Extends the C lexer: C-style expressions are inherited, while Promela's
    own keywords, declarators and predefined names are layered on top.
    """
    name = 'Promela'
    aliases = ['promela']
    filenames = ['*.pml', '*.prom', '*.prm', '*.promela', '*.pr', '*.pm']
    mimetypes = ['text/x-promela']
    url = 'https://spinroot.com/spin/whatispin.html'
    version_added = '2.18'
    # Promela's language reference:
    # https://spinroot.com/spin/Man/promela.html
    # Promela's grammar definition:
    # https://spinroot.com/spin/Man/grammar.html
    tokens = {
        'statements': [
            (r'(\[\]|<>|/\\|\\/)|(U|W|V)\b', Operator), # LTL Operators
            (r'@', Punctuation), #remoterefs
            # '.field' attribute access.
            (r'(\.)([a-zA-Z_]\w*)', bygroups(Operator, Name.Attribute)),
            inherit
        ],
        'types': [
            # Predefined (data types)
            (words((
                'bit', 'bool', 'byte', 'pid', 'short', 'int', 'unsigned'),
                suffix=r'\b'),
             Keyword.Type),
        ],
        'keywords': [
            # ControlFlow
            (words((
                'atomic', 'break', 'd_step', 'do', 'od', 'for', 'in', 'goto',
                'if', 'fi', 'unless'), suffix=r'\b'),
             Keyword),
            # BasicStatements
            (words((
                'assert', 'get_priority', 'printf', 'printm', 'set_priority'),
                suffix=r'\b'),
             Name.Function),
            # Embedded C Code
            (words((
                'c_code', 'c_decl', 'c_expr', 'c_state', 'c_track'),
                suffix=r'\b'),
             Keyword),
            # Predefined (local/global variables)
            (words((
                '_', '_last', '_nr_pr', '_pid', '_priority', 'else', 'np_',
                'STDIN'), suffix=r'\b'),
             Name.Builtin),
            # Predefined (functions)
            (words((
                'empty', 'enabled', 'eval', 'full', 'len', 'nempty', 'nfull',
                'pc_value'), suffix=r'\b'),
             Name.Function),
            # Predefined (operators)
            (r'run\b', Operator.Word),
            # Declarators
            (words((
                'active', 'chan', 'D_proctype', 'hidden', 'init', 'local',
                'mtype', 'never', 'notrace', 'proctype', 'show', 'trace',
                'typedef', 'xr', 'xs'), suffix=r'\b'),
             Keyword.Declaration),
            # Declarators (suffixes)
            (words((
                'priority', 'provided'), suffix=r'\b'),
             Keyword),
            # MetaTerms (declarators)
            (words((
                'inline', 'ltl', 'select'), suffix=r'\b'),
             Keyword.Declaration),
            # MetaTerms (keywords)
            (r'skip\b', Keyword),
        ],
    }

View File

@@ -0,0 +1,74 @@
"""
pygments.lexers.capnproto
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Cap'n Proto schema language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, default
from pygments.token import Text, Comment, Keyword, Name, Literal, Whitespace
__all__ = ['CapnProtoLexer']
class CapnProtoLexer(RegexLexer):
    """
    For Cap'n Proto source.
    """
    name = 'Cap\'n Proto'
    url = 'https://capnproto.org'
    filenames = ['*.capnp']
    aliases = ['capnp']
    version_added = '2.2'
    tokens = {
        'root': [
            (r'#.*?$', Comment.Single),
            # Ordinal markers such as '@0'.
            (r'@[0-9a-zA-Z]*', Name.Decorator),
            # '=' starts a default-value expression, ':' a type, '$' an
            # annotation; each gets its own sub-state below.
            (r'=', Literal, 'expression'),
            (r':', Name.Class, 'type'),
            (r'\$', Name.Attribute, 'annotation'),
            (r'(struct|enum|interface|union|import|using|const|annotation|'
             r'extends|in|of|on|as|with|from|fixed)\b',
             Keyword),
            (r'[\w.]+', Name),
            (r'[^#@=:$\w\s]+', Text),
            (r'\s+', Whitespace),
        ],
        # Each sub-state consumes text up to the next structural character;
        # its paired 'paren...' state tracks nested ()/[] via #push/#pop.
        'type': [
            (r'[^][=;,(){}$]+', Name.Class),
            (r'[\[(]', Name.Class, 'parentype'),
            default('#pop'),
        ],
        'parentype': [
            (r'[^][;()]+', Name.Class),
            (r'[\[(]', Name.Class, '#push'),
            (r'[])]', Name.Class, '#pop'),
            default('#pop'),
        ],
        'expression': [
            (r'[^][;,(){}$]+', Literal),
            (r'[\[(]', Literal, 'parenexp'),
            default('#pop'),
        ],
        'parenexp': [
            (r'[^][;()]+', Literal),
            (r'[\[(]', Literal, '#push'),
            (r'[])]', Literal, '#pop'),
            default('#pop'),
        ],
        'annotation': [
            (r'[^][;,(){}=:]+', Name.Attribute),
            (r'[\[(]', Name.Attribute, 'annexp'),
            default('#pop'),
        ],
        'annexp': [
            (r'[^][;()]+', Name.Attribute),
            (r'[\[(]', Name.Attribute, '#push'),
            (r'[])]', Name.Attribute, '#pop'),
            default('#pop'),
        ],
    }

View File

@@ -0,0 +1,95 @@
"""
pygments.lexers.carbon
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Carbon programming language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['CarbonLexer']
class CarbonLexer(RegexLexer):
    """
    For Carbon source.
    """
    name = 'Carbon'
    url = 'https://github.com/carbon-language/carbon-lang'
    filenames = ['*.carbon']
    aliases = ['carbon']
    mimetypes = ['text/x-carbon']
    version_added = '2.15'
    flags = re.MULTILINE | re.DOTALL
    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'\\\n', Text),
            # comments
            (r'//(.*?)\n', Comment.Single),
            (r'/(\\\n)?[*].*?[*](\\\n)?/', Comment.Multiline),
            # Declaration
            (r'(package|import|api|namespace|library)\b', Keyword.Namespace),
            (r'(abstract|alias|fn|class|interface|let|var|virtual|external|'
             r'base|addr|extends|choice|constraint|impl)\b', Keyword.Declaration),
            # Keywords
            (words(('as', 'or', 'not', 'and', 'break', 'continue', 'case',
                    'default', 'if', 'else', 'destructor', 'for', 'forall',
                    'while', 'where', 'then', 'in', 'is', 'return', 'returned',
                    'friend', 'partial', 'private', 'protected', 'observe', 'Self',
                    'override', 'final', 'match', 'type', 'like'), suffix=r'\b'), Keyword),
            (r'(self)\b', Keyword.Pseudo),
            (r'(true|false)\b', Keyword.Constant),
            (r'(auto|bool|string|i8|i16|i32|i64|u8|u16|u32|u64|'
             r'f8|f16|f32|f64)\b', Keyword.Type),
            # numeric literals
            (r'[0-9]*[.][0-9]+', Number.Double),
            (r'0b[01]+', Number.Bin),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            # string literal
            (r'"(\\.|[^"\\])*"', String),
            # char literal
            (r'\'(\\.|[^\'\\])\'', String.Char),
            # tokens
            (r'<<=|>>=|<<|>>|<=|>=|\+=|-=|\*=|/=|\%=|\|=|&=|\^=|&&|\|\||&|\||'
             r'\+\+|--|\%|\^|\~|==|!=|::|[.]{3}|->|=>|[+\-*/&]', Operator),
            (r'[|<>=!()\[\]{}.,;:\?]', Punctuation),
            # identifiers
            (r'[^\W\d]\w*', Name.Other),
        ]
    }
    def analyse_text(text):
        """Score by counting Carbon-specific keywords; each distinct hit
        adds 0.1, up to 1.0 when all ten are present."""
        result = 0
        if 'forall' in text:
            result += 0.1
        if 'type' in text:
            result += 0.1
        if 'Self' in text:
            result += 0.1
        if 'observe' in text:
            result += 0.1
        if 'package' in text:
            result += 0.1
        if 'library' in text:
            result += 0.1
        if 'choice' in text:
            result += 0.1
        if 'addr' in text:
            result += 0.1
        if 'constraint' in text:
            result += 0.1
        if 'impl' in text:
            result += 0.1
        return result

View File

@@ -0,0 +1,172 @@
"""
pygments.lexers.cddl
~~~~~~~~~~~~~~~~~~~~
Lexer for the Concise data definition language (CDDL), a notational
convention to express CBOR and JSON data structures.
More information:
https://datatracker.ietf.org/doc/rfc8610/
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include, words
from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \
Punctuation, String, Whitespace
__all__ = ['CddlLexer']
class CddlLexer(RegexLexer):
    """
    Lexer for CDDL definitions.
    """
    name = "CDDL"
    url = 'https://datatracker.ietf.org/doc/rfc8610/'
    aliases = ["cddl"]
    filenames = ["*.cddl"]
    mimetypes = ["text/x-cddl"]
    version_added = '2.8'
    # Type names predefined by the CDDL prelude (RFC 8610, appendix D).
    _prelude_types = [
        "any",
        "b64legacy",
        "b64url",
        "bigfloat",
        "bigint",
        "bignint",
        "biguint",
        "bool",
        "bstr",
        "bytes",
        "cbor-any",
        "decfrac",
        "eb16",
        "eb64legacy",
        "eb64url",
        "encoded-cbor",
        "false",
        "float",
        "float16",
        "float16-32",
        "float32",
        "float32-64",
        "float64",
        "int",
        "integer",
        "mime-message",
        "nil",
        "nint",
        "null",
        "number",
        "regexp",
        "tdate",
        "text",
        "time",
        "true",
        "tstr",
        "uint",
        "undefined",
        "unsigned",
        "uri",
    ]
    # Control operators (RFC 8610, section 3.8).
    _controls = [
        ".and",
        ".bits",
        ".cbor",
        ".cborseq",
        ".default",
        ".eq",
        ".ge",
        ".gt",
        ".le",
        ".lt",
        ".ne",
        ".regexp",
        ".size",
        ".within",
    ]
    # Identifier: starts with $/@/letter/underscore; '-' and '.' are allowed
    # inside only when followed by another identifier character.
    _re_id = (
        r"[$@A-Z_a-z]"
        r"(?:[\-\.]+(?=[$@0-9A-Z_a-z])|[$@0-9A-Z_a-z])*"
    )
    # While the spec reads more like "an int must not start with 0" we use a
    # lookahead here that says "after a 0 there must be no digit". This makes the
    # '0' the invalid character in '01', which looks nicer when highlighted.
    _re_uint = r"(?:0b[01]+|0x[0-9a-fA-F]+|[1-9]\d*|0(?!\d))"
    _re_int = r"-?" + _re_uint
    tokens = {
        "commentsandwhitespace": [(r"\s+", Whitespace), (r";.+$", Comment.Single)],
        "root": [
            include("commentsandwhitespace"),
            # tag types
            (rf"#(\d\.{_re_uint})?", Keyword.Type),  # type or any
            # occurrence
            (
                rf"({_re_uint})?(\*)({_re_uint})?",
                bygroups(Number, Operator, Number),
            ),
            (r"\?|\+", Operator),  # occurrence
            (r"\^", Operator),  # cuts
            (r"(\.\.\.|\.\.)", Operator),  # rangeop
            (words(_controls, suffix=r"\b"), Operator.Word),  # ctlops
            # into choice op
            (rf"&(?=\s*({_re_id}|\())", Operator),
            (rf"~(?=\s*{_re_id})", Operator),  # unwrap op
            (r"//|/(?!/)", Operator),  # double und single slash
            (r"=>|/==|/=|=", Operator),
            (r"[\[\]{}\(\),<>:]", Punctuation),
            # Bytestrings
            (r"(b64)(')", bygroups(String.Affix, String.Single), "bstrb64url"),
            (r"(h)(')", bygroups(String.Affix, String.Single), "bstrh"),
            (r"'", String.Single, "bstr"),
            # Barewords as member keys (must be matched before values, types, typenames,
            # groupnames).
            # Token type is String as barewords are always interpreted as such.
            (rf"({_re_id})(\s*)(:)",
             bygroups(String, Whitespace, Punctuation)),
            # predefined types
            (words(_prelude_types, prefix=r"(?![\-_$@])\b", suffix=r"\b(?![\-_$@])"),
             Name.Builtin),
            # user-defined groupnames, typenames
            (_re_id, Name.Class),
            # values
            (r"0b[01]+", Number.Bin),
            (r"0o[0-7]+", Number.Oct),
            (r"0x[0-9a-fA-F]+(\.[0-9a-fA-F]+)?p[+-]?\d+", Number.Hex),  # hexfloat
            (r"0x[0-9a-fA-F]+", Number.Hex),  # hex
            # Float
            (rf"{_re_int}(?=(\.\d|e[+-]?\d))(?:\.\d+)?(?:e[+-]?\d+)?",
             Number.Float),
            # Int
            (_re_int, Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
        ],
        # Base64url-encoded bytestring body.
        "bstrb64url": [
            (r"'", String.Single, "#pop"),
            include("commentsandwhitespace"),
            (r"\\.", String.Escape),
            (r"[0-9a-zA-Z\-_=]+", String.Single),
            (r".", Error),
            # (r";.+$", Token.Other),
        ],
        # Hex-encoded bytestring body.
        "bstrh": [
            (r"'", String.Single, "#pop"),
            include("commentsandwhitespace"),
            (r"\\.", String.Escape),
            (r"[0-9a-fA-F]+", String.Single),
            (r".", Error),
        ],
        # Plain bytestring body.
        "bstr": [
            (r"'", String.Single, "#pop"),
            (r"\\.", String.Escape),
            (r"[^'\\]+", String.Single),
        ],
    }

View File

@@ -0,0 +1,139 @@
"""
pygments.lexers.chapel
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Chapel language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['ChapelLexer']
class ChapelLexer(RegexLexer):
    """
    For Chapel source.
    """
    name = 'Chapel'
    url = 'https://chapel-lang.org/'
    filenames = ['*.chpl']
    aliases = ['chapel', 'chpl']
    version_added = '2.0'
    # mimetypes = ['text/x-chapel']
    # Word lists are kept as class attributes so the categories are easy to
    # maintain; they are combined into rules in 'root' below.
    known_types = ('bool', 'bytes', 'complex', 'imag', 'int', 'locale',
                   'nothing', 'opaque', 'range', 'real', 'string', 'uint',
                   'void')
    type_modifiers_par = ('atomic', 'single', 'sync')
    type_modifiers_mem = ('borrowed', 'owned', 'shared', 'unmanaged')
    type_modifiers = (*type_modifiers_par, *type_modifiers_mem)
    declarations = ('config', 'const', 'in', 'inout', 'out', 'param', 'ref',
                    'type', 'var')
    constants = ('false', 'nil', 'none', 'true')
    other_keywords = ('align', 'as',
                      'begin', 'break', 'by',
                      'catch', 'cobegin', 'coforall', 'continue',
                      'defer', 'delete', 'dmapped', 'do', 'domain',
                      'else', 'enum', 'except', 'export', 'extern',
                      'for', 'forall', 'foreach', 'forwarding',
                      'if', 'implements', 'import', 'index', 'init', 'inline',
                      'label', 'lambda', 'let', 'lifetime', 'local',
                      'new', 'noinit',
                      'on', 'only', 'otherwise', 'override',
                      'pragma', 'primitive', 'private', 'prototype', 'public',
                      'reduce', 'require', 'return',
                      'scan', 'select', 'serial', 'sparse', 'subdomain',
                      'then', 'this', 'throw', 'throws', 'try',
                      'use',
                      'when', 'where', 'while', 'with',
                      'yield',
                      'zip')
    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'\\\n', Text),
            (r'//(.*?)\n', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (words(declarations, suffix=r'\b'), Keyword.Declaration),
            (words(constants, suffix=r'\b'), Keyword.Constant),
            (words(known_types, suffix=r'\b'), Keyword.Type),
            (words((*type_modifiers, *other_keywords), suffix=r'\b'), Keyword),
            # '@attribute' — the name is lexed in its own state.
            (r'@', Keyword, 'attributename'),
            (r'(iter)(\s+)', bygroups(Keyword, Whitespace), 'procname'),
            (r'(proc)(\s+)', bygroups(Keyword, Whitespace), 'procname'),
            (r'(operator)(\s+)', bygroups(Keyword, Whitespace), 'procname'),
            (r'(class|interface|module|record|union)(\s+)', bygroups(Keyword, Whitespace),
             'classname'),
            # imaginary integers
            (r'\d+i', Number),
            (r'\d+\.\d*([Ee][-+]\d+)?i', Number),
            (r'\.\d+([Ee][-+]\d+)?i', Number),
            (r'\d+[Ee][-+]\d+i', Number),
            # reals cannot end with a period due to lexical ambiguity with
            # .. operator. See reference for rationale.
            (r'(\d*\.\d+)([eE][+-]?[0-9]+)?i?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+i?', Number.Float),
            # integer literals
            # -- binary
            (r'0[bB][01]+', Number.Bin),
            # -- hex
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            # -- octal
            (r'0[oO][0-7]+', Number.Oct),
            # -- decimal
            (r'[0-9]+', Number.Integer),
            # strings
            (r'"(\\\\|\\"|[^"])*"', String),
            (r"'(\\\\|\\'|[^'])*'", String),
            # tokens
            (r'(=|\+=|-=|\*=|/=|\*\*=|%=|&=|\|=|\^=|&&=|\|\|=|<<=|>>=|'
             r'<=>|<~>|\.\.|by|#|\.\.\.|'
             r'&&|\|\||!|&|\||\^|~|<<|>>|'
             r'==|!=|<=|>=|<|>|'
             r'[+\-*/%]|\*\*)', Operator),
            (r'[:;,.?()\[\]{}]', Punctuation),
            # identifiers
            (r'[a-zA-Z_][\w$]*', Name.Other),
        ],
        'classname': [
            (r'[a-zA-Z_][\w$]*', Name.Class, '#pop'),
        ],
        'procname': [
            (r'([a-zA-Z_][.\w$]*|'  # regular function name, including secondary
             r'\~[a-zA-Z_][.\w$]*|'  # support for legacy destructors
             r'[+*/!~%<>=&^|\-:]{1,2})',  # operators
             Name.Function, '#pop'),
            # allow `proc (atomic T).foo`
            (r'\(', Punctuation, "receivertype"),
            (r'\)+\.', Punctuation),
        ],
        'receivertype': [
            (words(type_modifiers, suffix=r'\b'), Keyword),
            (words(known_types, suffix=r'\b'), Keyword.Type),
            (r'[^()]*', Name.Other, '#pop'),
        ],
        'attributename': [
            (r'[a-zA-Z_][.\w$]*', Name.Decorator, '#pop'),
        ],
    }

View File

@@ -0,0 +1,180 @@
"""
pygments.lexers.clean
~~~~~~~~~~~~~~~~~~~~~
Lexer for the Clean language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import ExtendedRegexLexer, words, default, include, bygroups
from pygments.token import Comment, Error, Keyword, Literal, Name, Number, \
Operator, Punctuation, String, Whitespace
__all__ = ['CleanLexer']
class CleanLexer(ExtendedRegexLexer):
    """
    Lexer for the general purpose, state-of-the-art, pure and lazy functional
    programming language Clean.

    .. versionadded: 2.2
    """
    name = 'Clean'
    url = 'http://clean.cs.ru.nl/Clean'
    aliases = ['clean']
    filenames = ['*.icl', '*.dcl']
    version_added = ''
    keywords = (
        'case', 'ccall', 'class', 'code', 'code inline', 'derive', 'export',
        'foreign', 'generic', 'if', 'in', 'infix', 'infixl', 'infixr',
        'instance', 'let', 'of', 'otherwise', 'special', 'stdcall', 'where',
        'with')
    modulewords = ('implementation', 'definition', 'system')
    # Identifier building blocks; backquotes are legal in Clean names, and
    # "funny" identifiers are operator-like symbol sequences.
    lowerId = r'[a-z`][\w`]*'
    upperId = r'[A-Z`][\w`]*'
    funnyId = r'[~@#$%\^?!+\-*<>\\/|&=:]+'
    scoreUpperId = r'_' + upperId
    scoreLowerId = r'_' + lowerId
    moduleId = r'[a-zA-Z_][a-zA-Z0-9_.`]+'
    classId = '|'.join([lowerId, upperId, funnyId])
    tokens = {
        'root': [
            include('comments'),
            include('keywords'),
            include('module'),
            include('import'),
            include('whitespace'),
            include('literals'),
            include('operators'),
            include('delimiters'),
            include('names'),
        ],
        'whitespace': [
            (r'\s+', Whitespace),
        ],
        'comments': [
            (r'//.*\n', Comment.Single),
            (r'/\*', Comment.Multiline, 'comments.in'),
            (r'/\*\*', Comment.Special, 'comments.in'),
        ],
        # Block comments nest: '/*' pushes this state again.
        'comments.in': [
            (r'\*\/', Comment.Multiline, '#pop'),
            (r'/\*', Comment.Multiline, '#push'),
            (r'[^*/]+', Comment.Multiline),
            (r'\*(?!/)', Comment.Multiline),
            (r'/', Comment.Multiline),
        ],
        'keywords': [
            (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
        ],
        'module': [
            (words(modulewords, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
            (r'\bmodule\b', Keyword.Namespace, 'module.name'),
        ],
        'module.name': [
            include('whitespace'),
            (moduleId, Name.Class, '#pop'),
        ],
        # Import statements: 'import M' and 'from M import ...' each drive a
        # small sub-state machine below.
        'import': [
            (r'\b(import)\b(\s*)', bygroups(Keyword, Whitespace), 'import.module'),
            (r'\b(from)\b(\s*)\b(' + moduleId + r')\b(\s*)\b(import)\b',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace, Keyword),
             'import.what'),
        ],
        'import.module': [
            (r'\b(qualified)\b(\s*)', bygroups(Keyword, Whitespace)),
            (r'(\s*)\b(as)\b', bygroups(Whitespace, Keyword), ('#pop', 'import.module.as')),
            (moduleId, Name.Class),
            (r'(\s*)(,)(\s*)', bygroups(Whitespace, Punctuation, Whitespace)),
            (r'\s+', Whitespace),
            default('#pop'),
        ],
        'import.module.as': [
            include('whitespace'),
            (lowerId, Name.Class, '#pop'),
            (upperId, Name.Class, '#pop'),
        ],
        'import.what': [
            (r'\b(class)\b(\s+)(' + classId + r')',
             bygroups(Keyword, Whitespace, Name.Class), 'import.what.class'),
            (r'\b(instance)(\s+)(' + classId + r')(\s+)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace), 'import.what.instance'),
            (r'(::)(\s*)\b(' + upperId + r')\b',
             bygroups(Punctuation, Whitespace, Name.Class), 'import.what.type'),
            (r'\b(generic)\b(\s+)\b(' + lowerId + '|' + upperId + r')\b',
             bygroups(Keyword, Whitespace, Name)),
            include('names'),
            (r'(,)(\s+)', bygroups(Punctuation, Whitespace)),
            # End of line terminates the import item list.
            (r'$', Whitespace, '#pop'),
            include('whitespace'),
        ],
        'import.what.class': [
            (r',', Punctuation, '#pop'),
            (r'\(', Punctuation, 'import.what.class.members'),
            (r'$', Whitespace, '#pop:2'),
            include('whitespace'),
        ],
        'import.what.class.members': [
            (r',', Punctuation),
            (r'\.\.', Punctuation),
            (r'\)', Punctuation, '#pop'),
            include('names'),
        ],
        'import.what.instance': [
            (r'[,)]', Punctuation, '#pop'),
            (r'\(', Punctuation, 'import.what.instance'),
            (r'$', Whitespace, '#pop:2'),
            include('whitespace'),
            include('names'),
        ],
        'import.what.type': [
            (r',', Punctuation, '#pop'),
            (r'[({]', Punctuation, 'import.what.type.consesandfields'),
            (r'$', Whitespace, '#pop:2'),
            include('whitespace'),
        ],
        'import.what.type.consesandfields': [
            (r',', Punctuation),
            (r'\.\.', Punctuation),
            (r'[)}]', Punctuation, '#pop'),
            include('names'),
        ],
        'literals': [
            (r'\'([^\'\\]|\\(x[\da-fA-F]+|\d+|.))\'', Literal.Char),
            (r'[+~-]?0[0-7]+\b', Number.Oct),
            (r'[+~-]?\d+\.\d+(E[+-]?\d+)?', Number.Float),
            (r'[+~-]?\d+\b', Number.Integer),
            (r'[+~-]?0x[\da-fA-F]+\b', Number.Hex),
            (r'True|False', Literal),
            (r'"', String.Double, 'literals.stringd'),
        ],
        'literals.stringd': [
            (r'[^\\"\n]+', String.Double),
            (r'"', String.Double, '#pop'),
            (r'\\.', String.Double),
            # Unterminated string at end of line is an error.
            (r'[$\n]', Error, '#pop'),
        ],
        'operators': [
            (r'[-~@#$%\^?!+*<>\\/|&=:.]+', Operator),
            (r'\b_+\b', Operator),
        ],
        'delimiters': [
            (r'[,;(){}\[\]]', Punctuation),
            (r'(\')([\w`.]+)(\')',
             bygroups(Punctuation, Name.Class, Punctuation)),
        ],
        'names': [
            (lowerId, Name),
            (scoreLowerId, Name),
            (funnyId, Name.Function),
            (upperId, Name.Class),
            (scoreUpperId, Name.Class),
        ]
    }

View File

@@ -0,0 +1,80 @@
"""
pygments.lexers.codeql
~~~~~~~~~~~~~~~~~~~~~~
Lexer for CodeQL query language.
The grammar is originating from:
https://github.com/github/vscode-codeql/blob/main/syntaxes/README.md
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
__all__ = ['CodeQLLexer']
import re
from pygments.lexer import RegexLexer, words
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
class CodeQLLexer(RegexLexer):
    """
    For CodeQL query source (.ql) and library (.qll) files.
    """
    name = 'CodeQL'
    aliases = ['codeql', 'ql']
    filenames = ['*.ql', '*.qll']
    mimetypes = []
    url = 'https://github.com/github/codeql'
    version_added = '2.19'
    flags = re.MULTILINE | re.UNICODE
    tokens = {
        'root': [
            # Whitespace and comments
            (r'\s+', Whitespace),
            (r'//.*?\n', Comment.Single),
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            # Keywords (duplicate 'predicate' entry removed; it only bloated
            # the generated alternation).
            (words((
                'module', 'import', 'class', 'extends', 'implements',
                'predicate', 'select', 'where', 'from', 'as', 'and', 'or', 'not',
                'in', 'if', 'then', 'else', 'exists', 'forall', 'instanceof',
                'private', 'abstract', 'cached', 'external',
                'final', 'library', 'override', 'query'
            ), suffix=r'\b'), Keyword.Builtin),
            # Special Keywords
            # Bug fix: ('this') is just the string 'this', so words() iterated
            # its characters and matched the stray letters t/h/i/s; a
            # one-element tuple is required to match the keyword itself.
            (words(('this',),  # class related keywords
                   prefix=r'\b', suffix=r'\b\??:?'), Name.Builtin.Pseudo),
            # Types
            (words((
                'boolean', 'date', 'float', 'int', 'string'
            ), suffix=r'\b'), Keyword.Type),
            # Literals
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            (r'[0-9]+\.[0-9]+', Number.Float),
            (r'[0-9]+', Number.Integer),
            # Operators
            (r'<=|>=|<|>|=|!=|\+|-|\*|/', Operator),
            # Punctuation
            (r'[.,;:\[\]{}()]+', Punctuation),
            # Identifiers
            (r'@[a-zA-Z_]\w*', Name.Variable),  # Variables with @ prefix
            (r'[A-Z][a-zA-Z0-9_]*', Name.Class),  # Types and classes
            (r'[a-z][a-zA-Z0-9_]*', Name.Variable),  # Variables and predicates
        ],
        # Block comments; '/*' nests via #push.
        'multiline-comments': [
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
    }

View File

@@ -0,0 +1,81 @@
"""
pygments.lexers.comal
~~~~~~~~~~~~~~~~~~~~~
Lexer for COMAL-80.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, words
from pygments.token import Comment, Whitespace, Operator, Keyword, String, \
Number, Name, Punctuation
__all__ = ["Comal80Lexer"]
class Comal80Lexer(RegexLexer):
    """
    For COMAL-80 source code.
    """

    name = 'COMAL-80'
    url = 'https://en.wikipedia.org/wiki/COMAL'
    aliases = ['comal', 'comal80']
    filenames = ['*.cml', '*.comal']
    version_added = ''
    # COMAL-80 is case-insensitive, so match keywords regardless of case.
    flags = re.IGNORECASE
    #
    # COMAL allows for some strange characters in names which we list here so
    # keywords and word operators will not be recognized at the start of an
    # identifier.
    #
    _suffix = r"\b(?!['\[\]←£\\])"
    _identifier = r"[a-z]['\[\]←£\\\w]*"

    tokens = {
        'root': [
            (r'//.*\n', Comment.Single),
            (r'\s+', Whitespace),
            (r':[=+-]|\<\>|[-+*/^↑<>=]', Operator),
            # Two-word operators may contain multiple spaces between the words.
            (r'(and +then|or +else)' + _suffix, Operator.Word),
            (words([
                'and', 'bitand', 'bitor', 'bitxor', 'div', 'in', 'mod', 'not',
                'or'], suffix=_suffix,), Operator.Word),
            # Statement and control-flow keywords.
            (words([
                'append', 'at', 'case', 'chain', 'close', 'copy', 'create', 'cursor',
                'data', 'delete', 'dir', 'do', 'elif', 'else', 'end', 'endcase', 'endif',
                'endfor', 'endloop', 'endtrap', 'endwhile', 'exec', 'exit', 'file',
                'for', 'goto', 'handler', 'if', 'input', 'let', 'loop', 'mount', 'null',
                'of', 'open', 'otherwise', 'output', 'page', 'pass', 'poke', 'print',
                'random', 'read', 'repeat', 'report', 'return', 'rename', 'restore',
                'select', 'step', 'stop', 'sys', 'then', 'to', 'trap', 'unit', 'unit$',
                'until', 'using', 'when', 'while', 'write', 'zone'], suffix=_suffix),
             Keyword.Reserved),
            # Declaration keywords (procedures, functions, dimensioning, ...).
            (words([
                'closed', 'dim', 'endfunc', 'endproc', 'external', 'func', 'import',
                'proc', 'ref', 'use'], suffix=_suffix), Keyword.Declaration),
            # Built-in functions.
            (words([
                'abs', 'atn', 'chr$', 'cos', 'eod', 'eof', 'err', 'errfile', 'errtext',
                'esc', 'exp', 'int', 'key$', 'len', 'log', 'ord', 'peek', 'randomize',
                'rnd', 'sgn', 'sin', 'spc$', 'sqr', 'status$', 'str$', 'tab', 'tan',
                'time', 'val'], suffix=_suffix), Name.Builtin),
            (words(['false', 'pi', 'true'], suffix=_suffix), Keyword.Constant),
            (r'"', String, 'string'),
            # A label is an identifier followed by a colon at end of statement.
            (_identifier + r":(?=[ \n/])", Name.Label),
            # Identifiers may carry a $ (string) or # (integer) type sigil.
            (_identifier + r"[$#]?", Name),
            (r'%[01]+', Number.Bin),
            (r'\$[0-9a-f]+', Number.Hex),
            (r'\d*\.\d*(e[-+]?\d+)?', Number.Float),
            (r'\d+', Number.Integer),
            (r'[(),:;]', Punctuation),
        ],
        'string': [
            (r'[^"]+', String),
            # A doubled quote around digits is a character-code escape.
            (r'"[0-9]*"', String.Escape),
            (r'"', String, '#pop'),
        ],
    }

View File

@@ -0,0 +1,35 @@
"""
pygments.lexers.compiled
~~~~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# ruff: noqa: F401
from pygments.lexers.jvm import JavaLexer, ScalaLexer
from pygments.lexers.c_cpp import CLexer, CppLexer
from pygments.lexers.d import DLexer
from pygments.lexers.objective import ObjectiveCLexer, \
ObjectiveCppLexer, LogosLexer
from pygments.lexers.go import GoLexer
from pygments.lexers.rust import RustLexer
from pygments.lexers.c_like import ECLexer, ValaLexer, CudaLexer
from pygments.lexers.pascal import DelphiLexer, PortugolLexer, Modula2Lexer
from pygments.lexers.ada import AdaLexer
from pygments.lexers.business import CobolLexer, CobolFreeformatLexer
from pygments.lexers.fortran import FortranLexer
from pygments.lexers.prolog import PrologLexer
from pygments.lexers.python import CythonLexer
from pygments.lexers.graphics import GLShaderLexer
from pygments.lexers.ml import OcamlLexer
from pygments.lexers.basic import BlitzBasicLexer, BlitzMaxLexer, MonkeyLexer
from pygments.lexers.dylan import DylanLexer, DylanLidLexer, DylanConsoleLexer
from pygments.lexers.ooc import OocLexer
from pygments.lexers.felix import FelixLexer
from pygments.lexers.nimrod import NimrodLexer
from pygments.lexers.crystal import CrystalLexer
__all__ = []

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,114 @@
"""
pygments.lexers.console
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for misc console output.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Generic, Comment, String, Text, Keyword, Name, \
Punctuation, Number, Whitespace
__all__ = ['VCTreeStatusLexer', 'PyPyLogLexer']
class VCTreeStatusLexer(RegexLexer):
    """
    For colorizing output of version control status commands, like "hg
    status" or "svn status".
    """
    name = 'VCTreeStatus'
    aliases = ['vctreestatus']
    filenames = []
    mimetypes = []
    url = ""
    version_added = '2.0'

    tokens = {
        'root': [
            # Status letters at the start of each line determine the token:
            # A (added), M (modified), C (conflict), D (deleted), ?/! (untracked
            # or missing). The combined "A + C" form is treated as an error.
            (r'^A  \+  C\s+', Generic.Error),
            (r'^A\s+\+?\s+', String),
            (r'^M\s+', Generic.Inserted),
            (r'^C\s+', Generic.Error),
            (r'^D\s+', Generic.Deleted),
            (r'^[?!]\s+', Comment.Preproc),
            (r'      >\s+.*\n', Comment.Preproc),
            (r'\S+', Text),
            (r'\s+', Whitespace),
        ]
    }
class PyPyLogLexer(RegexLexer):
    """
    Lexer for PyPy log files.
    """
    name = "PyPy Log"
    aliases = ["pypylog", "pypy"]
    filenames = ["*.pypylog"]
    mimetypes = ['application/x-pypylog']
    url = 'pypy.org'
    version_added = '1.5'

    tokens = {
        "root": [
            # Section headers open dedicated states for JIT log / backend counts.
            (r"\[\w+\] \{jit-log-.*?$", Keyword, "jit-log"),
            (r"\[\w+\] \{jit-backend-counts$", Keyword, "jit-backend-counts"),
            include("extra-stuff"),
        ],
        "jit-log": [
            (r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"),
            (r"^\+\d+: ", Comment),
            (r"--end of the loop--", Comment),
            # SSA-style value names: i/f/p-prefixed numbers and ptr values.
            (r"[ifp]\d+", Name),
            (r"ptr\d+", Name),
            (r"(\()(\w+(?:\.\w+)?)(\))",
             bygroups(Punctuation, Name.Builtin, Punctuation)),
            (r"[\[\]=,()]", Punctuation),
            (r"(\d+\.\d+|inf|-inf)", Number.Float),
            (r"-?\d+", Number.Integer),
            (r"'.*'", String),
            (r"(None|descr|ConstClass|ConstPtr|TargetToken)", Name),
            (r"<.*?>+", Name.Builtin),
            (r"(label|debug_merge_point|jump|finish)", Name.Class),
            # The full set of JIT residual operations recognized as builtins.
            (r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|"
             r"int_floordiv|int_mod|int_lshift|int_rshift|int_and|int_or|"
             r"int_xor|int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|"
             r"int_is_true|"
             r"uint_floordiv|uint_ge|uint_lt|"
             r"float_add|float_sub|float_mul|float_truediv|float_neg|"
             r"float_eq|float_ne|float_ge|float_gt|float_le|float_lt|float_abs|"
             r"ptr_eq|ptr_ne|instance_ptr_eq|instance_ptr_ne|"
             r"cast_int_to_float|cast_float_to_int|"
             r"force_token|quasiimmut_field|same_as|virtual_ref_finish|"
             r"virtual_ref|mark_opaque_ptr|"
             r"call_may_force|call_assembler|call_loopinvariant|"
             r"call_release_gil|call_pure|call|"
             r"new_with_vtable|new_array|newstr|newunicode|new|"
             r"arraylen_gc|"
             r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|"
             r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|"
             r"getfield_gc|getinteriorfield_gc|setinteriorfield_gc|"
             r"getfield_raw|setfield_gc|setfield_raw|"
             r"strgetitem|strsetitem|strlen|copystrcontent|"
             r"unicodegetitem|unicodesetitem|unicodelen|"
             r"guard_true|guard_false|guard_value|guard_isnull|"
             r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|"
             r"guard_not_forced|guard_no_exception|guard_not_invalidated)",
             Name.Builtin),
            include("extra-stuff"),
        ],
        "jit-backend-counts": [
            (r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"),
            (r":", Punctuation),
            (r"\d+", Number),
            include("extra-stuff"),
        ],
        # Whitespace and comments common to all states.
        "extra-stuff": [
            (r"\s+", Whitespace),
            (r"#.*?$", Comment),
        ],
    }

View File

@@ -0,0 +1,43 @@
"""
pygments.lexers.cplint
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the cplint language
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import bygroups, inherit, words
from pygments.lexers import PrologLexer
from pygments.token import Operator, Keyword, Name, String, Punctuation
__all__ = ['CplintLexer']
class CplintLexer(PrologLexer):
    """
    Lexer for cplint files, including CP-logic, Logic Programs with Annotated
    Disjunctions, Distributional Clauses syntax, ProbLog, DTProbLog.

    Extends PrologLexer: the rules below are tried first, then Prolog's own
    rules are applied via ``inherit``.
    """
    name = 'cplint'
    url = 'https://cplint.eu'
    aliases = ['cplint']
    filenames = ['*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl']
    mimetypes = ['text/x-cplint']
    version_added = '2.12'

    tokens = {
        'root': [
            (r'map_query', Keyword),
            # Built-in probability distributions.
            (words(('gaussian', 'uniform_dens', 'dirichlet', 'gamma', 'beta',
                    'poisson', 'binomial', 'geometric', 'exponential', 'pascal',
                    'multinomial', 'user', 'val', 'uniform', 'discrete',
                    'finite')), Name.Builtin),
            # annotations of atoms
            (r'([a-z]+)(:)', bygroups(String.Atom, Punctuation)),
            # cplint-specific operators (e.g. probabilistic ``::`` and ``~``).
            (r':(-|=)|::?|~=?|=>', Operator),
            (r'\?', Name.Builtin),
            inherit,
        ],
    }

View File

@@ -0,0 +1,364 @@
"""
pygments.lexers.crystal
~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Crystal.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import ExtendedRegexLexer, include, bygroups, default, \
words, line_re
from pygments.token import Comment, Operator, Keyword, Name, String, Number, \
Punctuation, Error, Whitespace
# Public API of this module.
__all__ = ['CrystalLexer']

# Crystal's overloadable/symbolizable operators, ordered so that longer
# operators are listed before their prefixes (``!=`` before ``!``, etc.),
# which matters when they are joined into an alternation by words().
CRYSTAL_OPERATORS = [
    '!=', '!~', '!', '%', '&&', '&', '**', '*', '+', '-', '/', '<=>', '<<', '<=', '<',
    '===', '==', '=~', '=', '>=', '>>', '>', '[]=', '[]?', '[]', '^', '||', '|', '~'
]
class CrystalLexer(ExtendedRegexLexer):
    """
    For Crystal source code.
    """

    name = 'Crystal'
    url = 'https://crystal-lang.org'
    aliases = ['cr', 'crystal']
    filenames = ['*.cr']
    mimetypes = ['text/x-crystal']
    version_added = '2.2'

    flags = re.DOTALL | re.MULTILINE

    def heredoc_callback(self, match, ctx):
        # okay, this is the hardest part of parsing Crystal...
        # match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
        start = match.start(1)
        yield start, Operator, match.group(1)        # <<-?
        yield match.start(2), String.Heredoc, match.group(2)    # quote ", ', `
        yield match.start(3), String.Delimiter, match.group(3)  # heredoc name
        yield match.start(4), String.Heredoc, match.group(4)    # quote again

        # Heredoc bodies start only after the current line, so stack the
        # delimiters seen on this line and lex the rest of the line first.
        heredocstack = ctx.__dict__.setdefault('heredocstack', [])
        outermost = not bool(heredocstack)
        # <<- allows the terminator to be indented ("tolerant" mode below).
        heredocstack.append((match.group(1) == '<<-', match.group(3)))

        ctx.pos = match.start(5)
        ctx.end = match.end(5)
        # this may find other heredocs, so limit the recursion depth
        if len(heredocstack) < 100:
            yield from self.get_tokens_unprocessed(context=ctx)
        else:
            yield ctx.pos, String.Heredoc, match.group(5)
        ctx.pos = match.end()

        if outermost:
            # this is the outer heredoc again, now we can process them all
            for tolerant, hdname in heredocstack:
                lines = []
                for match in line_re.finditer(ctx.text, ctx.pos):
                    if tolerant:
                        check = match.group().strip()
                    else:
                        check = match.group().rstrip()
                    if check == hdname:
                        # Terminator found: emit the buffered body lines.
                        for amatch in lines:
                            yield amatch.start(), String.Heredoc, amatch.group()
                        yield match.start(), String.Delimiter, match.group()
                        ctx.pos = match.end()
                        break
                    else:
                        lines.append(match)
                else:
                    # end of heredoc not found -- error!
                    for amatch in lines:
                        yield amatch.start(), Error, amatch.group()
            ctx.end = len(ctx.text)
            del heredocstack[:]

    def gen_crystalstrings_rules():
        # Generates the 'strings' state plus the many percent-literal states
        # (%q, %Q, %w, %i, %r with five bracket pairs each). Called once at
        # class-definition time; the result is merged into ``tokens`` below.
        states = {}
        states['strings'] = [
            (r'\:\w+[!?]?', String.Symbol),
            (words(CRYSTAL_OPERATORS, prefix=r'\:'), String.Symbol),
            (r":'(\\\\|\\[^\\]|[^'\\])*'", String.Symbol),
            # This allows arbitrary text after '\ for simplicity
            (r"'(\\\\|\\'|[^']|\\[^'\\]+)'", String.Char),
            (r':"', String.Symbol, 'simple-sym'),
            # Crystal doesn't have "symbol:"s but this simplifies function args
            (r'([a-zA-Z_]\w*)(:)(?!:)', bygroups(String.Symbol, Punctuation)),
            (r'"', String.Double, 'simple-string'),
            (r'(?<!\.)`', String.Backtick, 'simple-backtick'),
        ]

        # double-quoted string and symbol
        for name, ttype, end in ('string', String.Double, '"'), \
                                ('sym', String.Symbol, '"'), \
                                ('backtick', String.Backtick, '`'):
            states['simple-'+name] = [
                include('string-escaped' if name == 'sym' else 'string-intp-escaped'),
                (rf'[^\\{end}#]+', ttype),
                (r'[\\#]', ttype),
                (end, ttype, '#pop'),
            ]

        # https://crystal-lang.org/docs/syntax_and_semantics/literals/string.html#percent-string-literals
        for lbrace, rbrace, bracecc, name in \
                ('\\{', '\\}', '{}', 'cb'), \
                ('\\[', '\\]', '\\[\\]', 'sb'), \
                ('\\(', '\\)', '()', 'pa'), \
                ('<', '>', '<>', 'ab'), \
                ('\\|', '\\|', '\\|', 'pi'):
            states[name+'-intp-string'] = [
                (r'\\' + lbrace, String.Other),
            ] + (lbrace != rbrace) * [
                # Only non-symmetric delimiters can nest, e.g. %Q{a {b} c}.
                (lbrace, String.Other, '#push'),
            ] + [
                (rbrace, String.Other, '#pop'),
                include('string-intp-escaped'),
                (r'[\\#' + bracecc + ']', String.Other),
                (r'[^\\#' + bracecc + ']+', String.Other),
            ]
            states['strings'].append((r'%Q?' + lbrace, String.Other,
                                      name+'-intp-string'))
            states[name+'-string'] = [
                (r'\\[\\' + bracecc + ']', String.Other),
            ] + (lbrace != rbrace) * [
                (lbrace, String.Other, '#push'),
            ] + [
                (rbrace, String.Other, '#pop'),
                (r'[\\#' + bracecc + ']', String.Other),
                (r'[^\\#' + bracecc + ']+', String.Other),
            ]
            # https://crystal-lang.org/docs/syntax_and_semantics/literals/array.html#percent-array-literals
            states['strings'].append((r'%[qwi]' + lbrace, String.Other,
                                      name+'-string'))
            states[name+'-regex'] = [
                (r'\\[\\' + bracecc + ']', String.Regex),
            ] + (lbrace != rbrace) * [
                (lbrace, String.Regex, '#push'),
            ] + [
                (rbrace + '[imsx]*', String.Regex, '#pop'),
                include('string-intp'),
                (r'[\\#' + bracecc + ']', String.Regex),
                (r'[^\\#' + bracecc + ']+', String.Regex),
            ]
            states['strings'].append((r'%r' + lbrace, String.Regex,
                                      name+'-regex'))

        return states

    tokens = {
        'root': [
            (r'#.*?$', Comment.Single),
            # keywords
            (words('''
                abstract asm begin break case do else elsif end ensure extend if in
                include next of private protected require rescue return select self super
                then unless until when while with yield
            '''.split(), suffix=r'\b'), Keyword),
            (words('''
                previous_def forall out uninitialized __DIR__ __FILE__ __LINE__
                __END_LINE__
            '''.split(), prefix=r'(?<!\.)', suffix=r'\b'), Keyword.Pseudo),
            # https://crystal-lang.org/docs/syntax_and_semantics/is_a.html
            (r'\.(is_a\?|nil\?|responds_to\?|as\?|as\b)', Keyword.Pseudo),
            (words(['true', 'false', 'nil'], suffix=r'\b'), Keyword.Constant),
            # start of function, class and module names
            (r'(module|lib)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)',
             bygroups(Keyword, Whitespace, Name.Namespace)),
            (r'(def|fun|macro)(\s+)((?:[a-zA-Z_]\w*::)*)',
             bygroups(Keyword, Whitespace, Name.Namespace), 'funcname'),
            (r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
            (r'(annotation|class|struct|union|type|alias|enum)(\s+)((?:[a-zA-Z_]\w*::)*)',
             bygroups(Keyword, Whitespace, Name.Namespace), 'classname'),
            # https://crystal-lang.org/api/toplevel.html
            (words('''
                instance_sizeof offsetof pointerof sizeof typeof
            '''.split(), prefix=r'(?<!\.)', suffix=r'\b'), Keyword.Pseudo),
            # macros
            (r'(?<!\.)(debugger\b|p!|pp!|record\b|spawn\b)', Name.Builtin.Pseudo),
            # builtins
            (words('''
                abort at_exit caller exit gets loop main p pp print printf puts
                raise rand read_line sleep spawn sprintf system
            '''.split(), prefix=r'(?<!\.)', suffix=r'\b'), Name.Builtin),
            # https://crystal-lang.org/api/Object.html#macro-summary
            (r'(?<!\.)(((class_)?((getter|property)\b[!?]?|setter\b))|'
             r'(def_(clone|equals|equals_and_hash|hash)|delegate|forward_missing_to)\b)',
             Name.Builtin.Pseudo),
            # normal heredocs
            (r'(?<!\w)(<<-?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)',
             heredoc_callback),
            # empty string heredocs
            (r'(<<-?)("|\')()(\2)(.*?\n)', heredoc_callback),
            (r'__END__', Comment.Preproc, 'end-part'),
            # multiline regex (after keywords or assignments)
            (r'(?:^|(?<=[=<>~!:])|'
             r'(?<=(?:\s|;)when\s)|'
             r'(?<=(?:\s|;)or\s)|'
             r'(?<=(?:\s|;)and\s)|'
             r'(?<=\.index\s)|'
             r'(?<=\.scan\s)|'
             r'(?<=\.sub\s)|'
             r'(?<=\.sub!\s)|'
             r'(?<=\.gsub\s)|'
             r'(?<=\.gsub!\s)|'
             r'(?<=\.match\s)|'
             r'(?<=(?:\s|;)if\s)|'
             r'(?<=(?:\s|;)elsif\s)|'
             r'(?<=^when\s)|'
             r'(?<=^index\s)|'
             r'(?<=^scan\s)|'
             r'(?<=^sub\s)|'
             r'(?<=^gsub\s)|'
             r'(?<=^sub!\s)|'
             r'(?<=^gsub!\s)|'
             r'(?<=^match\s)|'
             r'(?<=^if\s)|'
             r'(?<=^elsif\s)'
             r')(\s*)(/)', bygroups(Whitespace, String.Regex), 'multiline-regex'),
            # multiline regex (in method calls or subscripts)
            (r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'),
            # multiline regex (this time the funny no whitespace rule)
            (r'(\s+)(/)(?![\s=])', bygroups(Whitespace, String.Regex),
             'multiline-regex'),
            # lex numbers and ignore following regular expressions which
            # are division operators in fact (grrrr. i hate that. any
            # better ideas?)
            # since pygments 0.7 we also eat a "?" operator after numbers
            # so that the char operator does not work. Chars are not allowed
            # there so that you can use the ternary operator.
            # stupid example:
            #   x>=0?n[x]:""
            (r'(0o[0-7]+(?:_[0-7]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
             bygroups(Number.Oct, Whitespace, Operator)),
            (r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
             bygroups(Number.Hex, Whitespace, Operator)),
            (r'(0b[01]+(?:_[01]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
             bygroups(Number.Bin, Whitespace, Operator)),
            # 3 separate expressions for floats because any of the 3 optional
            # parts makes it a float
            (r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)(?:e[+-]?[0-9]+)?'
             r'(?:_?f[0-9]+)?)(\s*)([/?])?',
             bygroups(Number.Float, Whitespace, Operator)),
            (r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)'
             r'(?:_?f[0-9]+)?)(\s*)([/?])?',
             bygroups(Number.Float, Whitespace, Operator)),
            (r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)?'
             r'(?:_?f[0-9]+))(\s*)([/?])?',
             bygroups(Number.Float, Whitespace, Operator)),
            (r'(0\b|[1-9][\d]*(?:_\d+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
             bygroups(Number.Integer, Whitespace, Operator)),
            # Names
            (r'@@[a-zA-Z_]\w*', Name.Variable.Class),
            (r'@[a-zA-Z_]\w*', Name.Variable.Instance),
            (r'\$\w+', Name.Variable.Global),
            (r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global),
            (r'\$-[0adFiIlpvw]', Name.Variable.Global),
            (r'::', Operator),
            include('strings'),
            # https://crystal-lang.org/reference/syntax_and_semantics/literals/char.html
            (r'\?(\\[MC]-)*'  # modifiers
             r'(\\([\\abefnrtv#"\']|[0-7]{1,3}|x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|u\{[a-fA-F0-9 ]+\})|\S)'
             r'(?!\w)',
             String.Char),
            (r'[A-Z][A-Z_]+\b(?!::|\.)', Name.Constant),
            # macro expansion
            (r'\{%', String.Interpol, 'in-macro-control'),
            (r'\{\{', String.Interpol, 'in-macro-expr'),
            # annotations
            (r'(@\[)(\s*)([A-Z]\w*(::[A-Z]\w*)*)',
             bygroups(Operator, Whitespace, Name.Decorator), 'in-annot'),
            # this is needed because Crystal attributes can look
            # like keywords (class) or like this: ` ?!?
            (words(CRYSTAL_OPERATORS, prefix=r'(\.|::)'),
             bygroups(Operator, Name.Operator)),
            (r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])',
             bygroups(Operator, Name)),
            # Names can end with [!?] unless it's "!="
            (r'[a-zA-Z_]\w*(?:[!?](?!=))?', Name),
            (r'(\[|\]\??|\*\*|<=>?|>=|<<?|>>?|=~|===|'
             r'!~|&&?|\|\||\.{1,3})', Operator),
            (r'[-+/*%=<>&!^|~]=?', Operator),
            (r'[(){};,/?:\\]', Punctuation),
            (r'\s+', Whitespace)
        ],
        'funcname': [
            (r'(?:([a-zA-Z_]\w*)(\.))?'
             r'([a-zA-Z_]\w*[!?]?|\*\*?|[-+]@?|'
             r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)',
             bygroups(Name.Class, Operator, Name.Function), '#pop'),
            default('#pop')
        ],
        'classname': [
            (r'[A-Z_]\w*', Name.Class),
            (r'(\()(\s*)([A-Z_]\w*)(\s*)(\))',
             bygroups(Punctuation, Whitespace, Name.Class, Whitespace, Punctuation)),
            default('#pop')
        ],
        # Interpolation #{...} inside strings; delegates back to 'root'.
        'in-intp': [
            (r'\{', String.Interpol, '#push'),
            (r'\}', String.Interpol, '#pop'),
            include('root'),
        ],
        'string-intp': [
            (r'#\{', String.Interpol, 'in-intp'),
        ],
        'string-escaped': [
            # https://crystal-lang.org/reference/syntax_and_semantics/literals/string.html
            (r'\\([\\abefnrtv#"\']|[0-7]{1,3}|x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|u\{[a-fA-F0-9 ]+\})',
             String.Escape)
        ],
        'string-intp-escaped': [
            include('string-intp'),
            include('string-escaped'),
        ],
        'interpolated-regex': [
            include('string-intp'),
            (r'[\\#]', String.Regex),
            (r'[^\\#]+', String.Regex),
        ],
        'interpolated-string': [
            include('string-intp'),
            (r'[\\#]', String.Other),
            (r'[^\\#]+', String.Other),
        ],
        'multiline-regex': [
            include('string-intp'),
            (r'\\\\', String.Regex),
            (r'\\/', String.Regex),
            (r'[\\#]', String.Regex),
            (r'[^\\/#]+', String.Regex),
            (r'/[imsx]*', String.Regex, '#pop'),
        ],
        # Everything after __END__ is data, not code.
        'end-part': [
            (r'.+', Comment.Preproc, '#pop')
        ],
        'in-macro-control': [
            (r'\{%', String.Interpol, '#push'),
            (r'%\}', String.Interpol, '#pop'),
            (r'(for|verbatim)\b', Keyword),
            include('root'),
        ],
        'in-macro-expr': [
            (r'\{\{', String.Interpol, '#push'),
            (r'\}\}', String.Interpol, '#pop'),
            include('root'),
        ],
        'in-annot': [
            (r'\[', Operator, '#push'),
            (r'\]', Operator, '#pop'),
            include('root'),
        ],
    }
    # Merge in the generated string/percent-literal states.
    tokens.update(gen_crystalstrings_rules())

View File

@@ -0,0 +1,466 @@
"""
pygments.lexers.csound
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Csound languages.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default, include, using, words
from pygments.token import Comment, Error, Keyword, Name, Number, Operator, Punctuation, \
String, Text, Whitespace
from pygments.lexers._csound_builtins import OPCODES, DEPRECATED_OPCODES, REMOVED_OPCODES
from pygments.lexers.html import HtmlLexer
from pygments.lexers.python import PythonLexer
from pygments.lexers.scripting import LuaLexer
# Public API of this module (the CsoundLexer base class is internal).
__all__ = ['CsoundScoreLexer', 'CsoundOrchestraLexer', 'CsoundDocumentLexer']

# Shared rule: optional ;/// line comments followed by the newline itself.
newline = (r'((?:(?:;|//).*)*)(\n)', bygroups(Comment.Single, Text))
class CsoundLexer(RegexLexer):
    """
    Base class with states shared by the Csound score and orchestra lexers:
    whitespace/comments, preprocessor directives, macro definition and
    expansion, numbers, and quoted/braced strings.
    """
    url = 'https://csound.com/'

    tokens = {
        'whitespace': [
            (r'[ \t]+', Whitespace),
            (r'/[*](?:.|\n)*?[*]/', Comment.Multiline),
            (r'(?:;|//).*$', Comment.Single),
            # A backslash before a newline continues the line.
            (r'(\\)(\n)', bygroups(Text, Whitespace))
        ],

        'preprocessor directives': [
            (r'#(?:e(?:nd(?:if)?|lse)\b|##)|@@?[ \t]*\d+', Comment.Preproc),
            # #includestr must be matched before #include (shared prefix).
            (r'#includestr', Comment.Preproc, 'includestr directive'),
            (r'#include', Comment.Preproc, 'include directive'),
            (r'#[ \t]*define', Comment.Preproc, 'define directive'),
            (r'#(?:ifn?def|undef)\b', Comment.Preproc, 'macro directive')
        ],

        'include directive': [
            include('whitespace'),
            # The filename delimiter is whatever character follows #include.
            (r'([^ \t]).*?\1', String, '#pop')
        ],
        'includestr directive': [
            include('whitespace'),
            (r'"', String, ('#pop', 'quoted string'))
        ],

        'define directive': [
            (r'\n', Whitespace),
            include('whitespace'),
            # A macro with parameters: NAME( ... )
            (r'([A-Z_a-z]\w*)(\()', bygroups(Comment.Preproc, Punctuation),
                ('#pop', 'macro parameter name list')),
            (r'[A-Z_a-z]\w*', Comment.Preproc, ('#pop', 'before macro body'))
        ],
        'macro parameter name list': [
            include('whitespace'),
            (r'[A-Z_a-z]\w*', Comment.Preproc),
            (r"['#]", Punctuation),
            (r'\)', Punctuation, ('#pop', 'before macro body'))
        ],
        'before macro body': [
            (r'\n', Whitespace),
            include('whitespace'),
            (r'#', Punctuation, ('#pop', 'macro body'))
        ],
        'macro body': [
            # The body runs until an unescaped '#'.
            (r'(?:\\(?!#)|[^#\\]|\n)+', Comment.Preproc),
            (r'\\#', Comment.Preproc),
            (r'(?<!\\)#', Punctuation, '#pop')
        ],

        'macro directive': [
            include('whitespace'),
            (r'[A-Z_a-z]\w*', Comment.Preproc, '#pop')
        ],

        'macro uses': [
            # $NAME(...) with a parameter value list, or a bare $NAME use.
            (r'(\$[A-Z_a-z]\w*\.?)(\()', bygroups(Comment.Preproc, Punctuation),
                'macro parameter value list'),
            (r'\$[A-Z_a-z]\w*(?:\.|\b)', Comment.Preproc)
        ],
        'macro parameter value list': [
            (r'(?:[^\'#"{()]|\{(?!\{))+', Comment.Preproc),
            (r"['#]", Punctuation),
            (r'"', String, 'macro parameter value quoted string'),
            (r'\{\{', String, 'macro parameter value braced string'),
            (r'\(', Comment.Preproc, 'macro parameter value parenthetical'),
            (r'\)', Punctuation, '#pop')
        ],
        'macro parameter value quoted string': [
            # Inside a parameter value, #, ' and parentheses must be escaped.
            (r"\\[#'()]", Comment.Preproc),
            (r"[#'()]", Error),
            include('quoted string')
        ],
        'macro parameter value braced string': [
            (r"\\[#'()]", Comment.Preproc),
            (r"[#'()]", Error),
            include('braced string')
        ],
        'macro parameter value parenthetical': [
            (r'(?:[^\\()]|\\\))+', Comment.Preproc),
            (r'\(', Comment.Preproc, '#push'),
            (r'\)', Comment.Preproc, '#pop')
        ],

        'whitespace and macro uses': [
            include('whitespace'),
            include('macro uses')
        ],

        'numbers': [
            (r'\d+[Ee][+-]?\d+|(\d+\.\d*|\d*\.\d+)([Ee][+-]?\d+)?', Number.Float),
            (r'(0[Xx])([0-9A-Fa-f]+)', bygroups(Keyword.Type, Number.Hex)),
            (r'\d+', Number.Integer)
        ],

        'quoted string': [
            (r'"', String, '#pop'),
            (r'[^"$]+', String),
            include('macro uses'),
            (r'[$]', String)
        ],

        'braced string': [
            # Do nothing. This must be defined in subclasses.
        ]
    }
class CsoundScoreLexer(CsoundLexer):
    """
    For `Csound <https://csound.com>`_ scores.
    """

    name = 'Csound Score'
    aliases = ['csound-score', 'csound-sco']
    filenames = ['*.sco']
    version_added = '2.1'

    tokens = {
        'root': [
            (r'\n', Whitespace),
            include('whitespace and macro uses'),
            include('preprocessor directives'),

            # Single-letter score statements.
            (r'[aBbCdefiqstvxy]', Keyword),
            # There is also a w statement that is generated internally and should not be
            # used; see https://github.com/csound/csound/issues/750.

            (r'z', Keyword.Constant),
            # z is a constant equal to 800,000,000,000. 800 billion seconds is about
            # 25,367.8 years. See also
            # https://csound.com/docs/manual/ScoreTop.html and
            # https://github.com/csound/csound/search?q=stof+path%3AEngine+filename%3Asread.c.

            (r'([nNpP][pP])(\d+)', bygroups(Keyword, Number.Integer)),

            (r'[mn]', Keyword, 'mark statement'),

            include('numbers'),
            (r'[!+\-*/^%&|<>#~.]', Operator),
            (r'[()\[\]]', Punctuation),
            (r'"', String, 'quoted string'),
            (r'\{', Comment.Preproc, 'loop after left brace'),
        ],

        'mark statement': [
            include('whitespace and macro uses'),
            (r'[A-Z_a-z]\w*', Name.Label),
            (r'\n', Whitespace, '#pop')
        ],

        # A { loop is "{ <repeat count> <macro name> ... }".
        'loop after left brace': [
            include('whitespace and macro uses'),
            (r'\d+', Number.Integer, ('#pop', 'loop after repeat count')),
        ],
        'loop after repeat count': [
            include('whitespace and macro uses'),
            (r'[A-Z_a-z]\w*', Comment.Preproc, ('#pop', 'loop'))
        ],
        'loop': [
            (r'\}', Comment.Preproc, '#pop'),
            include('root')
        ],

        # Braced strings are not allowed in Csound scores, but this is needed because the
        # superclass includes it.
        'braced string': [
            (r'\}\}', String, '#pop'),
            (r'[^}]|\}(?!\})', String)
        ]
    }
class CsoundOrchestraLexer(CsoundLexer):
    """
    For `Csound <https://csound.com>`_ orchestras.
    """

    name = 'Csound Orchestra'
    aliases = ['csound', 'csound-orc']
    filenames = ['*.orc', '*.udo']
    version_added = '2.1'

    # Names of user-defined opcodes seen so far, so later uses of them can be
    # highlighted as functions. NOTE(review): this is a class-level set shared
    # across lexer instances, so it accumulates names from every document
    # lexed in the same process.
    user_defined_opcodes = set()

    def opcode_name_callback(lexer, match):
        # Record a newly declared opcode name and emit it as a function name.
        opcode = match.group(0)
        lexer.user_defined_opcodes.add(opcode)
        yield match.start(), Name.Function, opcode

    def name_callback(lexer, match):
        # Classify a bare name: built-in opcode, previously declared
        # user-defined opcode, or an ordinary name (whose leading type prefix
        # such as g/a/f/i/k/S/w is highlighted as a type).
        type_annotation_token = Keyword.Type

        name = match.group(1)
        if name in OPCODES or name in DEPRECATED_OPCODES or name in REMOVED_OPCODES:
            yield match.start(), Name.Builtin, name
        elif name in lexer.user_defined_opcodes:
            yield match.start(), Name.Function, name
        else:
            type_annotation_token = Name
            name_match = re.search(r'^(g?[afikSw])(\w+)', name)
            if name_match:
                yield name_match.start(1), Keyword.Type, name_match.group(1)
                yield name_match.start(2), Name, name_match.group(2)
            else:
                yield match.start(), Name, name

        # Optional ":T" type annotation captured by groups 2 and 3.
        if match.group(2):
            yield match.start(2), Punctuation, match.group(2)
            yield match.start(3), type_annotation_token, match.group(3)

    tokens = {
        'root': [
            (r'\n', Whitespace),

            (r'^([ \t]*)(\w+)(:)([ \t]+|$)', bygroups(Whitespace, Name.Label, Punctuation, Whitespace)),

            include('whitespace and macro uses'),
            include('preprocessor directives'),

            (r'\binstr\b', Keyword.Declaration, 'instrument numbers and identifiers'),
            (r'\bopcode\b', Keyword.Declaration, 'after opcode keyword'),
            (r'\b(?:end(?:in|op))\b', Keyword.Declaration),

            include('partial statements')
        ],

        'partial statements': [
            (r'\b(?:0dbfs|A4|k(?:r|smps)|nchnls(?:_i)?|sr)\b', Name.Variable.Global),

            include('numbers'),

            (r'\+=|-=|\*=|/=|<<|>>|<=|>=|==|!=|&&|\|\||[~¬]|[=!+\-*/^%&|<>#?:]', Operator),
            (r'[(),\[\]]', Punctuation),

            (r'"', String, 'quoted string'),
            (r'\{\{', String, 'braced string'),

            (words((
                'do', 'else', 'elseif', 'endif', 'enduntil', 'fi', 'if', 'ithen', 'kthen',
                'od', 'then', 'until', 'while',
                ), prefix=r'\b', suffix=r'\b'), Keyword),
            (words(('return', 'rireturn'), prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),

            # Goto-family opcodes push states to highlight their label (and
            # any leading condition/time arguments) correctly.
            (r'\b[ik]?goto\b', Keyword, 'goto label'),
            (r'\b(r(?:einit|igoto)|tigoto)(\(|\b)', bygroups(Keyword.Pseudo, Punctuation),
             'goto label'),
            (r'\b(c(?:g|in?|k|nk?)goto)(\(|\b)', bygroups(Keyword.Pseudo, Punctuation),
             ('goto label', 'goto argument')),
            (r'\b(timout)(\(|\b)', bygroups(Keyword.Pseudo, Punctuation),
             ('goto label', 'goto argument', 'goto argument')),
            (r'\b(loop_[gl][et])(\(|\b)', bygroups(Keyword.Pseudo, Punctuation),
             ('goto label', 'goto argument', 'goto argument', 'goto argument')),

            (r'\bprintk?s\b', Name.Builtin, 'prints opcode'),
            (r'\b(?:readscore|scoreline(?:_i)?)\b', Name.Builtin, 'Csound score opcode'),
            (r'\bpyl?run[it]?\b', Name.Builtin, 'Python opcode'),
            (r'\blua_(?:exec|opdef)\b', Name.Builtin, 'Lua opcode'),

            (r'\bp\d+\b', Name.Variable.Instance),
            (r'\b([A-Z_a-z]\w*)(?:(:)([A-Za-z]))?\b', name_callback)
        ],

        'instrument numbers and identifiers': [
            include('whitespace and macro uses'),
            (r'\d+|[A-Z_a-z]\w*', Name.Function),
            (r'[+,]', Punctuation),
            (r'\n', Whitespace, '#pop')
        ],

        'after opcode keyword': [
            include('whitespace and macro uses'),
            (r'[A-Z_a-z]\w*', opcode_name_callback, ('#pop', 'opcode type signatures')),
            (r'\n', Whitespace, '#pop')
        ],
        'opcode type signatures': [
            include('whitespace and macro uses'),

            # https://github.com/csound/csound/search?q=XIDENT+path%3AEngine+filename%3Acsound_orc.lex
            (r'0|[afijkKoOpPStV\[\]]+', Keyword.Type),

            (r',', Punctuation),
            (r'\n', Whitespace, '#pop')
        ],

        'quoted string': [
            (r'"', String, '#pop'),
            (r'[^\\"$%)]+', String),
            include('macro uses'),
            include('escape sequences'),
            include('format specifiers'),
            (r'[\\$%)]', String)
        ],
        'braced string': [
            (r'\}\}', String, '#pop'),
            (r'(?:[^\\%)}]|\}(?!\}))+', String),
            include('escape sequences'),
            include('format specifiers'),
            (r'[\\%)]', String)
        ],
        'escape sequences': [
            # https://github.com/csound/csound/search?q=unquote_string+path%3AEngine+filename%3Acsound_orc_compile.c
            (r'\\(?:[\\abnrt"]|[0-7]{1,3})', String.Escape)
        ],
        # Format specifiers are highlighted in all strings, even though only
        #   fprintks        https://csound.com/docs/manual/fprintks.html
        #   fprints         https://csound.com/docs/manual/fprints.html
        #   printf/printf_i https://csound.com/docs/manual/printf.html
        #   printks         https://csound.com/docs/manual/printks.html
        #   prints          https://csound.com/docs/manual/prints.html
        #   sprintf         https://csound.com/docs/manual/sprintf.html
        #   sprintfk        https://csound.com/docs/manual/sprintfk.html
        # work with strings that contain format specifiers. In addition, these opcodes'
        # handling of format specifiers is inconsistent:
        #   - fprintks and fprints accept %a and %A specifiers, and accept %s specifiers
        #     starting in Csound 6.15.0.
        #   - printks and prints accept %a and %A specifiers, but don't accept %s
        #     specifiers.
        #   - printf, printf_i, sprintf, and sprintfk don't accept %a and %A specifiers,
        #     but accept %s specifiers.
        # See https://github.com/csound/csound/issues/747 for more information.
        'format specifiers': [
            (r'%[#0\- +]*\d*(?:\.\d+)?[AE-GXac-giosux]', String.Interpol),
            (r'%%', String.Escape)
        ],

        'goto argument': [
            include('whitespace and macro uses'),
            (r',', Punctuation, '#pop'),
            include('partial statements')
        ],
        'goto label': [
            include('whitespace and macro uses'),
            (r'\w+', Name.Label, '#pop'),
            default('#pop')
        ],

        'prints opcode': [
            include('whitespace and macro uses'),
            (r'"', String, 'prints quoted string'),
            default('#pop')
        ],
        'prints quoted string': [
            (r'\\\\[aAbBnNrRtT]', String.Escape),
            (r'%[!nNrRtT]|[~^]{1,2}', String.Escape),
            include('quoted string')
        ],

        # Embedded languages: score, Python and Lua code inside {{ }} blocks
        # is delegated to the corresponding lexer.
        'Csound score opcode': [
            include('whitespace and macro uses'),
            (r'"', String, 'quoted string'),
            (r'\{\{', String, 'Csound score'),
            (r'\n', Whitespace, '#pop')
        ],
        'Csound score': [
            (r'\}\}', String, '#pop'),
            (r'([^}]+)|\}(?!\})', using(CsoundScoreLexer))
        ],

        'Python opcode': [
            include('whitespace and macro uses'),
            (r'"', String, 'quoted string'),
            (r'\{\{', String, 'Python'),
            (r'\n', Whitespace, '#pop')
        ],
        'Python': [
            (r'\}\}', String, '#pop'),
            (r'([^}]+)|\}(?!\})', using(PythonLexer))
        ],

        'Lua opcode': [
            include('whitespace and macro uses'),
            (r'"', String, 'quoted string'),
            (r'\{\{', String, 'Lua'),
            (r'\n', Whitespace, '#pop')
        ],
        'Lua': [
            (r'\}\}', String, '#pop'),
            (r'([^}]+)|\}(?!\})', using(LuaLexer))
        ]
    }
class CsoundDocumentLexer(RegexLexer):
    """
    For Csound documents.

    A Csound document (CSD file) wraps orchestra code, score code, and
    optionally HTML in XML-like section tags; the body of each known
    section is delegated to the corresponding sub-lexer.
    """
    name = 'Csound Document'
    aliases = ['csound-document', 'csound-csd']
    filenames = ['*.csd']
    url = 'https://csound.com'
    version_added = '2.1'
    # These tokens are based on those in XmlLexer in pygments/lexers/html.py. Making
    # CsoundDocumentLexer a subclass of XmlLexer rather than RegexLexer may seem like a
    # better idea, since Csound Document files look like XML files. However, Csound
    # Documents can contain Csound comments (preceded by //, for example) before and
    # after the root element, unescaped bitwise AND & and less than < operators, etc. In
    # other words, while Csound Document files look like XML files, they may not actually
    # be XML files.
    tokens = {
        'root': [
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'(?:;|//).*$', Comment.Single),
            (r'[^/;<]+|/(?!/)', Text),
            # Known section tags push both the section state and 'tag', so
            # the remainder of the opening tag is lexed before the body.
            (r'<\s*CsInstruments', Name.Tag, ('orchestra', 'tag')),
            (r'<\s*CsScore', Name.Tag, ('score', 'tag')),
            (r'<\s*[Hh][Tt][Mm][Ll]', Name.Tag, ('HTML', 'tag')),
            (r'<\s*[\w:.-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag)
        ],
        'orchestra': [
            (r'<\s*/\s*CsInstruments\s*>', Name.Tag, '#pop'),
            # Everything up to (but not including) the closing tag is lexed
            # as Csound orchestra code.
            (r'(.|\n)+?(?=<\s*/\s*CsInstruments\s*>)', using(CsoundOrchestraLexer))
        ],
        'score': [
            (r'<\s*/\s*CsScore\s*>', Name.Tag, '#pop'),
            (r'(.|\n)+?(?=<\s*/\s*CsScore\s*>)', using(CsoundScoreLexer))
        ],
        'HTML': [
            (r'<\s*/\s*[Hh][Tt][Mm][Ll]\s*>', Name.Tag, '#pop'),
            (r'(.|\n)+?(?=<\s*/\s*[Hh][Tt][Mm][Ll]\s*>)', using(HtmlLexer))
        ],
        'tag': [
            (r'\s+', Whitespace),
            (r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop')
        ],
        'attr': [
            (r'\s+', Whitespace),
            (r'".*?"', String, '#pop'),
            (r"'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop')
        ]
    }

View File

@@ -0,0 +1,602 @@
"""
pygments.lexers.css
~~~~~~~~~~~~~~~~~~~
Lexers for CSS and related stylesheet formats.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import copy
from pygments.lexer import ExtendedRegexLexer, RegexLexer, include, bygroups, \
default, words, inherit
from pygments.token import Comment, Operator, Keyword, Name, String, Number, \
Punctuation, Whitespace
from pygments.lexers._css_builtins import _css_properties
__all__ = ['CssLexer', 'SassLexer', 'ScssLexer', 'LessCssLexer']
# Keyword tables shared by the CSS-family lexers below. Each tuple is fed
# to pygments.lexer.words() to build a single alternation regex.
# List of vendor prefixes obtained from:
# https://www.w3.org/TR/CSS21/syndata.html#vendor-keyword-history
_vendor_prefixes = (
    '-ms-', 'mso-', '-moz-', '-o-', '-xv-', '-atsc-', '-wap-', '-khtml-',
    '-webkit-', 'prince-', '-ah-', '-hp-', '-ro-', '-rim-', '-tc-',
)
# List of extended color keywords obtained from:
# https://drafts.csswg.org/css-color/#named-colors
_color_keywords = (
    'aliceblue', 'antiquewhite', 'aqua', 'aquamarine', 'azure', 'beige',
    'bisque', 'black', 'blanchedalmond', 'blue', 'blueviolet', 'brown',
    'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral',
    'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan',
    'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki',
    'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred',
    'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray',
    'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue',
    'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite',
    'forestgreen', 'fuchsia', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod',
    'gray', 'green', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred',
    'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen',
    'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan',
    'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey',
    'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue',
    'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow',
    'lime', 'limegreen', 'linen', 'magenta', 'maroon', 'mediumaquamarine',
    'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen',
    'mediumslateblue', 'mediumspringgreen', 'mediumturquoise',
    'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin',
    'navajowhite', 'navy', 'oldlace', 'olive', 'olivedrab', 'orange',
    'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise',
    'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum',
    'powderblue', 'purple', 'rebeccapurple', 'red', 'rosybrown', 'royalblue',
    'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna',
    'silver', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow',
    'springgreen', 'steelblue', 'tan', 'teal', 'thistle', 'tomato', 'turquoise',
    'violet', 'wheat', 'white', 'whitesmoke', 'yellow', 'yellowgreen',
) + ('transparent',)
# List of keyword values obtained from:
# http://cssvalues.com/
_keyword_values = (
    'absolute', 'alias', 'all', 'all-petite-caps', 'all-scroll',
    'all-small-caps', 'allow-end', 'alpha', 'alternate', 'alternate-reverse',
    'always', 'armenian', 'auto', 'avoid', 'avoid-column', 'avoid-page',
    'backwards', 'balance', 'baseline', 'below', 'blink', 'block', 'bold',
    'bolder', 'border-box', 'both', 'bottom', 'box-decoration', 'break-word',
    'capitalize', 'cell', 'center', 'circle', 'clip', 'clone', 'close-quote',
    'col-resize', 'collapse', 'color', 'color-burn', 'color-dodge', 'column',
    'column-reverse', 'compact', 'condensed', 'contain', 'container',
    'content-box', 'context-menu', 'copy', 'cover', 'crisp-edges', 'crosshair',
    'currentColor', 'cursive', 'darken', 'dashed', 'decimal',
    'decimal-leading-zero', 'default', 'descendants', 'difference', 'digits',
    'disc', 'distribute', 'dot', 'dotted', 'double', 'double-circle', 'e-resize',
    'each-line', 'ease', 'ease-in', 'ease-in-out', 'ease-out', 'edges',
    'ellipsis', 'end', 'ew-resize', 'exclusion', 'expanded', 'extra-condensed',
    'extra-expanded', 'fantasy', 'fill', 'fill-box', 'filled', 'first', 'fixed',
    'flat', 'flex', 'flex-end', 'flex-start', 'flip', 'force-end', 'forwards',
    'from-image', 'full-width', 'geometricPrecision', 'georgian', 'groove',
    'hanging', 'hard-light', 'help', 'hidden', 'hide', 'horizontal', 'hue',
    'icon', 'infinite', 'inherit', 'initial', 'ink', 'inline', 'inline-block',
    'inline-flex', 'inline-table', 'inset', 'inside', 'inter-word', 'invert',
    'isolate', 'italic', 'justify', 'large', 'larger', 'last', 'left',
    'lighten', 'lighter', 'line-through', 'linear', 'list-item', 'local',
    'loose', 'lower-alpha', 'lower-greek', 'lower-latin', 'lower-roman',
    'lowercase', 'ltr', 'luminance', 'luminosity', 'mandatory', 'manipulation',
    'manual', 'margin-box', 'match-parent', 'medium', 'mixed', 'monospace',
    'move', 'multiply', 'n-resize', 'ne-resize', 'nesw-resize',
    'no-close-quote', 'no-drop', 'no-open-quote', 'no-repeat', 'none', 'normal',
    'not-allowed', 'nowrap', 'ns-resize', 'nw-resize', 'nwse-resize', 'objects',
    'oblique', 'off', 'on', 'open', 'open-quote', 'optimizeLegibility',
    'optimizeSpeed', 'outset', 'outside', 'over', 'overlay', 'overline',
    'padding-box', 'page', 'pan-down', 'pan-left', 'pan-right', 'pan-up',
    'pan-x', 'pan-y', 'paused', 'petite-caps', 'pixelated', 'pointer',
    'preserve-3d', 'progress', 'proximity', 'relative', 'repeat',
    'repeat no-repeat', 'repeat-x', 'repeat-y', 'reverse', 'revert', 'ridge', 'right',
    'round', 'row', 'row-resize', 'row-reverse', 'rtl', 'ruby', 'ruby-base',
    'ruby-base-container', 'ruby-text', 'ruby-text-container', 'run-in',
    'running', 's-resize', 'sans-serif', 'saturation', 'scale-down', 'screen',
    'scroll', 'se-resize', 'semi-condensed', 'semi-expanded', 'separate',
    'serif', 'sesame', 'show', 'sideways', 'sideways-left', 'sideways-right',
    'slice', 'small', 'small-caps', 'smaller', 'smooth', 'snap', 'soft-light',
    'solid', 'space', 'space-around', 'space-between', 'spaces', 'square',
    'start', 'static', 'step-end', 'step-start', 'sticky', 'stretch', 'strict',
    'stroke-box', 'style', 'sw-resize', 'table', 'table-caption', 'table-cell',
    'table-column', 'table-column-group', 'table-footer-group',
    'table-header-group', 'table-row', 'table-row-group', 'text', 'thick',
    'thin', 'titling-caps', 'to', 'top', 'triangle', 'ultra-condensed',
    'ultra-expanded', 'under', 'underline', 'unicase', 'unset', 'upper-alpha',
    'upper-latin', 'upper-roman', 'uppercase', 'upright', 'use-glyph-orientation',
    'vertical', 'vertical-text', 'view-box', 'visible', 'w-resize', 'wait',
    'wavy', 'weight', 'weight style', 'wrap', 'wrap-reverse', 'x-large',
    'x-small', 'xx-large', 'xx-small', 'zoom-in', 'zoom-out',
)
# List of other keyword values from other sources:
_other_keyword_values = (
    'above', 'aural', 'behind', 'bidi-override', 'center-left', 'center-right',
    'cjk-ideographic', 'continuous', 'crop', 'cross', 'embed', 'far-left',
    'far-right', 'fast', 'faster', 'hebrew', 'high', 'higher', 'hiragana',
    'hiragana-iroha', 'katakana', 'katakana-iroha', 'landscape', 'left-side',
    'leftwards', 'level', 'loud', 'low', 'lower', 'message-box', 'middle',
    'mix', 'narrower', 'once', 'portrait', 'right-side', 'rightwards', 'silent',
    'slow', 'slower', 'small-caption', 'soft', 'spell-out', 'status-bar',
    'super', 'text-bottom', 'text-top', 'wider', 'x-fast', 'x-high', 'x-loud',
    'x-low', 'x-soft', 'yes', 'pre', 'pre-wrap', 'pre-line',
)
# List of functional notation and function keyword values:
_functional_notation_keyword_values = (
    'attr', 'blackness', 'blend', 'blenda', 'blur', 'brightness', 'calc',
    'circle', 'color-mod', 'contrast', 'counter', 'cubic-bezier', 'device-cmyk',
    'drop-shadow', 'ellipse', 'gray', 'grayscale', 'hsl', 'hsla', 'hue',
    'hue-rotate', 'hwb', 'image', 'inset', 'invert', 'lightness',
    'linear-gradient', 'matrix', 'matrix3d', 'opacity', 'perspective',
    'polygon', 'radial-gradient', 'rect', 'repeating-linear-gradient',
    'repeating-radial-gradient', 'rgb', 'rgba', 'rotate', 'rotate3d', 'rotateX',
    'rotateY', 'rotateZ', 'saturate', 'saturation', 'scale', 'scale3d',
    'scaleX', 'scaleY', 'scaleZ', 'sepia', 'shade', 'skewX', 'skewY', 'steps',
    'tint', 'toggle', 'translate', 'translate3d', 'translateX', 'translateY',
    'translateZ', 'whiteness',
)
# Note! Handle url(...) separately.
# List of units obtained from:
# https://www.w3.org/TR/css3-values/
_angle_units = (
    'deg', 'grad', 'rad', 'turn',
)
_frequency_units = (
    'Hz', 'kHz',
)
_length_units = (
    'em', 'ex', 'ch', 'rem',
    'vh', 'vw', 'vmin', 'vmax',
    'px', 'mm', 'cm', 'in', 'pt', 'pc', 'q',
)
_resolution_units = (
    'dpi', 'dpcm', 'dppx',
)
_time_units = (
    's', 'ms',
)
# Combined unit table used by the 'numeric-end' state of CssLexer.
_all_units = _angle_units + _frequency_units + _length_units + \
    _resolution_units + _time_units
class CssLexer(RegexLexer):
    """
    For CSS (Cascading Style Sheets).

    Declarations are lexed in three phases: selectors and at-rules in
    'basics', property names in 'content', and property values in
    'value-start' (with 'function-start' handling nested function calls).
    """
    name = 'CSS'
    url = 'https://www.w3.org/TR/CSS/#css'
    aliases = ['css']
    filenames = ['*.css']
    mimetypes = ['text/css']
    version_added = ''
    tokens = {
        'root': [
            include('basics'),
        ],
        'basics': [
            (r'\s+', Whitespace),
            (r'/\*(?:.|\n)*?\*/', Comment),
            (r'\{', Punctuation, 'content'),
            (r'(\:{1,2})([\w-]+)', bygroups(Punctuation, Name.Decorator)),
            (r'(\.)([\w-]+)', bygroups(Punctuation, Name.Class)),
            (r'(\#)([\w-]+)', bygroups(Punctuation, Name.Namespace)),
            (r'(@)([\w-]+)', bygroups(Punctuation, Keyword), 'atrule'),
            (r'[\w-]+', Name.Tag),
            (r'[~^*!%&$\[\]()<>|+=@:;,./?-]', Operator),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
        ],
        'atrule': [
            (r'\{', Punctuation, 'atcontent'),
            (r';', Punctuation, '#pop'),
            include('basics'),
        ],
        'atcontent': [
            include('basics'),
            # Pop both 'atcontent' and the enclosing 'atrule' state.
            (r'\}', Punctuation, '#pop:2'),
        ],
        'content': [
            (r'\s+', Whitespace),
            (r'\}', Punctuation, '#pop'),
            (r';', Punctuation),
            (r'^@.*?$', Comment.Preproc),
            (words(_vendor_prefixes,), Keyword.Pseudo),
            # Known CSS properties, then custom properties (leading dashes),
            # then any other identifier-like property name.
            (r'('+r'|'.join(_css_properties)+r')(\s*)(\:)',
             bygroups(Keyword, Whitespace, Punctuation), 'value-start'),
            (r'([-]+[a-zA-Z_][\w-]*)(\s*)(\:)', bygroups(Name.Variable, Whitespace, Punctuation),
             'value-start'),
            (r'([a-zA-Z_][\w-]*)(\s*)(\:)', bygroups(Name, Whitespace, Punctuation),
             'value-start'),
            (r'/\*(?:.|\n)*?\*/', Comment),
        ],
        'value-start': [
            (r'\s+', Whitespace),
            (words(_vendor_prefixes,), Name.Builtin.Pseudo),
            include('urls'),
            (r'('+r'|'.join(_functional_notation_keyword_values)+r')(\()',
             bygroups(Name.Builtin, Punctuation), 'function-start'),
            (r'([a-zA-Z_][\w-]+)(\()',
             bygroups(Name.Function, Punctuation), 'function-start'),
            (words(_keyword_values, suffix=r'\b'), Keyword.Constant),
            (words(_other_keyword_values, suffix=r'\b'), Keyword.Constant),
            (words(_color_keywords, suffix=r'\b'), Keyword.Constant),
            # for transition-property etc.
            (words(_css_properties, suffix=r'\b'), Keyword),
            (r'\!important', Comment.Preproc),
            (r'/\*(?:.|\n)*?\*/', Comment),
            include('numeric-values'),
            (r'[~^*!%&<>|+=@:./?-]+', Operator),
            (r'[\[\](),]+', Punctuation),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'[a-zA-Z_][\w-]*', Name),
            (r';', Punctuation, '#pop'),
            (r'\}', Punctuation, '#pop:2'),
        ],
        'function-start': [
            (r'\s+', Whitespace),
            (r'[-]+([A-Za-z][\w+]*[-]*)+', Name.Variable),
            include('urls'),
            (words(_vendor_prefixes,), Keyword.Pseudo),
            (words(_keyword_values, suffix=r'\b'), Keyword.Constant),
            (words(_other_keyword_values, suffix=r'\b'), Keyword.Constant),
            (words(_color_keywords, suffix=r'\b'), Keyword.Constant),
            # function-start may be entered recursively
            (r'(' + r'|'.join(_functional_notation_keyword_values) + r')(\()',
             bygroups(Name.Builtin, Punctuation), 'function-start'),
            (r'([a-zA-Z_][\w-]+)(\()',
             bygroups(Name.Function, Punctuation), 'function-start'),
            (r'/\*(?:.|\n)*?\*/', Comment),
            include('numeric-values'),
            (r'[*+/-]', Operator),
            (r',', Punctuation),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'[a-zA-Z_-]\w*', Name),
            (r'\)', Punctuation, '#pop'),
        ],
        'urls': [
            (r'(url)(\()(".*?")(\))', bygroups(Name.Builtin, Punctuation,
                                               String.Double, Punctuation)),
            (r"(url)(\()('.*?')(\))", bygroups(Name.Builtin, Punctuation,
                                               String.Single, Punctuation)),
            (r'(url)(\()(.*?)(\))', bygroups(Name.Builtin, Punctuation,
                                             String.Other, Punctuation)),
        ],
        'numeric-values': [
            (r'\#[a-zA-Z0-9]{1,6}', Number.Hex),
            (r'[+\-]?[0-9]*[.][0-9]+', Number.Float, 'numeric-end'),
            (r'[+\-]?[0-9]+', Number.Integer, 'numeric-end'),
        ],
        'numeric-end': [
            (words(_all_units, suffix=r'\b'), Keyword.Type),
            (r'%', Keyword.Type),
            default('#pop'),
        ],
    }
# Token states shared by SassLexer and ScssLexer. Each lexer copies these
# states at class-definition time and appends its own syntax-specific rules
# (newline vs. ';'/'{'/'}' handling).
common_sass_tokens = {
    'value': [
        (r'[ \t]+', Whitespace),
        (r'[!$][\w-]+', Name.Variable),
        (r'url\(', String.Other, 'string-url'),
        (r'[a-z_-][\w-]*(?=\()', Name.Function),
        (words(_css_properties + (
            'above', 'absolute', 'always', 'armenian', 'aural', 'auto', 'avoid', 'baseline',
            'behind', 'below', 'bidi-override', 'blink', 'block', 'bold', 'bolder', 'both',
            'capitalize', 'center-left', 'center-right', 'center', 'circle',
            'cjk-ideographic', 'close-quote', 'collapse', 'condensed', 'continuous',
            'crosshair', 'cross', 'cursive', 'dashed', 'decimal-leading-zero',
            'decimal', 'default', 'digits', 'disc', 'dotted', 'double', 'e-resize', 'embed',
            'extra-condensed', 'extra-expanded', 'expanded', 'fantasy', 'far-left',
            'far-right', 'faster', 'fast', 'fixed', 'georgian', 'groove', 'hebrew', 'help',
            'hidden', 'hide', 'higher', 'high', 'hiragana-iroha', 'hiragana', 'icon',
            'inherit', 'inline-table', 'inline', 'inset', 'inside', 'invert', 'italic',
            'justify', 'katakana-iroha', 'katakana', 'landscape', 'larger', 'large',
            'left-side', 'leftwards', 'level', 'lighter', 'line-through', 'list-item',
            'loud', 'lower-alpha', 'lower-greek', 'lower-roman', 'lowercase', 'ltr',
            'lower', 'low', 'medium', 'message-box', 'middle', 'mix', 'monospace',
            'n-resize', 'narrower', 'ne-resize', 'no-close-quote', 'no-open-quote',
            'no-repeat', 'none', 'normal', 'nowrap', 'nw-resize', 'oblique', 'once',
            'open-quote', 'outset', 'outside', 'overline', 'pointer', 'portrait', 'px',
            'relative', 'repeat-x', 'repeat-y', 'repeat', 'rgb', 'ridge', 'right-side',
            'rightwards', 's-resize', 'sans-serif', 'scroll', 'se-resize',
            'semi-condensed', 'semi-expanded', 'separate', 'serif', 'show', 'silent',
            'slow', 'slower', 'small-caps', 'small-caption', 'smaller', 'soft', 'solid',
            'spell-out', 'square', 'static', 'status-bar', 'super', 'sw-resize',
            'table-caption', 'table-cell', 'table-column', 'table-column-group',
            'table-footer-group', 'table-header-group', 'table-row',
            'table-row-group', 'text', 'text-bottom', 'text-top', 'thick', 'thin',
            'transparent', 'ultra-condensed', 'ultra-expanded', 'underline',
            'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url',
            'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud',
            'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'),
         Name.Constant),
        (words(_color_keywords, suffix=r'\b'), Name.Entity),
        (words((
            'black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green',
            'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua'), suffix=r'\b'),
         Name.Builtin),
        (r'\!(important|default)', Name.Exception),
        (r'(true|false)', Name.Pseudo),
        (r'(and|or|not)', Operator.Word),
        (r'/\*', Comment.Multiline, 'inline-comment'),
        (r'//[^\n]*', Comment.Single),
        (r'\#[a-z0-9]{1,6}', Number.Hex),
        (r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
        (r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'[~^*!&%<>|+=@:,./?-]+', Operator),
        (r'[\[\]()]+', Punctuation),
        (r'"', String.Double, 'string-double'),
        (r"'", String.Single, 'string-single'),
        (r'[a-z_-][\w-]*', Name),
    ],
    # #{...} interpolation may appear inside values, selectors, and strings.
    'interpolation': [
        (r'\}', String.Interpol, '#pop'),
        include('value'),
    ],
    'selector': [
        (r'[ \t]+', Whitespace),
        (r'\:', Name.Decorator, 'pseudo-class'),
        (r'\.', Name.Class, 'class'),
        (r'\#', Name.Namespace, 'id'),
        (r'[\w-]+', Name.Tag),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'&', Keyword),
        (r'[~^*!&\[\]()<>|+=@:;,./?-]', Operator),
        (r'"', String.Double, 'string-double'),
        (r"'", String.Single, 'string-single'),
    ],
    'string-double': [
        (r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'"', String.Double, '#pop'),
    ],
    'string-single': [
        (r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Single),
        (r'#\{', String.Interpol, 'interpolation'),
        (r"'", String.Single, '#pop'),
    ],
    'string-url': [
        (r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'\)', String.Other, '#pop'),
    ],
    'pseudo-class': [
        (r'[\w-]+', Name.Decorator),
        (r'#\{', String.Interpol, 'interpolation'),
        default('#pop'),
    ],
    'class': [
        (r'[\w-]+', Name.Class),
        (r'#\{', String.Interpol, 'interpolation'),
        default('#pop'),
    ],
    'id': [
        (r'[\w-]+', Name.Namespace),
        (r'#\{', String.Interpol, 'interpolation'),
        default('#pop'),
    ],
    'for': [
        (r'(from|to|through)', Operator.Word),
        include('value'),
    ],
}
def _indentation(lexer, match, ctx):
    """ExtendedRegexLexer callback for the leading whitespace of a line.

    Emits the whitespace, remembers it on the context, and then decides
    which state to resume: if a block-opening rule (see ``_starts_block``)
    armed a pending state and this line is indented strictly deeper than
    that block's base indentation, the pending block state is resumed;
    otherwise any pending block is cleared and lexing continues in the
    normal 'content' state.
    """
    ws = match.group(0)
    yield match.start(), Whitespace, ws
    ctx.last_indentation = ws
    ctx.pos = match.end()
    pending = getattr(ctx, 'block_state', None)
    if pending and ws != ctx.block_indentation and \
            ws.startswith(ctx.block_indentation):
        # Deeper indent than the block opener: the block continues.
        ctx.stack.append(pending)
    else:
        # The block (if any) has ended; disarm it and lex content normally.
        ctx.block_state = None
        ctx.block_indentation = None
        ctx.stack.append('content')
def _starts_block(token, state):
def callback(lexer, match, ctx):
yield match.start(), token, match.group(0)
if hasattr(ctx, 'last_indentation'):
ctx.block_indentation = ctx.last_indentation
else:
ctx.block_indentation = ''
ctx.block_state = state
ctx.pos = match.end()
return callback
class SassLexer(ExtendedRegexLexer):
    """
    For Sass stylesheets (the indentation-based .sass syntax).

    Uses the ``_indentation``/``_starts_block`` callbacks to track
    indentation-delimited constructs such as multi-line comments.
    """
    name = 'Sass'
    url = 'https://sass-lang.com/'
    aliases = ['sass']
    filenames = ['*.sass']
    mimetypes = ['text/x-sass']
    version_added = '1.3'
    flags = re.IGNORECASE | re.MULTILINE
    tokens = {
        'root': [
            (r'[ \t]*\n', Whitespace),
            # _indentation pushes either a pending block state or 'content'.
            (r'[ \t]*', _indentation),
        ],
        'content': [
            (r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'),
             'root'),
            (r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'),
             'root'),
            (r'@import', Keyword, 'import'),
            (r'@for', Keyword, 'for'),
            (r'@(debug|warn|if|while)', Keyword, 'value'),
            (r'(@mixin)( )([\w-]+)', bygroups(Keyword, Whitespace, Name.Function), 'value'),
            (r'(@include)( )([\w-]+)', bygroups(Keyword, Whitespace, Name.Decorator), 'value'),
            (r'@extend', Keyword, 'selector'),
            (r'@[\w-]+', Keyword, 'selector'),
            # Sass-only shorthands: '=' defines a mixin, '+' includes one.
            (r'=[\w-]+', Name.Function, 'value'),
            (r'\+[\w-]+', Name.Decorator, 'value'),
            (r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))',
             bygroups(Name.Variable, Operator), 'value'),
            (r':', Name.Attribute, 'old-style-attr'),
            (r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'),
            default('selector'),
        ],
        'single-comment': [
            (r'.+', Comment.Single),
            (r'\n', Whitespace, 'root'),
        ],
        'multi-comment': [
            (r'.+', Comment.Multiline),
            (r'\n', Whitespace, 'root'),
        ],
        'import': [
            (r'[ \t]+', Whitespace),
            (r'\S+', String),
            (r'\n', Whitespace, 'root'),
        ],
        'old-style-attr': [
            (r'[^\s:="\[]+', Name.Attribute),
            (r'#\{', String.Interpol, 'interpolation'),
            (r'([ \t]*)(=)', bygroups(Whitespace, Operator), 'value'),
            default('value'),
        ],
        'new-style-attr': [
            (r'[^\s:="\[]+', Name.Attribute),
            (r'#\{', String.Interpol, 'interpolation'),
            (r'([ \t]*)([=:])', bygroups(Whitespace, Operator), 'value'),
        ],
        'inline-comment': [
            (r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline),
            (r'#\{', String.Interpol, 'interpolation'),
            (r"\*/", Comment, '#pop'),
        ],
    }
    # Pull in the states shared with SCSS, then append the newline handling
    # specific to the indentation-based syntax.
    for group, common in common_sass_tokens.items():
        tokens[group] = copy.copy(common)
    tokens['value'].append((r'\n', Whitespace, 'root'))
    tokens['selector'].append((r'\n', Whitespace, 'root'))
class ScssLexer(RegexLexer):
    """
    For SCSS stylesheets (the brace-and-semicolon Sass syntax).
    """
    name = 'SCSS'
    url = 'https://sass-lang.com/'
    aliases = ['scss']
    filenames = ['*.scss']
    mimetypes = ['text/x-scss']
    version_added = ''
    flags = re.IGNORECASE | re.DOTALL
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'@import', Keyword, 'value'),
            (r'@for', Keyword, 'for'),
            (r'@(debug|warn|if|while)', Keyword, 'value'),
            (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
            (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
            (r'@extend', Keyword, 'selector'),
            (r'(@media)(\s+)', bygroups(Keyword, Whitespace), 'value'),
            (r'@[\w-]+', Keyword, 'selector'),
            (r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
            # TODO: broken, and prone to infinite loops.
            # (r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
            # (r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
            default('selector'),
        ],
        # NOTE: currently unreachable; see the commented-out rules above.
        'attr': [
            (r'[^\s:="\[]+', Name.Attribute),
            (r'#\{', String.Interpol, 'interpolation'),
            (r'[ \t]*:', Operator, 'value'),
            default('#pop'),
        ],
        'inline-comment': [
            (r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline),
            (r'#\{', String.Interpol, 'interpolation'),
            (r"\*/", Comment, '#pop'),
        ],
    }
    # Pull in the states shared with Sass, then append the brace/semicolon
    # terminators specific to SCSS.
    for group, common in common_sass_tokens.items():
        tokens[group] = copy.copy(common)
    tokens['value'].extend([(r'\n', Whitespace), (r'[;{}]', Punctuation, '#pop')])
    tokens['selector'].extend([(r'\n', Whitespace), (r'[;{}]', Punctuation, '#pop')])
class LessCssLexer(CssLexer):
    """
    For LESS stylesheets.

    Extends CssLexer with LESS-specific syntax: @-prefixed variables,
    nested blocks, and // line comments.
    """
    name = 'LessCss'
    url = 'http://lesscss.org/'
    aliases = ['less']
    filenames = ['*.less']
    mimetypes = ['text/x-less-css']
    version_added = '2.1'
    tokens = {
        'root': [
            # In LESS, @name is a variable rather than an at-rule.
            (r'@\w+', Name.Variable),
            inherit,
        ],
        'content': [
            # LESS allows nested rule blocks inside declarations.
            (r'\{', Punctuation, '#push'),
            (r'//.*\n', Comment.Single),
            inherit,
        ],
    }

View File

@@ -0,0 +1,259 @@
"""
pygments.lexers.d
~~~~~~~~~~~~~~~~~
Lexers for D languages.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words, bygroups
from pygments.token import Comment, Keyword, Name, String, Number, \
Punctuation, Whitespace
__all__ = ['DLexer', 'CrocLexer', 'MiniDLexer']
class DLexer(RegexLexer):
    """
    For D source.

    Covers D's full literal syntax, including the delimited and token
    string forms (q"..." and q{...}), each of which gets its own state to
    track nesting of the delimiter characters.
    """
    name = 'D'
    url = 'https://dlang.org/'
    filenames = ['*.d', '*.di']
    aliases = ['d']
    mimetypes = ['text/x-dsrc']
    version_added = '1.2'
    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            # (r'\\\n', Text), # line continuations
            # Comments
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            # /+ ... +/ comments nest, hence the dedicated state.
            (r'/\+', Comment.Multiline, 'nested_comment'),
            # Keywords
            (words((
                'abstract', 'alias', 'align', 'asm', 'assert', 'auto', 'body',
                'break', 'case', 'cast', 'catch', 'class', 'const', 'continue',
                'debug', 'default', 'delegate', 'delete', 'deprecated', 'do', 'else',
                'enum', 'export', 'extern', 'finally', 'final', 'foreach_reverse',
                'foreach', 'for', 'function', 'goto', 'if', 'immutable', 'import',
                'interface', 'invariant', 'inout', 'in', 'is', 'lazy', 'mixin',
                'module', 'new', 'nothrow', 'out', 'override', 'package', 'pragma',
                'private', 'protected', 'public', 'pure', 'ref', 'return', 'scope',
                'shared', 'static', 'struct', 'super', 'switch', 'synchronized',
                'template', 'this', 'throw', 'try', 'typeid', 'typeof',
                'union', 'unittest', 'version', 'volatile', 'while', 'with',
                '__gshared', '__traits', '__vector', '__parameters'),
                suffix=r'\b'),
             Keyword),
            (words((
                # Removed in 2.072
                'typedef', ),
                suffix=r'\b'),
             Keyword.Removed),
            (words((
                'bool', 'byte', 'cdouble', 'cent', 'cfloat', 'char', 'creal',
                'dchar', 'double', 'float', 'idouble', 'ifloat', 'int', 'ireal',
                'long', 'real', 'short', 'ubyte', 'ucent', 'uint', 'ulong',
                'ushort', 'void', 'wchar'), suffix=r'\b'),
             Keyword.Type),
            (r'(false|true|null)\b', Keyword.Constant),
            (words((
                '__FILE__', '__FILE_FULL_PATH__', '__MODULE__', '__LINE__', '__FUNCTION__',
                '__PRETTY_FUNCTION__', '__DATE__', '__EOF__', '__TIME__', '__TIMESTAMP__',
                '__VENDOR__', '__VERSION__'), suffix=r'\b'),
             Keyword.Pseudo),
            (r'macro\b', Keyword.Reserved),
            (r'(string|wstring|dstring|size_t|ptrdiff_t)\b', Name.Builtin),
            # FloatLiteral
            # -- HexFloat
            (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
             r'[pP][+\-]?[0-9_]+[fFL]?[i]?', Number.Float),
            # -- DecimalFloat
            (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
             r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[fFL]?[i]?', Number.Float),
            (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[fFL]?[i]?', Number.Float),
            # IntegerLiteral
            # -- Binary
            (r'0[Bb][01_]+', Number.Bin),
            # -- Octal
            (r'0[0-7_]+', Number.Oct),
            # -- Hexadecimal
            (r'0[xX][0-9a-fA-F_]+', Number.Hex),
            # -- Decimal
            (r'(0|[1-9][0-9_]*)([LUu]|Lu|LU|uL|UL)?', Number.Integer),
            # CharacterLiteral
            (r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\&\w+;|.)'""",
             String.Char),
            # StringLiteral
            # -- WysiwygString
            (r'r"[^"]*"[cwd]?', String),
            # -- AlternateWysiwygString
            (r'`[^`]*`[cwd]?', String),
            # -- DoubleQuotedString
            (r'"(\\\\|\\[^\\]|[^"\\])*"[cwd]?', String),
            # -- EscapeSequence
            (r"\\(['\"?\\abfnrtv]|x[0-9a-fA-F]{2}|[0-7]{1,3}"
             r"|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8}|&\w+;)",
             String),
            # -- HexString
            (r'x"[0-9a-fA-F_\s]*"[cwd]?', String),
            # -- DelimitedString
            (r'q"\[', String, 'delimited_bracket'),
            (r'q"\(', String, 'delimited_parenthesis'),
            (r'q"<', String, 'delimited_angle'),
            (r'q"\{', String, 'delimited_curly'),
            (r'q"([a-zA-Z_]\w*)\n.*?\n\1"', String),
            (r'q"(.).*?\1"', String),
            # -- TokenString
            (r'q\{', String, 'token_string'),
            # Attributes
            (r'@([a-zA-Z_]\w*)?', Name.Decorator),
            # Tokens
            (r'(~=|\^=|%=|\*=|==|!>=|!<=|!<>=|!<>|!<|!>|!=|>>>=|>>>|>>=|>>|>='
             r'|<>=|<>|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.\.|\.\.|/=)'
             r'|[/.&|\-+<>!()\[\]{}?,;:$=*%^~]', Punctuation),
            # Identifier
            (r'[a-zA-Z_]\w*', Name),
            # Line
            (r'(#line)(\s)(.*)(\n)', bygroups(Comment.Special, Whitespace,
                                              Comment.Special, Whitespace)),
        ],
        'nested_comment': [
            (r'[^+/]+', Comment.Multiline),
            (r'/\+', Comment.Multiline, '#push'),
            (r'\+/', Comment.Multiline, '#pop'),
            (r'[+/]', Comment.Multiline),
        ],
        # Token strings contain real D tokens, so delegate back to 'root'
        # and only track the brace nesting here.
        'token_string': [
            (r'\{', Punctuation, 'token_string_nest'),
            (r'\}', String, '#pop'),
            include('root'),
        ],
        'token_string_nest': [
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
            include('root'),
        ],
        # Each delimited-string form needs two states: one that ends on the
        # closing delimiter followed by '"', and one for nested delimiters.
        'delimited_bracket': [
            (r'[^\[\]]+', String),
            (r'\[', String, 'delimited_inside_bracket'),
            (r'\]"', String, '#pop'),
        ],
        'delimited_inside_bracket': [
            (r'[^\[\]]+', String),
            (r'\[', String, '#push'),
            (r'\]', String, '#pop'),
        ],
        'delimited_parenthesis': [
            (r'[^()]+', String),
            (r'\(', String, 'delimited_inside_parenthesis'),
            (r'\)"', String, '#pop'),
        ],
        'delimited_inside_parenthesis': [
            (r'[^()]+', String),
            (r'\(', String, '#push'),
            (r'\)', String, '#pop'),
        ],
        'delimited_angle': [
            (r'[^<>]+', String),
            (r'<', String, 'delimited_inside_angle'),
            (r'>"', String, '#pop'),
        ],
        'delimited_inside_angle': [
            (r'[^<>]+', String),
            (r'<', String, '#push'),
            (r'>', String, '#pop'),
        ],
        'delimited_curly': [
            (r'[^{}]+', String),
            (r'\{', String, 'delimited_inside_curly'),
            (r'\}"', String, '#pop'),
        ],
        'delimited_inside_curly': [
            (r'[^{}]+', String),
            (r'\{', String, '#push'),
            (r'\}', String, '#pop'),
        ],
    }
class CrocLexer(RegexLexer):
    """
    For Croc source.

    Croc is the successor of MiniD; see MiniDLexer below, which reuses
    this lexer unchanged.
    """
    name = 'Croc'
    url = 'http://jfbillingsley.com/croc'
    filenames = ['*.croc']
    aliases = ['croc']
    mimetypes = ['text/x-crocsrc']
    version_added = ''
    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            # Comments
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
            # Block comments nest, hence the dedicated state.
            (r'/\*', Comment.Multiline, 'nestedcomment'),
            # Keywords
            (words((
                'as', 'assert', 'break', 'case', 'catch', 'class', 'continue',
                'default', 'do', 'else', 'finally', 'for', 'foreach', 'function',
                'global', 'namespace', 'if', 'import', 'in', 'is', 'local',
                'module', 'return', 'scope', 'super', 'switch', 'this', 'throw',
                'try', 'vararg', 'while', 'with', 'yield'), suffix=r'\b'),
             Keyword),
            (r'(false|true|null)\b', Keyword.Constant),
            # FloatLiteral
            (r'([0-9][0-9_]*)(?=[.eE])(\.[0-9][0-9_]*)?([eE][+\-]?[0-9_]+)?',
             Number.Float),
            # IntegerLiteral
            # -- Binary
            (r'0[bB][01][01_]*', Number.Bin),
            # -- Hexadecimal
            (r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex),
            # -- Decimal
            (r'([0-9][0-9_]*)(?![.eE])', Number.Integer),
            # CharacterLiteral
            (r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-9]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""",
             String.Char),
            # StringLiteral
            # -- WysiwygString
            (r'@"(""|[^"])*"', String),
            (r'@`(``|[^`])*`', String),
            (r"@'(''|[^'])*'", String),
            # -- DoubleQuotedString
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            # Tokens
            (r'(~=|\^=|%=|\*=|==|!=|>>>=|>>>|>>=|>>|>=|<=>|\?=|-\>'
             r'|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.|/=)'
             r'|[-/.&$@|\+<>!()\[\]{}?,;:=*%^~#\\]', Punctuation),
            # Identifier
            (r'[a-zA-Z_]\w*', Name),
        ],
        'nestedcomment': [
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
    }
class MiniDLexer(CrocLexer):
    """
    For MiniD source code.

    MiniD was renamed to Croc, so all lexing behaviour is inherited
    unchanged from CrocLexer; only the registration metadata differs.
    """
    name = 'MiniD'
    aliases = ['minid']
    # Deliberately empty: '*.md' must stay reserved for Markdown, so MiniD
    # is never selected by filename.
    filenames = []
    mimetypes = ['text/x-minidsrc']
    version_added = ''

View File

@@ -0,0 +1,126 @@
"""
pygments.lexers.dalvik
~~~~~~~~~~~~~~~~~~~~~~
Pygments lexers for Dalvik VM-related languages.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Keyword, Text, Comment, Name, String, Number, \
Punctuation, Whitespace
__all__ = ['SmaliLexer']
class SmaliLexer(RegexLexer):
    """
    For Smali (Android/Dalvik) assembly code.
    """
    name = 'Smali'
    url = 'http://code.google.com/p/smali/'
    aliases = ['smali']
    filenames = ['*.smali']
    mimetypes = ['text/smali']
    version_added = '1.6'
    # Rule order matters: 'root' tries the included states in sequence, so
    # more specific constructs (comments, labels, fields, methods, ...) are
    # matched before the generic 'type' and 'whitespace' fallbacks.
    tokens = {
        'root': [
            include('comment'),
            include('label'),
            include('field'),
            include('method'),
            include('class'),
            include('directive'),
            include('access-modifier'),
            include('instruction'),
            include('literal'),
            include('punctuation'),
            include('type'),
            include('whitespace')
        ],
        'directive': [
            (r'^([ \t]*)(\.(?:class|super|implements|field|subannotation|annotation|'
             r'enum|method|registers|locals|array-data|packed-switch|'
             r'sparse-switch|catchall|catch|line|parameter|local|prologue|'
             r'epilogue|source))', bygroups(Whitespace, Keyword)),
            (r'^([ \t]*)(\.end)( )(field|subannotation|annotation|method|array-data|'
             'packed-switch|sparse-switch|parameter|local)',
             bygroups(Whitespace, Keyword, Whitespace, Keyword)),
            (r'^([ \t]*)(\.restart)( )(local)',
             bygroups(Whitespace, Keyword, Whitespace, Keyword)),
        ],
        'access-modifier': [
            (r'(public|private|protected|static|final|synchronized|bridge|'
             r'varargs|native|abstract|strictfp|synthetic|constructor|'
             r'declared-synchronized|interface|enum|annotation|volatile|'
             r'transient)', Keyword),
        ],
        'whitespace': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
        ],
        'instruction': [
            (r'\b[vp]\d+\b', Name.Builtin),  # registers
            (r'(\b[a-z][A-Za-z0-9/-]+)(\s+)', bygroups(Text, Whitespace)),  # instructions
        ],
        'literal': [
            (r'".*"', String),
            (r'0x[0-9A-Fa-f]+t?', Number.Hex),
            (r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'[0-9]+L?', Number.Integer),
        ],
        'field': [
            (r'(\$?\b)([\w$]*)(:)',
             bygroups(Punctuation, Name.Variable, Punctuation)),
        ],
        'method': [
            (r'<(?:cl)?init>', Name.Function),  # constructor
            (r'(\$?\b)([\w$]*)(\()',
             bygroups(Punctuation, Name.Function, Punctuation)),
        ],
        'label': [
            (r':\w+', Name.Label),
        ],
        'class': [
            # class names in the form Lcom/namespace/ClassName;
            # I only want to color the ClassName part, so the namespace part is
            # treated as 'Text'
            (r'(L)((?:[\w$]+/)*)([\w$]+)(;)',
             bygroups(Keyword.Type, Text, Name.Class, Text)),
        ],
        'punctuation': [
            (r'->', Punctuation),
            (r'[{},():=.-]', Punctuation),
        ],
        'type': [
            # primitive type descriptors (Z=boolean, B=byte, ..., V=void)
            (r'[ZBSCIJFDV\[]+', Keyword.Type),
        ],
        'comment': [
            (r'#.*?\n', Comment),
        ],
    }

    def analyse_text(text):
        """Score how likely *text* is Smali: a ``.class`` header, Dalvik
        opcode patterns, and verifier/debug directives each add weight."""
        score = 0
        if re.search(r'^\s*\.class\s', text, re.MULTILINE):
            score += 0.5
        if re.search(r'\b((check-cast|instance-of|throw-verification-error'
                     r')\b|(-to|add|[ais]get|[ais]put|and|cmpl|const|div|'
                     r'if|invoke|move|mul|neg|not|or|rem|return|rsub|shl|'
                     r'shr|sub|ushr)[-/])|{|}', text, re.MULTILINE):
            score += 0.3
        if re.search(r'(\.(catchall|epilogue|restart local|prologue)|'
                     r'\b(array-data|class-change-error|declared-synchronized|'
                     r'(field|inline|vtable)@0x[0-9a-fA-F]|generic-error|'
                     r'illegal-class-access|illegal-field-access|'
                     r'illegal-method-access|instantiation-error|no-error|'
                     r'no-such-class|no-such-field|no-such-method|'
                     r'packed-switch|sparse-switch))\b', text, re.MULTILINE):
            score += 0.6
        return score

View File

@@ -0,0 +1,763 @@
"""
pygments.lexers.data
~~~~~~~~~~~~~~~~~~~~
Lexers for data file format.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import Lexer, ExtendedRegexLexer, LexerContext, \
include, bygroups
from pygments.token import Comment, Error, Keyword, Literal, Name, Number, \
Punctuation, String, Whitespace
__all__ = ['YamlLexer', 'JsonLexer', 'JsonBareObjectLexer', 'JsonLdLexer']
class YamlLexerContext(LexerContext):
    """Per-lex mutable state tracking YAML indentation levels."""

    def __init__(self, *args, **kwds):
        super().__init__(*args, **kwds)
        # No explicit indentation seen yet for a block scalar.
        self.block_scalar_indent = None
        # Candidate indentation for the upcoming block line.
        self.next_indent = 0
        # Current indentation level; -1 until the first line is processed.
        self.indent = -1
        # Saved outer indentation levels, restored as blocks close.
        self.indent_stack = []
class YamlLexer(ExtendedRegexLexer):
    """
    Lexer for YAML, a human-friendly data serialization
    language.

    Uses `YamlLexerContext` (via :meth:`get_tokens_unprocessed`) to track
    indentation, which plain regex rules cannot express.
    """
    name = 'YAML'
    url = 'http://yaml.org/'
    aliases = ['yaml']
    filenames = ['*.yaml', '*.yml']
    mimetypes = ['text/x-yaml']
    version_added = '0.11'

    # NOTE: the helpers below are callback *factories*: each returns a
    # callback usable in an ExtendedRegexLexer rule, closing over the token
    # class (and options) it was given.  They mutate the lexer context's
    # indentation state as a side effect.

    def something(token_class):
        """Do not produce empty tokens."""
        def callback(lexer, match, context):
            text = match.group()
            if not text:
                return
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def reset_indent(token_class):
        """Reset the indentation levels."""
        def callback(lexer, match, context):
            text = match.group()
            context.indent_stack = []
            context.indent = -1
            context.next_indent = 0
            context.block_scalar_indent = None
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def save_indent(token_class, start=False):
        """Save a possible indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            extra = ''
            if start:
                context.next_indent = len(text)
                if context.next_indent < context.indent:
                    # dedent: pop levels until the stack matches
                    while context.next_indent < context.indent:
                        context.indent = context.indent_stack.pop()
                if context.next_indent > context.indent:
                    # over-indented portion is emitted as an error token
                    extra = text[context.indent:]
                    text = text[:context.indent]
            else:
                context.next_indent += len(text)
            if text:
                yield match.start(), token_class, text
            if extra:
                yield match.start()+len(text), token_class.Error, extra
            context.pos = match.end()
        return callback

    def set_indent(token_class, implicit=False):
        """Set the previously saved indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            if context.indent < context.next_indent:
                context.indent_stack.append(context.indent)
                context.indent = context.next_indent
            if not implicit:
                context.next_indent += len(text)
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def set_block_scalar_indent(token_class):
        """Set an explicit indentation level for a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            context.block_scalar_indent = None
            if not text:
                return
            increment = match.group(1)
            if increment:
                # the header digit is relative to the current indent
                current_indent = max(context.indent, 0)
                increment = int(increment)
                context.block_scalar_indent = current_indent + increment
            if text:
                yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def parse_block_scalar_empty_line(indent_token_class, content_token_class):
        """Process an empty line in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if (context.block_scalar_indent is None or
                    len(text) <= context.block_scalar_indent):
                if text:
                    yield match.start(), indent_token_class, text
            else:
                # split the line into indentation and (whitespace) content
                indentation = text[:context.block_scalar_indent]
                content = text[context.block_scalar_indent:]
                yield match.start(), indent_token_class, indentation
                yield (match.start()+context.block_scalar_indent,
                       content_token_class, content)
            context.pos = match.end()
        return callback

    def parse_block_scalar_indent(token_class):
        """Process indentation spaces in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if context.block_scalar_indent is None:
                if len(text) <= max(context.indent, 0):
                    # dedent ends the scalar: leave both scalar states
                    context.stack.pop()
                    context.stack.pop()
                    return
                context.block_scalar_indent = len(text)
            else:
                if len(text) < context.block_scalar_indent:
                    context.stack.pop()
                    context.stack.pop()
                    return
            if text:
                yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def parse_plain_scalar_indent(token_class):
        """Process indentation spaces in a plain scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if len(text) <= context.indent:
                # dedent ends the plain scalar
                context.stack.pop()
                context.stack.pop()
                return
            if text:
                yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    # Rule order within each state is significant.
    tokens = {
        # the root rules
        'root': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Whitespace),
            # line breaks
            (r'\n+', Whitespace),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # the '%YAML' directive
            (r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
            # the %TAG directive
            (r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
            # document start and document end indicators
            (r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
             'block-line'),
            # indentation spaces
            (r'[ ]*(?!\s|$)', save_indent(Whitespace, start=True),
             ('block-line', 'indentation')),
        ],
        # trailing whitespaces after directives or a block scalar indicator
        'ignored-line': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Whitespace),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # line break
            (r'\n', Whitespace, '#pop:2'),
        ],
        # the %YAML directive
        'yaml-directive': [
            # the version number
            (r'([ ]+)([0-9]+\.[0-9]+)',
             bygroups(Whitespace, Number), 'ignored-line'),
        ],
        # the %TAG directive
        'tag-directive': [
            # a tag handle and the corresponding prefix
            (r'([ ]+)(!|![\w-]*!)'
             r'([ ]+)(!|!?[\w;/?:@&=+$,.!~*\'()\[\]%-]+)',
             bygroups(Whitespace, Keyword.Type, Whitespace, Keyword.Type),
             'ignored-line'),
        ],
        # block scalar indicators and indentation spaces
        'indentation': [
            # trailing whitespaces are ignored
            (r'[ ]*$', something(Whitespace), '#pop:2'),
            # whitespaces preceding block collection indicators
            (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Whitespace)),
            # block collection indicators
            (r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
            # the beginning a block line
            (r'[ ]*', save_indent(Whitespace), '#pop'),
        ],
        # an indented line in the block context
        'block-line': [
            # the line end
            (r'[ ]*(?=#|$)', something(Whitespace), '#pop'),
            # whitespaces separating tokens
            (r'[ ]+', Whitespace),
            # key with colon
            (r'''([^#,?\[\]{}"'\n]+)(:)(?=[ ]|$)''',
             bygroups(Name.Tag, set_indent(Punctuation, implicit=True))),
            # tags, anchors and aliases,
            include('descriptors'),
            # block collections and scalars
            include('block-nodes'),
            # flow collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`-]|[?:-]\S)',
             something(Name.Variable),
             'plain-scalar-in-block-context'),
        ],
        # tags, anchors, aliases
        'descriptors': [
            # a full-form tag
            (r'!<[\w#;/?:@&=+$,.!~*\'()\[\]%-]+>', Keyword.Type),
            # a tag in the form '!', '!suffix' or '!handle!suffix'
            (r'!(?:[\w-]+!)?'
             r'[\w#;/?:@&=+$,.!~*\'()\[\]%-]*', Keyword.Type),
            # an anchor
            (r'&[\w-]+', Name.Label),
            # an alias
            (r'\*[\w-]+', Name.Variable),
        ],
        # block collections and scalars
        'block-nodes': [
            # implicit key
            (r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
            # literal and folded scalars
            (r'[|>]', Punctuation.Indicator,
             ('block-scalar-content', 'block-scalar-header')),
        ],
        # flow collections and quoted scalars
        'flow-nodes': [
            # a flow sequence
            (r'\[', Punctuation.Indicator, 'flow-sequence'),
            # a flow mapping
            (r'\{', Punctuation.Indicator, 'flow-mapping'),
            # a single-quoted scalar
            (r'\'', String, 'single-quoted-scalar'),
            # a double-quoted scalar
            (r'\"', String, 'double-quoted-scalar'),
        ],
        # the content of a flow collection
        'flow-collection': [
            # whitespaces
            (r'[ ]+', Whitespace),
            # line breaks
            (r'\n+', Whitespace),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # simple indicators
            (r'[?:,]', Punctuation.Indicator),
            # tags, anchors and aliases
            include('descriptors'),
            # nested collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`])',
             something(Name.Variable),
             'plain-scalar-in-flow-context'),
        ],
        # a flow sequence indicated by '[' and ']'
        'flow-sequence': [
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\]', Punctuation.Indicator, '#pop'),
        ],
        # a flow mapping indicated by '{' and '}'
        'flow-mapping': [
            # key with colon
            (r'''([^,:?\[\]{}"'\n]+)(:)(?=[ ]|$)''',
             bygroups(Name.Tag, Punctuation)),
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\}', Punctuation.Indicator, '#pop'),
        ],
        # block scalar lines
        'block-scalar-content': [
            # line break
            (r'\n', Whitespace),
            # empty line
            (r'^[ ]+$',
             parse_block_scalar_empty_line(Whitespace, Name.Constant)),
            # indentation spaces (we may leave the state here)
            (r'^[ ]*', parse_block_scalar_indent(Whitespace)),
            # line content
            (r'[\S\t ]+', Name.Constant),
        ],
        # the content of a literal or folded scalar
        'block-scalar-header': [
            # indentation indicator followed by chomping flag
            (r'([1-9])?[+-]?(?=[ ]|$)',
             set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
            # chomping flag followed by indentation indicator
            (r'[+-]?([1-9])?(?=[ ]|$)',
             set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
        ],
        # ignored and regular whitespaces in quoted scalars
        'quoted-scalar-whitespaces': [
            # leading and trailing whitespaces are ignored
            (r'^[ ]+', Whitespace),
            (r'[ ]+$', Whitespace),
            # line breaks are ignored
            (r'\n+', Whitespace),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
        ],
        # single-quoted scalars
        'single-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of the quote character
            (r'\'\'', String.Escape),
            # regular non-whitespace characters
            (r'[^\s\']+', String),
            # the closing quote
            (r'\'', String, '#pop'),
        ],
        # double-quoted scalars
        'double-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of special characters
            (r'\\[0abt\tn\nvfre "\\N_LP]', String),
            # escape codes
            (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
             String.Escape),
            # regular non-whitespace characters
            (r'[^\s"\\]+', String),
            # the closing quote
            (r'"', String, '#pop'),
        ],
        # the beginning of a new line while scanning a plain scalar
        'plain-scalar-in-block-context-new-line': [
            # empty lines
            (r'^[ ]+$', Whitespace),
            # line breaks
            (r'\n+', Whitespace),
            # document start and document end indicators
            (r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
            # indentation spaces (we may leave the block line state here)
            (r'^[ ]*', parse_plain_scalar_indent(Whitespace), '#pop'),
        ],
        # a plain scalar in the block context
        'plain-scalar-in-block-context': [
            # the scalar ends with the ':' indicator
            (r'[ ]*(?=:[ ]|:$)', something(Whitespace), '#pop'),
            # the scalar ends with whitespaces followed by a comment
            (r'[ ]+(?=#)', Whitespace, '#pop'),
            # trailing whitespaces are ignored
            (r'[ ]+$', Whitespace),
            # line breaks are ignored
            (r'\n+', Whitespace, 'plain-scalar-in-block-context-new-line'),
            # other whitespaces are a part of the value
            (r'[ ]+', Literal.Scalar.Plain),
            # regular non-whitespace characters
            (r'(?::(?!\s)|[^\s:])+', Literal.Scalar.Plain),
        ],
        # a plain scalar is the flow context
        'plain-scalar-in-flow-context': [
            # the scalar ends with an indicator character
            (r'[ ]*(?=[,:?\[\]{}])', something(Whitespace), '#pop'),
            # the scalar ends with a comment
            (r'[ ]+(?=#)', Whitespace, '#pop'),
            # leading and trailing whitespaces are ignored
            (r'^[ ]+', Whitespace),
            (r'[ ]+$', Whitespace),
            # line breaks are ignored
            (r'\n+', Whitespace),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
            # regular non-whitespace characters
            (r'[^\s,:?\[\]{}]+', Name.Variable),
        ],
    }

    def get_tokens_unprocessed(self, text=None, context=None):
        """Lex *text*, creating a fresh `YamlLexerContext` when the caller
        did not supply one (the context carries the indentation state)."""
        if context is None:
            context = YamlLexerContext(text, 0)
        return super().get_tokens_unprocessed(text, context)
class JsonLexer(Lexer):
    """
    For JSON data structures.

    Javascript-style comments are supported (like ``/* */`` and ``//``),
    though comments are not part of the JSON specification.
    This allows users to highlight JSON as it is used in the wild.

    No validation is performed on the input JSON document.

    Implemented as a hand-written character state machine (rather than a
    RegexLexer) for speed; the ``in_*`` booleans below are the states.
    """
    name = 'JSON'
    url = 'https://www.json.org'
    aliases = ['json', 'json-object']
    filenames = ['*.json', '*.jsonl', '*.ndjson', 'Pipfile.lock']
    mimetypes = ['application/json', 'application/json-object', 'application/x-ndjson', 'application/jsonl', 'application/json-seq']
    version_added = '1.5'

    # No validation of integers, floats, or constants is done.
    # As long as the characters are members of the following
    # sets, the token will be considered valid. For example,
    #
    #     "--1--" is parsed as an integer
    #     "1...eee" is parsed as a float
    #     "trustful" is parsed as a constant
    #
    integers = set('-0123456789')
    floats = set('.eE+')
    constants = set('truefalsenull')  # true|false|null
    hexadecimals = set('0123456789abcdefABCDEF')
    punctuations = set('{}[],')
    whitespaces = {'\u0020', '\u000a', '\u000d', '\u0009'}  # space, LF, CR, tab

    def get_tokens_unprocessed(self, text):
        """Parse JSON data.

        Single-pass scan over *text*; ``start`` marks the beginning of the
        token currently being accumulated.
        """
        in_string = False
        in_escape = False
        in_unicode_escape = 0  # counts down remaining \\uXXXX hex digits
        in_whitespace = False
        in_constant = False
        in_number = False
        in_float = False
        in_punctuation = False
        in_comment_single = False
        in_comment_multiline = False
        expecting_second_comment_opener = False  # // or /*
        expecting_second_comment_closer = False  # */

        start = 0

        # The queue is used to store data that may need to be tokenized
        # differently based on what follows. In particular, JSON object
        # keys are tokenized differently than string values, but cannot
        # be distinguished until punctuation is encountered outside the
        # string.
        #
        # A ":" character after the string indicates that the string is
        # an object key; any other character indicates the string is a
        # regular string value.
        #
        # The queue holds tuples that contain the following data:
        #
        #     (start_index, token_type, text)
        #
        # By default the token type of text in double quotes is
        # String.Double. The token type will be replaced if a colon
        # is encountered after the string closes.
        #
        queue = []

        for stop, character in enumerate(text):
            if in_string:
                if in_unicode_escape:
                    if character in self.hexadecimals:
                        in_unicode_escape -= 1
                        if not in_unicode_escape:
                            in_escape = False
                    else:
                        in_unicode_escape = 0
                        in_escape = False
                elif in_escape:
                    if character == 'u':
                        in_unicode_escape = 4
                    else:
                        in_escape = False
                elif character == '\\':
                    in_escape = True
                elif character == '"':
                    queue.append((start, String.Double, text[start:stop + 1]))
                    in_string = False
                    in_escape = False
                    in_unicode_escape = 0
                continue

            elif in_whitespace:
                if character in self.whitespaces:
                    continue

                if queue:
                    queue.append((start, Whitespace, text[start:stop]))
                else:
                    yield start, Whitespace, text[start:stop]
                in_whitespace = False
                # Fall through so the new character can be evaluated.

            elif in_constant:
                if character in self.constants:
                    continue

                yield start, Keyword.Constant, text[start:stop]
                in_constant = False
                # Fall through so the new character can be evaluated.

            elif in_number:
                if character in self.integers:
                    continue
                elif character in self.floats:
                    in_float = True
                    continue

                if in_float:
                    yield start, Number.Float, text[start:stop]
                else:
                    yield start, Number.Integer, text[start:stop]
                in_number = False
                in_float = False
                # Fall through so the new character can be evaluated.

            elif in_punctuation:
                if character in self.punctuations:
                    continue

                yield start, Punctuation, text[start:stop]
                in_punctuation = False
                # Fall through so the new character can be evaluated.

            elif in_comment_single:
                if character != '\n':
                    continue

                if queue:
                    queue.append((start, Comment.Single, text[start:stop]))
                else:
                    yield start, Comment.Single, text[start:stop]
                in_comment_single = False
                # Fall through so the new character can be evaluated.

            elif in_comment_multiline:
                if character == '*':
                    expecting_second_comment_closer = True
                elif expecting_second_comment_closer:
                    expecting_second_comment_closer = False
                    if character == '/':
                        if queue:
                            queue.append((start, Comment.Multiline, text[start:stop + 1]))
                        else:
                            yield start, Comment.Multiline, text[start:stop + 1]
                        in_comment_multiline = False
                continue

            elif expecting_second_comment_opener:
                expecting_second_comment_opener = False
                if character == '/':
                    in_comment_single = True
                    continue
                elif character == '*':
                    in_comment_multiline = True
                    continue

                # Exhaust the queue. Accept the existing token types.
                yield from queue
                queue.clear()

                yield start, Error, text[start:stop]
                # Fall through so the new character can be evaluated.

            start = stop

            if character == '"':
                in_string = True

            elif character in self.whitespaces:
                in_whitespace = True

            elif character in {'f', 'n', 't'}:  # The first letters of true|false|null
                # Exhaust the queue. Accept the existing token types.
                yield from queue
                queue.clear()

                in_constant = True

            elif character in self.integers:
                # Exhaust the queue. Accept the existing token types.
                yield from queue
                queue.clear()

                in_number = True

            elif character == ':':
                # Yield from the queue. Replace string token types.
                for _start, _token, _text in queue:
                    # There can be only three types of tokens before a ':':
                    # Whitespace, Comment, or a quoted string.
                    #
                    # If it's a quoted string we emit Name.Tag.
                    # Otherwise, we yield the original token.
                    #
                    # In all other cases this would be invalid JSON,
                    # but this is not a validating JSON lexer, so it's OK.
                    if _token is String.Double:
                        yield _start, Name.Tag, _text
                    else:
                        yield _start, _token, _text
                queue.clear()

                in_punctuation = True

            elif character in self.punctuations:
                # Exhaust the queue. Accept the existing token types.
                yield from queue
                queue.clear()

                in_punctuation = True

            elif character == '/':
                # This is the beginning of a comment.
                expecting_second_comment_opener = True

            else:
                # Exhaust the queue. Accept the existing token types.
                yield from queue
                queue.clear()

                yield start, Error, character

        # Yield any remaining text.
        yield from queue
        if in_string:
            # unterminated string at EOF
            yield start, Error, text[start:]
        elif in_float:
            yield start, Number.Float, text[start:]
        elif in_number:
            yield start, Number.Integer, text[start:]
        elif in_constant:
            yield start, Keyword.Constant, text[start:]
        elif in_whitespace:
            yield start, Whitespace, text[start:]
        elif in_punctuation:
            yield start, Punctuation, text[start:]
        elif in_comment_single:
            yield start, Comment.Single, text[start:]
        elif in_comment_multiline:
            # unterminated multiline comment at EOF
            yield start, Error, text[start:]
        elif expecting_second_comment_opener:
            yield start, Error, text[start:]
class JsonBareObjectLexer(JsonLexer):
    """
    For JSON data structures (with missing object curly braces).

    .. deprecated:: 2.8.0

       Behaves the same as `JsonLexer` now.
    """
    # Kept only for backward compatibility of the registry; all matching
    # metadata is intentionally empty so it is never auto-selected.
    name = 'JSONBareObject'
    aliases = []
    filenames = []
    mimetypes = []
    version_added = '2.2'
class JsonLdLexer(JsonLexer):
    """
    For JSON-LD linked data.

    Identical to `JsonLexer` except that object keys which are JSON-LD
    keywords (``"@context"``, ``"@id"``, ...) are re-tagged as decorators.
    """
    name = 'JSON-LD'
    url = 'https://json-ld.org/'
    aliases = ['jsonld', 'json-ld']
    filenames = ['*.jsonld']
    mimetypes = ['application/ld+json']
    version_added = '2.0'

    # The quoted forms of every JSON-LD keyword, e.g. '"@context"'.
    json_ld_keywords = {
        '"@%s"' % keyword
        for keyword in (
            'base',
            'container',
            'context',
            'direction',
            'graph',
            'id',
            'import',
            'included',
            'index',
            'json',
            'language',
            'list',
            'nest',
            'none',
            'prefix',
            'propagate',
            'protected',
            'reverse',
            'set',
            'type',
            'value',
            'version',
            'vocab',
        )
    }

    def get_tokens_unprocessed(self, text):
        """Delegate to `JsonLexer`, promoting JSON-LD keyword keys."""
        keywords = self.json_ld_keywords
        for start, token, value in super().get_tokens_unprocessed(text):
            if token is Name.Tag and value in keywords:
                token = Name.Decorator
            yield start, token, value

View File

@@ -0,0 +1,135 @@
"""
pygments.lexers.dax
~~~~~~~~~~~~~~~~~~~
Lexer for Power BI DAX.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words
from pygments.token import Comment, Punctuation, Whitespace,\
Name, Operator, String, Number, Text
__all__ = ['DaxLexer']
class DaxLexer(RegexLexer):
    """
    Lexer for Power BI DAX
    Referenced from: https://github.com/sql-bi/SyntaxHighlighterBrushDax
    """
    name = 'Dax'
    aliases = ['dax']
    filenames = ['*.dax']
    url = 'https://learn.microsoft.com/en-us/dax/dax-function-reference'
    mimetypes = []
    version_added = '2.15'

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            # DAX supports both '--' and '//' single-line comments
            (r"--.*\n?", Comment.Single),  # Comment: Double dash comment
            (r"//.*\n?", Comment.Single),  # Comment: Double backslash comment
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            # Built-in DAX function names, matched case-insensitively
            (words(('abs', 'accrint', 'accrintm', 'acos', 'acosh', 'acot', 'acoth',
                    'addcolumns', 'addmissingitems', 'all', 'allcrossfiltered',
                    'allexcept', 'allnoblankrow', 'allselected', 'amordegrc', 'amorlinc',
                    'and','approximatedistinctcount', 'asin', 'asinh', 'atan', 'atanh',
                    'average', 'averagea', 'averagex', 'beta.dist', 'beta.inv',
                    'bitand', 'bitlshift', 'bitor', 'bitrshift', 'bitxor', 'blank',
                    'calculate', 'calculatetable', 'calendar', 'calendarauto', 'ceiling',
                    'chisq.dist', 'chisq.dist.rt', 'chisq.inv', 'chisq.inv.rt',
                    'closingbalancemonth', 'closingbalancequarter', 'closingbalanceyear',
                    'coalesce', 'columnstatistics', 'combin', 'combina', 'combinevalues',
                    'concatenate', 'concatenatex', 'confidence.norm', 'confidence.t',
                    'contains', 'containsrow', 'containsstring', 'containsstringexact',
                    'convert', 'cos', 'cosh', 'cot', 'coth', 'count', 'counta', 'countax',
                    'countblank', 'countrows', 'countx', 'coupdaybs', 'coupdays',
                    'coupdaysnc', 'coupncd', 'coupnum', 'couppcd', 'crossfilter',
                    'crossjoin', 'cumipmt', 'cumprinc', 'currency', 'currentgroup',
                    'customdata', 'datatable', 'date', 'dateadd', 'datediff',
                    'datesbetween', 'datesinperiod', 'datesmtd', 'datesqtd',
                    'datesytd', 'datevalue', 'day', 'db', 'ddb', 'degrees', 'detailrows',
                    'disc', 'distinct', 'distinctcount', 'distinctcountnoblank',
                    'divide', 'dollarde', 'dollarfr', 'duration', 'earlier', 'earliest',
                    'edate', 'effect', 'endofmonth', 'endofquarter', 'endofyear',
                    'eomonth', 'error', 'evaluateandlog', 'even', 'exact', 'except',
                    'exp', 'expon.dist', 'fact', 'false', 'filter', 'filters', 'find',
                    'firstdate', 'firstnonblank', 'firstnonblankvalue', 'fixed', 'floor',
                    'format', 'fv', 'gcd', 'generate', 'generateall', 'generateseries',
                    'geomean', 'geomeanx', 'groupby', 'hash', 'hasonefilter',
                    'hasonevalue', 'hour', 'if', 'if.eager', 'iferror', 'ignore', 'index',
                    'int', 'intersect', 'intrate', 'ipmt', 'isafter', 'isblank',
                    'iscrossfiltered', 'isempty', 'iserror', 'iseven', 'isfiltered',
                    'isinscope', 'islogical', 'isnontext', 'isnumber', 'iso.ceiling',
                    'isodd', 'isonorafter', 'ispmt', 'isselectedmeasure', 'issubtotal',
                    'istext', 'keepfilters', 'keywordmatch', 'lastdate', 'lastnonblank',
                    'lastnonblankvalue', 'lcm', 'left', 'len', 'linest', 'linestx', 'ln',
                    'log', 'log10', 'lookupvalue', 'lower', 'max', 'maxa', 'maxx',
                    'mduration', 'median', 'medianx', 'mid', 'min', 'mina', 'minute',
                    'minx', 'mod', 'month', 'mround', 'nameof', 'naturalinnerjoin',
                    'naturalleftouterjoin', 'networkdays', 'nextday', 'nextmonth',
                    'nextquarter', 'nextyear', 'nominal', 'nonvisual', 'norm.dist',
                    'norm.inv', 'norm.s.dist', 'norm.s.inv', 'not', 'now', 'nper', 'odd',
                    'oddfprice', 'oddfyield', 'oddlprice', 'oddlyield', 'offset',
                    'openingbalancemonth', 'openingbalancequarter', 'openingbalanceyear',
                    'or', 'orderby', 'parallelperiod', 'partitionby', 'path',
                    'pathcontains', 'pathitem', 'pathitemreverse', 'pathlength',
                    'pduration', 'percentile.exc', 'percentile.inc', 'percentilex.exc',
                    'percentilex.inc', 'permut', 'pi', 'pmt', 'poisson.dist', 'power',
                    'ppmt', 'previousday', 'previousmonth', 'previousquarter',
                    'previousyear', 'price', 'pricedisc', 'pricemat', 'product',
                    'productx', 'pv', 'quarter', 'quotient', 'radians', 'rand',
                    'randbetween', 'rank.eq', 'rankx', 'rate', 'received', 'related',
                    'relatedtable', 'removefilters', 'replace', 'rept', 'right',
                    'rollup', 'rollupaddissubtotal', 'rollupgroup', 'rollupissubtotal',
                    'round', 'rounddown', 'roundup', 'row', 'rri', 'sameperiodlastyear',
                    'sample', 'sampleaxiswithlocalminmax', 'search', 'second',
                    'selectcolumns', 'selectedmeasure', 'selectedmeasureformatstring',
                    'selectedmeasurename', 'selectedvalue', 'sign', 'sin', 'sinh', 'sln',
                    'sqrt', 'sqrtpi', 'startofmonth', 'startofquarter', 'startofyear',
                    'stdev.p', 'stdev.s', 'stdevx.p', 'stdevx.s', 'substitute',
                    'substitutewithindex', 'sum', 'summarize', 'summarizecolumns', 'sumx',
                    'switch', 'syd', 't.dist', 't.dist.2t', 't.dist.rt', 't.inv',
                    't.inv.2t', 'tan', 'tanh', 'tbilleq', 'tbillprice', 'tbillyield',
                    'time', 'timevalue', 'tocsv', 'today', 'tojson', 'topn',
                    'topnperlevel', 'topnskip', 'totalmtd', 'totalqtd', 'totalytd',
                    'treatas', 'trim', 'true', 'trunc', 'unichar', 'unicode', 'union',
                    'upper', 'userculture', 'userelationship', 'username', 'userobjectid',
                    'userprincipalname', 'utcnow', 'utctoday', 'value', 'values', 'var.p',
                    'var.s', 'varx.p', 'varx.s', 'vdb', 'weekday', 'weeknum', 'window',
                    'xirr', 'xnpv', 'year', 'yearfrac', 'yield', 'yielddisc', 'yieldmat'),
                   prefix=r'(?i)', suffix=r'\b'), Name.Function),  # Functions
            # Statement keywords, also case-insensitive
            (words(('at','asc','boolean','both','by','create','currency',
                    'datetime','day','define','desc','double',
                    'evaluate','false','integer','measure',
                    'month','none','order','return','single','start','string',
                    'table','true','var','year'),
                   prefix=r'(?i)', suffix=r'\b'), Name.Builtin),  # Keyword
            (r':=|[-+*\/=^]', Operator),
            (r'\b(IN|NOT)\b', Operator.Word),
            (r'"', String, 'string'),  # StringLiteral
            # 'Table Name'[Column Name] or Table[Column Name]
            (r"'(?:[^']|'')*'(?!')(?:\[[ \w]+\])?|\w+\[[ \w]+\]",
             Name.Attribute),  # Column reference
            (r"\[[ \w]+\]", Name.Attribute),  # Measure reference
            (r'(?<!\w)(\d+\.?\d*|\.\d+\b)', Number),  # Number
            (r'[\[\](){}`,.]', Punctuation),  # Parenthesis
            (r'.*\n', Text),
        ],
        # /* ... */ comments may nest; each opener pushes this state again
        'multiline-comments': [
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[^/*]+', Comment.Multiline),
            (r'[/*]', Comment.Multiline)
        ],
        # inside a double-quoted string; '""' is an escaped quote
        'string': [
            (r'""', String.Escape),
            (r'"', String, '#pop'),
            (r'[^"]+', String),
        ]
    }

View File

@@ -0,0 +1,108 @@
"""
pygments.lexers.devicetree
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Devicetree language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include, default, words
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, String, Text, Whitespace
__all__ = ['DevicetreeLexer']
class DevicetreeLexer(RegexLexer):
    """
    Lexer for Devicetree files.
    """
    name = 'Devicetree'
    url = 'https://www.devicetree.org/'
    aliases = ['devicetree', 'dts']
    filenames = ['*.dts', '*.dtsi']
    mimetypes = ['text/x-c']
    version_added = '2.7'

    #: optional Whitespace or /*...*/ style comment
    _ws = r'\s*(?:/[*][^*/]*?[*]/\s*)*'

    tokens = {
        'macro': [
            # Include preprocessor directives (C style):
            (r'(#include)(' + _ws + r')([^\n]+)',
             bygroups(Comment.Preproc, Comment.Multiline, Comment.PreprocFile)),
            # Define preprocessor directives (C style):
            (r'(#define)(' + _ws + r')([^\n]+)',
             bygroups(Comment.Preproc, Comment.Multiline, Comment.Preproc)),
            # devicetree style with file:
            (r'(/[^*/{]+/)(' + _ws + r')("[^\n{]+")',
             bygroups(Comment.Preproc, Comment.Multiline, Comment.PreprocFile)),
            # devicetree style with property:
            (r'(/[^*/{]+/)(' + _ws + r')([^\n;{]*)([;]?)',
             bygroups(Comment.Preproc, Comment.Multiline, Comment.Preproc, Punctuation)),
        ],
        'whitespace': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline),
            # Open until EOF, so no ending delimiter
            (r'/(\\\n)?[*][\w\W]*', Comment.Multiline),
        ],
        'statements': [
            (r'(L?)(")', bygroups(String.Affix, String), 'string'),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'\d+', Number.Integer),
            # node label, e.g. "label:" before a node definition
            (r'([^\s{}/*]*)(\s*)(:)', bygroups(Name.Label, Text, Punctuation), '#pop'),
            # standard properties defined by the devicetree spec
            (words(('compatible', 'model', 'phandle', 'status', '#address-cells',
                    '#size-cells', 'reg', 'virtual-reg', 'ranges', 'dma-ranges',
                    'device_type', 'name'), suffix=r'\b'), Keyword.Reserved),
            (r'([~!%^&*+=|?:<>/#-])', Operator),
            (r'[()\[\]{},.]', Punctuation),
            (r'[a-zA-Z_][\w-]*(?=(?:\s*,\s*[a-zA-Z_][\w-]*|(?:' + _ws + r'))*\s*[=;])',
             Name),
            (r'[a-zA-Z_]\w*', Name.Attribute),
        ],
        'root': [
            include('whitespace'),
            include('macro'),

            # Nodes: "name@unit-address {"
            (r'([^/*@\s&]+|/)(@?)((?:0x)?[0-9a-fA-F,]*)(' + _ws + r')(\{)',
             bygroups(Name.Function, Operator, Number.Integer,
                      Comment.Multiline, Punctuation), 'node'),

            default('statement'),
        ],
        'statement': [
            include('whitespace'),
            include('statements'),
            (';', Punctuation, '#pop'),
        ],
        'node': [
            include('whitespace'),
            include('macro'),

            # nested nodes push this state again
            (r'([^/*@\s&]+|/)(@?)((?:0x)?[0-9a-fA-F,]*)(' + _ws + r')(\{)',
             bygroups(Name.Function, Operator, Number.Integer,
                      Comment.Multiline, Punctuation), '#push'),

            include('statements'),

            (r'\};', Punctuation, '#pop'),
            (';', Punctuation),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
    }

View File

@@ -0,0 +1,169 @@
"""
pygments.lexers.diff
~~~~~~~~~~~~~~~~~~~~
Lexers for diff/patch formats.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, Generic, \
Literal, Whitespace
__all__ = ['DiffLexer', 'DarcsPatchLexer', 'WDiffLexer']
class DiffLexer(RegexLexer):
    """
    Lexer for unified or context-style diffs or patches.
    """

    name = 'Diff'
    aliases = ['diff', 'udiff']
    filenames = ['*.diff', '*.patch']
    mimetypes = ['text/x-diff', 'text/x-patch']
    url = 'https://en.wikipedia.org/wiki/Diff'
    version_added = ''

    tokens = {
        'root': [
            # Unchanged context lines start with a single space.
            (r'( )(.*)(\n)', bygroups(Whitespace, Text, Whitespace)),
            # "!" marks changed lines in context diffs; "---" separates hunks.
            (r'(!.*|---)(\n)', bygroups(Generic.Strong, Whitespace)),
            # Removed lines: "< " (normal diff) or "-" (unified diff).
            (r'((?:< |-).*)(\n)', bygroups(Generic.Deleted, Whitespace)),
            # Added lines: "> " (normal diff) or "+" (unified diff).
            (r'((?:> |\+).*)(\n)', bygroups(Generic.Inserted, Whitespace)),
            # Hunk headers: "@@ ... @@" or normal-diff ranges such as "5,7c8".
            (
                r'(@.*|\d(?:,\d+)?(?:a|c|d)\d+(?:,\d+)?)(\n)',
                bygroups(Generic.Subheading, Whitespace),
            ),
            # Per-file headers emitted by diff tools / VCS "Index:" lines.
            (r'((?:[Ii]ndex|diff).*)(\n)', bygroups(Generic.Heading, Whitespace)),
            (r'(=.*)(\n)', bygroups(Generic.Heading, Whitespace)),
            # Fallback: anything else is plain text.
            (r'(.*)(\n)', bygroups(Text, Whitespace)),
        ]
    }

    def analyse_text(text):
        # Confidence score for guess_lexer(); implicitly returns None
        # (no match) when the text has no diff-like prefix.
        if text[:7] == 'Index: ':
            return True
        if text[:5] == 'diff ':
            return True
        if text[:4] == '--- ':
            return 0.9
class DarcsPatchLexer(RegexLexer):
    """
    DarcsPatchLexer is a lexer for the various versions of the darcs patch
    format.  Examples of this format are derived by commands such as
    ``darcs annotate --patch`` and ``darcs send``.
    """

    name = 'Darcs Patch'
    aliases = ['dpatch']
    filenames = ['*.dpatch', '*.darcspatch']
    url = 'https://darcs.net'
    version_added = '0.10'

    # Patch-primitive keywords that may begin a change line.
    DPATCH_KEYWORDS = ('hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
                       'replace')

    tokens = {
        'root': [
            (r'<', Operator),
            (r'>', Operator),
            (r'\{', Operator),
            (r'\}', Operator),
            # Complete patch header on two lines: "[<TAG >name\nauthor**date]".
            (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
             bygroups(Operator, Keyword, Name, Whitespace, Name, Operator,
                      Literal.Date, Whitespace, Operator)),
            # Same header without the closing "]" -- a long comment follows,
            # consumed by the 'comment' state until the "]" appears.
            (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
             bygroups(Operator, Keyword, Name, Whitespace, Name, Operator,
                      Literal.Date, Whitespace), 'comment'),
            (r'New patches:', Generic.Heading),
            (r'Context:', Generic.Heading),
            (r'Patch bundle hash:', Generic.Heading),
            (r'(\s*)({})(.*)(\n)'.format('|'.join(DPATCH_KEYWORDS)),
             bygroups(Whitespace, Keyword, Text, Whitespace)),
            # "+" / "-" line prefixes switch into the insert/delete states.
            (r'\+', Generic.Inserted, "insert"),
            (r'-', Generic.Deleted, "delete"),
            (r'(.*)(\n)', bygroups(Text, Whitespace)),
        ],
        'comment': [
            (r'[^\]].*\n', Comment),
            (r'\]', Operator, "#pop"),
        ],
        'specialText': [  # darcs add [_CODE_] special operators for clarity
            (r'\n', Whitespace, "#pop"),  # line-based
            (r'\[_[^_]*_]', Operator),
        ],
        'insert': [
            include('specialText'),
            (r'\[', Generic.Inserted),
            (r'[^\n\[]+', Generic.Inserted),
        ],
        'delete': [
            include('specialText'),
            (r'\[', Generic.Deleted),
            (r'[^\n\[]+', Generic.Deleted),
        ],
    }
class WDiffLexer(RegexLexer):
    """
    A wdiff lexer.

    Note that:

    * It only works with normal output (without options like ``-l``).
    * If the target files contain "[-", "-]", "{+", or "+}",
      especially they are unbalanced, the lexer will get confused.
    """

    name = 'WDiff'
    url = 'https://www.gnu.org/software/wdiff/'
    aliases = ['wdiff']
    filenames = ['*.wdiff']
    mimetypes = []
    version_added = '2.2'

    flags = re.MULTILINE | re.DOTALL

    # We can only assume "[-" after "[-" before "-]" is `nested`,
    # for instance wdiff to wdiff outputs. We have no way to
    # distinct these marker is of wdiff output from original text.
    ins_op = r"\{\+"          # insertion opener "{+"
    ins_cl = r"\+\}"          # insertion closer "+}"
    del_op = r"\[\-"          # deletion opener "[-"
    del_cl = r"\-\]"          # deletion closer "-]"
    normal = r'[^{}[\]+-]+'   # for performance
    tokens = {
        'root': [
            (ins_op, Generic.Inserted, 'inserted'),
            (del_op, Generic.Deleted, 'deleted'),
            (normal, Text),
            (r'.', Text),
        ],
        'inserted': [
            # Any opener seen inside a region is treated as a nested region
            # (#push) so that the matching closer pops back correctly.
            (ins_op, Generic.Inserted, '#push'),
            (del_op, Generic.Inserted, '#push'),
            (del_cl, Generic.Inserted, '#pop'),
            (ins_cl, Generic.Inserted, '#pop'),
            (normal, Generic.Inserted),
            (r'.', Generic.Inserted),
        ],
        'deleted': [
            (del_op, Generic.Deleted, '#push'),
            (ins_op, Generic.Deleted, '#push'),
            (ins_cl, Generic.Deleted, '#pop'),
            (del_cl, Generic.Deleted, '#pop'),
            (normal, Generic.Deleted),
            (r'.', Generic.Deleted),
        ],
    }

View File

@@ -0,0 +1,109 @@
"""
pygments.lexers.dns
~~~~~~~~~~~~~~~~~~~
Pygments lexers for DNS
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace, Literal
from pygments.lexer import RegexLexer, bygroups, include
__all__ = ['DnsZoneLexer']
# DNS record classes defined by RFC 1035 section 3.2.4
# (IN = Internet, CS = CSNET (obsolete), CH = Chaos, HS = Hesiod).
CLASSES = [
    "IN",
    "CS",
    "CH",
    "HS",
]

# A single capturing group that matches any of the record classes.
CLASSES_RE = "({})".format("|".join(CLASSES))
class DnsZoneLexer(RegexLexer):
    """
    Lexer for DNS zone file (RFC 1035 master-file syntax).
    """

    flags = re.MULTILINE

    name = 'Zone'
    aliases = ['zone']
    filenames = [ "*.zone" ]
    url = "https://datatracker.ietf.org/doc/html/rfc1035"
    mimetypes = ['text/dns']
    version_added = '2.16'

    tokens = {
        'root': [
            # Empty/comment line:
            (r'([ \t]*)(;.*)(\n)', bygroups(Whitespace, Comment.Single, Whitespace)),
            # Special directives:
            (r'^\$ORIGIN\b', Keyword, 'values'),
            (r'^\$TTL\b', Keyword, 'values'),
            (r'^\$INCLUDE\b', Comment.Preproc, 'include'),
            # TODO, $GENERATE https://bind9.readthedocs.io/en/v9.18.14/chapter3.html#soa-rr
            (r'^\$[A-Z]+\b', Keyword, 'values'),
            # Records (the owner may be "@", meaning the current origin):
            # <domain-name> [<TTL>] [<class>] <type> <RDATA> [<comment>]
            (r'^(@)([ \t]+)(?:([0-9]+[smhdw]?)([ \t]+))?(?:' + CLASSES_RE + "([ \t]+))?([A-Z]+)([ \t]+)",
                bygroups(Operator, Whitespace, Number.Integer, Whitespace, Name.Class, Whitespace, Keyword.Type, Whitespace),
                "values"),
            (r'^([^ \t\n]*)([ \t]+)(?:([0-9]+[smhdw]?)([ \t]+))?(?:' + CLASSES_RE + "([ \t]+))?([A-Z]+)([ \t]+)",
                bygroups(Name, Whitespace, Number.Integer, Whitespace, Name.Class, Whitespace, Keyword.Type, Whitespace),
                "values"),
            # <domain-name> [<class>] [<TTL>] <type> <RDATA> [<comment>]
            # BUG FIX: this rule previously matched the literal word "Operator"
            # instead of the "@" origin shorthand, and both bygroups() below
            # listed Number.Integer before Name.Class although in this variant
            # the class capture group precedes the TTL capture group.
            (r'^(@)([ \t]+)(?:' + CLASSES_RE + "([ \t]+))?(?:([0-9]+[smhdw]?)([ \t]+))?([A-Z]+)([ \t]+)",
                bygroups(Operator, Whitespace, Name.Class, Whitespace, Number.Integer, Whitespace, Keyword.Type, Whitespace),
                "values"),
            (r'^([^ \t\n]*)([ \t]+)(?:' + CLASSES_RE + "([ \t]+))?(?:([0-9]+[smhdw]?)([ \t]+))?([A-Z]+)([ \t]+)",
                bygroups(Name, Whitespace, Name.Class, Whitespace, Number.Integer, Whitespace, Keyword.Type, Whitespace),
                "values"),
        ],
        # Parsing values:
        'values': [
            (r'\n', Whitespace, "#pop"),
            (r'\(', Punctuation, 'nested'),
            include('simple-value'),
        ],
        # Parsing nested values (...):
        'nested': [
            (r'\)', Punctuation, "#pop"),
            include('multiple-simple-values'),
        ],
        # Parsing values:
        'simple-value': [
            (r'(;.*)', bygroups(Comment.Single)),
            (r'[ \t]+', Whitespace),
            (r"@\b", Operator),
            ('"', String, 'string'),
            (r'[0-9]+[smhdw]?$', Number.Integer),
            (r'([0-9]+[smhdw]?)([ \t]+)', bygroups(Number.Integer, Whitespace)),
            (r'\S+', Literal),
        ],
        'multiple-simple-values': [
            include('simple-value'),
            (r'[\n]+', Whitespace),
        ],
        'include': [
            (r'([ \t]+)([^ \t\n]+)([ \t]+)([-\._a-zA-Z]+)([ \t]+)(;.*)?$',
             bygroups(Whitespace, Comment.PreprocFile, Whitespace, Name, Whitespace, Comment.Single), '#pop'),
            (r'([ \t]+)([^ \t\n]+)([ \t\n]+)$', bygroups(Whitespace, Comment.PreprocFile, Whitespace), '#pop'),
        ],
        "string": [
            (r'\\"', String),
            (r'"', String, "#pop"),
            (r'[^"]+', String),
        ]
    }

    def analyse_text(text):
        # Zone files conventionally begin with an $ORIGIN directive.
        return text.startswith("$ORIGIN")

View File

@@ -0,0 +1,873 @@
"""
pygments.lexers.dotnet
~~~~~~~~~~~~~~~~~~~~~~
Lexers for .net languages.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
using, this, default, words
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String, Number, Literal, Other, Whitespace
from pygments.util import get_choice_opt
from pygments import unistring as uni
from pygments.lexers.html import XmlLexer
__all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer',
'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer', 'XppLexer']
class CSharpLexer(RegexLexer):
    """
    For C# source code.

    Additional options accepted:

    `unicodelevel`
      Determines which Unicode characters this lexer allows for identifiers.
      The possible values are:

      * ``none`` -- only the ASCII letters and numbers are allowed. This
        is the fastest selection.
      * ``basic`` -- all Unicode characters from the specification except
        category ``Lo`` are allowed.
      * ``full`` -- all Unicode characters as specified in the C# specs
        are allowed.  Note that this means a considerable slowdown since the
        ``Lo`` category has more than 40,000 characters in it!

      The default value is ``basic``.

    .. versionadded:: 0.8
    """

    name = 'C#'
    url = 'https://docs.microsoft.com/en-us/dotnet/csharp/'
    aliases = ['csharp', 'c#', 'cs']
    filenames = ['*.cs']
    mimetypes = ['text/x-csharp']  # inferred
    version_added = ''

    flags = re.MULTILINE | re.DOTALL

    # for the range of allowed unicode characters in identifiers, see
    # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
    levels = {
        'none': r'@?[_a-zA-Z]\w*',
        'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
                  '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
                                    'Cf', 'Mn', 'Mc') + ']*'),
        'full': ('@?(?:_|[^' +
                 uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' +
                 '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                      'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
    }

    tokens = {}
    token_variants = True

    # Build one full token table per unicode level; __init__ selects and
    # lazily compiles the table matching the `unicodelevel` option.
    for levelname, cs_ident in levels.items():
        tokens[levelname] = {
            'root': [
                include('numbers'),
                # method names
                (r'^([ \t]*)((?:' + cs_ident + r'(?:\[\])?\s+)+?)'  # return type
                 r'(' + cs_ident + ')'                              # method name
                 r'(\s*)(\()',                                      # signature start
                 bygroups(Whitespace, using(this), Name.Function, Whitespace,
                          Punctuation)),
                (r'^(\s*)(\[.*?\])', bygroups(Whitespace, Name.Attribute)),
                (r'[^\S\n]+', Whitespace),
                (r'(\\)(\n)', bygroups(Text, Whitespace)),  # line continuation
                (r'//.*?\n', Comment.Single),
                (r'/[*].*?[*]/', Comment.Multiline),
                (r'\n', Whitespace),
                # Multi-character operators, longest first.
                (words((
                    '>>>=', '>>=', '<<=', '<=', '>=', '+=', '-=', '*=', '/=',
                    '%=', '&=', '|=', '^=', '??=', '=>', '??', '?.', '!=', '==',
                    '&&', '||', '>>>', '>>', '<<', '++', '--', '+', '-', '*',
                    '/', '%', '&', '|', '^', '<', '>', '?', '!', '~', '=',
                )), Operator),
                (r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
                (r'[()\[\];:,.]', Punctuation),
                (r'[{}]', Punctuation),
                (r'@"(""|[^"])*"', String),              # verbatim string
                (r'\$?"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String),  # (interpolated) string
                (r"'\\.'|'[^\\]'", String.Char),
                (r"[0-9]+(\.[0-9]*)?([eE][+-][0-9]+)?"
                 r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
                # Preprocessor directives.
                (r'(#)([ \t]*)(if|endif|else|elif|define|undef|'
                 r'line|error|warning|region|endregion|pragma)\b(.*?)(\n)',
                 bygroups(Comment.Preproc, Whitespace, Comment.Preproc,
                          Comment.Preproc, Whitespace)),
                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Whitespace,
                                                       Keyword)),
                (words((
                    'abstract', 'as', 'async', 'await', 'base', 'break', 'by',
                    'case', 'catch', 'checked', 'const', 'continue', 'default',
                    'delegate', 'do', 'else', 'enum', 'event', 'explicit',
                    'extern', 'false', 'finally', 'fixed', 'for', 'foreach',
                    'goto', 'if', 'implicit', 'in', 'interface', 'internal',
                    'is', 'let', 'lock', 'new', 'null', 'on', 'operator',
                    'out', 'override', 'params', 'private', 'protected',
                    'public', 'readonly', 'ref', 'return', 'sealed', 'sizeof',
                    'stackalloc', 'static', 'switch', 'this', 'throw', 'true',
                    'try', 'typeof', 'unchecked', 'unsafe', 'virtual', 'void',
                    'while', 'get', 'set', 'new', 'partial', 'yield', 'add',
                    'remove', 'value', 'alias', 'ascending', 'descending',
                    'from', 'group', 'into', 'orderby', 'select', 'thenby',
                    'where', 'join', 'equals', 'record', 'allows',
                    'and', 'init', 'managed', 'nameof', 'nint', 'not',
                    'notnull', 'nuint', 'or', 'scoped', 'unmanaged', 'when',
                    'with'
                ), suffix=r'\b'), Keyword),
                # version 1: assumes that 'file' is the only contextual keyword
                # that is a class modifier
                (r'(file)(\s+)(record|class|abstract|enum|new|sealed|static)\b',
                 bygroups(Keyword, Whitespace, Keyword)),
                (r'(global)(::)', bygroups(Keyword, Punctuation)),
                (r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|'
                 r'sbyte|short|string|uint|ulong|ushort|var)\b\??', Keyword.Type),
                (r'(class|struct)(\s+)', bygroups(Keyword, Whitespace), 'class'),
                (r'(namespace|using)(\s+)', bygroups(Keyword, Whitespace), 'namespace'),
                (cs_ident, Name),
            ],
            'numbers_int': [
                (r"0[xX][0-9a-fA-F]+(([uU][lL]?)|[lL][uU]?)?", Number.Hex),
                (r"0[bB][01]+(([uU][lL]?)|[lL][uU]?)?", Number.Bin),
                (r"[0-9]+(([uU][lL]?)|[lL][uU]?)?", Number.Integer),
            ],
            'numbers_float': [
                (r"([0-9]+\.[0-9]+([eE][+-]?[0-9]+)?[fFdDmM]?)|"
                 r"(\.[0-9]+([eE][+-]?[0-9]+)?[fFdDmM]?)|"
                 r"([0-9]+([eE][+-]?[0-9]+)[fFdDmM]?)|"
                 r"([0-9]+[fFdDmM])", Number.Float),
            ],
            'numbers': [
                # Floats must be tried before integers so the integer rule
                # does not swallow the leading digits of a float literal.
                include('numbers_float'),
                include('numbers_int'),
            ],
            'class': [
                (cs_ident, Name.Class, '#pop'),
                default('#pop'),
            ],
            'namespace': [
                (r'(?=\()', Text, '#pop'),  # using (resource)
                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop'),
            ]
        }

    def __init__(self, **options):
        # Select the token table for the requested unicode level; compiled
        # tables are cached per-level in _all_tokens.
        level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]

        RegexLexer.__init__(self, **options)
class NemerleLexer(RegexLexer):
    """
    For Nemerle source code.

    Additional options accepted:

    `unicodelevel`
      Determines which Unicode characters this lexer allows for identifiers.
      The possible values are:

      * ``none`` -- only the ASCII letters and numbers are allowed. This
        is the fastest selection.
      * ``basic`` -- all Unicode characters from the specification except
        category ``Lo`` are allowed.
      * ``full`` -- all Unicode characters as specified in the C# specs
        are allowed.  Note that this means a considerable slowdown since the
        ``Lo`` category has more than 40,000 characters in it!

      The default value is ``basic``.
    """

    name = 'Nemerle'
    url = 'http://nemerle.org'
    aliases = ['nemerle']
    filenames = ['*.n']
    mimetypes = ['text/x-nemerle']  # inferred
    version_added = '1.5'

    flags = re.MULTILINE | re.DOTALL

    # for the range of allowed unicode characters in identifiers, see
    # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
    levels = {
        'none': r'@?[_a-zA-Z]\w*',
        'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
                  '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
                                    'Cf', 'Mn', 'Mc') + ']*'),
        'full': ('@?(?:_|[^' +
                 uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' +
                 '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                      'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
    }

    tokens = {}
    token_variants = True

    # One token table per unicode level, selected in __init__.
    for levelname, cs_ident in levels.items():
        tokens[levelname] = {
            'root': [
                # method names
                (r'^([ \t]*)((?:' + cs_ident + r'(?:\[\])?\s+)+?)'  # return type
                 r'(' + cs_ident + ')'                              # method name
                 r'(\s*)(\()',                                      # signature start
                 bygroups(Whitespace, using(this), Name.Function, Whitespace, \
                          Punctuation)),
                (r'^(\s*)(\[.*?\])', bygroups(Whitespace, Name.Attribute)),
                (r'[^\S\n]+', Whitespace),
                (r'(\\)(\n)', bygroups(Text, Whitespace)),  # line continuation
                (r'//.*?\n', Comment.Single),
                (r'/[*].*?[*]/', Comment.Multiline),
                (r'\n', Whitespace),
                # Splice strings: $"..." and $<#...#> allow $(...) interpolation.
                (r'(\$)(\s*)(")', bygroups(String, Whitespace, String),
                 'splice-string'),
                (r'(\$)(\s*)(<#)', bygroups(String, Whitespace, String),
                 'splice-string2'),
                (r'<#', String, 'recursive-string'),
                (r'(<\[)(\s*)(' + cs_ident + ':)?',
                 bygroups(Keyword, Whitespace, Keyword)),
                (r'\]\>', Keyword),

                # quasiquotation only
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),

                (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
                (r'[{}]', Punctuation),
                (r'@"(""|[^"])*"', String),
                (r'"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String),
                (r"'\\.'|'[^\\]'", String.Char),
                (r"0[xX][0-9a-fA-F]+[Ll]?", Number),
                (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number),
                (r'(#)([ \t]*)(if|endif|else|elif|define|undef|'
                 r'line|error|warning|region|endregion|pragma)\b',
                 bygroups(Comment.Preproc, Whitespace, Comment.Preproc), 'preproc'),
                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Whitespace, Keyword)),
                (r'(abstract|and|as|base|catch|def|delegate|'
                 r'enum|event|extern|false|finally|'
                 r'fun|implements|interface|internal|'
                 r'is|macro|match|matches|module|mutable|new|'
                 r'null|out|override|params|partial|private|'
                 r'protected|public|ref|sealed|static|'
                 r'syntax|this|throw|true|try|type|typeof|'
                 r'virtual|volatile|when|where|with|'
                 r'assert|assert2|async|break|checked|continue|do|else|'
                 r'ensures|for|foreach|if|late|lock|new|nolate|'
                 r'otherwise|regexp|repeat|requires|return|surroundwith|'
                 r'unchecked|unless|using|while|yield)\b', Keyword),
                (r'(global)(::)', bygroups(Keyword, Punctuation)),
                (r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
                 r'short|string|uint|ulong|ushort|void|array|list)\b\??',
                 Keyword.Type),
                (r'(:>?)(\s*)(' + cs_ident + r'\??)',
                 bygroups(Punctuation, Whitespace, Keyword.Type)),
                (r'(class|struct|variant|module)(\s+)',
                 bygroups(Keyword, Whitespace), 'class'),
                (r'(namespace|using)(\s+)', bygroups(Keyword, Whitespace),
                 'namespace'),
                (cs_ident, Name),
            ],
            'class': [
                (cs_ident, Name.Class, '#pop')
            ],
            'preproc': [
                (r'\w+', Comment.Preproc),
                (r'[ \t]+', Whitespace),
                (r'\n', Whitespace, '#pop')
            ],
            'namespace': [
                (r'(?=\()', Text, '#pop'),  # using (resource)
                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
            ],
            'splice-string': [
                (r'[^"$]', String),
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),
                (r'\\"', String),
                (r'"', String, '#pop')
            ],
            'splice-string2': [
                (r'[^#<>$]', String),
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),
                (r'<#', String, '#push'),
                (r'#>', String, '#pop')
            ],
            'recursive-string': [
                (r'[^#<>]', String),
                (r'<#', String, '#push'),
                (r'#>', String, '#pop')
            ],
            'splice-string-content': [
                (r'if|match', Keyword),
                (r'[~!%^&*+=|\[\]:;,.<>/?-\\"$ ]', Punctuation),
                (cs_ident, Name),
                (r'\d+', Number),
                (r'\(', Punctuation, '#push'),
                (r'\)', Punctuation, '#pop')
            ]
        }

    def __init__(self, **options):
        # Select (and lazily compile) the token table for the requested
        # unicode level, mirroring CSharpLexer.__init__.
        level = get_choice_opt(options, 'unicodelevel', list(self.tokens),
                               'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]

        RegexLexer.__init__(self, **options)

    def analyse_text(text):
        """Nemerle is quite similar to Python, but @if is relatively uncommon
        elsewhere."""
        result = 0
        if '@if' in text:
            result += 0.1

        return result
class BooLexer(RegexLexer):
    """
    For Boo source code.
    """

    name = 'Boo'
    url = 'https://github.com/boo-lang/boo'
    aliases = ['boo']
    filenames = ['*.boo']
    mimetypes = ['text/x-boo']
    version_added = ''

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'(#|//).*$', Comment.Single),
            (r'/[*]', Comment.Multiline, 'comment'),
            (r'[]{}:(),.;[]', Punctuation),
            (r'(\\)(\n)', bygroups(Text, Whitespace)),  # line continuation
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            # Regex literals: /.../ (no whitespace) and verbatim @/.../ form.
            (r'/(\\\\|\\[^\\]|[^/\\\s])/', String.Regex),
            (r'@/(\\\\|\\[^\\]|[^/\\])*/', String.Regex),
            (r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
            (r'(as|abstract|callable|constructor|destructor|do|import|'
             r'enum|event|final|get|interface|internal|of|override|'
             r'partial|private|protected|public|return|set|static|'
             r'struct|transient|virtual|yield|super|and|break|cast|'
             r'continue|elif|else|ensure|except|for|given|goto|if|in|'
             r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|'
             r'while|from|as)\b', Keyword),
            # Anonymous closure: "def (args)" has no name to capture.
            (r'def(?=\s+\(.*?\))', Keyword),
            (r'(def)(\s+)', bygroups(Keyword, Whitespace), 'funcname'),
            (r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
            (r'(namespace)(\s+)', bygroups(Keyword, Whitespace), 'namespace'),
            # Builtins only when not preceded by "." (i.e. not attribute access).
            (r'(?<!\.)(true|false|null|self|__eval__|__switch__|array|'
             r'assert|checked|enumerate|filter|getter|len|lock|map|'
             r'matrix|max|min|normalArrayIndexing|print|property|range|'
             r'rawArrayIndexing|required|typeof|unchecked|using|'
             r'yieldAll|zip)\b', Name.Builtin),
            (r'"""(\\\\|\\"|.*?)"""', String.Double),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'[a-zA-Z_]\w*', Name),
            (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
            (r'[0-9][0-9.]*(ms?|d|h|s)', Number),  # timespan literal
            (r'0\d+', Number.Oct),
            (r'0x[a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer),
        ],
        'comment': [
            ('/[*]', Comment.Multiline, '#push'),
            ('[*]/', Comment.Multiline, '#pop'),
            ('[^/*]', Comment.Multiline),
            ('[*/]', Comment.Multiline)
        ],
        'funcname': [
            (r'[a-zA-Z_]\w*', Name.Function, '#pop')
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        'namespace': [
            (r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
        ]
    }
class VbNetLexer(RegexLexer):
    """
    For Visual Basic.NET source code.
    Also LibreOffice Basic, OpenOffice Basic, and StarOffice Basic.
    """

    name = 'VB.net'
    url = 'https://docs.microsoft.com/en-us/dotnet/visual-basic/'
    aliases = ['vb.net', 'vbnet', 'lobas', 'oobas', 'sobas', 'visual-basic', 'visualbasic']
    filenames = ['*.vb', '*.bas']
    mimetypes = ['text/x-vbnet', 'text/x-vba']  # (?)
    version_added = ''

    # Unicode identifier pattern (VB is case-insensitive, so no 'Lu' needed).
    uni_name = '[_' + uni.combine('Ll', 'Lt', 'Lm', 'Nl') + ']' + \
               '[' + uni.combine('Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
                                 'Cf', 'Mn', 'Mc') + ']*'

    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            (r'^\s*<.*?>', Name.Attribute),
            (r'\s+', Whitespace),
            (r'\n', Whitespace),
            # Comments: "Rem" keyword or leading apostrophe.
            (r'(rem\b.*?)(\n)', bygroups(Comment, Whitespace)),
            (r"('.*?)(\n)", bygroups(Comment, Whitespace)),
            (r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#Else|#End\s+If|#Const|'
             r'#ExternalSource.*?\n|#End\s+ExternalSource|'
             r'#Region.*?\n|#End\s+Region|#ExternalChecksum',
             Comment.Preproc),
            (r'[(){}!#,.:]', Punctuation),
            (r'(Option)(\s+)(Strict|Explicit|Compare)(\s+)'
             r'(On|Off|Binary|Text)',
             bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration,
                      Whitespace, Keyword.Declaration)),
            (words((
                'AddHandler', 'Alias', 'ByRef', 'ByVal', 'Call', 'Case',
                'Catch', 'CBool', 'CByte', 'CChar', 'CDate', 'CDec', 'CDbl',
                'CInt', 'CLng', 'CObj', 'Continue', 'CSByte', 'CShort', 'CSng',
                'CStr', 'CType', 'CUInt', 'CULng', 'CUShort', 'Declare',
                'Default', 'Delegate', 'DirectCast', 'Do', 'Each', 'Else',
                'ElseIf', 'EndIf', 'Erase', 'Error', 'Event', 'Exit', 'False',
                'Finally', 'For', 'Friend', 'Get', 'Global', 'GoSub', 'GoTo',
                'Handles', 'If', 'Implements', 'Inherits', 'Interface', 'Let',
                'Lib', 'Loop', 'Me', 'MustInherit', 'MustOverride', 'MyBase',
                'MyClass', 'Narrowing', 'New', 'Next', 'Not', 'Nothing',
                'NotInheritable', 'NotOverridable', 'Of', 'On', 'Operator',
                'Option', 'Optional', 'Overloads', 'Overridable', 'Overrides',
                'ParamArray', 'Partial', 'Private', 'Protected', 'Public',
                'RaiseEvent', 'ReadOnly', 'ReDim', 'RemoveHandler', 'Resume',
                'Return', 'Select', 'Set', 'Shadows', 'Shared', 'Single',
                'Static', 'Step', 'Stop', 'SyncLock', 'Then', 'Throw', 'To',
                'True', 'Try', 'TryCast', 'Wend', 'Using', 'When', 'While',
                'Widening', 'With', 'WithEvents', 'WriteOnly'),
                prefix=r'(?<!\.)', suffix=r'\b'), Keyword),
            # "End Xxx" constructs are resolved in the 'end' state.
            (r'(?<!\.)End\b', Keyword, 'end'),
            (r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
            (r'(?<!\.)(Function|Sub|Property)(\s+)',
             bygroups(Keyword, Whitespace), 'funcname'),
            (r'(?<!\.)(Class|Structure|Enum)(\s+)',
             bygroups(Keyword, Whitespace), 'classname'),
            (r'(?<!\.)(Module|Namespace|Imports)(\s+)',
             bygroups(Keyword, Whitespace), 'namespace'),
            (r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
             r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
             r'UShort)\b', Keyword.Type),
            (r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|'
             r'Or|OrElse|TypeOf|Xor)\b', Operator.Word),
            (r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|'
             r'<=|>=|<>|[-&*/\\^+=<>\[\]]',
             Operator),
            ('"', String, 'string'),
            (r'(_)(\n)', bygroups(Text, Whitespace)),  # Line continuation (must be before Name)
            (uni_name + '[%&@!#$]?', Name),
            ('#.*?#', Literal.Date),
            (r'(\d+\.\d*|\d*\.\d+)(F[+-]?[0-9]+)?', Number.Float),
            (r'\d+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
        ],
        'string': [
            (r'""', String),           # escaped double quote
            (r'"C?', String, '#pop'),  # optional Char suffix
            (r'[^"]+', String),
        ],
        'dim': [
            (uni_name, Name.Variable, '#pop'),
            default('#pop'),  # any other syntax
        ],
        'funcname': [
            (uni_name, Name.Function, '#pop'),
        ],
        'classname': [
            (uni_name, Name.Class, '#pop'),
        ],
        'namespace': [
            (uni_name, Name.Namespace),
            (r'\.', Name.Namespace),
            default('#pop'),
        ],
        'end': [
            (r'\s+', Whitespace),
            (r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b',
             Keyword, '#pop'),
            default('#pop'),
        ]
    }

    def analyse_text(text):
        # Confidence score for guess_lexer(); None when no hint is found.
        if re.search(r'^\s*(#If|Module|Namespace)', text, re.MULTILINE):
            return 0.5
class GenericAspxLexer(RegexLexer):
    """
    Lexer for ASP.NET pages.
    """

    name = 'aspx-gen'

    filenames = []
    mimetypes = []
    url = 'https://dotnet.microsoft.com/en-us/apps/aspnet'

    flags = re.DOTALL

    tokens = {
        'root': [
            # Server blocks <% ... %>, <%= ... %>, <%# ... %>, <%@ ... %>.
            # Contents are emitted as Other so a DelegatingLexer subclass can
            # re-lex them with the page's declared language.
            (r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)),
            (r'(<script.*?>)(.*?)(</script>)', bygroups(using(XmlLexer),
                                                        Other,
                                                        using(XmlLexer))),
            # Everything outside server blocks is handled by the XML lexer.
            (r'(.+?)(?=<)', using(XmlLexer)),
            (r'.+', using(XmlLexer)),
        ],
    }
# TODO support multiple languages within the same source file
class CSharpAspxLexer(DelegatingLexer):
    """
    Lexer for highlighting C# within ASP.NET pages.
    """

    name = 'aspx-cs'
    aliases = ['aspx-cs']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []
    url = 'https://dotnet.microsoft.com/en-us/apps/aspnet'
    version_added = ''

    def __init__(self, **options):
        # Delegate: C# lexes the server blocks, the generic ASPX lexer
        # handles the surrounding markup.
        super().__init__(CSharpLexer, GenericAspxLexer, **options)

    def analyse_text(text):
        # A <%@ Page Language="C#" %> directive is the strongest hint;
        # a <script language="C#"> attribute is slightly weaker.
        # (A regex match object is always truthy, so no explicit
        # "is not None" comparison is needed.)
        if re.search(r'Page\s*Language="C#"', text, re.I):
            return 0.2
        if re.search(r'script[^>]+language=["\']C#', text, re.I):
            return 0.15
class VbNetAspxLexer(DelegatingLexer):
    """
    Lexer for highlighting Visual Basic.net within ASP.NET pages.
    """

    name = 'aspx-vb'
    aliases = ['aspx-vb']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []
    url = 'https://dotnet.microsoft.com/en-us/apps/aspnet'
    version_added = ''

    def __init__(self, **options):
        # Delegate: VB.NET lexes the server blocks, the generic ASPX lexer
        # handles the surrounding markup.
        super().__init__(VbNetLexer, GenericAspxLexer, **options)

    def analyse_text(text):
        # A <%@ Page Language="Vb" %> directive is the strongest hint;
        # a <script language="vb"> attribute is slightly weaker.
        # (A regex match object is always truthy, so no explicit
        # "is not None" comparison is needed.)
        if re.search(r'Page\s*Language="Vb"', text, re.I):
            return 0.2
        if re.search(r'script[^>]+language=["\']vb', text, re.I):
            return 0.15
# Very close to functional.OcamlLexer
class FSharpLexer(RegexLexer):
    """
    For the F# language (version 3.0).
    """

    name = 'F#'
    url = 'https://fsharp.org/'
    aliases = ['fsharp', 'f#']
    filenames = ['*.fs', '*.fsi', '*.fsx']
    mimetypes = ['text/x-fsharp']
    version_added = '1.5'

    keywords = [
        'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default',
        'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else',
        'end', 'exception', 'extern', 'false', 'finally', 'for', 'function',
        'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal',
        'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable',
        'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public',
        'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to',
        'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when',
        'while', 'with', 'yield!', 'yield',
    ]
    # Reserved words; cannot hurt to color them as keywords too.
    keywords += [
        'atomic', 'break', 'checked', 'component', 'const', 'constraint',
        'constructor', 'continue', 'eager', 'event', 'external', 'fixed',
        'functor', 'include', 'method', 'mixin', 'object', 'parallel',
        'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait',
        'virtual', 'volatile',
    ]
    # Punctuation/operator fragments, pre-escaped for regex alternation.
    keyopts = [
        '!=', '#', '&&', '&', r'\(', r'\)', r'\*', r'\+', ',', r'-\.',
        '->', '-', r'\.\.', r'\.', '::', ':=', ':>', ':', ';;', ';', '<-',
        r'<\]', '<', r'>\]', '>', r'\?\?', r'\?', r'\[<', r'\[\|', r'\[', r'\]',
        '_', '`', r'\{', r'\|\]', r'\|', r'\}', '~', '<@@', '<@', '=', '@>', '@@>',
    ]

    operators = r'[!$%&*+\./:<=>?@^|~-]'
    word_operators = ['and', 'or', 'not']
    prefix_syms = r'[!?~]'
    infix_syms = r'[=<>@^|&+\*/$%-]'
    primitives = [
        'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single',
        'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32',
        'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string',
        'list', 'exn', 'obj', 'enum',
    ]

    # See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or
    # http://fsharp.org/about/files/spec.pdf for reference.  Good luck.

    tokens = {
        'escape-sequence': [
            (r'\\[\\"\'ntbrafv]', String.Escape),
            (r'\\[0-9]{3}', String.Escape),
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\U[0-9a-fA-F]{8}', String.Escape),
        ],
        'root': [
            (r'\s+', Whitespace),
            (r'\(\)|\[\]', Name.Builtin.Pseudo),
            # Capitalized name followed by "." is a namespace/module path.
            (r'\b(?<!\.)([A-Z][\w\']*)(?=\s*\.)',
             Name.Namespace, 'dotted'),
            (r'\b([A-Z][\w\']*)', Name),
            (r'(///.*?)(\n)', bygroups(String.Doc, Whitespace)),
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
            # "(*)" alone is the multiplication operator, not a comment.
            (r'\(\*(?!\))', Comment, 'comment'),

            (r'@"', String, 'lstring'),
            (r'"""', String, 'tqs'),
            (r'"', String, 'string'),

            (r'\b(open|module)(\s+)([\w.]+)',
             bygroups(Keyword, Whitespace, Name.Namespace)),
            (r'\b(let!?)(\s+)(\w+)',
             bygroups(Keyword, Whitespace, Name.Variable)),
            (r'\b(type)(\s+)(\w+)',
             bygroups(Keyword, Whitespace, Name.Class)),
            (r'\b(member|override)(\s+)(\w+)(\.)(\w+)',
             bygroups(Keyword, Whitespace, Name, Punctuation, Name.Function)),
            (r'\b({})\b'.format('|'.join(keywords)), Keyword),
            # Double-backtick-quoted identifiers.
            (r'``([^`\n\r\t]|`[^`\n\r\t])+``', Name),
            (r'({})'.format('|'.join(keyopts)), Operator),
            (rf'({infix_syms}|{prefix_syms})?{operators}', Operator),
            (r'\b({})\b'.format('|'.join(word_operators)), Operator.Word),
            (r'\b({})\b'.format('|'.join(primitives)), Keyword.Type),
            (r'(#)([ \t]*)(if|endif|else|line|nowarn|light|\d+)\b(.*?)(\n)',
             bygroups(Comment.Preproc, Whitespace, Comment.Preproc,
                      Comment.Preproc, Whitespace)),

            (r"[^\W\d][\w']*", Name),

            (r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer),
            (r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex),
            (r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct),
            (r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Bin),
            (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)[fFmM]?',
             Number.Float),

            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?",
             String.Char),
            (r"'.'", String.Char),
            (r"'", Keyword),  # a stray quote is another syntax element

            (r'@?"', String.Double, 'string'),

            (r'[~?][a-z][\w\']*:', Name.Variable),
        ],
        'dotted': [
            (r'\s+', Whitespace),
            (r'\.', Punctuation),
            (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
            (r'[A-Z][\w\']*', Name, '#pop'),
            (r'[a-z_][\w\']*', Name, '#pop'),
            # e.g. dictionary index access
            default('#pop'),
        ],
        'comment': [
            (r'[^(*)@"]+', Comment),
            (r'\(\*', Comment, '#push'),
            (r'\*\)', Comment, '#pop'),
            # comments cannot be closed within strings in comments
            (r'@"', String, 'lstring'),
            (r'"""', String, 'tqs'),
            (r'"', String, 'string'),
            (r'[(*)@]', Comment),
        ],
        'string': [
            (r'[^\\"]+', String),
            include('escape-sequence'),
            (r'\\\n', String),
            (r'\n', String),  # newlines are allowed in any string
            (r'"B?', String, '#pop'),
        ],
        'lstring': [
            (r'[^"]+', String),
            (r'\n', String),
            (r'""', String),
            (r'"B?', String, '#pop'),
        ],
        'tqs': [
            (r'[^"]+', String),
            (r'\n', String),
            (r'"""B?', String, '#pop'),
            (r'"', String),
        ],
    }

    def analyse_text(text):
        """F# doesn't have that many unique features -- |> and <| are weak
        indicators."""
        result = 0
        if '|>' in text:
            result += 0.05
        if '<|' in text:
            result += 0.05

        return result
class XppLexer(RegexLexer):

    """
    For X++ source code. This is based loosely on the CSharpLexer.

    X++ is the application language of Microsoft Dynamics 365
    Finance and Operations (formerly Dynamics AX).
    """

    name = 'X++'
    url = 'https://learn.microsoft.com/en-us/dynamics365/fin-ops-core/dev-itpro/dev-ref/xpp-language-reference'
    aliases = ['xpp', 'x++']
    filenames = ['*.xpp']
    version_added = '2.15'

    flags = re.MULTILINE

    # Identifier pattern: optional '@' prefix, then a Unicode letter or
    # underscore, then letters/digits/connector punctuation etc.
    XPP_CHARS = ('@?(?:_|[^' +
                 uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])' +
                 '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                      'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*')
    # Temporary, see
    # https://github.com/thatch/regexlint/pull/49
    XPP_CHARS = XPP_CHARS.replace('\x00', '\x01')

    OPERATORS = (
        '<=', '>=', '+=', '-=', '*=', '/=', '!=', '==',
        '&&', '||', '>>', '<<', '++', '--', '+', '-', '*',
        '/', '%', '&', '|', '^', '<', '>', '?', '!', '~', '=',
    )
    KEYWORDS = ('abstract','anytype','as','async','asc','at','avg','break','breakpoint','by','byref','case','catch',
                'changecompany','client','container','continue','count','crosscompany','default','delegate',
                'delete_from','desc','display','div','do','edit','else','element','eventhandler','exists','false','final',
                'firstfast','firstonly','firstonly10','firstonly100','firstonly1000','flush','for','forceliterals',
                'forcenestedloop','forceplaceholders','forceselectorder','forupdate','from','group','if','insert_recordset',
                'interface','is','join','like','maxof','minof','mod','new','next','nofetch','notexists','null','optimisticlock','order',
                'outer','pause','pessimisticlock','print','private','protected','public','repeatableread','retry','return',
                'reverse','select','server','setting','static','sum','super','switch','tablelock','this','throw','true','try','ttsabort','ttsbegin',
                'ttscommit','update_recordset','validtimestate','void','where','while','window')
    # Built-in functions evaluated at run time.
    RUNTIME_FUNCTIONS = ('_duration','abs','acos','any2Date','any2Enum','any2Guid','any2Int','any2Int64','any2Real','any2Str','anytodate',
                         'anytoenum','anytoguid','anytoint','anytoint64','anytoreal','anytostr','asin','atan','beep','cTerm','char2Num','classIdGet',
                         'corrFlagGet','corrFlagSet','cos','cosh','curExt','curUserId','date2Num','date2Str','datetime2Str','dayName','dayOfMth',
                         'dayOfWk','dayOfYr','ddb','decRound','dg','dimOf','endMth','enum2str','exp','exp10','fV','fieldId2Name','fieldId2PName',
                         'fieldName2Id','frac','funcName','getCurrentPartition','getCurrentPartitionRecId','getPrefix','guid2Str','idg','indexId2Name',
                         'indexName2Id','int2Str','int642Str','intvMax','intvName','intvNo','intvNorm','log10','logN','match','max','min','mkDate','mthName',
                         'mthOfYr','newGuid','nextMth','nextQtr','nextYr','num2Char','num2Date','num2Str','pmt','power','prevMth','prevQtr','prevYr',
                         'prmIsDefault','pt','pv','rate','refPrintAll','round','runAs','sessionId','setPrefix','sin','sinh','sleep','sln','str2Date',
                         'str2Datetime','str2Enum','str2Guid','str2Int','str2Int64','str2Num','str2Time','strAlpha','strCmp','strColSeq','strDel',
                         'strFind','strFmt','strIns','strKeep','strLTrim','strLen','strLine','strLwr','strNFind','strPoke','strPrompt','strRTrim',
                         'strRem','strRep','strScan','strUpr','subStr','syd','systemDateGet','systemDateSet','tableId2Name',
                         'tableId2PName','tableName2Id','tan','tanh','term','time2Str','timeNow','today','trunc','typeOf','uint2Str','wkOfYr','year')
    # Intrinsic functions resolved at compile time (metadata lookups).
    COMPILE_FUNCTIONS = ('attributeStr','classNum','classStr','configurationKeyNum','configurationKeyStr','dataEntityDataSourceStr','delegateStr',
                         'dimensionHierarchyLevelStr','dimensionHierarchyStr','dimensionReferenceStr','dutyStr','enumCnt','enumLiteralStr','enumNum','enumStr',
                         'extendedTypeNum','extendedTypeStr','fieldNum','fieldPName','fieldStr','formControlStr','formDataFieldStr','formDataSourceStr',
                         'formMethodStr','formStr','identifierStr','indexNum','indexStr','licenseCodeNum','licenseCodeStr','literalStr','maxDate','maxInt',
                         'measureStr','measurementStr','menuItemActionStr','menuItemDisplayStr','menuItemOutputStr','menuStr','methodStr','minInt','privilegeStr',
                         'queryDatasourceStr','queryMethodStr','queryStr','reportStr','resourceStr','roleStr','ssrsReportStr','staticDelegateStr','staticMethodStr',
                         'tableCollectionStr','tableFieldGroupStr','tableMethodStr','tableNum','tablePName','tableStaticMethodStr','tableStr','tileStr','varStr',
                         'webActionItemStr','webDisplayContentItemStr','webFormStr','webMenuStr','webOutputContentItemStr','webReportStr','webSiteTempStr',
                         'webStaticFileStr','webUrlItemStr','webWebPartStr','webletItemStr','webpageDefStr','websiteDefStr','workflowApprovalStr',
                         'workflowCategoryStr','workflowTaskStr','workflowTypeStr')

    # NOTE: a redundant dead assignment ``tokens = {}`` (immediately
    # overwritten by the dict below) was removed here.
    tokens = {
        'root': [
            # method names
            (r'(\s*)\b(else|if)\b([^\n])', bygroups(Whitespace, Keyword, using(this))),  # ensure that if is not treated like a function
            (r'^([ \t]*)((?:' + XPP_CHARS + r'(?:\[\])?\s+)+?)'  # return type
             r'(' + XPP_CHARS + ')'                              # method name
             r'(\s*)(\()',                                       # signature start
             bygroups(Whitespace, using(this), Name.Function, Whitespace,
                      Punctuation)),
            (r'^(\s*)(\[)([^\n]*?)(\])', bygroups(Whitespace, Name.Attribute, Name.Variable.Class, Name.Attribute)),
            (r'[^\S\n]+', Whitespace),
            (r'(\\)(\n)', bygroups(Text, Whitespace)),  # line continuation
            (r'//[^\n]*?\n', Comment.Single),
            (r'/[*][^\n]*?[*]/', Comment.Multiline),
            (r'\n', Whitespace),
            (words(OPERATORS), Operator),
            (r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
            (r'[()\[\];:,.#@]', Punctuation),
            (r'[{}]', Punctuation),
            (r'@"(""|[^"])*"', String),
            (r'\$?"(\\\\|\\[^\\]|[^"\\\n])*["\n]', String),
            (r"'\\.'|'[^\\]'", String.Char),
            (r"[0-9]+(\.[0-9]*)?([eE][+-][0-9]+)?"
             r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
            (words(KEYWORDS, suffix=r'\b'), Keyword),
            (r'(boolean|int|int64|str|real|guid|date)\b\??', Keyword.Type),
            (r'(class|struct|extends|implements)(\s+)', bygroups(Keyword, Whitespace), 'class'),
            (r'('+XPP_CHARS+')(::)', bygroups(Name.Variable.Class, Punctuation)),
            (r'(\s*)(\w+)(\s+\w+(,|=)?[^\n]*;)', bygroups(Whitespace, Name.Variable.Class, using(this))),  # declaration
            # x++ specific function to get field should highlight the classname
            (r'(fieldNum\()('+XPP_CHARS+r')(\s*,\s*)('+XPP_CHARS+r')(\s*\))',
             bygroups(using(this), Name.Variable.Class, using(this), Name.Property, using(this))),
            # x++ specific function to get table should highlight the classname
            (r'(tableNum\()('+XPP_CHARS+r')(\s*\))',
             bygroups(using(this), Name.Variable.Class, using(this))),
            (words(RUNTIME_FUNCTIONS, suffix=r'(?=\()'), Name.Function.Magic),
            (words(COMPILE_FUNCTIONS, suffix=r'(?=\()'), Name.Function.Magic),
            (XPP_CHARS, Name),
        ],
        'class': [
            (XPP_CHARS, Name.Class, '#pop'),
            default('#pop'),
        ],
        'namespace': [
            (r'(?=\()', Text, '#pop'),  # using (resource)
            ('(' + XPP_CHARS + r'|\.)+', Name.Namespace, '#pop'),
        ]
    }

View File

@@ -0,0 +1,970 @@
"""
pygments.lexers.dsls
~~~~~~~~~~~~~~~~~~~~
Lexers for various domain-specific languages.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import ExtendedRegexLexer, RegexLexer, bygroups, words, \
include, default, this, using, combined
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['ProtoBufLexer', 'ZeekLexer', 'PuppetLexer', 'RslLexer',
'MscgenLexer', 'VGLLexer', 'AlloyLexer', 'PanLexer',
'CrmshLexer', 'ThriftLexer', 'FlatlineLexer', 'SnowballLexer']
class ProtoBufLexer(RegexLexer):
    """
    Lexer for Protocol Buffer definition files (``.proto``).
    """

    name = 'Protocol Buffer'
    url = 'https://developers.google.com/protocol-buffers/'
    aliases = ['protobuf', 'proto']
    filenames = ['*.proto']
    version_added = '1.4'

    tokens = {
        'root': [
            (r'[ \t]+', Whitespace),
            (r'[,;{}\[\]()<>]', Punctuation),
            # '//' and '/* */' comments; both tolerate '\'-newline splits.
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
            (words((
                'import', 'option', 'optional', 'required', 'repeated',
                'reserved', 'default', 'packed', 'ctype', 'extensions', 'to',
                'max', 'rpc', 'returns', 'oneof', 'syntax'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            (words((
                'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
                'fixed32', 'fixed64', 'sfixed32', 'sfixed64',
                'float', 'double', 'bool', 'string', 'bytes'), suffix=r'\b'),
             Keyword.Type),
            (r'(true|false)\b', Keyword.Constant),
            # Declarations push a one-shot state that consumes the name.
            (r'(package)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'package'),
            (r'(message|extend)(\s+)',
             bygroups(Keyword.Declaration, Whitespace), 'message'),
            (r'(enum|group|service)(\s+)',
             bygroups(Keyword.Declaration, Whitespace), 'type'),
            (r'\".*?\"', String),
            (r'\'.*?\'', String),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'(\-?(inf|nan))\b', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            (r'[+-=]', Operator),
            # Field/option name followed by '='.
            (r'([a-zA-Z_][\w.]*)([ \t]*)(=)',
             bygroups(Name.Attribute, Whitespace, Operator)),
            (r'[a-zA-Z_][\w.]*', Name),
        ],
        # Each state below reads one identifier, then pops back to 'root'
        # (the default() fallback pops even when no identifier follows).
        'package': [
            (r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
            default('#pop'),
        ],
        'message': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            default('#pop'),
        ],
        'type': [
            (r'[a-zA-Z_]\w*', Name, '#pop'),
            default('#pop'),
        ],
    }
class ThriftLexer(RegexLexer):
    """
    For Thrift interface definitions (``.thrift`` IDL files).
    """

    name = 'Thrift'
    url = 'https://thrift.apache.org/'
    aliases = ['thrift']
    filenames = ['*.thrift']
    mimetypes = ['application/x-thrift']
    version_added = '2.1'

    tokens = {
        'root': [
            include('whitespace'),
            include('comments'),
            # String literals: escape handling is combined with the
            # quote-specific state so escapes win over plain content.
            (r'"', String.Double, combined('stringescape', 'dqs')),
            (r'\'', String.Single, combined('stringescape', 'sqs')),
            (r'(namespace)(\s+)',
             bygroups(Keyword.Namespace, Whitespace), 'namespace'),
            (r'(enum|union|struct|service|exception)(\s+)',
             bygroups(Keyword.Declaration, Whitespace), 'class'),
            (r'((?:(?:[^\W\d]|\$)[\w.\[\]$<>]*\s+)+?)'  # return arguments
             r'((?:[^\W\d]|\$)[\w$]*)'                  # method name
             r'(\s*)(\()',                              # signature start
             bygroups(using(this), Name.Function, Whitespace, Operator)),
            include('keywords'),
            include('numbers'),
            (r'[&=]', Operator),
            (r'[:;,{}()<>\[\]]', Punctuation),
            (r'[a-zA-Z_](\.\w|\w)*', Name),
        ],
        'whitespace': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
        ],
        'comments': [
            # Thrift accepts '#', '//' and '/* */' comment styles.
            (r'#.*$', Comment),
            (r'//.*?\n', Comment),
            (r'/\*[\w\W]*?\*/', Comment.Multiline),
        ],
        'stringescape': [
            (r'\\([\\nrt"\'])', String.Escape),
        ],
        'dqs': [
            (r'"', String.Double, '#pop'),
            (r'[^\\"\n]+', String.Double),
        ],
        'sqs': [
            (r"'", String.Single, '#pop'),
            (r'[^\\\'\n]+', String.Single),
        ],
        'namespace': [
            # Accepts '*' (wildcard namespace) as well as identifiers.
            (r'[a-z*](\.\w|\w)*', Name.Namespace, '#pop'),
            default('#pop'),
        ],
        'class': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            default('#pop'),
        ],
        'keywords': [
            (r'(async|oneway|extends|throws|required|optional)\b', Keyword),
            (r'(true|false)\b', Keyword.Constant),
            (r'(const|typedef)\b', Keyword.Declaration),
            (words((
                'cpp_namespace', 'cpp_include', 'cpp_type', 'java_package',
                'cocoa_prefix', 'csharp_namespace', 'delphi_namespace',
                'php_namespace', 'py_module', 'perl_package',
                'ruby_namespace', 'smalltalk_category', 'smalltalk_prefix',
                'xsd_all', 'xsd_optional', 'xsd_nillable', 'xsd_namespace',
                'xsd_attrs', 'include'), suffix=r'\b'),
             Keyword.Namespace),
            (words((
                'void', 'bool', 'byte', 'i16', 'i32', 'i64', 'double',
                'string', 'binary', 'map', 'list', 'set', 'slist',
                'senum'), suffix=r'\b'),
             Keyword.Type),
            # Identifiers reserved by Thrift because they clash with
            # keywords of its many target languages.
            (words((
                'BEGIN', 'END', '__CLASS__', '__DIR__', '__FILE__',
                '__FUNCTION__', '__LINE__', '__METHOD__', '__NAMESPACE__',
                'abstract', 'alias', 'and', 'args', 'as', 'assert', 'begin',
                'break', 'case', 'catch', 'class', 'clone', 'continue',
                'declare', 'def', 'default', 'del', 'delete', 'do', 'dynamic',
                'elif', 'else', 'elseif', 'elsif', 'end', 'enddeclare',
                'endfor', 'endforeach', 'endif', 'endswitch', 'endwhile',
                'ensure', 'except', 'exec', 'finally', 'float', 'for',
                'foreach', 'function', 'global', 'goto', 'if', 'implements',
                'import', 'in', 'inline', 'instanceof', 'interface', 'is',
                'lambda', 'module', 'native', 'new', 'next', 'nil', 'not',
                'or', 'pass', 'public', 'print', 'private', 'protected',
                'raise', 'redo', 'rescue', 'retry', 'register', 'return',
                'self', 'sizeof', 'static', 'super', 'switch', 'synchronized',
                'then', 'this', 'throw', 'transient', 'try', 'undef',
                'unless', 'unsigned', 'until', 'use', 'var', 'virtual',
                'volatile', 'when', 'while', 'with', 'xor', 'yield'),
                prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
        ],
        'numbers': [
            (r'[+-]?(\d+\.\d+([eE][+-]?\d+)?|\.?\d+[eE][+-]?\d+)', Number.Float),
            (r'[+-]?0x[0-9A-Fa-f]+', Number.Hex),
            (r'[+-]?[0-9]+', Number.Integer),
        ],
    }
class ZeekLexer(RegexLexer):
    """
    For Zeek scripts (formerly known as Bro).
    """

    name = 'Zeek'
    url = 'https://www.zeek.org/'
    aliases = ['zeek', 'bro']
    filenames = ['*.zeek', '*.bro']
    version_added = '2.5'

    # Reusable regex fragments for the literal rules below.
    _hex = r'[0-9a-fA-F]'
    _float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?'
    _h = r'[A-Za-z0-9][-A-Za-z0-9]*'

    tokens = {
        'root': [
            include('whitespace'),
            include('comments'),
            include('directives'),
            include('attributes'),
            include('types'),
            include('keywords'),
            include('literals'),
            include('operators'),
            include('punctuation'),
            # Possibly namespaced identifier immediately followed by '('
            # is highlighted as a function call.
            (r'((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)(?=\s*\()',
             Name.Function),
            include('identifiers'),
        ],
        'whitespace': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'(\\)(\n)', bygroups(Text, Whitespace)),
        ],
        'comments': [
            (r'#.*$', Comment),
        ],
        'directives': [
            # '@'-prefixed preprocessor-style directives.
            (r'@(load-plugin|load-sigs|load|unload)\b.*$', Comment.Preproc),
            (r'@(DEBUG|DIR|FILENAME|deprecated|if|ifdef|ifndef|else|endif)\b', Comment.Preproc),
            (r'(@prefixes)(\s*)((\+?=).*)$', bygroups(Comment.Preproc,
                                                      Whitespace, Comment.Preproc)),
        ],
        'attributes': [
            # '&'-prefixed attributes, e.g. '&redef', '&log'.
            (words(('redef', 'priority', 'log', 'optional', 'default', 'add_func',
                    'delete_func', 'expire_func', 'read_expire', 'write_expire',
                    'create_expire', 'synchronized', 'persistent', 'rotate_interval',
                    'rotate_size', 'encrypt', 'raw_output', 'mergeable', 'error_handler',
                    'type_column', 'deprecated'),
                   prefix=r'&', suffix=r'\b'),
             Keyword.Pseudo),
        ],
        'types': [
            (words(('any',
                    'enum', 'record', 'set', 'table', 'vector',
                    'function', 'hook', 'event',
                    'addr', 'bool', 'count', 'double', 'file', 'int', 'interval',
                    'pattern', 'port', 'string', 'subnet', 'time'),
                   suffix=r'\b'),
             Keyword.Type),
            # 'opaque of X' parameterized type.
            (r'(opaque)(\s+)(of)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)\b',
             bygroups(Keyword.Type, Whitespace, Operator.Word, Whitespace, Keyword.Type)),
            # 'type Name: record/enum' — new compound type declaration.
            (r'(type)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)(\s*)(:)(\s*)\b(record|enum)\b',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace, Operator, Whitespace, Keyword.Type)),
            # 'type Name: <other type>' — type alias.
            (r'(type)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)(\s*)(:)',
             bygroups(Keyword, Whitespace, Name, Whitespace, Operator)),
            # 'redef record/enum Name' — extension of an existing type.
            (r'(redef)(\s+)(record|enum)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)\b',
             bygroups(Keyword, Whitespace, Keyword.Type, Whitespace, Name.Class)),
        ],
        'keywords': [
            (words(('redef', 'export', 'if', 'else', 'for', 'while',
                    'return', 'break', 'next', 'continue', 'fallthrough',
                    'switch', 'default', 'case',
                    'add', 'delete',
                    'when', 'timeout', 'schedule'),
                   suffix=r'\b'),
             Keyword),
            (r'(print)\b', Keyword),
            (r'(global|local|const|option)\b', Keyword.Declaration),
            (r'(module)(\s+)(([A-Za-z_]\w*)(?:::([A-Za-z_]\w*))*)\b',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
        ],
        'literals': [
            (r'"', String, 'string'),
            # Not the greatest match for patterns, but generally helps
            # disambiguate between start of a pattern and just a division
            # operator.
            (r'/(?=.*/)', String.Regex, 'regex'),
            (r'(T|F)\b', Keyword.Constant),
            # Port
            (r'\d{1,5}/(udp|tcp|icmp|unknown)\b', Number),
            # IPv4 Address
            (r'(\d{1,3}.){3}(\d{1,3})\b', Number),
            # IPv6 Address
            (r'\[([0-9a-fA-F]{0,4}:){2,7}([0-9a-fA-F]{0,4})?((\d{1,3}.){3}(\d{1,3}))?\]', Number),
            # Numeric
            (r'0[xX]' + _hex + r'+\b', Number.Hex),
            (_float + r'\s*(day|hr|min|sec|msec|usec)s?\b', Number.Float),
            (_float + r'\b', Number.Float),
            (r'(\d+)\b', Number.Integer),
            # Hostnames
            (_h + r'(\.' + _h + r')+', String),
        ],
        'operators': [
            (r'[!%*/+<=>~|&^-]', Operator),
            (r'([-+=&|]{2}|[+=!><-]=)', Operator),
            (r'(in|as|is|of)\b', Operator.Word),
            (r'\??\$', Operator),
        ],
        'punctuation': [
            (r'[{}()\[\],;.]', Punctuation),
            # The "ternary if", which uses '?' and ':', could instead be
            # treated as an Operator, but colons are more frequently used to
            # separate field/identifier names from their types, so the (often)
            # less-prominent Punctuation is used even with '?' for consistency.
            (r'[?:]', Punctuation),
        ],
        'identifiers': [
            (r'([a-zA-Z_]\w*)(::)', bygroups(Name, Punctuation)),
            (r'[a-zA-Z_]\w*', Name)
        ],
        'string': [
            (r'\\.', String.Escape),
            # printf-style format specifier inside a string.
            (r'%-?[0-9]*(\.[0-9]+)?[DTd-gsx]', String.Escape),
            (r'"', String, '#pop'),
            (r'.', String),
        ],
        'regex': [
            (r'\\.', String.Escape),
            (r'/', String.Regex, '#pop'),
            (r'.', String.Regex),
        ],
    }


# Backwards-compatible alias kept from when the language was called Bro.
BroLexer = ZeekLexer
class PuppetLexer(RegexLexer):
    """
    For Puppet configuration DSL (``.pp`` manifests).
    """

    name = 'Puppet'
    url = 'https://puppet.com/'
    aliases = ['puppet']
    filenames = ['*.pp']
    version_added = '1.6'

    tokens = {
        'root': [
            include('comments'),
            include('keywords'),
            include('names'),
            include('numbers'),
            include('operators'),
            include('strings'),
            (r'[]{}:(),;[]', Punctuation),
            (r'\s+', Whitespace),
        ],
        'comments': [
            (r'(\s*)(#.*)$', bygroups(Whitespace, Comment)),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],
        'operators': [
            (r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator),
            (r'(in|and|or|not)\b', Operator.Word),
        ],
        'names': [
            (r'[a-zA-Z_]\w*', Name.Attribute),
            # Indexed variable access, e.g. $var[key].
            (r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation,
                                               String, Punctuation)),
            (r'\$\S+', Name.Variable),
        ],
        'numbers': [
            # Copypasta from the Python lexer
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
            (r'0[0-7]+j?', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+j?', Number.Integer)
        ],
        'keywords': [
            # Left out 'group' and 'require'
            # Since they're often used as attributes
            (words((
                'absent', 'alert', 'alias', 'audit', 'augeas', 'before', 'case',
                'check', 'class', 'computer', 'configured', 'contained',
                'create_resources', 'crit', 'cron', 'debug', 'default',
                'define', 'defined', 'directory', 'else', 'elsif', 'emerg',
                'err', 'exec', 'extlookup', 'fail', 'false', 'file',
                'filebucket', 'fqdn_rand', 'generate', 'host', 'if', 'import',
                'include', 'info', 'inherits', 'inline_template', 'installed',
                'interface', 'k5login', 'latest', 'link', 'loglevel',
                'macauthorization', 'mailalias', 'maillist', 'mcx', 'md5',
                'mount', 'mounted', 'nagios_command', 'nagios_contact',
                'nagios_contactgroup', 'nagios_host', 'nagios_hostdependency',
                'nagios_hostescalation', 'nagios_hostextinfo', 'nagios_hostgroup',
                'nagios_service', 'nagios_servicedependency', 'nagios_serviceescalation',
                'nagios_serviceextinfo', 'nagios_servicegroup', 'nagios_timeperiod',
                'node', 'noop', 'notice', 'notify', 'package', 'present', 'purged',
                'realize', 'regsubst', 'resources', 'role', 'router', 'running',
                'schedule', 'scheduled_task', 'search', 'selboolean', 'selmodule',
                'service', 'sha1', 'shellquote', 'split', 'sprintf',
                'ssh_authorized_key', 'sshkey', 'stage', 'stopped', 'subscribe',
                'tag', 'tagged', 'template', 'tidy', 'true', 'undef', 'unmounted',
                'user', 'versioncmp', 'vlan', 'warning', 'yumrepo', 'zfs', 'zone',
                'zpool'), prefix='(?i)', suffix=r'\b'),
             Keyword),
        ],
        'strings': [
            (r'"([^"])*"', String),
            (r"'(\\'|[^'])*'", String),
        ],
    }
class RslLexer(RegexLexer):
    """
    RSL is the formal specification
    language used in RAISE (Rigorous Approach to Industrial Software Engineering)
    method.
    """

    name = 'RSL'
    url = 'http://en.wikipedia.org/wiki/RAISE'
    aliases = ['rsl']
    filenames = ['*.rsl']
    mimetypes = ['text/rsl']
    version_added = '2.0'

    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            (words((
                'Bool', 'Char', 'Int', 'Nat', 'Real', 'Text', 'Unit', 'abs',
                'all', 'always', 'any', 'as', 'axiom', 'card', 'case', 'channel',
                'chaos', 'class', 'devt_relation', 'dom', 'elems', 'else', 'elif',
                'end', 'exists', 'extend', 'false', 'for', 'hd', 'hide', 'if',
                'in', 'is', 'inds', 'initialise', 'int', 'inter', 'isin', 'len',
                'let', 'local', 'ltl_assertion', 'object', 'of', 'out', 'post',
                'pre', 'read', 'real', 'rng', 'scheme', 'skip', 'stop', 'swap',
                'then', 'theory', 'test_case', 'tl', 'transition_system', 'true',
                'type', 'union', 'until', 'use', 'value', 'variable', 'while',
                'with', 'write', '~isin', '-inflist', '-infset', '-list',
                '-set'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            (r'(variable|value)\b', Keyword.Declaration),
            # RSL supports several comment syntaxes.
            (r'--.*?\n', Comment),
            (r'<:.*?:>', Comment),
            (r'\{!.*?!\}', Comment),
            (r'/\*.*?\*/', Comment),
            # 'name :' — declaration of a named item.
            (r'^([ \t]*)([\w]+)([ \t]*)(:[^:])', bygroups(Whitespace,
                                                          Name.Function, Whitespace, Name.Function)),
            # 'name(args) is/as' — function definition.
            (r'(^[ \t]*)([\w]+)([ \t]*)(\([\w\s,]*\))([ \t]*)(is|as)',
             bygroups(Whitespace, Name.Function, Whitespace, Text,
                      Whitespace, Keyword)),
            (r'\b[A-Z]\w*\b', Keyword.Type),
            (r'(true|false)\b', Keyword.Constant),
            (r'".*"', String),
            (r'\'.\'', String.Char),
            (r'(><|->|-m->|/\\|<=|<<=|<\.|\|\||\|\^\||-~->|-~m->|\\/|>=|>>|'
             r'\.>|\+\+|-\\|<->|=>|:-|~=|\*\*|<<|>>=|\+>|!!|\|=\||#)',
             Operator),
            (r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'\s+', Whitespace),
            (r'.', Text),
        ],
    }

    def analyse_text(text):
        """
        Check for the most common text in the beginning of a RSL file.
        """
        # Returns 1.0 on a match; otherwise falls through and implicitly
        # returns None (treated as "no confidence" by the guesser).
        if re.search(r'scheme\s*.*?=\s*class\s*type', text, re.I) is not None:
            return 1.0
class MscgenLexer(RegexLexer):
    """
    For Mscgen (message sequence chart) files.
    """

    name = 'Mscgen'
    url = 'http://www.mcternan.me.uk/mscgen/'
    aliases = ['mscgen', 'msc']
    filenames = ['*.msc']
    version_added = '1.6'

    # Entity name: bare word or double-quoted string with escaped quotes.
    _var = r'(\w+|"(?:\\"|[^"])*")'

    tokens = {
        'root': [
            (r'msc\b', Keyword.Type),
            # Options
            (r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS'
             r'|arcgradient|ARCGRADIENT)\b', Name.Property),
            # Operators
            (r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word),
            (r'(\.|-|\|){3}', Keyword),
            (r'(?:-|=|\.|:){2}'
             r'|<<=>>|<->|<=>|<<>>|<:>'
             r'|->|=>>|>>|=>|:>|-x|-X'
             r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator),
            # Names
            (r'\*', Name.Builtin),
            (_var, Name.Variable),
            # Other
            (r'\[', Punctuation, 'attrs'),
            (r'\{|\}|,|;', Punctuation),
            include('comments')
        ],
        'attrs': [
            # Attribute list inside '[...]': name = value pairs.
            (r'\]', Punctuation, '#pop'),
            (_var + r'(\s*)(=)(\s*)' + _var,
             bygroups(Name.Attribute, Whitespace, Operator, Whitespace,
                      String)),
            (r',', Punctuation),
            include('comments')
        ],
        'comments': [
            (r'(?://|#).*?\n', Comment.Single),
            (r'/\*(?:.|\n)*?\*/', Comment.Multiline),
            (r'[ \t\r\n]+', Whitespace)
        ]
    }
class VGLLexer(RegexLexer):
    """
    For SampleManager VGL source code.
    """

    name = 'VGL'
    url = 'http://www.thermoscientific.com/samplemanager'
    aliases = ['vgl']
    filenames = ['*.rpf']
    version_added = '1.6'

    # VGL is case-insensitive; comments may span lines.
    flags = re.MULTILINE | re.DOTALL | re.IGNORECASE

    tokens = {
        'root': [
            # '{ ... }' is the comment syntax.
            (r'\{[^}]*\}', Comment.Multiline),
            (r'declare', Keyword.Constant),
            # Keywords only when not followed by call/comparison syntax.
            (r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object'
             r'|create|on|line|with|global|routine|value|endroutine|constant'
             r'|global|set|join|library|compile_option|file|exists|create|copy'
             r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])',
             Keyword),
            (r'(true|false|null|empty|error|locked)', Keyword.Constant),
            (r'[~^*#!%&\[\]()<>|+=:;,./?-]', Operator),
            (r'"[^"]*"', String),
            # Member access: '.attribute'.
            (r'(\.)([a-z_$][\w$]*)', bygroups(Operator, Name.Attribute)),
            (r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number),
            (r'[a-z_$][\w$]*', Name),
            (r'[\r\n]+', Whitespace),
            (r'\s+', Whitespace)
        ]
    }
class AlloyLexer(RegexLexer):
    """
    For Alloy source code.
    """

    name = 'Alloy'
    url = 'http://alloy.mit.edu'
    aliases = ['alloy']
    filenames = ['*.als']
    mimetypes = ['text/x-alloy']
    version_added = '2.0'

    flags = re.MULTILINE | re.DOTALL

    # Identifiers may be followed by primes written as double quotes.
    iden_rex = r'[a-zA-Z_][\w]*"*'
    string_rex = r'"\b(\\\\|\\[^\\]|[^"\\])*"'
    text_tuple = (r'[^\S\n]+', Whitespace)

    tokens = {
        # Signature header: names (possibly a comma list), optional
        # 'extends', up to the opening brace.
        'sig': [
            (r'(extends)\b', Keyword, '#pop'),
            (iden_rex, Name),
            text_tuple,
            (r',', Punctuation),
            (r'\{', Operator, '#pop'),
        ],
        'module': [
            text_tuple,
            (iden_rex, Name, '#pop'),
        ],
        'fun': [
            text_tuple,
            (r'\{', Operator, '#pop'),
            (iden_rex, Name, '#pop'),
        ],
        'fact': [
            # Facts may be anonymous, named, or labeled with a string.
            include('fun'),
            (string_rex, String, '#pop'),
        ],
        'root': [
            (r'--.*?$', Comment.Single),
            (r'//.*?$', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            text_tuple,
            (r'(module|open)(\s+)', bygroups(Keyword.Namespace, Whitespace),
             'module'),
            (r'(sig|enum)(\s+)', bygroups(Keyword.Declaration, Whitespace), 'sig'),
            (r'(iden|univ|none)\b', Keyword.Constant),
            (r'(int|Int)\b', Keyword.Type),
            (r'(var|this|abstract|extends|set|seq|one|lone|let)\b', Keyword),
            (r'(all|some|no|sum|disj|when|else)\b', Keyword),
            (r'(run|check|for|but|exactly|expect|as|steps)\b', Keyword),
            (r'(always|after|eventually|until|release)\b', Keyword),  # future time operators
            (r'(historically|before|once|since|triggered)\b', Keyword),  # past time operators
            (r'(and|or|implies|iff|in)\b', Operator.Word),
            (r'(fun|pred|assert)(\s+)', bygroups(Keyword, Whitespace), 'fun'),
            (r'(fact)(\s+)', bygroups(Keyword, Whitespace), 'fact'),
            (r'!|#|&&|\+\+|<<|>>|>=|<=>|<=|\.\.|\.|->', Operator),
            (r'[-+/*%=<>&!^|~{}\[\]().\';]', Operator),
            (iden_rex, Name),
            (r'[:,]', Punctuation),
            (r'[0-9]+', Number.Integer),
            (string_rex, String),
            (r'\n', Whitespace),
        ]
    }
class PanLexer(RegexLexer):
    """
    Lexer for pan source files.

    Based on tcsh lexer.
    """

    name = 'Pan'
    url = 'https://github.com/quattor/pan/'
    aliases = ['pan']
    filenames = ['*.pan']
    version_added = '2.0'

    tokens = {
        'root': [
            include('basic'),
            # Parenthesised and braced sub-expressions get their own
            # states so closing delimiters can pop correctly.
            (r'\(', Keyword, 'paren'),
            (r'\{', Keyword, 'curly'),
            include('data'),
        ],
        'basic': [
            (words((
                'if', 'for', 'with', 'else', 'type', 'bind', 'while', 'valid', 'final',
                'prefix', 'unique', 'object', 'foreach', 'include', 'template',
                'function', 'variable', 'structure', 'extensible', 'declaration'),
                prefix=r'\b', suffix=r'\b'),
             Keyword),
            (words((
                'file_contents', 'format', 'index', 'length', 'match', 'matches',
                'replace', 'splice', 'split', 'substr', 'to_lowercase', 'to_uppercase',
                'debug', 'error', 'traceback', 'deprecated', 'base64_decode',
                'base64_encode', 'digest', 'escape', 'unescape', 'append', 'create',
                'first', 'nlist', 'key', 'list', 'merge', 'next', 'prepend', 'is_boolean',
                'is_defined', 'is_double', 'is_list', 'is_long', 'is_nlist', 'is_null',
                'is_number', 'is_property', 'is_resource', 'is_string', 'to_boolean',
                'to_double', 'to_long', 'to_string', 'clone', 'delete', 'exists',
                'path_exists', 'if_exists', 'return', 'value'),
                prefix=r'\b', suffix=r'\b'),
             Name.Builtin),
            (r'#.*', Comment),
            (r'\\[\w\W]', String.Escape),
            # 'name =' assignment.
            (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Whitespace, Operator)),
            (r'[\[\]{}()=]+', Operator),
            # Heredoc-style string ('<< WORD ... WORD').
            (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
            (r';', Punctuation),
        ],
        'data': [
            (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
            (r'\s+', Whitespace),
            (r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
            (r'\d+(?= |\Z)', Number),
        ],
        'curly': [
            (r'\}', Keyword, '#pop'),
            (r':-', Keyword),
            (r'\w+', Name.Variable),
            (r'[^}:"\'`$]+', Punctuation),
            (r':', Punctuation),
            include('root'),
        ],
        'paren': [
            (r'\)', Keyword, '#pop'),
            include('root'),
        ],
    }
class CrmshLexer(RegexLexer):
    """
    Lexer for crmsh configuration files for Pacemaker clusters.
    """

    name = 'Crmsh'
    url = 'http://crmsh.github.io/'
    aliases = ['crmsh', 'pcmk']
    filenames = ['*.crmsh', '*.pcmk']
    mimetypes = []
    version_added = '2.1'

    # Top-level configuration elements.
    elem = words((
        'node', 'primitive', 'group', 'clone', 'ms', 'location',
        'colocation', 'order', 'fencing_topology', 'rsc_ticket',
        'rsc_template', 'property', 'rsc_defaults',
        'op_defaults', 'acl_target', 'acl_group', 'user', 'role',
        'tag'), suffix=r'(?![\w#$-])')
    # Sub-element keywords that appear inside element definitions.
    sub = words((
        'params', 'meta', 'operations', 'op', 'rule',
        'attributes', 'utilization'), suffix=r'(?![\w#$-])')
    acl = words(('read', 'write', 'deny'), suffix=r'(?![\w#$-])')
    bin_rel = words(('and', 'or'), suffix=r'(?![\w#$-])')
    un_ops = words(('defined', 'not_defined'), suffix=r'(?![\w#$-])')
    date_exp = words(('in_range', 'date', 'spec', 'in'), suffix=r'(?![\w#$-])')
    acl_mod = (r'(?:tag|ref|reference|attribute|type|xpath)')
    bin_ops = (r'(?:lt|gt|lte|gte|eq|ne)')
    val_qual = (r'(?:string|version|number)')
    rsc_role_action = (r'(?:Master|Started|Slave|Stopped|'
                       r'start|promote|demote|stop)')

    tokens = {
        'root': [
            (r'^(#.*)(\n)?', bygroups(Comment, Whitespace)),
            # attr=value (nvpair)
            (r'([\w#$-]+)(=)("(?:""|[^"])*"|\S+)',
             bygroups(Name.Attribute, Punctuation, String)),
            # need this construct, otherwise numeric node ids
            # are matched as scores
            # elem id:
            (r'(node)(\s+)([\w#$-]+)(:)',
             bygroups(Keyword, Whitespace, Name, Punctuation)),
            # scores
            (r'([+-]?([0-9]+|inf)):', Number),
            # keywords (elements and other)
            (elem, Keyword),
            (sub, Keyword),
            (acl, Keyword),
            # binary operators
            (rf'(?:{val_qual}:)?({bin_ops})(?![\w#$-])', Operator.Word),
            # other operators
            (bin_rel, Operator.Word),
            (un_ops, Operator.Word),
            (date_exp, Operator.Word),
            # builtin attributes (e.g. #uname)
            (r'#[a-z]+(?![\w#$-])', Name.Builtin),
            # acl_mod:blah
            (rf'({acl_mod})(:)("(?:""|[^"])*"|\S+)',
             bygroups(Keyword, Punctuation, Name)),
            # rsc_id[:(role|action)]
            # NB: this matches all other identifiers
            (rf'([\w#$-]+)(?:(:)({rsc_role_action}))?(?![\w#$-])',
             bygroups(Name, Punctuation, Operator.Word)),
            # punctuation
            (r'(\\(?=\n)|[\[\](){}/:@])', Punctuation),
            (r'\s+|\n', Whitespace),
        ],
    }
class FlatlineLexer(RegexLexer):
    """
    Lexer for Flatline expressions (a Lisp-like dataset DSL from BigML).
    """

    name = 'Flatline'
    url = 'https://github.com/bigmlcom/flatline'
    aliases = ['flatline']
    filenames = []
    mimetypes = ['text/x-flatline']
    version_added = '2.2'

    special_forms = ('let',)

    builtins = (
        "!=", "*", "+", "-", "<", "<=", "=", ">", ">=", "abs", "acos", "all",
        "all-but", "all-with-defaults", "all-with-numeric-default", "and",
        "asin", "atan", "avg", "avg-window", "bin-center", "bin-count", "call",
        "category-count", "ceil", "cond", "cond-window", "cons", "cos", "cosh",
        "count", "diff-window", "div", "ensure-value", "ensure-weighted-value",
        "epoch", "epoch-day", "epoch-fields", "epoch-hour", "epoch-millisecond",
        "epoch-minute", "epoch-month", "epoch-second", "epoch-weekday",
        "epoch-year", "exp", "f", "field", "field-prop", "fields", "filter",
        "first", "floor", "head", "if", "in", "integer", "language", "length",
        "levenshtein", "linear-regression", "list", "ln", "log", "log10", "map",
        "matches", "matches?", "max", "maximum", "md5", "mean", "median", "min",
        "minimum", "missing", "missing-count", "missing?", "missing_count",
        "mod", "mode", "normalize", "not", "nth", "occurrences", "or",
        "percentile", "percentile-label", "population", "population-fraction",
        "pow", "preferred", "preferred?", "quantile-label", "rand", "rand-int",
        "random-value", "re-quote", "real", "replace", "replace-first", "rest",
        "round", "row-number", "segment-label", "sha1", "sha256", "sin", "sinh",
        "sqrt", "square", "standard-deviation", "standard_deviation", "str",
        "subs", "sum", "sum-squares", "sum-window", "sum_squares", "summary",
        "summary-no", "summary-str", "tail", "tan", "tanh", "to-degrees",
        "to-radians", "variance", "vectorize", "weighted-random-value", "window",
        "winnow", "within-percentiles?", "z-score",
    )

    # Lisp-style identifiers: almost anything except '#' as first char.
    valid_name = r'(?!#)[\w!$%*+<=>?/.#-]+'

    tokens = {
        'root': [
            # whitespaces - usually not relevant
            (r'[,]+', Text),
            (r'\s+', Whitespace),
            # numbers
            (r'-?\d+\.\d+', Number.Float),
            (r'-?\d+', Number.Integer),
            (r'0x-?[a-f\d]+', Number.Hex),
            # strings, symbols and characters
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            (r"\\(.|[a-z]+)", String.Char),
            # expression template placeholder
            (r'_', String.Symbol),
            # highlight the special forms
            (words(special_forms, suffix=' '), Keyword),
            # highlight the builtins
            (words(builtins, suffix=' '), Name.Builtin),
            # the remaining functions
            (r'(?<=\()' + valid_name, Name.Function),
            # find the remaining variables
            (valid_name, Name.Variable),
            # parentheses
            (r'(\(|\))', Punctuation),
        ],
    }
class SnowballLexer(ExtendedRegexLexer):
    """
    Lexer for Snowball source code.

    Snowball programs can redefine their own string-escape delimiters at
    runtime via the ``stringescapes`` directive, so this lexer keeps the
    current opening/closing escape characters as per-instance state
    (``_start``/``_end``) and rebuilds its string scanners from them while
    lexing.
    """
    name = 'Snowball'
    url = 'https://snowballstem.org/'
    aliases = ['snowball']
    filenames = ['*.sbl']
    version_added = '2.2'
    # Snowball's whitespace characters, used to build several rules below.
    _ws = r'\n\r\t '
    def __init__(self, **options):
        # The delimiters must exist before any tokenizing starts.
        self._reset_stringescapes()
        ExtendedRegexLexer.__init__(self, **options)
    def _reset_stringescapes(self):
        # Both escape delimiters default to a single quote until a
        # `stringescapes` directive overrides them.
        self._start = "'"
        self._end = "'"
    def _string(do_string_first):
        # Factory for the callbacks used by the 'string' and 'escape'
        # states; `do_string_first` selects which sub-scanner runs first
        # when the callback resumes inside a literal.
        def callback(lexer, match, ctx):
            s = match.start()
            text = match.group()
            # Scanners built from the *current* delimiters: `string` eats up
            # to the next escape-start char, `escape` up to the escape-end.
            string = re.compile(rf'([^{re.escape(lexer._start)}]*)(.)').match
            escape = re.compile(rf'([^{re.escape(lexer._end)}]*)(.)').match
            pos = 0
            do_string = do_string_first
            while pos < len(text):
                if do_string:
                    match = string(text, pos)
                    yield s + match.start(1), String.Single, match.group(1)
                    if match.group(2) == "'":
                        # Closing quote of the literal: leave the state.
                        yield s + match.start(2), String.Single, match.group(2)
                        ctx.stack.pop()
                        break
                    yield s + match.start(2), String.Escape, match.group(2)
                    pos = match.end()
                match = escape(text, pos)
                yield s + match.start(), String.Escape, match.group()
                if match.group(2) != lexer._end:
                    # Escape not closed on this line: continue in 'escape'.
                    ctx.stack[-1] = 'escape'
                    break
                pos = match.end()
                do_string = True
            ctx.pos = s + match.end()
        return callback
    def _stringescapes(lexer, match, ctx):
        # Record the new delimiters declared by `stringescapes X Y`, then
        # emit the directive's tokens as usual.
        lexer._start = match.group(3)
        lexer._end = match.group(5)
        return bygroups(Keyword.Reserved, Whitespace, String.Escape, Whitespace,
                        String.Escape)(lexer, match, ctx)
    tokens = {
        'root': [
            (r'len\b', Name.Builtin),
            (r'lenof\b', Operator.Word),
            include('root1'),
        ],
        'root1': [
            (rf'[{_ws}]+', Whitespace),
            (r'\d+', Number.Integer),
            (r"'", String.Single, 'string'),
            (r'[()]', Punctuation),
            (r'/\*[\w\W]*?\*/', Comment.Multiline),
            (r'//.*', Comment.Single),
            (r'[!*+\-/<=>]=|[-=]>|<[+-]|[$*+\-/<=>?\[\]]', Operator),
            (words(('as', 'get', 'hex', 'among', 'define', 'decimal',
                    'backwardmode'), suffix=r'\b'),
             Keyword.Reserved),
            (words(('strings', 'booleans', 'integers', 'routines', 'externals',
                    'groupings'), suffix=r'\b'),
             Keyword.Reserved, 'declaration'),
            (words(('do', 'or', 'and', 'for', 'hop', 'non', 'not', 'set', 'try',
                    'fail', 'goto', 'loop', 'next', 'test', 'true',
                    'false', 'unset', 'atmark', 'attach', 'delete', 'gopast',
                    'insert', 'repeat', 'sizeof', 'tomark', 'atleast',
                    'atlimit', 'reverse', 'setmark', 'tolimit', 'setlimit',
                    'backwards', 'substring'), suffix=r'\b'),
             Operator.Word),
            (words(('size', 'limit', 'cursor', 'maxint', 'minint'),
                   suffix=r'\b'),
             Name.Builtin),
            (rf'(stringdef\b)([{_ws}]*)([^{_ws}]+)',
             bygroups(Keyword.Reserved, Whitespace, String.Escape)),
            (rf'(stringescapes\b)([{_ws}]*)(.)([{_ws}]*)(.)',
             _stringescapes),
            (r'[A-Za-z]\w*', Name),
        ],
        'declaration': [
            (r'\)', Punctuation, '#pop'),
            (words(('len', 'lenof'), suffix=r'\b'), Name,
             ('root1', 'declaration')),
            include('root1'),
        ],
        'string': [
            # Whole span up to a quote; the callback re-scans it for the
            # current escape delimiters.
            (r"[^']*'", _string(True)),
        ],
        'escape': [
            (r"[^']*'", _string(False)),
        ],
    }
    def get_tokens_unprocessed(self, text=None, context=None):
        # Escape delimiters are per-document state; reset for each input.
        self._reset_stringescapes()
        return ExtendedRegexLexer.get_tokens_unprocessed(self, text, context)

View File

@@ -0,0 +1,279 @@
"""
pygments.lexers.dylan
~~~~~~~~~~~~~~~~~~~~~
Lexers for the Dylan language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, \
default, line_re
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Literal, Whitespace
__all__ = ['DylanLexer', 'DylanConsoleLexer', 'DylanLidLexer']
class DylanLexer(RegexLexer):
    """
    For the Dylan language.

    Words are first tokenized generically as ``Name`` and then re-flagged
    as builtins/keywords/functions/operators in
    :meth:`get_tokens_unprocessed`, since Dylan is case-insensitive and the
    word lists below are large.
    """
    name = 'Dylan'
    url = 'http://www.opendylan.org/'
    aliases = ['dylan']
    filenames = ['*.dylan', '*.dyl', '*.intr']
    mimetypes = ['text/x-dylan']
    version_added = '0.7'
    flags = re.IGNORECASE
    # Adjectives/reserved words used in definition forms.
    builtins = {
        'subclass', 'abstract', 'block', 'concrete', 'constant', 'class',
        'compiler-open', 'compiler-sideways', 'domain', 'dynamic',
        'each-subclass', 'exception', 'exclude', 'function', 'generic',
        'handler', 'inherited', 'inline', 'inline-only', 'instance',
        'interface', 'import', 'keyword', 'library', 'macro', 'method',
        'module', 'open', 'primary', 'required', 'sealed', 'sideways',
        'singleton', 'slot', 'thread', 'variable', 'virtual'}
    # Statement/control keywords.
    keywords = {
        'above', 'afterwards', 'begin', 'below', 'by', 'case', 'cleanup',
        'create', 'define', 'else', 'elseif', 'end', 'export', 'finally',
        'for', 'from', 'if', 'in', 'let', 'local', 'otherwise', 'rename',
        'select', 'signal', 'then', 'to', 'unless', 'until', 'use', 'when',
        'while'}
    # Operator spellings that lex as names and get re-flagged below.
    operators = {
        '~', '+', '-', '*', '|', '^', '=', '==', '~=', '~==', '<', '<=',
        '>', '>=', '&', '|'}
    # Standard Dylan library functions.
    functions = {
        'abort', 'abs', 'add', 'add!', 'add-method', 'add-new', 'add-new!',
        'all-superclasses', 'always', 'any?', 'applicable-method?', 'apply',
        'aref', 'aref-setter', 'as', 'as-lowercase', 'as-lowercase!',
        'as-uppercase', 'as-uppercase!', 'ash', 'backward-iteration-protocol',
        'break', 'ceiling', 'ceiling/', 'cerror', 'check-type', 'choose',
        'choose-by', 'complement', 'compose', 'concatenate', 'concatenate-as',
        'condition-format-arguments', 'condition-format-string', 'conjoin',
        'copy-sequence', 'curry', 'default-handler', 'dimension', 'dimensions',
        'direct-subclasses', 'direct-superclasses', 'disjoin', 'do',
        'do-handlers', 'element', 'element-setter', 'empty?', 'error', 'even?',
        'every?', 'false-or', 'fill!', 'find-key', 'find-method', 'first',
        'first-setter', 'floor', 'floor/', 'forward-iteration-protocol',
        'function-arguments', 'function-return-values',
        'function-specializers', 'gcd', 'generic-function-mandatory-keywords',
        'generic-function-methods', 'head', 'head-setter', 'identity',
        'initialize', 'instance?', 'integral?', 'intersection',
        'key-sequence', 'key-test', 'last', 'last-setter', 'lcm', 'limited',
        'list', 'logand', 'logbit?', 'logior', 'lognot', 'logxor', 'make',
        'map', 'map-as', 'map-into', 'max', 'member?', 'merge-hash-codes',
        'min', 'modulo', 'negative', 'negative?', 'next-method',
        'object-class', 'object-hash', 'odd?', 'one-of', 'pair', 'pop',
        'pop-last', 'positive?', 'push', 'push-last', 'range', 'rank',
        'rcurry', 'reduce', 'reduce1', 'remainder', 'remove', 'remove!',
        'remove-duplicates', 'remove-duplicates!', 'remove-key!',
        'remove-method', 'replace-elements!', 'replace-subsequence!',
        'restart-query', 'return-allowed?', 'return-description',
        'return-query', 'reverse', 'reverse!', 'round', 'round/',
        'row-major-index', 'second', 'second-setter', 'shallow-copy',
        'signal', 'singleton', 'size', 'size-setter', 'slot-initialized?',
        'sort', 'sort!', 'sorted-applicable-methods', 'subsequence-position',
        'subtype?', 'table-protocol', 'tail', 'tail-setter', 'third',
        'third-setter', 'truncate', 'truncate/', 'type-error-expected-type',
        'type-error-value', 'type-for-copy', 'type-union', 'union', 'values',
        'vector', 'zero?'}
    # Dylan identifiers may start with an escaping backslash and may
    # contain many operator-like characters.
    valid_name = '\\\\?[\\w!&*<>|^$%@\\-+~?/=]+'
    def get_tokens_unprocessed(self, text):
        """Re-flag generic ``Name`` tokens against the word sets above."""
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                # Case-insensitive lookup; the sets are stored lowercase.
                lowercase_value = value.lower()
                if lowercase_value in self.builtins:
                    yield index, Name.Builtin, value
                    continue
                if lowercase_value in self.keywords:
                    yield index, Keyword, value
                    continue
                if lowercase_value in self.functions:
                    yield index, Name.Builtin, value
                    continue
                if lowercase_value in self.operators:
                    yield index, Operator, value
                    continue
            yield index, token, value
    tokens = {
        'root': [
            # Whitespace
            (r'\s+', Whitespace),
            # single line comment
            (r'//.*?\n', Comment.Single),
            # lid header (Dylan files may start with interchange-format
            # headers; anything else falls through to 'code')
            (r'([a-z0-9-]+)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
             bygroups(Name.Attribute, Operator, Whitespace, String)),
            default('code') # no header match, switch to code
        ],
        'code': [
            # Whitespace
            (r'\s+', Whitespace),
            # single line comment
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
            # multi-line comment
            (r'/\*', Comment.Multiline, 'comment'),
            # strings and characters
            (r'"', String, 'string'),
            (r"'(\\.|\\[0-7]{1,3}|\\x[a-f0-9]{1,2}|[^\\\'\n])'", String.Char),
            # binary integer
            (r'#b[01]+', Number.Bin),
            # octal integer
            (r'#o[0-7]+', Number.Oct),
            # floating point
            (r'[-+]?(\d*\.\d+(e[-+]?\d+)?|\d+(\.\d*)?e[-+]?\d+)', Number.Float),
            # decimal integer
            (r'[-+]?\d+', Number.Integer),
            # hex integer
            (r'#x[0-9a-f]+', Number.Hex),
            # Macro parameters
            (r'(\?' + valid_name + ')(:)'
             r'(token|name|variable|expression|body|case-body|\*)',
             bygroups(Name.Tag, Operator, Name.Builtin)),
            (r'(\?)(:)(token|name|variable|expression|body|case-body|\*)',
             bygroups(Name.Tag, Operator, Name.Builtin)),
            (r'\?' + valid_name, Name.Tag),
            # Punctuation
            (r'(=>|::|#\(|#\[|##|\?\?|\?=|\?|[(){}\[\],.;])', Punctuation),
            # Most operators are picked up as names and then re-flagged.
            # This one isn't valid in a name though, so we pick it up now.
            (r':=', Operator),
            # Pick up #t / #f before we match other stuff with #.
            (r'#[tf]', Literal),
            # #"foo" style keywords
            (r'#"', String.Symbol, 'keyword'),
            # #rest, #key, #all-keys, etc.
            (r'#[a-z0-9-]+', Keyword),
            # required-init-keyword: style keywords.
            (valid_name + ':', Keyword),
            # class names
            ('<' + valid_name + '>', Name.Class),
            # define variable forms.
            (r'\*' + valid_name + r'\*', Name.Variable.Global),
            # define constant forms.
            (r'\$' + valid_name, Name.Constant),
            # everything else. We re-flag some of these in the method above.
            (valid_name, Name),
        ],
        'comment': [
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        'keyword': [
            (r'"', String.Symbol, '#pop'),
            (r'[^\\"]+', String.Symbol), # all other characters
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-f0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ]
    }
class DylanLidLexer(RegexLexer):
    """
    For Dylan LID (Library Interchange Definition) files.

    LID files are plain ``key: value`` headers (with folded continuation
    lines) plus ``//`` comments, so three rules suffice.
    """
    name = 'DylanLID'
    aliases = ['dylan-lid', 'lid']
    filenames = ['*.lid', '*.hdp']
    mimetypes = ['text/x-dylan-lid']
    url = 'http://www.opendylan.org/'
    version_added = '1.6'
    flags = re.IGNORECASE
    tokens = {
        'root': [
            # Whitespace
            (r'\s+', Whitespace),
            # single line comment
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
            # lid header; the value may continue on indented lines
            (r'(.*?)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
             bygroups(Name.Attribute, Operator, Whitespace, String)),
        ]
    }
class DylanConsoleLexer(Lexer):
    """
    For Dylan interactive console output.

    This is based on a copy of the ``RubyConsoleLexer``: prompt-prefixed
    lines are buffered and lexed as Dylan code in one batch, while all
    other lines are emitted as generic output.
    """
    name = 'Dylan session'
    aliases = ['dylan-console', 'dylan-repl']
    filenames = ['*.dylan-console']
    mimetypes = ['text/x-dylan-console']
    url = 'http://www.opendylan.org/'
    version_added = '1.6'
    _example = 'dylan-console/console.dylan-console'
    _prompt_re = re.compile(r'\?| ')
    def get_tokens_unprocessed(self, text):
        code_lexer = DylanLexer(**self.options)
        # Code accumulated from prompt lines, plus the prompt tokens that
        # must be re-inserted at the recorded offsets when it is flushed.
        pending = ''
        prompt_runs = []
        for line_match in line_re.finditer(text):
            line = line_match.group()
            prompt = self._prompt_re.match(line)
            if prompt is None:
                # Plain output: flush any buffered code first so token
                # order matches the original document.
                if pending:
                    yield from do_insertions(
                        prompt_runs,
                        code_lexer.get_tokens_unprocessed(pending))
                    pending = ''
                    prompt_runs = []
                yield line_match.start(), Generic.Output, line
            else:
                cut = prompt.end()
                prompt_runs.append((len(pending),
                                    [(0, Generic.Prompt, line[:cut])]))
                pending += line[cut:]
        # Flush a trailing code block that was not followed by output.
        if pending:
            yield from do_insertions(
                prompt_runs,
                code_lexer.get_tokens_unprocessed(pending))

View File

@@ -0,0 +1,144 @@
"""
pygments.lexers.ecl
~~~~~~~~~~~~~~~~~~~
Lexers for the ECL language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, words
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['ECLLexer']
class ECLLexer(RegexLexer):
    """
    Lexer for the declarative big-data ECL language.
    """
    name = 'ECL'
    url = 'https://hpccsystems.com/training/documentation/ecl-language-reference/html'
    aliases = ['ecl']
    filenames = ['*.ecl']
    mimetypes = ['application/x-ecl']
    version_added = '1.5'
    # ECL is case-insensitive; the word lists below are written uppercase.
    flags = re.IGNORECASE | re.MULTILINE
    tokens = {
        'root': [
            include('whitespace'),
            include('statements'),
        ],
        'whitespace': [
            (r'\s+', Whitespace),
            (r'\/\/.*', Comment.Single),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
        ],
        'statements': [
            # Order matters: types/keywords/functions before generic names.
            include('types'),
            include('keywords'),
            include('functions'),
            include('hash'),
            (r'"', String, 'string'),
            (r'\'', String, 'string'),
            (r'(\d+\.\d*|\.\d+|\d+)e[+-]?\d+[lu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+f)f?', Number.Float),
            (r'0x[0-9a-f]+[lu]*', Number.Hex),
            (r'0[0-7]+[lu]*', Number.Oct),
            (r'\d+[lu]*', Number.Integer),
            (r'[~!%^&*+=|?:<>/-]+', Operator),
            (r'[{}()\[\],.;]', Punctuation),
            (r'[a-z_]\w*', Name),
        ],
        'hash': [
            # #-directives (e.g. #OPTION) at line start.
            (r'^#.*$', Comment.Preproc),
        ],
        'types': [
            (r'(RECORD|END)\D', Keyword.Declaration),
            # Sized type names such as STRING20 or UNSIGNED8.
            (r'((?:ASCII|BIG_ENDIAN|BOOLEAN|DATA|DECIMAL|EBCDIC|INTEGER|PATTERN|'
             r'QSTRING|REAL|RECORD|RULE|SET OF|STRING|TOKEN|UDECIMAL|UNICODE|'
             r'UNSIGNED|VARSTRING|VARUNICODE)\d*)(\s+)',
             bygroups(Keyword.Type, Whitespace)),
        ],
        'keywords': [
            (words((
                'APPLY', 'ASSERT', 'BUILD', 'BUILDINDEX', 'EVALUATE', 'FAIL',
                'KEYDIFF', 'KEYPATCH', 'LOADXML', 'NOTHOR', 'NOTIFY', 'OUTPUT',
                'PARALLEL', 'SEQUENTIAL', 'SOAPCALL', 'CHECKPOINT', 'DEPRECATED',
                'FAILCODE', 'FAILMESSAGE', 'FAILURE', 'GLOBAL', 'INDEPENDENT',
                'ONWARNING', 'PERSIST', 'PRIORITY', 'RECOVERY', 'STORED', 'SUCCESS',
                'WAIT', 'WHEN'), suffix=r'\b'),
             Keyword.Reserved),
            # These are classed differently, check later
            (words((
                'ALL', 'AND', 'ANY', 'AS', 'ATMOST', 'BEFORE', 'BEGINC++', 'BEST',
                'BETWEEN', 'CASE', 'CONST', 'COUNTER', 'CSV', 'DESCEND', 'ENCRYPT',
                'ENDC++', 'ENDMACRO', 'EXCEPT', 'EXCLUSIVE', 'EXPIRE', 'EXPORT',
                'EXTEND', 'FALSE', 'FEW', 'FIRST', 'FLAT', 'FULL', 'FUNCTION',
                'GROUP', 'HEADER', 'HEADING', 'HOLE', 'IFBLOCK', 'IMPORT', 'IN',
                'JOINED', 'KEEP', 'KEYED', 'LAST', 'LEFT', 'LIMIT', 'LOAD', 'LOCAL',
                'LOCALE', 'LOOKUP', 'MACRO', 'MANY', 'MAXCOUNT', 'MAXLENGTH',
                'MIN SKEW', 'MODULE', 'INTERFACE', 'NAMED', 'NOCASE', 'NOROOT',
                'NOSCAN', 'NOSORT', 'NOT', 'OF', 'ONLY', 'OPT', 'OR', 'OUTER',
                'OVERWRITE', 'PACKED', 'PARTITION', 'PENALTY', 'PHYSICALLENGTH',
                'PIPE', 'QUOTE', 'RELATIONSHIP', 'REPEAT', 'RETURN', 'RIGHT',
                'SCAN', 'SELF', 'SEPARATOR', 'SERVICE', 'SHARED', 'SKEW', 'SKIP',
                'SQL', 'STORE', 'TERMINATOR', 'THOR', 'THRESHOLD', 'TOKEN',
                'TRANSFORM', 'TRIM', 'TRUE', 'TYPE', 'UNICODEORDER', 'UNSORTED',
                'VALIDATE', 'VIRTUAL', 'WHOLE', 'WILD', 'WITHIN', 'XML', 'XPATH',
                '__COMPRESSED__'), suffix=r'\b'),
             Keyword.Reserved),
        ],
        'functions': [
            (words((
                'ABS', 'ACOS', 'ALLNODES', 'ASCII', 'ASIN', 'ASSTRING', 'ATAN',
                'ATAN2', 'AVE', 'CASE', 'CHOOSE', 'CHOOSEN', 'CHOOSESETS',
                'CLUSTERSIZE', 'COMBINE', 'CORRELATION', 'COS', 'COSH', 'COUNT',
                'COVARIANCE', 'CRON', 'DATASET', 'DEDUP', 'DEFINE', 'DENORMALIZE',
                'DISTRIBUTE', 'DISTRIBUTED', 'DISTRIBUTION', 'EBCDIC', 'ENTH',
                'ERROR', 'EVALUATE', 'EVENT', 'EVENTEXTRA', 'EVENTNAME', 'EXISTS',
                'EXP', 'FAILCODE', 'FAILMESSAGE', 'FETCH', 'FROMUNICODE',
                'GETISVALID', 'GLOBAL', 'GRAPH', 'GROUP', 'HASH', 'HASH32',
                'HASH64', 'HASHCRC', 'HASHMD5', 'HAVING', 'IF', 'INDEX',
                'INTFORMAT', 'ISVALID', 'ITERATE', 'JOIN', 'KEYUNICODE', 'LENGTH',
                'LIBRARY', 'LIMIT', 'LN', 'LOCAL', 'LOG', 'LOOP', 'MAP', 'MATCHED',
                'MATCHLENGTH', 'MATCHPOSITION', 'MATCHTEXT', 'MATCHUNICODE', 'MAX',
                'MERGE', 'MERGEJOIN', 'MIN', 'NOLOCAL', 'NONEMPTY', 'NORMALIZE',
                'PARSE', 'PIPE', 'POWER', 'PRELOAD', 'PROCESS', 'PROJECT', 'PULL',
                'RANDOM', 'RANGE', 'RANK', 'RANKED', 'REALFORMAT', 'RECORDOF',
                'REGEXFIND', 'REGEXREPLACE', 'REGROUP', 'REJECTED', 'ROLLUP',
                'ROUND', 'ROUNDUP', 'ROW', 'ROWDIFF', 'SAMPLE', 'SET', 'SIN',
                'SINH', 'SIZEOF', 'SOAPCALL', 'SORT', 'SORTED', 'SQRT', 'STEPPED',
                'STORED', 'SUM', 'TABLE', 'TAN', 'TANH', 'THISNODE', 'TOPN',
                'TOUNICODE', 'TRANSFER', 'TRIM', 'TRUNCATE', 'TYPEOF', 'UNGROUP',
                'UNICODEORDER', 'VARIANCE', 'WHICH', 'WORKUNIT', 'XMLDECODE',
                'XMLENCODE', 'XMLTEXT', 'XMLUNICODE'), suffix=r'\b'),
             Name.Function),
        ],
        'string': [
            # NOTE(review): either quote character ends the state, so a
            # string opened with " can be closed by ' -- looks intentional
            # upstream; confirm before tightening.
            (r'"', String, '#pop'),
            (r'\'', String, '#pop'),
            (r'[^"\']+', String),
        ],
    }
    def analyse_text(text):
        """This is very difficult to guess relative to other business languages.
        -> in conjunction with BEGIN/END seems relatively rare though."""
        result = 0
        if '->' in text:
            result += 0.01
        if 'BEGIN' in text:
            result += 0.01
        if 'END' in text:
            result += 0.01
        return result

View File

@@ -0,0 +1,68 @@
"""
pygments.lexers.eiffel
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Eiffel language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words, bygroups
from pygments.token import Comment, Operator, Keyword, Name, String, Number, \
Punctuation, Whitespace
__all__ = ['EiffelLexer']
class EiffelLexer(RegexLexer):
    """
    For Eiffel source code.

    Eiffel keywords and operators are case-insensitive, which the rules
    below handle with inline ``(?i)`` groups rather than a lexer-wide flag.
    """
    name = 'Eiffel'
    url = 'https://www.eiffel.com'
    aliases = ['eiffel']
    filenames = ['*.e']
    mimetypes = ['text/x-eiffel']
    version_added = '2.0'
    tokens = {
        'root': [
            # Intra-line whitespace (newlines are handled at the end).
            (r'[^\S\n]+', Whitespace),
            # -- line comments
            (r'--.*?$', Comment.Single),
            # (A second, identical whitespace rule used to follow here; it
            # was unreachable because RegexLexer tries rules in order and
            # the first rule already matches the same pattern.)
            # Please note that keyword and operator are case insensitive.
            (r'(?i)(true|false|void|current|result|precursor)\b', Keyword.Constant),
            (r'(?i)(not|xor|implies|or)\b', Operator.Word),
            # 'and then' / 'or else' short-circuit forms.
            (r'(?i)(and)(?:(\s+)(then))?\b',
             bygroups(Operator.Word, Whitespace, Operator.Word)),
            (r'(?i)(or)(?:(\s+)(else))?\b',
             bygroups(Operator.Word, Whitespace, Operator.Word)),
            (words((
                'across', 'agent', 'alias', 'all', 'as', 'assign', 'attached',
                'attribute', 'check', 'class', 'convert', 'create', 'debug',
                'deferred', 'detachable', 'do', 'else', 'elseif', 'end', 'ensure',
                'expanded', 'export', 'external', 'feature', 'from', 'frozen', 'if',
                'inherit', 'inspect', 'invariant', 'like', 'local', 'loop', 'none',
                'note', 'obsolete', 'old', 'once', 'only', 'redefine', 'rename',
                'require', 'rescue', 'retry', 'select', 'separate', 'then',
                'undefine', 'until', 'variant', 'when'), prefix=r'(?i)\b', suffix=r'\b'),
             Keyword.Reserved),
            # Verbatim strings "[ ... ]" may span lines; plain strings may not.
            (r'"\[([^\]%]|%(.|\n)|\][^"])*?\]"', String),
            (r'"([^"%\n]|%.)*?"', String),
            include('numbers'),
            # Character literals; % introduces Eiffel escapes.
            (r"'([^'%]|%'|%%)'", String.Char),
            (r"(//|\\\\|>=|<=|:=|/=|~|/~|[\\?!#%&@|+/\-=>*$<^\[\]])", Operator),
            (r"([{}():;,.])", Punctuation),
            # Mixed-case names are plain identifiers...
            (r'([a-z]\w*)|([A-Z][A-Z0-9_]*[a-z]\w*)', Name),
            # ...while ALL-CAPS names are treated as class names.
            (r'([A-Z][A-Z0-9_]*)', Name.Class),
            (r'\n+', Whitespace),
        ],
        'numbers': [
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'0[bB][01]+', Number.Bin),
            # Eiffel octal literals use a 0c prefix.
            (r'0[cC][0-7]+', Number.Oct),
            (r'([0-9]+\.[0-9]*)|([0-9]*\.[0-9]+)', Number.Float),
            (r'[0-9]+', Number.Integer),
        ],
    }

View File

@@ -0,0 +1,123 @@
"""
pygments.lexers.elm
~~~~~~~~~~~~~~~~~~~
Lexer for the Elm programming language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, include, bygroups
from pygments.token import Comment, Keyword, Name, Number, Punctuation, \
String, Whitespace
__all__ = ['ElmLexer']
class ElmLexer(RegexLexer):
    """
    For Elm source code.
    """
    name = 'Elm'
    url = 'https://elm-lang.org/'
    aliases = ['elm']
    filenames = ['*.elm']
    mimetypes = ['text/x-elm']
    version_added = '2.1'
    # Values/identifiers start lowercase; primes are allowed.
    validName = r'[a-z_][a-zA-Z0-9_\']*'
    # The program entry point, matched at the start of a line.
    specialName = r'^main '
    # Operators ordered longest-first so shorter prefixes don't win.
    builtinOps = (
        '~', '||', '|>', '|', '`', '^', '\\', '\'', '>>', '>=', '>', '==',
        '=', '<~', '<|', '<=', '<<', '<-', '<', '::', ':', '/=', '//', '/',
        '..', '.', '->', '-', '++', '+', '*', '&&', '%',
    )
    reservedWords = words((
        'alias', 'as', 'case', 'else', 'if', 'import', 'in',
        'let', 'module', 'of', 'port', 'then', 'type', 'where',
        ), suffix=r'\b')
    tokens = {
        'root': [
            # Comments
            (r'\{-', Comment.Multiline, 'comment'),
            (r'--.*', Comment.Single),
            # Whitespace
            (r'\s+', Whitespace),
            # Strings
            (r'"', String, 'doublequote'),
            # Modules
            (r'^(\s*)(module)(\s*)', bygroups(Whitespace, Keyword.Namespace,
                Whitespace), 'imports'),
            # Imports
            (r'^(\s*)(import)(\s*)', bygroups(Whitespace, Keyword.Namespace,
                Whitespace), 'imports'),
            # Shaders (embedded GLSL between [glsl| and |])
            (r'\[glsl\|.*', Name.Entity, 'shader'),
            # Keywords
            (reservedWords, Keyword.Reserved),
            # Types
            (r'[A-Z][a-zA-Z0-9_]*', Keyword.Type),
            # Main
            (specialName, Keyword.Reserved),
            # Prefix Operators (operator in parens used as a function)
            (words((builtinOps), prefix=r'\(', suffix=r'\)'), Name.Function),
            # Infix Operators
            (words(builtinOps), Name.Function),
            # Numbers
            include('numbers'),
            # Variable Names
            (validName, Name.Variable),
            # Parens
            (r'[,()\[\]{}]', Punctuation),
        ],
        'comment': [
            (r'-(?!\})', Comment.Multiline),
            # {- -} comments nest, hence the #push.
            (r'\{-', Comment.Multiline, 'comment'),
            (r'[^-}]', Comment.Multiline),
            (r'-\}', Comment.Multiline, '#pop'),
        ],
        'doublequote': [
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\[nrfvb\\"]', String.Escape),
            (r'[^"]', String),
            (r'"', String, '#pop'),
        ],
        'imports': [
            (r'\w+(\.\w+)*', Name.Class, '#pop'),
        ],
        'numbers': [
            # NOTE(review): the float rule only consumes the integer part
            # and the dot (lookahead for the fraction) -- upstream behavior.
            (r'_?\d+\.(?=\d+)', Number.Float),
            (r'_?\d+', Number.Integer),
        ],
        'shader': [
            (r'\|(?!\])', Name.Entity),
            (r'\|\]', Name.Entity, '#pop'),
            (r'(.*)(\n)', bygroups(Name.Entity, Whitespace)),
        ],
    }

View File

@@ -0,0 +1,175 @@
"""
pygments.lexers.elpi
~~~~~~~~~~~~~~~~~~~~
Lexer for the `Elpi <http://github.com/LPCIC/elpi>`_ programming language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['ElpiLexer']
from pygments.lexers.theorem import CoqLexer
class ElpiLexer(RegexLexer):
    """
    Lexer for the Elpi programming language.

    The character-class fragments below are composed into the constant and
    symbol regexes used throughout the token table; Coq code embedded in
    ``{{ ... }}`` quotations is delegated to :class:`CoqLexer`.
    """
    name = 'Elpi'
    url = 'http://github.com/LPCIC/elpi'
    aliases = ['elpi']
    filenames = ['*.elpi']
    mimetypes = ['text/x-elpi']
    version_added = '2.11'
    # Building blocks for identifier/operator regexes.
    lcase_re = r"[a-z]"
    ucase_re = r"[A-Z]"
    digit_re = r"[0-9]"
    schar2_re = r"([+*^?/<>`'@#~=&!])"
    schar_re = rf"({schar2_re}|-|\$|_)"
    idchar_re = rf"({lcase_re}|{ucase_re}|{digit_re}|{schar_re})"
    idcharstarns_re = rf"({idchar_re}*(\.({lcase_re}|{ucase_re}){idchar_re}*)*)"
    symbchar_re = rf"({lcase_re}|{ucase_re}|{digit_re}|{schar_re}|:)"
    constant_re = rf"({ucase_re}{idchar_re}*|{lcase_re}{idcharstarns_re}|{schar2_re}{symbchar_re}*|_{idchar_re}+)"
    symbol_re = r"(,|<=>|->|:-|;|\?-|->|&|=>|\bas\b|\buvar\b|<|=<|=|==|>=|>|\bi<|\bi=<|\bi>=|\bi>|\bis\b|\br<|\br=<|\br>=|\br>|\bs<|\bs=<|\bs>=|\bs>|@|::|\[\]|`->|`:|`:=|\^|-|\+|\bi-|\bi\+|r-|r\+|/|\*|\bdiv\b|\bi\*|\bmod\b|\br\*|~|\bi~|\br~)"
    escape_re = rf"\(({constant_re}|{symbol_re})\)"
    const_sym_re = rf"({constant_re}|{symbol_re}|{escape_re})"
    tokens = {
        'root': [
            include('elpi')
        ],
        'elpi': [
            include('_elpi-comment'),
            # Clause attributes such as :before "name".
            (r"(:before|:after|:if|:name)(\s*)(\")",
             bygroups(Keyword.Mode, Text.Whitespace, String.Double),
             'elpi-string'),
            (r"(:index)(\s*)(\()", bygroups(Keyword.Mode, Text.Whitespace, Punctuation),
             'elpi-indexing-expr'),
            # Declarations: pred / type / kind / typeabbrev.
            (rf"\b(external pred|pred)(\s+)({const_sym_re})",
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
             'elpi-pred-item'),
            (rf"\b(external type|type)(\s+)(({const_sym_re}(,\s*)?)+)",
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
             'elpi-type'),
            (rf"\b(kind)(\s+)(({const_sym_re}|,)+)",
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
             'elpi-type'),
            (rf"\b(typeabbrev)(\s+)({const_sym_re})",
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
             'elpi-type'),
            (r"\b(typeabbrev)(\s+)(\([^)]+\))",
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
             'elpi-type'),
            (r"\b(accumulate)(\s+)(\")",
             bygroups(Keyword.Declaration, Text.Whitespace, String.Double),
             'elpi-string'),
            (rf"\b(accumulate|namespace|local)(\s+)({constant_re})",
             bygroups(Keyword.Declaration, Text.Whitespace, Text)),
            (rf"\b(shorten)(\s+)({constant_re}\.)",
             bygroups(Keyword.Declaration, Text.Whitespace, Text)),
            # Binders: pi X\ ... / sigma X\ ...
            (r"\b(pi|sigma)(\s+)([a-zA-Z][A-Za-z0-9_ ]*)(\\)",
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable, Text)),
            (rf"\b(constraint)(\s+)(({const_sym_re}(\s+)?)+)",
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Function),
             'elpi-chr-rule-start'),
            # Capitalized/underscore names are logic variables.
            (rf"(?=[A-Z_]){constant_re}", Name.Variable),
            (rf"(?=[a-z_])({constant_re}|_)\\", Name.Variable),
            (r"_", Name.Variable),
            (rf"({symbol_re}|!|=>|;)", Keyword.Declaration),
            (constant_re, Text),
            (r"\[|\]|\||=>", Keyword.Declaration),
            (r'"', String.Double, 'elpi-string'),
            (r'`', String.Double, 'elpi-btick'),
            (r'\'', String.Double, 'elpi-tick'),
            # {{ ... }} quotations delegate to the Coq lexer.
            (r'\{\{', Punctuation, 'elpi-quote'),
            (r'\{[^\{]', Text, 'elpi-spill'),
            (r"\(", Punctuation, 'elpi-in-parens'),
            (r'\d[\d_]*', Number.Integer),
            (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
            (r"[\+\*\-/\^\.]", Operator),
        ],
        '_elpi-comment': [
            (r'%[^\n]*\n', Comment),
            (r'/(?:\\\n)?[*](?:[^*]|[*](?!(?:\\\n)?/))*[*](?:\\\n)?/', Comment),
            (r"\s+", Text.Whitespace),
        ],
        'elpi-indexing-expr':[
            (r'[0-9 _]+', Number.Integer),
            (r'\)', Punctuation, '#pop'),
        ],
        'elpi-type': [
            (r"(ctype\s+)(\")", bygroups(Keyword.Type, String.Double), 'elpi-string'),
            (r'->', Keyword.Type),
            (constant_re, Keyword.Type),
            (r"\(|\)", Keyword.Type),
            (r"\.", Text, '#pop'),
            include('_elpi-comment'),
        ],
        'elpi-chr-rule-start': [
            (r"\{", Punctuation, 'elpi-chr-rule'),
            include('_elpi-comment'),
        ],
        'elpi-chr-rule': [
            (r"\brule\b", Keyword.Declaration),
            (r"\\", Keyword.Declaration),
            # Pop both this state and 'elpi-chr-rule-start'.
            (r"\}", Punctuation, '#pop:2'),
            include('elpi'),
        ],
        'elpi-pred-item': [
            # i:/o: argument modes, each followed by a type.
            (r"[io]:", Keyword.Mode, 'elpi-ctype'),
            (r"\.", Text, '#pop'),
            include('_elpi-comment'),
        ],
        'elpi-ctype': [
            (r"(ctype\s+)(\")", bygroups(Keyword.Type, String.Double), 'elpi-string'),
            (r'->', Keyword.Type),
            (constant_re, Keyword.Type),
            (r"\(|\)", Keyword.Type),
            (r",", Text, '#pop'),
            (r"\.", Text, '#pop:2'),
            include('_elpi-comment'),
        ],
        'elpi-btick': [
            (r'[^` ]+', String.Double),
            (r'`', String.Double, '#pop'),
        ],
        'elpi-tick': [
            (r'[^\' ]+', String.Double),
            (r'\'', String.Double, '#pop'),
        ],
        'elpi-string': [
            (r'[^\"]+', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        'elpi-quote': [
            (r'\}\}', Punctuation, '#pop'),
            (r"\s+", Text.Whitespace),
            # lp:{{ ... }} anti-quotation back into Elpi.
            (r"(lp:)(\{\{)", bygroups(Number, Punctuation), 'elpi-quote-exit'),
            (rf"(lp:)((?=[A-Z_]){constant_re})", bygroups(Number, Name.Variable)),
            (r"((?!lp:|\}\}).)+", using(CoqLexer)),
        ],
        'elpi-quote-exit': [
            include('elpi'),
            (r'\}\}', Punctuation, '#pop'),
        ],
        'elpi-spill': [
            (r'\{[^\{]', Text, '#push'),
            (r'\}[^\}]', Text, '#pop'),
            include('elpi'),
        ],
        'elpi-in-parens': [
            (r"\(", Punctuation, '#push'),
            include('elpi'),
            (r"\)", Punctuation, '#pop'),
        ],
    }

View File

@@ -0,0 +1,132 @@
"""
pygments.lexers.email
~~~~~~~~~~~~~~~~~~~~~
Lexer for the raw E-mail.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, DelegatingLexer, bygroups
from pygments.lexers.mime import MIMELexer
from pygments.token import Text, Keyword, Name, String, Number, Comment
from pygments.util import get_bool_opt
__all__ = ["EmailLexer"]
class EmailHeaderLexer(RegexLexer):
    """
    Sub-lexer for raw E-mail. This lexer only process header part of e-mail.

    .. versionadded:: 2.5
    """
    def __init__(self, **options):
        super().__init__(**options)
        # When False (default), X- user-defined headers are lowlighted
        # instead of being lexed like standard headers.
        self.highlight_x = get_bool_opt(options, "highlight-X-header", False)
    def get_x_header_tokens(self, match):
        """Tokenize an ``X-`` header, honoring the highlight-X-header option."""
        if self.highlight_x:
            # field
            yield match.start(1), Name.Tag, match.group(1)
            # content
            default_actions = self.get_tokens_unprocessed(
                match.group(2), stack=("root", "header"))
            yield from default_actions
        else:
            # lowlight
            yield match.start(1), Comment.Special, match.group(1)
            yield match.start(2), Comment.Multiline, match.group(2)
    tokens = {
        "root": [
            # Standard header names (anything not starting with X-,
            # except X400 which is a real header family).
            (r"^(?:[A-WYZ]|X400)[\w\-]*:", Name.Tag, "header"),
            # X- user-defined headers, including folded continuation lines.
            (r"^(X-(?:\w[\w\-]*:))([\s\S]*?\n)(?![ \t])", get_x_header_tokens),
        ],
        "header": [
            # folding
            (r"\n[ \t]", Text.Whitespace),
            (r"\n(?![ \t])", Text.Whitespace, "#pop"),
            # keywords
            (r"\bE?SMTPS?\b", Keyword),
            (r"\b(?:HE|EH)LO\b", Keyword),
            # mailbox
            (r"[\w\.\-\+=]+@[\w\.\-]+", Name.Label),
            (r"<[\w\.\-\+=]+@[\w\.\-]+>", Name.Label),
            # domain
            (r"\b(\w[\w\.-]*\.[\w\.-]*\w[a-zA-Z]+)\b", Name.Function),
            # IPv4
            (r"(?<=\b)(?:(?:25[0-5]|2[0-4][0-9]|1?[0-9][0-9]?)\.){3}(?:25[0"
             r"-5]|2[0-4][0-9]|1?[0-9][0-9]?)(?=\b)",
             Number.Integer),
            # IPv6 (the many alternatives cover the '::' compressions)
            (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,7}:(?!\b)", Number.Hex),
            (r"(?<=\b):((:[0-9a-fA-F]{1,4}){1,7}|:)(?=\b)", Number.Hex),
            (r"(?<=\b)([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}(?=\b)", Number.Hex),
            (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}(?=\b)", Number.Hex),
            (r"(?<=\b)[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})(?=\b)", Number.Hex),
            (r"(?<=\b)fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}(?=\b)", Number.Hex),
            (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}(?=\b)", Number.Hex),
            (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}(?=\b)",
             Number.Hex),
            (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}(?=\b)",
             Number.Hex),
            (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}(?=\b)",
             Number.Hex),
            (r"(?<=\b)::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}"
             r"[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
             r"[0-9])(?=\b)",
             Number.Hex),
            (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9])"
             r"{0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])(?=\b)",
             Number.Hex),
            # Date time (RFC 5322 date-time, with a few legacy zone names)
            (r"(?:(Sun|Mon|Tue|Wed|Thu|Fri|Sat),\s+)?(0[1-9]|[1-2]?[0-9]|3["
             r"01])\s+(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+("
             r"19[0-9]{2}|[2-9][0-9]{3})\s+(2[0-3]|[0-1][0-9]):([0-5][0-9])"
             r"(?::(60|[0-5][0-9]))?(?:\.\d{1,5})?\s+([-\+][0-9]{2}[0-5][0-"
             r"9]|\(?(?:UTC?|GMT|(?:E|C|M|P)(?:ST|ET|DT)|[A-IK-Z])\)?)",
             Name.Decorator),
            # RFC-2047 encoded string
            (r"(=\?)([\w-]+)(\?)([BbQq])(\?)([\[\w!\"#$%&\'()*+,-./:;<=>@[\\"
             r"\]^_`{|}~]+)(\?=)",
             bygroups(String.Affix, Name.Constant, String.Affix, Keyword.Constant,
                      String.Affix, Number.Hex, String.Affix)),
            # others
            (r'[\s]+', Text.Whitespace),
            (r'[\S]', Text),
        ],
    }
class EmailLexer(DelegatingLexer):
    """
    Lexer for raw E-mail.

    Header lines are handled by ``EmailHeaderLexer``; the message body is
    delegated to the MIME lexer.

    Additional options accepted:

    `highlight-X-header`
        Highlight the fields of ``X-`` user-defined email header. (default:
        ``False``).
    """
    name = "E-mail"
    url = "https://en.wikipedia.org/wiki/Email#Message_format"
    aliases = ["email", "eml"]
    filenames = ["*.eml"]
    mimetypes = ["message/rfc822"]
    version_added = '2.5'
    def __init__(self, **options):
        # Header tokens win over the delegated MIME body tokens; anything
        # the header lexer leaves untokenized is treated as Comment.
        DelegatingLexer.__init__(
            self, EmailHeaderLexer, MIMELexer, Comment, **options)

View File

@@ -0,0 +1,526 @@
"""
pygments.lexers.erlang
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Erlang.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions, \
include, default, line_re
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Whitespace
__all__ = ['ErlangLexer', 'ErlangShellLexer', 'ElixirConsoleLexer',
'ElixirLexer']
class ErlangLexer(RegexLexer):
    """
    For the Erlang functional programming language.
    """

    name = 'Erlang'
    url = 'https://www.erlang.org/'
    aliases = ['erlang']
    filenames = ['*.erl', '*.hrl', '*.es', '*.escript']
    mimetypes = ['text/x-erlang']
    version_added = '0.9'

    keywords = (
        'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if',
        'let', 'of', 'query', 'receive', 'try', 'when',
    )

    builtins = (  # See erlang(3) man page
        'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list',
        'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions',
        'byte_size', 'cancel_timer', 'check_process_code', 'delete_module',
        'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit',
        'float', 'float_to_list', 'fun_info', 'fun_to_list',
        'function_exported', 'garbage_collect', 'get', 'get_keys',
        'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary',
        'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean',
        'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list',
        'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record',
        'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom',
        'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom',
        'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple',
        'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5',
        'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor',
        'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2',
        'pid_to_list', 'port_close', 'port_command', 'port_connect',
        'port_control', 'port_call', 'port_info', 'port_to_list',
        'process_display', 'process_flag', 'process_info', 'purge_module',
        'put', 'read_timer', 'ref_to_list', 'register', 'resume_process',
        'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie',
        'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor',
        'spawn_opt', 'split_binary', 'start_timer', 'statistics',
        'suspend_process', 'system_flag', 'system_info', 'system_monitor',
        'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered',
        'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list',
        'universaltime_to_localtime', 'unlink', 'unregister', 'whereis'
    )

    operators = r'(\+\+?|--?|\*|/|<|>|/=|=:=|=/=|=<|>=|==?|<-|!|\?)'
    word_operators = (
        'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor',
        'div', 'not', 'or', 'orelse', 'rem', 'xor'
    )

    # Reusable regex fragments for atoms, variables, character escapes,
    # macro names and integer base prefixes (2#... through 36#...).
    atom_re = r"(?:[a-z]\w*|'[^\n']*[^\\]')"
    variable_re = r'(?:[A-Z_]\w*)'
    esc_char_re = r'[bdefnrstv\'"\\]'
    esc_octal_re = r'[0-7][0-7]?[0-7]?'
    esc_hex_re = r'(?:x[0-9a-fA-F]{2}|x\{[0-9a-fA-F]+\})'
    esc_ctrl_re = r'\^[a-zA-Z]'
    escape_re = r'(?:\\(?:'+esc_char_re+r'|'+esc_octal_re+r'|'+esc_hex_re+r'|'+esc_ctrl_re+r'))'
    macro_re = r'(?:'+variable_re+r'|'+atom_re+r')'
    base_re = r'(?:[2-9]|[12][0-9]|3[0-6])'

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'(%.*)(\n)', bygroups(Comment, Whitespace)),
            (words(keywords, suffix=r'\b'), Keyword),
            (words(builtins, suffix=r'\b'), Name.Builtin),
            (words(word_operators, suffix=r'\b'), Operator.Word),
            (r'^-', Punctuation, 'directive'),
            (operators, Operator),
            (r'"', String, 'string'),
            (r'<<', Name.Label),
            (r'>>', Name.Label),
            ('(' + atom_re + ')(:)', bygroups(Name.Namespace, Punctuation)),
            ('(?:^|(?<=:))(' + atom_re + r')(\s*)(\()',
             bygroups(Name.Function, Whitespace, Punctuation)),
            # Based integers like 16#deadbeef.
            (r'[+-]?' + base_re + r'#[0-9a-zA-Z]+', Number.Integer),
            # FIX: the float rule must precede the plain-integer rule —
            # previously '[+-]?\d+' consumed the integral part of '1.5',
            # so the float rule could never match.  The dot is now escaped
            # as well; the original unescaped '.' also accepted junk such
            # as '1x5'.
            (r'[+-]?\d+\.\d+', Number.Float),
            (r'[+-]?\d+', Number.Integer),
            (r'[]\[:_@\".{}()|;,]', Punctuation),
            (variable_re, Name.Variable),
            (atom_re, Name),
            # ?MACRO uses
            (r'\?'+macro_re, Name.Constant),
            # $c character literals (with escapes)
            (r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char),
            # #record_name and #record.field
            (r'#'+atom_re+r'(:?\.'+atom_re+r')?', Name.Label),
            # Erlang script shebang
            (r'\A#!.+\n', Comment.Hashbang),
            # EEP 43: Maps
            # http://www.erlang.org/eeps/eep-0043.html
            (r'#\{', Punctuation, 'map_key'),
        ],
        'string': [
            (escape_re, String.Escape),
            (r'"', String, '#pop'),
            # io:format-style control sequences inside strings
            (r'~[0-9.*]*[~#+BPWXb-ginpswx]', String.Interpol),
            (r'[^"\\~]+', String),
            (r'~', String),
        ],
        'directive': [
            # -define(NAME, ...) and -record(name, ...) get special names
            (r'(define)(\s*)(\()('+macro_re+r')',
             bygroups(Name.Entity, Whitespace, Punctuation, Name.Constant), '#pop'),
            (r'(record)(\s*)(\()('+macro_re+r')',
             bygroups(Name.Entity, Whitespace, Punctuation, Name.Label), '#pop'),
            (atom_re, Name.Entity, '#pop'),
        ],
        'map_key': [
            include('root'),
            (r'=>', Punctuation, 'map_val'),
            (r':=', Punctuation, 'map_val'),
            (r'\}', Punctuation, '#pop'),
        ],
        'map_val': [
            include('root'),
            (r',', Punctuation, '#pop'),
            (r'(?=\})', Punctuation, '#pop'),
        ],
    }
class ErlangShellLexer(Lexer):
    """
    Lexer for interactive ``erl`` shell transcripts.

    Lines beginning with an erl prompt are split into a ``Generic.Prompt``
    token plus Erlang source (buffered and highlighted with
    :class:`ErlangLexer`); all other lines are emitted as output, with
    ``*``-prefixed lines marked as tracebacks.
    """
    name = 'Erlang erl session'
    aliases = ['erl']
    filenames = ['*.erl-sh']
    mimetypes = ['text/x-erl-shellsession']
    url = 'https://www.erlang.org/'
    version_added = '1.1'

    _prompt_re = re.compile(r'(?:\([\w@_.]+\))?\d+>(?=\s|\Z)')

    def get_tokens_unprocessed(self, text):
        code_lexer = ErlangLexer(**self.options)
        pending_code = ''
        prompt_insertions = []
        for line_match in line_re.finditer(text):
            line = line_match.group()
            prompt = self._prompt_re.match(line)
            if prompt is not None:
                # Prompt line: record the prompt token at the current buffer
                # offset and accumulate the remainder as Erlang source.
                cut = prompt.end()
                prompt_insertions.append(
                    (len(pending_code), [(0, Generic.Prompt, line[:cut])]))
                pending_code += line[cut:]
                continue
            # Non-prompt line: flush buffered source first so that token
            # offsets remain monotonically increasing.
            if pending_code:
                yield from do_insertions(
                    prompt_insertions,
                    code_lexer.get_tokens_unprocessed(pending_code))
                pending_code = ''
                prompt_insertions = []
            kind = Generic.Traceback if line.startswith('*') else Generic.Output
            yield line_match.start(), kind, line
        # Flush whatever source is still buffered at end of input.
        if pending_code:
            yield from do_insertions(
                prompt_insertions,
                code_lexer.get_tokens_unprocessed(pending_code))
def gen_elixir_string_rules(name, symbol, token):
    """Return a one-entry state dict ('string_<name>') lexing an Elixir
    string delimited by *symbol*, emitting *token* for its contents and
    handling escapes and #{...} interpolation via the shared states."""
    rules = [
        (rf'[^#{symbol}\\]+', token),
        include('escapes'),
        (r'\\.', token),
        (rf'({symbol})', bygroups(token), "#pop"),
        include('interpol'),
    ]
    return {'string_' + name: rules}
def gen_elixir_sigstr_rules(term, term_class, token, interpol=True):
    """Return the token rules for the body of an Elixir sigil string that is
    closed by *term* (character class *term_class*), optionally supporting
    escapes and #{...} interpolation.  The closing rule also consumes any
    trailing sigil modifier letters (e.g. ~r/.../i)."""
    if not interpol:
        # Uppercase sigils: raw contents, backslash only escapes literally.
        return [
            (rf'[^{term_class}\\]+', token),
            (r'\\.', token),
            (rf'{term}[a-zA-Z]*', token, '#pop'),
        ]
    # Lowercase sigils: full escape and interpolation handling.
    return [
        (rf'[^#{term_class}\\]+', token),
        include('escapes'),
        (r'\\.', token),
        (rf'{term}[a-zA-Z]*', token, '#pop'),
        include('interpol'),
    ]
class ElixirLexer(RegexLexer):
    """
    For the Elixir language.
    """

    name = 'Elixir'
    url = 'https://elixir-lang.org'
    aliases = ['elixir', 'ex', 'exs']
    filenames = ['*.ex', '*.eex', '*.exs', '*.leex']
    mimetypes = ['text/x-elixir']
    version_added = '1.5'

    # Word classes consulted by get_tokens_unprocessed() below to promote
    # generic Name tokens to more specific token types.
    KEYWORD = ('fn', 'do', 'end', 'after', 'else', 'rescue', 'catch')
    KEYWORD_OPERATOR = ('not', 'and', 'or', 'when', 'in')
    BUILTIN = (
        'case', 'cond', 'for', 'if', 'unless', 'try', 'receive', 'raise',
        'quote', 'unquote', 'unquote_splicing', 'throw', 'super',
    )
    BUILTIN_DECLARATION = (
        'def', 'defp', 'defmodule', 'defprotocol', 'defmacro', 'defmacrop',
        'defdelegate', 'defexception', 'defstruct', 'defimpl', 'defcallback',
    )
    BUILTIN_NAMESPACE = ('import', 'require', 'use', 'alias')
    CONSTANT = ('nil', 'true', 'false')
    PSEUDO_VAR = ('_', '__MODULE__', '__DIR__', '__ENV__', '__CALLER__')

    # Operators grouped by length; the regex rules below try the longest
    # group first so e.g. '<<<' is not lexed as '<<' + '<'.
    OPERATORS3 = (
        '<<<', '>>>', '|||', '&&&', '^^^', '~~~', '===', '!==',
        '~>>', '<~>', '|~>', '<|>',
    )
    OPERATORS2 = (
        '==', '!=', '<=', '>=', '&&', '||', '<>', '++', '--', '|>', '=~',
        '->', '<-', '|', '.', '=', '~>', '<~',
    )
    OPERATORS1 = ('<', '>', '+', '-', '*', '/', '!', '^', '&')

    PUNCTUATION = (
        '\\\\', '<<', '>>', '=>', '(', ')', ':', ';', ',', '[', ']',
    )

    def get_tokens_unprocessed(self, text):
        # Post-process the regex lexer's stream: words matched as plain
        # Name are re-tagged according to the class-level word lists above.
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if value in self.KEYWORD:
                    yield index, Keyword, value
                elif value in self.KEYWORD_OPERATOR:
                    yield index, Operator.Word, value
                elif value in self.BUILTIN:
                    yield index, Keyword, value
                elif value in self.BUILTIN_DECLARATION:
                    yield index, Keyword.Declaration, value
                elif value in self.BUILTIN_NAMESPACE:
                    yield index, Keyword.Namespace, value
                elif value in self.CONSTANT:
                    yield index, Name.Constant, value
                elif value in self.PSEUDO_VAR:
                    yield index, Name.Builtin.Pseudo, value
                else:
                    yield index, token, value
            else:
                yield index, token, value

    # Plain class-body helper (no `self`): it is callable as a bare function
    # while the class body executes, and is only invoked once by the
    # `tokens.update(gen_elixir_sigil_rules())` call at the end of the class.
    def gen_elixir_sigil_rules():
        # all valid sigil terminators (excluding heredocs)
        terminators = [
            (r'\{', r'\}', '}', 'cb'),
            (r'\[', r'\]', r'\]', 'sb'),
            (r'\(', r'\)', ')', 'pa'),
            ('<', '>', '>', 'ab'),
            ('/', '/', '/', 'slas'),
            (r'\|', r'\|', '|', 'pipe'),
            ('"', '"', '"', 'quot'),
            ("'", "'", "'", 'apos'),
        ]

        # heredocs have slightly different rules
        triquotes = [(r'"""', 'triquot'), (r"'''", 'triapos')]

        token = String.Other
        states = {'sigils': []}

        for term, name in triquotes:
            # Lowercase sigil letters allow interpolation, uppercase do not.
            states['sigils'] += [
                (rf'(~[a-z])({term})', bygroups(token, String.Heredoc),
                 (name + '-end', name + '-intp')),
                (rf'(~[A-Z])({term})', bygroups(token, String.Heredoc),
                 (name + '-end', name + '-no-intp')),
            ]
            # '-end' consumes optional sigil modifier letters after closing.
            states[name + '-end'] = [
                (r'[a-zA-Z]+', token, '#pop'),
                default('#pop'),
            ]
            states[name + '-intp'] = [
                (r'^(\s*)(' + term + ')', bygroups(Whitespace, String.Heredoc), '#pop'),
                include('heredoc_interpol'),
            ]
            states[name + '-no-intp'] = [
                (r'^(\s*)(' + term +')', bygroups(Whitespace, String.Heredoc), '#pop'),
                include('heredoc_no_interpol'),
            ]

        for lterm, rterm, rterm_class, name in terminators:
            states['sigils'] += [
                (r'~[a-z]' + lterm, token, name + '-intp'),
                (r'~[A-Z]' + lterm, token, name + '-no-intp'),
            ]
            states[name + '-intp'] = \
                gen_elixir_sigstr_rules(rterm, rterm_class, token)
            states[name + '-no-intp'] = \
                gen_elixir_sigstr_rules(rterm, rterm_class, token, interpol=False)

        return states

    # Pre-built regex alternations over the operator/punctuation tuples.
    op3_re = "|".join(re.escape(s) for s in OPERATORS3)
    op2_re = "|".join(re.escape(s) for s in OPERATORS2)
    op1_re = "|".join(re.escape(s) for s in OPERATORS1)
    ops_re = rf'(?:{op3_re}|{op2_re}|{op1_re})'
    punctuation_re = "|".join(re.escape(s) for s in PUNCTUATION)
    alnum = r'\w'
    name_re = rf'(?:\.\.\.|[a-z_]{alnum}*[!?]?)'
    modname_re = rf'[A-Z]{alnum}*(?:\.[A-Z]{alnum}*)*'
    complex_name_re = rf'(?:{name_re}|{modname_re}|{ops_re})'
    special_atom_re = r'(?:\.\.\.|<<>>|%\{\}|%|\{\})'

    long_hex_char_re = r'(\\x\{)([\da-fA-F]+)(\})'
    hex_char_re = r'(\\x[\da-fA-F]{1,2})'
    escape_char_re = r'(\\[abdefnrstv])'

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'#.*$', Comment.Single),

            # Various kinds of characters (?c literals)
            (r'(\?)' + long_hex_char_re,
             bygroups(String.Char,
                      String.Escape, Number.Hex, String.Escape)),
            (r'(\?)' + hex_char_re,
             bygroups(String.Char, String.Escape)),
            (r'(\?)' + escape_char_re,
             bygroups(String.Char, String.Escape)),
            (r'\?\\?.', String.Char),

            # '::' has to go before atoms
            (r':::', String.Symbol),
            (r'::', Operator),

            # atoms
            (r':' + special_atom_re, String.Symbol),
            (r':' + complex_name_re, String.Symbol),
            (r':"', String.Symbol, 'string_double_atom'),
            (r":'", String.Symbol, 'string_single_atom'),

            # [keywords: ...]
            (rf'({special_atom_re}|{complex_name_re})(:)(?=\s|\n)',
             bygroups(String.Symbol, Punctuation)),

            # @attributes
            (r'@' + name_re, Name.Attribute),

            # identifiers
            (name_re, Name),
            (rf'(%?)({modname_re})', bygroups(Punctuation, Name.Class)),

            # operators and punctuation (longest alternation first)
            (op3_re, Operator),
            (op2_re, Operator),
            (punctuation_re, Punctuation),
            (r'&\d', Name.Entity),   # anon func arguments
            (op1_re, Operator),

            # numbers (underscore digit separators allowed)
            (r'0b[01]+', Number.Bin),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[\da-fA-F]+', Number.Hex),
            (r'\d(_?\d)*\.\d(_?\d)*([eE][-+]?\d(_?\d)*)?', Number.Float),
            (r'\d(_?\d)*', Number.Integer),

            # strings and heredocs
            (r'(""")(\s*)', bygroups(String.Heredoc, Whitespace),
             'heredoc_double'),
            (r"(''')(\s*)$", bygroups(String.Heredoc, Whitespace),
             'heredoc_single'),
            (r'"', String.Double, 'string_double'),
            (r"'", String.Single, 'string_single'),

            include('sigils'),

            (r'%\{', Punctuation, 'map_key'),
            (r'\{', Punctuation, 'tuple'),
        ],
        'heredoc_double': [
            (r'^(\s*)(""")', bygroups(Whitespace, String.Heredoc), '#pop'),
            include('heredoc_interpol'),
        ],
        'heredoc_single': [
            (r"^\s*'''", String.Heredoc, '#pop'),
            include('heredoc_interpol'),
        ],
        'heredoc_interpol': [
            (r'[^#\\\n]+', String.Heredoc),
            include('escapes'),
            (r'\\.', String.Heredoc),
            (r'\n+', String.Heredoc),
            include('interpol'),
        ],
        'heredoc_no_interpol': [
            (r'[^\\\n]+', String.Heredoc),
            (r'\\.', String.Heredoc),
            (r'\n+', Whitespace),
        ],
        'escapes': [
            (long_hex_char_re,
             bygroups(String.Escape, Number.Hex, String.Escape)),
            (hex_char_re, String.Escape),
            (escape_char_re, String.Escape),
        ],
        'interpol': [
            # '#{' starts an interpolated expression, lexed as normal code.
            (r'#\{', String.Interpol, 'interpol_string'),
        ],
        'interpol_string': [
            (r'\}', String.Interpol, "#pop"),
            include('root')
        ],
        'map_key': [
            include('root'),
            (r':', Punctuation, 'map_val'),
            (r'=>', Punctuation, 'map_val'),
            (r'\}', Punctuation, '#pop'),
        ],
        'map_val': [
            include('root'),
            (r',', Punctuation, '#pop'),
            (r'(?=\})', Punctuation, '#pop'),
        ],
        'tuple': [
            include('root'),
            (r'\}', Punctuation, '#pop'),
        ],
    }
    # String/atom states and the sigil machinery are generated at class
    # creation time by the module-level helper functions above.
    tokens.update(gen_elixir_string_rules('double', '"', String.Double))
    tokens.update(gen_elixir_string_rules('single', "'", String.Single))
    tokens.update(gen_elixir_string_rules('double_atom', '"', String.Symbol))
    tokens.update(gen_elixir_string_rules('single_atom', "'", String.Symbol))
    tokens.update(gen_elixir_sigil_rules())
class ElixirConsoleLexer(Lexer):
    """
    For Elixir interactive console (iex) output like:

    .. sourcecode:: iex

        iex> [head | tail] = [1,2,3]
        [1,2,3]
        iex> head
        1
        iex> tail
        [2,3]
        iex> [head | tail]
        [1,2,3]
        iex> length [head | tail]
        3
    """

    name = 'Elixir iex session'
    aliases = ['iex']
    mimetypes = ['text/x-elixir-shellsession']
    url = 'https://elixir-lang.org'
    version_added = '1.5'

    # Matches 'iex>' or continuation '...>' prompts, optionally carrying a
    # node name and/or counter, e.g. 'iex(2)>' or 'iex(node@host)1>'.
    _prompt_re = re.compile(r'(iex|\.{3})((?:\([\w@_.]+\))?\d+|\(\d+\))?> ')

    def get_tokens_unprocessed(self, text):
        exlexer = ElixirLexer(**self.options)

        curcode = ''          # buffered Elixir source awaiting lexing
        in_error = False      # inside a '** '-initiated error report
        insertions = []       # (offset, tokens) pairs for do_insertions()
        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith('** '):
                # Error marker line: emit it as Generic.Error except for the
                # final character (normally the newline), which is appended
                # to curcode so insertion offsets stay consistent.
                in_error = True
                insertions.append((len(curcode),
                                   [(0, Generic.Error, line[:-1])]))
                curcode += line[-1:]
            else:
                m = self._prompt_re.match(line)
                if m is not None:
                    # Prompt line ends any error report; buffer the code
                    # after the prompt for batch lexing.
                    in_error = False
                    end = m.end()
                    insertions.append((len(curcode),
                                       [(0, Generic.Prompt, line[:end])]))
                    curcode += line[end:]
                else:
                    # Plain line: flush buffered code first, then emit the
                    # line as error continuation or normal output.
                    if curcode:
                        yield from do_insertions(
                            insertions, exlexer.get_tokens_unprocessed(curcode))
                        curcode = ''
                        insertions = []
                    token = Generic.Error if in_error else Generic.Output
                    yield match.start(), token, line
        if curcode:
            yield from do_insertions(
                insertions, exlexer.get_tokens_unprocessed(curcode))

View File

@@ -0,0 +1,300 @@
"""
pygments.lexers.esoteric
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for esoteric languages.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words, bygroups
from pygments.token import Comment, Operator, Keyword, Name, String, Number, \
Punctuation, Error, Whitespace
__all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer', 'CAmkESLexer',
'CapDLLexer', 'AheuiLexer']
class BrainfuckLexer(RegexLexer):
    """
    Lexer for the esoteric BrainFuck language.
    """

    name = 'Brainfuck'
    url = 'http://www.muppetlabs.com/~breadbox/bf/'
    aliases = ['brainfuck', 'bf']
    filenames = ['*.bf', '*.b']
    mimetypes = ['application/x-brainfuck']
    version_added = ''

    tokens = {
        'common': [
            # use different colors for different instruction types
            (r'[.,]+', Name.Tag),
            (r'[+-]+', Name.Builtin),
            (r'[<>]+', Name.Variable),
            # anything else is a comment in brainfuck
            (r'[^.,+\-<>\[\]]+', Comment),
        ],
        'root': [
            (r'\[', Keyword, 'loop'),
            # a ']' with no matching '[' is an error
            (r'\]', Error),
            include('common'),
        ],
        'loop': [
            (r'\[', Keyword, '#push'),
            (r'\]', Keyword, '#pop'),
            include('common'),
        ]
    }

    def analyse_text(text):
        """It's safe to assume that a program which mostly consists of + -
        and < > is brainfuck."""
        plus_minus_count = 0
        greater_less_count = 0

        # FIX: sample at most the first 256 characters.  The original used
        # max(256, len(text)), which made the 25% thresholds below relative
        # to 256 even for shorter inputs, so short but obviously-brainfuck
        # programs could never reach the threshold.
        range_to_check = min(256, len(text))
        for c in text[:range_to_check]:
            if c == '+' or c == '-':
                plus_minus_count += 1
            if c == '<' or c == '>':
                greater_less_count += 1

        if plus_minus_count > (0.25 * range_to_check):
            return 1.0
        if greater_less_count > (0.25 * range_to_check):
            return 1.0

        # '[-]' (clear current cell) is a very characteristic idiom.
        result = 0
        if '[-]' in text:
            result += 0.5

        return result
class BefungeLexer(RegexLexer):
    """
    Lexer for the esoteric Befunge language.

    Befunge programs are two-dimensional playfields of single-character
    commands; this lexer simply classifies each character.
    """
    name = 'Befunge'
    url = 'http://en.wikipedia.org/wiki/Befunge'
    aliases = ['befunge']
    filenames = ['*.befunge']
    mimetypes = ['application/x-befunge']
    version_added = '0.7'

    tokens = {
        'root': [
            (r'[0-9a-f]', Number),        # push literal value 0-15
            (r'[+*/%!`-]', Operator),     # Traditional math
            (r'[<>^v?\[\]rxjk]', Name.Variable),  # Move, imperatives
            (r'[:\\$.,n]', Name.Builtin),  # Stack ops, imperatives
            # presumably conditional direction changes ('|', '_') plus
            # Befunge-98 'm'/'w' — confirm against the Befunge-98 spec
            (r'[|_mw]', Keyword),
            (r'[{}]', Name.Tag),          # Befunge-98 stack ops
            (r'".*?"', String.Double),    # Strings don't appear to allow escapes
            (r'\'.', String.Single),      # Single character
            (r'[#;]', Comment),           # Trampoline... depends on direction hit
            (r'[pg&~=@iotsy]', Keyword),  # Misc
            (r'[()A-Z]', Comment),        # Fingerprints
            (r'\s+', Whitespace),         # Whitespace doesn't matter
        ],
    }
class CAmkESLexer(RegexLexer):
    """
    Basic lexer for the input language for the CAmkES component platform.
    """
    name = 'CAmkES'
    url = 'https://sel4.systems/CAmkES/'
    aliases = ['camkes', 'idl4']
    filenames = ['*.camkes', '*.idl4']
    version_added = '2.1'

    tokens = {
        'root': [
            # C pre-processor directive
            (r'^(\s*)(#.*)(\n)', bygroups(Whitespace, Comment.Preproc,
                                          Whitespace)),

            # Whitespace, comments
            (r'\s+', Whitespace),
            (r'/\*(.|\n)*?\*/', Comment),
            (r'//.*$', Comment),

            (r'[\[(){},.;\]]', Punctuation),
            (r'[~!%^&*+=|?:<>/-]', Operator),

            # Structural keywords
            (words(('assembly', 'attribute', 'component', 'composition',
                    'configuration', 'connection', 'connector', 'consumes',
                    'control', 'dataport', 'Dataport', 'Dataports', 'emits',
                    'event', 'Event', 'Events', 'export', 'from', 'group',
                    'hardware', 'has', 'interface', 'Interface', 'maybe',
                    'procedure', 'Procedure', 'Procedures', 'provides',
                    'template', 'thread', 'threads', 'to', 'uses', 'with'),
                   suffix=r'\b'), Keyword),

            # Built-in types.
            # FIX: 'int16_6' was a typo for the C type 'int16_t', which
            # previously fell through and lexed as a plain identifier.
            (words(('bool', 'boolean', 'Buf', 'char', 'character', 'double',
                    'float', 'in', 'inout', 'int', 'int16_t', 'int32_t',
                    'int64_t', 'int8_t', 'integer', 'mutex', 'out', 'real',
                    'refin', 'semaphore', 'signed', 'string', 'struct',
                    'uint16_t', 'uint32_t', 'uint64_t', 'uint8_t', 'uintptr_t',
                    'unsigned', 'void'),
                   suffix=r'\b'), Keyword.Type),

            # Recognised attributes
            (r'[a-zA-Z_]\w*_(priority|domain|buffer)', Keyword.Reserved),
            (words(('dma_pool', 'from_access', 'to_access'), suffix=r'\b'),
             Keyword.Reserved),

            # CAmkES-level include
            (r'(import)(\s+)((?:<[^>]*>|"[^"]*");)',
             bygroups(Comment.Preproc, Whitespace, Comment.Preproc)),

            # C-level include
            (r'(include)(\s+)((?:<[^>]*>|"[^"]*");)',
             bygroups(Comment.Preproc, Whitespace, Comment.Preproc)),

            # Literals
            (r'0[xX][\da-fA-F]+', Number.Hex),
            # FIX: the float rule must precede the integer rule — otherwise
            # '-?[\d]+' consumes the integral part of '1.5' and the float
            # rule can never match.
            (r'-?[\d]+\.[\d]+', Number.Float),
            (r'-?[\d]+', Number),
            (r'"[^"]*"', String),
            (r'[Tt]rue|[Ff]alse', Name.Builtin),

            # Identifiers
            (r'[a-zA-Z_]\w*', Name),
        ],
    }
class CapDLLexer(RegexLexer):
    """
    Basic lexer for CapDL.

    The source of the primary tool that reads such specifications is available
    at https://github.com/seL4/capdl/tree/master/capDL-tool. Note that this
    lexer only supports a subset of the grammar. For example, identifiers can
    shadow type names, but these instances are currently incorrectly
    highlighted as types. Supporting this would need a stateful lexer that is
    considered unnecessarily complex for now.
    """
    name = 'CapDL'
    url = 'https://ssrg.nicta.com.au/publications/nictaabstracts/Kuz_KLW_10.abstract.pml'
    aliases = ['capdl']
    filenames = ['*.cdl']
    version_added = '2.2'

    tokens = {
        'root': [
            # C pre-processor directive
            (r'^(\s*)(#.*)(\n)',
             bygroups(Whitespace, Comment.Preproc, Whitespace)),

            # Whitespace, comments
            (r'\s+', Whitespace),
            (r'/\*(.|\n)*?\*/', Comment),
            (r'(//|--).*$', Comment),

            (r'[<>\[(){},:;=\]]', Punctuation),
            (r'\.\.', Punctuation),

            # Structural keywords
            (words(('arch', 'arm11', 'caps', 'child_of', 'ia32', 'irq', 'maps',
                    'objects'), suffix=r'\b'), Keyword),
            # Kernel object types
            (words(('aep', 'asid_pool', 'cnode', 'ep', 'frame', 'io_device',
                    'io_ports', 'io_pt', 'notification', 'pd', 'pt', 'tcb',
                    'ut', 'vcpu'), suffix=r'\b'), Keyword.Type),

            # Properties
            (words(('asid', 'addr', 'badge', 'cached', 'dom', 'domainID', 'elf',
                    'fault_ep', 'G', 'guard', 'guard_size', 'init', 'ip',
                    'prio', 'sp', 'R', 'RG', 'RX', 'RW', 'RWG', 'RWX', 'W',
                    'WG', 'WX', 'level', 'masked', 'master_reply', 'paddr',
                    'ports', 'reply', 'uncached'), suffix=r'\b'),
             Keyword.Reserved),

            # Literals
            (r'0[xX][\da-fA-F]+', Number.Hex),
            (r'\d+(\.\d+)?(k|M)?', Number),  # optional k/M magnitude suffix
            (words(('bits',), suffix=r'\b'), Number),
            (words(('cspace', 'vspace', 'reply_slot', 'caller_slot',
                    'ipc_buffer_slot'), suffix=r'\b'), Number),

            # Identifiers (may also contain '-', '@' and '.')
            (r'[a-zA-Z_][-@\.\w]*', Name),
        ],
    }
class RedcodeLexer(RegexLexer):
    """
    A simple Redcode lexer based on ICWS'94.
    Contributed by Adam Blinkinsop <blinks@acm.org>.
    """
    name = 'Redcode'
    aliases = ['redcode']
    filenames = ['*.cw']
    url = 'https://en.wikipedia.org/wiki/Core_War'
    version_added = '0.8'

    # ICWS'94 opcodes plus the ORG/EQU/END pseudo-ops.
    opcodes = ('DAT', 'MOV', 'ADD', 'SUB', 'MUL', 'DIV', 'MOD',
               'JMP', 'JMZ', 'JMN', 'DJN', 'CMP', 'SLT', 'SPL',
               'ORG', 'EQU', 'END')
    # Instruction modifiers, e.g. the '.AB' in 'MOV.AB'.
    modifiers = ('A', 'B', 'AB', 'BA', 'F', 'X', 'I')

    tokens = {
        'root': [
            # Whitespace:
            (r'\s+', Whitespace),
            (r';.*$', Comment.Single),
            # Lexemes:
            #  Identifiers
            (r'\b({})\b'.format('|'.join(opcodes)), Name.Function),
            (r'\b({})\b'.format('|'.join(modifiers)), Name.Decorator),
            # NOTE(review): '\w+' requires at least one character after the
            # first, so single-character labels never match this rule —
            # presumably to avoid clashing with modifiers; confirm.
            (r'[A-Za-z_]\w+', Name),
            #  Operators
            (r'[-+*/%]', Operator),
            (r'[#$@<>]', Operator),  # mode
            (r'[.,]', Punctuation),  # mode
            #  Numbers
            (r'[-+]?\d+', Number.Integer),
        ],
    }
class AheuiLexer(RegexLexer):
    """
    Aheui is esoteric language based on Korean alphabets.
    """

    name = 'Aheui'
    url = 'http://aheui.github.io/'
    aliases = ['aheui']
    filenames = ['*.aheui']
    version_added = ''

    tokens = {
        'root': [
            # Ranges of Hangul syllables whose leading consonant forms a
            # valid Aheui instruction; any other character is treated as a
            # comment (ignored by the language).
            ('['
             '나-낳냐-냫너-넣녀-녛노-놓뇨-눟뉴-닇'
             '다-닿댜-댷더-덯뎌-뎧도-돟됴-둫듀-딓'
             '따-땋땨-떃떠-떻뗘-뗳또-똫뚀-뚷뜌-띟'
             '라-랗랴-럏러-렇려-렿로-롷료-뤃류-릫'
             '마-맣먀-먛머-멓며-몋모-뫃묘-뭏뮤-믷'
             '바-밯뱌-뱧버-벟벼-볗보-봏뵤-붛뷰-빃'
             '빠-빻뺘-뺳뻐-뻫뼈-뼣뽀-뽛뾰-뿧쀼-삏'
             '사-샇샤-샿서-섷셔-셯소-솧쇼-숳슈-싛'
             '싸-쌓쌰-썋써-쎃쎠-쎻쏘-쏳쑈-쑿쓔-씧'
             '자-잫쟈-쟣저-젛져-졓조-좋죠-줗쥬-즿'
             '차-챃챠-챻처-첳쳐-쳫초-촣쵸-춯츄-칗'
             '카-캏캬-컇커-컿켜-켷코-콯쿄-쿻큐-킣'
             '타-탛탸-턓터-텋텨-톃토-톻툐-퉇튜-틯'
             '파-팧퍄-퍟퍼-펗펴-폏포-퐇표-풓퓨-픻'
             '하-핳햐-햫허-헣혀-혛호-홓효-훟휴-힇'
             ']', Operator),
            ('.', Comment),
        ],
    }

View File

@@ -0,0 +1,76 @@
"""
pygments.lexers.ezhil
~~~~~~~~~~~~~~~~~~~~~
Pygments lexers for Ezhil language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words
from pygments.token import Keyword, Comment, Name, String, Number, \
Punctuation, Operator, Whitespace
__all__ = ['EzhilLexer']
class EzhilLexer(RegexLexer):
    """
    Lexer for Ezhil, a Tamil script-based programming language.
    """
    name = 'Ezhil'
    url = 'http://ezhillang.org'
    aliases = ['ezhil']
    filenames = ['*.n']
    mimetypes = ['text/x-ezhil']
    version_added = '2.1'

    # Refer to tamil.utf8.tamil_letters from open-tamil for a stricter version of this.
    # This much simpler version is close enough, and includes combining marks.
    _TALETTERS = '[a-zA-Z_]|[\u0b80-\u0bff]'

    tokens = {
        'root': [
            include('keywords'),
            (r'#.*$', Comment.Single),
            (r'[@+/*,^\-%]|[!<>=]=?|&&?|\|\|?', Operator),
            ('இல்', Operator.Word),
            # Built-in functions: English names plus Tamil-script names.
            (words(('assert', 'max', 'min',
                    'நீளம்', 'சரம்_இடமாற்று', 'சரம்_கண்டுபிடி',
                    'பட்டியல்', 'பின்இணை', 'வரிசைப்படுத்து',
                    'எடு', 'தலைகீழ்', 'நீட்டிக்க', 'நுழைக்க', 'வை',
                    'கோப்பை_திற', 'கோப்பை_எழுது', 'கோப்பை_மூடு',
                    'pi', 'sin', 'cos', 'tan', 'sqrt', 'hypot', 'pow',
                    'exp', 'log', 'log10', 'exit',
                    ), suffix=r'\b'), Name.Builtin),
            (r'(True|False)\b', Keyword.Constant),
            (r'[^\S\n]+', Whitespace),
            include('identifier'),
            include('literal'),
            (r'[(){}\[\]:;.]', Punctuation),
        ],
        'keywords': [
            # Tamil-script statement keywords.
            # NOTE(review): 'நிரல்பாகம்' appears twice in this alternation —
            # harmless but presumably unintentional; confirm upstream.
            ('பதிப்பி|தேர்ந்தெடு|தேர்வு|ஏதேனில்|ஆனால்|இல்லைஆனால்|இல்லை|ஆக|ஒவ்வொன்றாக|இல்|வரை|செய்|முடியேனில்|பின்கொடு|முடி|நிரல்பாகம்|தொடர்|நிறுத்து|நிரல்பாகம்', Keyword),
        ],
        'identifier': [
            # Identifier: a Tamil/Latin letter followed by letters or digits.
            ('(?:'+_TALETTERS+')(?:[0-9]|'+_TALETTERS+')*', Name),
        ],
        'literal': [
            (r'".*?"', String),
            (r'\d+((\.\d*)?[eE][+-]?\d+|\.\d*)', Number.Float),
            (r'\d+', Number.Integer),
        ]
    }

    def analyse_text(text):
        """This language uses Tamil-script. We'll assume that if there's a
        decent amount of Tamil-characters, it's this language. This assumption
        is obviously horribly off if someone uses string literals in tamil
        in another language."""
        # Implicitly returns None (no score) below the threshold.
        if len(re.findall(r'[\u0b80-\u0bff]', text)) > 10:
            return 0.25

    def __init__(self, **options):
        super().__init__(**options)
        # Default to UTF-8 so Tamil-script source decodes correctly unless
        # the caller overrides the encoding option.
        self.encoding = options.get('encoding', 'utf-8')

View File

@@ -0,0 +1,363 @@
"""
pygments.lexers.factor
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Factor language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, default, words
from pygments.token import Text, Comment, Keyword, Name, String, Number, \
Whitespace, Punctuation
__all__ = ['FactorLexer']
class FactorLexer(RegexLexer):
    """
    Lexer for the Factor language.

    The builtin vocabularies below are each compiled with ``words(...,
    suffix=r'(\\s+)')`` so the trailing whitespace is captured as its own
    group and emitted separately via ``bygroups`` in the token rules.
    """
    name = 'Factor'
    url = 'http://factorcode.org'
    aliases = ['factor']
    filenames = ['*.factor']
    mimetypes = ['text/x-factor']
    version_added = '1.4'

    # Core stack-shuffling words and combinators from the kernel vocabulary.
    builtin_kernel = words((
        '-rot', '2bi', '2bi@', '2bi*', '2curry', '2dip', '2drop', '2dup', '2keep', '2nip',
        '2over', '2tri', '2tri@', '2tri*', '3bi', '3curry', '3dip', '3drop', '3dup', '3keep',
        '3tri', '4dip', '4drop', '4dup', '4keep', '<wrapper>', '=', '>boolean', 'clone',
        '?', '?execute', '?if', 'and', 'assert', 'assert=', 'assert?', 'bi', 'bi-curry',
        'bi-curry@', 'bi-curry*', 'bi@', 'bi*', 'boa', 'boolean', 'boolean?', 'both?',
        'build', 'call', 'callstack', 'callstack>array', 'callstack?', 'clear', '(clone)',
        'compose', 'compose?', 'curry', 'curry?', 'datastack', 'die', 'dip', 'do', 'drop',
        'dup', 'dupd', 'either?', 'eq?', 'equal?', 'execute', 'hashcode', 'hashcode*',
        'identity-hashcode', 'identity-tuple', 'identity-tuple?', 'if', 'if*',
        'keep', 'loop', 'most', 'new', 'nip', 'not', 'null', 'object', 'or', 'over',
        'pick', 'prepose', 'retainstack', 'rot', 'same?', 'swap', 'swapd', 'throw',
        'tri', 'tri-curry', 'tri-curry@', 'tri-curry*', 'tri@', 'tri*', 'tuple',
        'tuple?', 'unless', 'unless*', 'until', 'when', 'when*', 'while', 'with',
        'wrapper', 'wrapper?', 'xor'), suffix=r'(\s+)')

    # Associative-mapping (assoc) vocabulary words.
    builtin_assocs = words((
        '2cache', '<enum>', '>alist', '?at', '?of', 'assoc', 'assoc-all?',
        'assoc-any?', 'assoc-clone-like', 'assoc-combine', 'assoc-diff',
        'assoc-diff!', 'assoc-differ', 'assoc-each', 'assoc-empty?',
        'assoc-filter', 'assoc-filter!', 'assoc-filter-as', 'assoc-find',
        'assoc-hashcode', 'assoc-intersect', 'assoc-like', 'assoc-map',
        'assoc-map-as', 'assoc-partition', 'assoc-refine', 'assoc-size',
        'assoc-stack', 'assoc-subset?', 'assoc-union', 'assoc-union!',
        'assoc=', 'assoc>map', 'assoc?', 'at', 'at+', 'at*', 'cache', 'change-at',
        'clear-assoc', 'delete-at', 'delete-at*', 'enum', 'enum?', 'extract-keys',
        'inc-at', 'key?', 'keys', 'map>assoc', 'maybe-set-at', 'new-assoc', 'of',
        'push-at', 'rename-at', 'set-at', 'sift-keys', 'sift-values', 'substitute',
        'unzip', 'value-at', 'value-at*', 'value?', 'values', 'zip'), suffix=r'(\s+)')

    # Dataflow combinators (cleave/spread/case and friends).
    builtin_combinators = words((
        '2cleave', '2cleave>quot', '3cleave', '3cleave>quot', '4cleave',
        '4cleave>quot', 'alist>quot', 'call-effect', 'case', 'case-find',
        'case>quot', 'cleave', 'cleave>quot', 'cond', 'cond>quot', 'deep-spread>quot',
        'execute-effect', 'linear-case-quot', 'no-case', 'no-case?', 'no-cond',
        'no-cond?', 'recursive-hashcode', 'shallow-spread>quot', 'spread',
        'to-fixed-point', 'wrong-values', 'wrong-values?'), suffix=r'(\s+)')

    # Math vocabulary words.
    builtin_math = words((
        '-', '/', '/f', '/i', '/mod', '2/', '2^', '<', '<=', '<fp-nan>', '>',
        '>=', '>bignum', '>fixnum', '>float', '>integer', '(all-integers?)',
        '(each-integer)', '(find-integer)', '*', '+', '?1+',
        'abs', 'align', 'all-integers?', 'bignum', 'bignum?', 'bit?', 'bitand',
        'bitnot', 'bitor', 'bits>double', 'bits>float', 'bitxor', 'complex',
        'complex?', 'denominator', 'double>bits', 'each-integer', 'even?',
        'find-integer', 'find-last-integer', 'fixnum', 'fixnum?', 'float',
        'float>bits', 'float?', 'fp-bitwise=', 'fp-infinity?', 'fp-nan-payload',
        'fp-nan?', 'fp-qnan?', 'fp-sign', 'fp-snan?', 'fp-special?',
        'if-zero', 'imaginary-part', 'integer', 'integer>fixnum',
        'integer>fixnum-strict', 'integer?', 'log2', 'log2-expects-positive',
        'log2-expects-positive?', 'mod', 'neg', 'neg?', 'next-float',
        'next-power-of-2', 'number', 'number=', 'number?', 'numerator', 'odd?',
        'out-of-fixnum-range', 'out-of-fixnum-range?', 'power-of-2?',
        'prev-float', 'ratio', 'ratio?', 'rational', 'rational?', 'real',
        'real-part', 'real?', 'recip', 'rem', 'sgn', 'shift', 'sq', 'times',
        'u<', 'u<=', 'u>', 'u>=', 'unless-zero', 'unordered?', 'when-zero',
        'zero?'), suffix=r'(\s+)')

    # Sequence-manipulation vocabulary words.
    builtin_sequences = words((
        '1sequence', '2all?', '2each', '2map', '2map-as', '2map-reduce', '2reduce',
        '2selector', '2sequence', '3append', '3append-as', '3each', '3map', '3map-as',
        '3sequence', '4sequence', '<repetition>', '<reversed>', '<slice>', '?first',
        '?last', '?nth', '?second', '?set-nth', 'accumulate', 'accumulate!',
        'accumulate-as', 'all?', 'any?', 'append', 'append!', 'append-as',
        'assert-sequence', 'assert-sequence=', 'assert-sequence?',
        'binary-reduce', 'bounds-check', 'bounds-check?', 'bounds-error',
        'bounds-error?', 'but-last', 'but-last-slice', 'cartesian-each',
        'cartesian-map', 'cartesian-product', 'change-nth', 'check-slice',
        'check-slice-error', 'clone-like', 'collapse-slice', 'collector',
        'collector-for', 'concat', 'concat-as', 'copy', 'count', 'cut', 'cut-slice',
        'cut*', 'delete-all', 'delete-slice', 'drop-prefix', 'each', 'each-from',
        'each-index', 'empty?', 'exchange', 'filter', 'filter!', 'filter-as', 'find',
        'find-from', 'find-index', 'find-index-from', 'find-last', 'find-last-from',
        'first', 'first2', 'first3', 'first4', 'flip', 'follow', 'fourth', 'glue', 'halves',
        'harvest', 'head', 'head-slice', 'head-slice*', 'head*', 'head?',
        'if-empty', 'immutable', 'immutable-sequence', 'immutable-sequence?',
        'immutable?', 'index', 'index-from', 'indices', 'infimum', 'infimum-by',
        'insert-nth', 'interleave', 'iota', 'iota-tuple', 'iota-tuple?', 'join',
        'join-as', 'last', 'last-index', 'last-index-from', 'length', 'lengthen',
        'like', 'longer', 'longer?', 'longest', 'map', 'map!', 'map-as', 'map-find',
        'map-find-last', 'map-index', 'map-integers', 'map-reduce', 'map-sum',
        'max-length', 'member-eq?', 'member?', 'midpoint@', 'min-length',
        'mismatch', 'move', 'new-like', 'new-resizable', 'new-sequence',
        'non-negative-integer-expected', 'non-negative-integer-expected?',
        'nth', 'nths', 'pad-head', 'pad-tail', 'padding', 'partition', 'pop', 'pop*',
        'prefix', 'prepend', 'prepend-as', 'produce', 'produce-as', 'product', 'push',
        'push-all', 'push-either', 'push-if', 'reduce', 'reduce-index', 'remove',
        'remove!', 'remove-eq', 'remove-eq!', 'remove-nth', 'remove-nth!', 'repetition',
        'repetition?', 'replace-slice', 'replicate', 'replicate-as', 'rest',
        'rest-slice', 'reverse', 'reverse!', 'reversed', 'reversed?', 'second',
        'selector', 'selector-for', 'sequence', 'sequence-hashcode', 'sequence=',
        'sequence?', 'set-first', 'set-fourth', 'set-last', 'set-length', 'set-nth',
        'set-second', 'set-third', 'short', 'shorten', 'shorter', 'shorter?',
        'shortest', 'sift', 'slice', 'slice-error', 'slice-error?', 'slice?',
        'snip', 'snip-slice', 'start', 'start*', 'subseq', 'subseq?', 'suffix',
        'suffix!', 'sum', 'sum-lengths', 'supremum', 'supremum-by', 'surround', 'tail',
        'tail-slice', 'tail-slice*', 'tail*', 'tail?', 'third', 'trim',
        'trim-head', 'trim-head-slice', 'trim-slice', 'trim-tail', 'trim-tail-slice',
        'unclip', 'unclip-last', 'unclip-last-slice', 'unclip-slice', 'unless-empty',
        'virtual-exemplar', 'virtual-sequence', 'virtual-sequence?', 'virtual@',
        'when-empty'), suffix=r'(\s+)')

    # Dynamic-variable / namespace vocabulary words.
    builtin_namespaces = words((
        '+@', 'change', 'change-global', 'counter', 'dec', 'get', 'get-global',
        'global', 'inc', 'init-namespaces', 'initialize', 'is-global', 'make-assoc',
        'namespace', 'namestack', 'off', 'on', 'set', 'set-global', 'set-namestack',
        'toggle', 'with-global', 'with-scope', 'with-variable', 'with-variables'),
        suffix=r'(\s+)')

    # Fixed-size array vocabulary words.
    builtin_arrays = words((
        '1array', '2array', '3array', '4array', '<array>', '>array', 'array',
        'array?', 'pair', 'pair?', 'resize-array'), suffix=r'(\s+)')

    # Stream I/O vocabulary words.
    builtin_io = words((
        '(each-stream-block-slice)', '(each-stream-block)',
        '(stream-contents-by-block)', '(stream-contents-by-element)',
        '(stream-contents-by-length-or-block)',
        '(stream-contents-by-length)', '+byte+', '+character+',
        'bad-seek-type', 'bad-seek-type?', 'bl', 'contents', 'each-block',
        'each-block-size', 'each-block-slice', 'each-line', 'each-morsel',
        'each-stream-block', 'each-stream-block-slice', 'each-stream-line',
        'error-stream', 'flush', 'input-stream', 'input-stream?',
        'invalid-read-buffer', 'invalid-read-buffer?', 'lines', 'nl',
        'output-stream', 'output-stream?', 'print', 'read', 'read-into',
        'read-partial', 'read-partial-into', 'read-until', 'read1', 'readln',
        'seek-absolute', 'seek-absolute?', 'seek-end', 'seek-end?',
        'seek-input', 'seek-output', 'seek-relative', 'seek-relative?',
        'stream-bl', 'stream-contents', 'stream-contents*', 'stream-copy',
        'stream-copy*', 'stream-element-type', 'stream-flush',
        'stream-length', 'stream-lines', 'stream-nl', 'stream-print',
        'stream-read', 'stream-read-into', 'stream-read-partial',
        'stream-read-partial-into', 'stream-read-partial-unsafe',
        'stream-read-unsafe', 'stream-read-until', 'stream-read1',
        'stream-readln', 'stream-seek', 'stream-seekable?', 'stream-tell',
        'stream-write', 'stream-write1', 'tell-input', 'tell-output',
        'with-error-stream', 'with-error-stream*', 'with-error>output',
        'with-input-output+error-streams',
        'with-input-output+error-streams*', 'with-input-stream',
        'with-input-stream*', 'with-output-stream', 'with-output-stream*',
        'with-output>error', 'with-output+error-stream',
        'with-output+error-stream*', 'with-streams', 'with-streams*',
        'write', 'write1'), suffix=r'(\s+)')

    # String vocabulary words.
    builtin_strings = words((
        '1string', '<string>', '>string', 'resize-string', 'string',
        'string?'), suffix=r'(\s+)')

    # Growable-vector vocabulary words.
    builtin_vectors = words((
        '1vector', '<vector>', '>vector', '?push', 'vector', 'vector?'),
        suffix=r'(\s+)')

    # Continuation / error-handling vocabulary words.
    builtin_continuations = words((
        '<condition>', '<continuation>', '<restart>', 'attempt-all',
        'attempt-all-error', 'attempt-all-error?', 'callback-error-hook',
        'callcc0', 'callcc1', 'cleanup', 'compute-restarts', 'condition',
        'condition?', 'continuation', 'continuation?', 'continue',
        'continue-restart', 'continue-with', 'current-continuation',
        'error', 'error-continuation', 'error-in-thread', 'error-thread',
        'ifcc', 'ignore-errors', 'in-callback?', 'original-error', 'recover',
        'restart', 'restart?', 'restarts', 'rethrow', 'rethrow-restarts',
        'return', 'return-continuation', 'thread-error-hook', 'throw-continue',
        'throw-restarts', 'with-datastack', 'with-return'), suffix=r'(\s+)')

    tokens = {
        'root': [
            # factor allows a file to start with a shebang
            (r'#!.*$', Comment.Preproc),
            default('base'),
        ],
        'base': [
            (r'\s+', Whitespace),

            # defining words
            (r'((?:MACRO|MEMO|TYPED)?:[:]?)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function)),
            (r'(M:[:]?)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace,
                      Name.Function)),
            (r'(C:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function, Whitespace,
                      Name.Class)),
            (r'(GENERIC:)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function)),
            (r'(HOOK:|GENERIC#)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function, Whitespace,
                      Name.Function)),
            # '( ' opens a stack-effect declaration.
            (r'(\()(\s)', bygroups(Name.Function, Whitespace), 'stackeffect'),
            (r'(;)(\s)', bygroups(Keyword, Whitespace)),

            # imports and namespaces
            (r'(USING:)(\s+)',
             bygroups(Keyword.Namespace, Whitespace), 'vocabs'),
            (r'(USE:|UNUSE:|IN:|QUALIFIED:)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
            (r'(QUALIFIED-WITH:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace,
                      Whitespace, Name.Namespace)),
            (r'(FROM:|EXCLUDE:)(\s+)(\S+)(\s+=>\s)',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace,
                      Whitespace), 'words'),
            (r'(RENAME:)(\s+)(\S+)(\s+)(\S+)(\s+)(=>)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Function, Whitespace,
                      Name.Namespace, Whitespace, Punctuation, Whitespace,
                      Name.Function)),
            (r'(ALIAS:|TYPEDEF:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Function, Whitespace,
                      Name.Function)),
            (r'(DEFER:|FORGET:|POSTPONE:)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Function)),

            # tuples and classes
            (r'(TUPLE:|ERROR:)(\s+)(\S+)(\s+)(<)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace, Punctuation,
                      Whitespace, Name.Class), 'slots'),
            (r'(TUPLE:|ERROR:|BUILTIN:)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class), 'slots'),
            (r'(MIXIN:|UNION:|INTERSECTION:)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class)),
            (r'(PREDICATE:)(\s+)(\S+)(\s+)(<)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace,
                      Punctuation, Whitespace, Name.Class)),
            (r'(C:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function, Whitespace, Name.Class)),
            (r'(INSTANCE:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace, Name.Class)),
            (r'(SLOT:)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Function)),
            (r'(SINGLETON:)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Class)),
            (r'SINGLETONS:', Keyword, 'classes'),

            # other syntax
            (r'(CONSTANT:|SYMBOL:|MAIN:|HELP:)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function)),
            (r'(SYMBOLS:)(\s+)', bygroups(Keyword, Whitespace), 'words'),
            (r'(SYNTAX:)(\s+)', bygroups(Keyword, Whitespace)),
            (r'(ALIEN:)(\s+)', bygroups(Keyword, Whitespace)),
            (r'(STRUCT:)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Class)),
            # FFI declaration: FUNCTION: return-type name ( args )
            (r'(FUNCTION:)(\s+)'
             r'(\S+)(\s+)(\S+)(\s+)'
             r'(\()(\s+)([^)]+)(\))(\s)',
             bygroups(Keyword.Namespace, Whitespace,
                      Text, Whitespace, Name.Function, Whitespace,
                      Punctuation, Whitespace, Text, Punctuation, Whitespace)),
            (r'(FUNCTION-ALIAS:)(\s+)'
             r'(\S+)(\s+)(\S+)(\s+)'
             r'(\S+)(\s+)'
             r'(\()(\s+)([^)]+)(\))(\s)',
             bygroups(Keyword.Namespace, Whitespace,
                      Text, Whitespace, Name.Function, Whitespace,
                      Name.Function, Whitespace,
                      Punctuation, Whitespace, Text, Punctuation, Whitespace)),

            # vocab.private
            (r'(<PRIVATE|PRIVATE>)(\s)', bygroups(Keyword.Namespace, Whitespace)),

            # strings
            (r'"""\s(?:.|\n)*?\s"""', String),
            (r'"(?:\\\\|\\"|[^"])*"', String),
            (r'(\S+")(\s+)((?:\\\\|\\"|[^"])*")',
             bygroups(String, Whitespace, String)),
            (r'(CHAR:)(\s+)(\\[\\abfnrstv]|[^\\]\S*)(\s)',
             bygroups(String.Char, Whitespace, String.Char, Whitespace)),

            # comments
            (r'!\s+.*$', Comment),
            (r'#!\s+.*$', Comment),
            (r'/\*\s+(?:.|\n)*?\s\*/', Comment),

            # boolean constants
            (r'[tf]\b', Name.Constant),

            # symbols and literals
            (r'[\\$]\s+\S+', Name.Constant),
            (r'M\\\s+\S+\s+\S+', Name.Constant),

            # numbers
            (r'[+-]?(?:[\d,]*\d)?\.(?:\d([\d,]*\d)?)?(?:[eE][+-]?\d+)?\s', Number),
            (r'[+-]?\d(?:[\d,]*\d)?(?:[eE][+-]?\d+)?\s', Number),
            (r'0x[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
            (r'NAN:\s+[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
            (r'0b[01]+\s', Number.Bin),
            (r'0o[0-7]+\s', Number.Oct),
            (r'(?:\d([\d,]*\d)?)?\+\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
            (r'(?:\-\d([\d,]*\d)?)?\-\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),

            # keywords
            (r'(?:deprecated|final|foldable|flushable|inline|recursive)\s',
             Keyword),

            # builtins; each pattern captures the word and its trailing
            # whitespace as separate groups (see the words() suffix above).
            (builtin_kernel, bygroups(Name.Builtin, Whitespace)),
            (builtin_assocs, bygroups(Name.Builtin, Whitespace)),
            (builtin_combinators, bygroups(Name.Builtin, Whitespace)),
            (builtin_math, bygroups(Name.Builtin, Whitespace)),
            (builtin_sequences, bygroups(Name.Builtin, Whitespace)),
            (builtin_namespaces, bygroups(Name.Builtin, Whitespace)),
            (builtin_arrays, bygroups(Name.Builtin, Whitespace)),
            (builtin_io, bygroups(Name.Builtin, Whitespace)),
            (builtin_strings, bygroups(Name.Builtin, Whitespace)),
            (builtin_vectors, bygroups(Name.Builtin, Whitespace)),
            (builtin_continuations, bygroups(Name.Builtin, Whitespace)),

            # everything else is text
            (r'\S+', Text),
        ],
        'stackeffect': [
            (r'\s+', Whitespace),
            (r'(\()(\s+)', bygroups(Name.Function, Whitespace), 'stackeffect'),
            (r'(\))(\s+)', bygroups(Name.Function, Whitespace), '#pop'),
            (r'(--)(\s+)', bygroups(Name.Function, Whitespace)),
            (r'\S+', Name.Variable),
        ],
        'slots': [
            (r'\s+', Whitespace),
            (r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
            (r'(\{)(\s+)(\S+)(\s+)([^}]+)(\s+)(\})(\s+)',
             bygroups(Text, Whitespace, Name.Variable, Whitespace,
                      Text, Whitespace, Text, Whitespace)),
            (r'\S+', Name.Variable),
        ],
        'vocabs': [
            (r'\s+', Whitespace),
            (r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
            (r'\S+', Name.Namespace),
        ],
        'classes': [
            (r'\s+', Whitespace),
            (r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
            (r'\S+', Name.Class),
        ],
        'words': [
            (r'\s+', Whitespace),
            (r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
            (r'\S+', Name.Function),
        ],
    }

View File

@@ -0,0 +1,251 @@
"""
pygments.lexers.fantom
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Fantom language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from string import Template
from pygments.lexer import RegexLexer, include, bygroups, using, \
this, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal, Whitespace
__all__ = ['FantomLexer']
class FantomLexer(RegexLexer):
    """
    For Fantom source code.

    Many rules below are generated from regex templates via the ``s()``
    helper, which expands ``$id``/``$type``/``$pod``/``$eos`` placeholders
    with ``string.Template``.
    """
    name = 'Fantom'
    aliases = ['fan']
    filenames = ['*.fan']
    mimetypes = ['application/x-fantom']
    url = 'https://www.fantom.org'
    version_added = '1.5'

    # often used regexes
    def s(template):
        """Expand the common placeholders in *template* and return the
        resulting regex source string.

        (Parameter renamed from ``str`` so it no longer shadows the builtin.)
        """
        return Template(template).substitute(
            dict(
                pod=r'[\"\w\.]+',
                eos=r'\n|;',
                id=r'[a-zA-Z_]\w*',
                # all chars which can be part of type definition. Starts with
                # either letter, or [ (maps), or | (funcs)
                type=r'(?:\[|[a-zA-Z_]|\|)[:\w\[\]|\->?]*?',
            )
        )

    tokens = {
        'comments': [
            (r'(?s)/\*.*?\*/', Comment.Multiline),           # Multiline
            (r'//.*?$', Comment.Single),                     # Single line
            # TODO: highlight references in fandocs
            (r'\*\*.*?$', Comment.Special),                  # Fandoc
            (r'#.*$', Comment.Single)                        # Shell-style
        ],
        'literals': [
            (r'\b-?[\d_]+(ns|ms|sec|min|hr|day)', Number),   # Duration
            (r'\b-?[\d_]*\.[\d_]+(ns|ms|sec|min|hr|day)', Number),  # Duration with dot
            (r'\b-?(\d+)?\.\d+(f|F|d|D)?', Number.Float),    # Float/Decimal
            (r'\b-?0x[0-9a-fA-F_]+', Number.Hex),            # Hex
            (r'\b-?[\d_]+', Number.Integer),                 # Int
            (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char),  # Char
            (r'"', Punctuation, 'insideStr'),                # Opening quote
            (r'`', Punctuation, 'insideUri'),                # Opening accent
            (r'\b(true|false|null)\b', Keyword.Constant),    # Bool & null
            (r'(?:(\w+)(::))?(\w+)(<\|)(.*?)(\|>)',          # DSL
             bygroups(Name.Namespace, Punctuation, Name.Class,
                      Punctuation, String, Punctuation)),
            (r'(?:(\w+)(::))?(\w+)?(#)(\w+)?',               # Type/slot literal
             bygroups(Name.Namespace, Punctuation, Name.Class,
                      Punctuation, Name.Function)),
            (r'\[,\]', Literal),                             # Empty list
            (s(r'($type)(\[,\])'),                           # Typed empty list
             bygroups(using(this, state='inType'), Literal)),
            (r'\[:\]', Literal),                             # Empty Map
            (s(r'($type)(\[:\])'),
             bygroups(using(this, state='inType'), Literal)),
        ],
        'insideStr': [
            (r'\\\\', String.Escape),                        # Escaped backslash
            (r'\\"', String.Escape),                         # Escaped "
            (r'\\`', String.Escape),                         # Escaped `
            (r'\$\w+', String.Interpol),                     # Subst var
            (r'\$\{.*?\}', String.Interpol),                 # Subst expr
            (r'"', Punctuation, '#pop'),                     # Closing quot
            (r'.', String)                                   # String content
        ],
        'insideUri': [  # TODO: remove copy/paste str/uri
            (r'\\\\', String.Escape),                        # Escaped backslash
            (r'\\"', String.Escape),                         # Escaped "
            (r'\\`', String.Escape),                         # Escaped `
            (r'\$\w+', String.Interpol),                     # Subst var
            (r'\$\{.*?\}', String.Interpol),                 # Subst expr
            (r'`', Punctuation, '#pop'),                     # Closing tick
            (r'.', String.Backtick)                          # URI content
        ],
        'protectionKeywords': [
            (r'\b(public|protected|private|internal)\b', Keyword),
        ],
        'typeKeywords': [
            (r'\b(abstract|final|const|native|facet|enum)\b', Keyword),
        ],
        'methodKeywords': [
            (r'\b(abstract|native|once|override|static|virtual|final)\b',
             Keyword),
        ],
        'fieldKeywords': [
            (r'\b(abstract|const|final|native|override|static|virtual|'
             r'readonly)\b', Keyword)
        ],
        'otherKeywords': [
            (words((
                'try', 'catch', 'throw', 'finally', 'for', 'if', 'else', 'while',
                'as', 'is', 'isnot', 'switch', 'case', 'default', 'continue',
                'break', 'do', 'return', 'get', 'set'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            (r'\b(it|this|super)\b', Name.Builtin.Pseudo),
        ],
        'operators': [
            (r'\+\+|\-\-|\+|\-|\*|/|\|\||&&|<=>|<=|<|>=|>|=|!|\[|\]', Operator)
        ],
        'inType': [
            (r'[\[\]|\->:?]', Punctuation),
            (s(r'$id'), Name.Class),
            default('#pop'),
        ],
        'root': [
            include('comments'),
            include('protectionKeywords'),
            include('typeKeywords'),
            include('methodKeywords'),
            include('fieldKeywords'),
            include('literals'),
            include('otherKeywords'),
            include('operators'),
            (r'using\b', Keyword.Namespace, 'using'),        # Using stmt
            (r'@\w+', Name.Decorator, 'facet'),              # Symbol
            (r'(class|mixin)(\s+)(\w+)', bygroups(Keyword, Whitespace, Name.Class),
             'inheritance'),                                 # Inheritance list

            # Type var := val
            (s(r'($type)([ \t]+)($id)(\s*)(:=)'),
             bygroups(using(this, state='inType'), Whitespace,
                      Name.Variable, Whitespace, Operator)),

            # var := val
            (s(r'($id)(\s*)(:=)'),
             bygroups(Name.Variable, Whitespace, Operator)),

            # .someId( or ->someId( ###
            (s(r'(\.|(?:\->))($id)(\s*)(\()'),
             bygroups(Operator, Name.Function, Whitespace, Punctuation),
             'insideParen'),

            # .someId or ->someId
            (s(r'(\.|(?:\->))($id)'),
             bygroups(Operator, Name.Function)),

            # new makeXXX (
            (r'(new)(\s+)(make\w*)(\s*)(\()',
             bygroups(Keyword, Whitespace, Name.Function, Whitespace, Punctuation),
             'insideMethodDeclArgs'),

            # Type name (
            (s(r'($type)([ \t]+)'                            # Return type and whitespace
               r'($id)(\s*)(\()'),                           # method name + open brace
             bygroups(using(this, state='inType'), Whitespace,
                      Name.Function, Whitespace, Punctuation),
             'insideMethodDeclArgs'),

            # ArgType argName,
            (s(r'($type)(\s+)($id)(\s*)(,)'),
             bygroups(using(this, state='inType'), Whitespace, Name.Variable,
                      Whitespace, Punctuation)),

            # ArgType argName)
            # Covered in 'insideParen' state

            # ArgType argName -> ArgType|
            (s(r'($type)(\s+)($id)(\s*)(\->)(\s*)($type)(\|)'),
             bygroups(using(this, state='inType'), Whitespace, Name.Variable,
                      Whitespace, Punctuation, Whitespace, using(this, state='inType'),
                      Punctuation)),

            # ArgType argName|
            (s(r'($type)(\s+)($id)(\s*)(\|)'),
             bygroups(using(this, state='inType'), Whitespace, Name.Variable,
                      Whitespace, Punctuation)),

            # Type var
            (s(r'($type)([ \t]+)($id)'),
             bygroups(using(this, state='inType'), Whitespace,
                      Name.Variable)),

            (r'\(', Punctuation, 'insideParen'),
            (r'\{', Punctuation, 'insideBrace'),
            (r'\s+', Whitespace),
            (r'.', Text)
        ],
        'insideParen': [
            (r'\)', Punctuation, '#pop'),
            include('root'),
        ],
        'insideMethodDeclArgs': [
            (r'\)', Punctuation, '#pop'),
            (s(r'($type)(\s+)($id)(\s*)(\))'),
             bygroups(using(this, state='inType'), Whitespace, Name.Variable,
                      Whitespace, Punctuation), '#pop'),
            include('root'),
        ],
        'insideBrace': [
            (r'\}', Punctuation, '#pop'),
            include('root'),
        ],
        'inheritance': [
            (r'\s+', Whitespace),                            # Whitespace
            (r':|,', Punctuation),
            (r'(?:(\w+)(::))?(\w+)',
             bygroups(Name.Namespace, Punctuation, Name.Class)),
            (r'\{', Punctuation, '#pop')
        ],
        'using': [
            (r'[ \t]+', Whitespace),                         # consume whitespaces
            (r'(\[)(\w+)(\])',
             bygroups(Punctuation, Comment.Special, Punctuation)),  # ffi
            (r'(\")?([\w.]+)(\")?',
             bygroups(Punctuation, Name.Namespace, Punctuation)),   # podname
            (r'::', Punctuation, 'usingClass'),
            default('#pop')
        ],
        'usingClass': [
            (r'[ \t]+', Whitespace),                         # consume whitespaces
            (r'(as)(\s+)(\w+)',
             bygroups(Keyword.Declaration, Whitespace, Name.Class), '#pop:2'),
            (r'[\w$]+', Name.Class),
            default('#pop:2')                                # jump out to root state
        ],
        'facet': [
            (r'\s+', Whitespace),
            (r'\{', Punctuation, 'facetFields'),
            default('#pop')
        ],
        'facetFields': [
            include('comments'),
            include('literals'),
            include('operators'),
            (r'\s+', Whitespace),
            (r'(\s*)(\w+)(\s*)(=)', bygroups(Whitespace, Name, Whitespace, Operator)),
            (r'\}', Punctuation, '#pop'),
            # A second identical whitespace rule was removed here: it was
            # unreachable because the earlier r'\s+' rule always matches first.
            (r'.', Text)
        ],
    }

View File

@@ -0,0 +1,275 @@
"""
pygments.lexers.felix
~~~~~~~~~~~~~~~~~~~~~
Lexer for the Felix language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, default, words, \
combined
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['FelixLexer']
class FelixLexer(RegexLexer):
    """
    For Felix source code.
    """
    name = 'Felix'
    url = 'http://www.felix-lang.org'
    aliases = ['felix', 'flx']
    filenames = ['*.flx', '*.flxh']
    mimetypes = ['text/x-felix']
    version_added = '1.2'

    # Preprocessor directive names.
    # NOTE(review): this tuple does not appear to be referenced by the token
    # rules below — confirm before removing.
    preproc = (
        'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef',
    )

    # Reserved words of the language proper.
    keywords = (
        '_', '_deref', 'all', 'as',
        'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass',
        'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else',
        'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except',
        'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork',
        'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance',
        'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace',
        'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise',
        'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then',
        'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto',
        'when', 'whilst', 'with', 'yield',
    )

    # Words highlighted as decorators (compiler directives).
    keyword_directives = (
        '_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export',
        'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn',
        'package', 'private', 'pod', 'property', 'public', 'publish',
        'requires', 'todo', 'virtual', 'use',
    )

    # Binding forms.
    keyword_declarations = (
        'def', 'let', 'ref', 'val', 'var',
    )

    # Builtin type names.
    keyword_types = (
        'unit', 'void', 'any', 'bool',
        'byte', 'offset',
        'address', 'caddress', 'cvaddress', 'vaddress',
        'tiny', 'short', 'int', 'long', 'vlong',
        'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong',
        'int8', 'int16', 'int32', 'int64',
        'uint8', 'uint16', 'uint32', 'uint64',
        'float', 'double', 'ldouble',
        'complex', 'dcomplex', 'lcomplex',
        'imaginary', 'dimaginary', 'limaginary',
        'char', 'wchar', 'uchar',
        'charp', 'charcp', 'ucharp', 'ucharcp',
        'string', 'wstring', 'ustring',
        'cont',
        'array', 'varray', 'list',
        'lvalue', 'opt', 'slice',
    )

    keyword_constants = (
        'false', 'true',
    )

    # Word-like operators.
    operator_words = (
        'and', 'not', 'in', 'is', 'isin', 'or', 'xor',
    )

    name_builtins = (
        '_svc', 'while',
    )

    name_pseudo = (
        'root', 'self', 'this',
    )

    # Optional size/signedness suffix shared by the integer-literal rules.
    decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?'

    tokens = {
        'root': [
            include('whitespace'),

            # Keywords
            (words(('axiom', 'ctor', 'fun', 'gen', 'proc', 'reduce',
                    'union'), suffix=r'\b'),
             Keyword, 'funcname'),
            (words(('class', 'cclass', 'cstruct', 'obj', 'struct'), suffix=r'\b'),
             Keyword, 'classname'),
            (r'(instance|module|typeclass)\b', Keyword, 'modulename'),

            (words(keywords, suffix=r'\b'), Keyword),
            (words(keyword_directives, suffix=r'\b'), Name.Decorator),
            (words(keyword_declarations, suffix=r'\b'), Keyword.Declaration),
            (words(keyword_types, suffix=r'\b'), Keyword.Type),
            (words(keyword_constants, suffix=r'\b'), Keyword.Constant),

            # Operators
            include('operators'),

            # Float Literal
            # -- Hex Float
            (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
             r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float),
            # -- DecimalFloat
            (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
             r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float),
            (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?',
             Number.Float),

            # IntegerLiteral
            # -- Binary
            (rf'0[Bb][01_]+{decimal_suffixes}', Number.Bin),
            # -- Octal
            (rf'0[0-7_]+{decimal_suffixes}', Number.Oct),
            # -- Hexadecimal
            (rf'0[xX][0-9a-fA-F_]+{decimal_suffixes}', Number.Hex),
            # -- Decimal
            (rf'(0|[1-9][0-9_]*){decimal_suffixes}', Number.Integer),

            # Strings
            # Raw/C-prefixed strings go straight to the quote state; other
            # prefixes also get the 'stringescape' rules layered on top.
            ('([rR][cC]?|[cC][rR])"""', String, 'tdqs'),
            ("([rR][cC]?|[cC][rR])'''", String, 'tsqs'),
            ('([rR][cC]?|[cC][rR])"', String, 'dqs'),
            ("([rR][cC]?|[cC][rR])'", String, 'sqs'),
            ('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')),
            ("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')),
            ('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')),
            ("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')),

            # Punctuation
            (r'[\[\]{}:(),;?]', Punctuation),

            # Labels
            (r'[a-zA-Z_]\w*:>', Name.Label),

            # Identifiers
            (r'({})\b'.format('|'.join(name_builtins)), Name.Builtin),
            (r'({})\b'.format('|'.join(name_pseudo)), Name.Builtin.Pseudo),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'whitespace': [
            (r'\s+', Whitespace),

            include('comment'),

            # Preprocessor
            (r'(#)(\s*)(if)(\s+)(0)',
             bygroups(Comment.Preproc, Whitespace, Comment.Preproc,
                      Whitespace, Comment.Preproc), 'if0'),
            (r'#', Comment.Preproc, 'macro'),
        ],
        'operators': [
            (r'({})\b'.format('|'.join(operator_words)), Operator.Word),
            (r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator),
        ],
        'comment': [
            (r'//(.*?)$', Comment.Single),
            (r'/[*]', Comment.Multiline, 'comment2'),
        ],
        # Nested /* ... */ comments: '#push' handles the nesting depth.
        'comment2': [
            (r'[^/*]', Comment.Multiline),
            (r'/[*]', Comment.Multiline, '#push'),
            (r'[*]/', Comment.Multiline, '#pop'),
            (r'[/*]', Comment.Multiline),
        ],
        # '#if 0' region: everything is a comment until the matching #endif.
        'if0': [
            (r'^(\s*)(#if.*?(?<!\\))(\n)',
             bygroups(Whitespace, Comment, Whitespace), '#push'),
            (r'^(\s*)(#endif.*?(?<!\\))(\n)',
             bygroups(Whitespace, Comment, Whitespace), '#pop'),
            (r'(.*?)(\n)', bygroups(Comment, Whitespace)),
        ],
        'macro': [
            include('comment'),
            (r'(import|include)(\s+)(<[^>]*?>)',
             bygroups(Comment.Preproc, Whitespace, String), '#pop'),
            (r'(import|include)(\s+)("[^"]*?")',
             bygroups(Comment.Preproc, Whitespace, String), '#pop'),
            (r"(import|include)(\s+)('[^']*?')",
             bygroups(Comment.Preproc, Whitespace, String), '#pop'),
            (r'[^/\n]+', Comment.Preproc),
            # (r'/[*](.|\n)*?[*]/', Comment),
            # (r'//.*?\n', Comment, '#pop'),
            (r'/', Comment.Preproc),
            # A backslash-continued line keeps us in the macro state.
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Whitespace, '#pop'),
        ],
        'funcname': [
            include('whitespace'),
            (r'[a-zA-Z_]\w*', Name.Function, '#pop'),
            # anonymous functions
            (r'(?=\()', Text, '#pop'),
        ],
        'classname': [
            include('whitespace'),
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            # anonymous classes
            (r'(?=\{)', Text, '#pop'),
        ],
        'modulename': [
            include('whitespace'),
            (r'\[', Punctuation, ('modulename2', 'tvarlist')),
            default('modulename2'),
        ],
        'modulename2': [
            include('whitespace'),
            (r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'),
        ],
        # Type-variable list inside [...] after a module-like keyword.
        'tvarlist': [
            include('whitespace'),
            include('operators'),
            (r'\[', Punctuation, '#push'),
            (r'\]', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'(with|where)\b', Keyword),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        'strings': [
            (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[E-GXc-giorsux%]', String.Interpol),
            (r'[^\\\'"%\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'%', String)
            # newlines are an error (use "nl" state)
        ],
        'nl': [
            (r'\n', String)
        ],
        'dqs': [
            (r'"', String, '#pop'),
            # included here again for raw strings
            (r'\\\\|\\"|\\\n', String.Escape),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            # included here again for raw strings
            (r"\\\\|\\'|\\\n", String.Escape),
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl')
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            include('strings'),
            include('nl')
        ],
    }

View File

@@ -0,0 +1,68 @@
"""
pygments.lexers.fift
~~~~~~~~~~~~~~~~~~~~
Lexers for fift.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include
from pygments.token import Literal, Comment, Name, String, Number, Whitespace
__all__ = ['FiftLexer']
class FiftLexer(RegexLexer):
    """
    For Fift source code.
    """
    name = 'Fift'
    aliases = ['fift', 'fif']
    filenames = ['*.fif']
    url = 'https://ton-blockchain.github.io/docs/fiftbase.pdf'
    version_added = ''

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            include('comments'),
            # '"' opens a string; the optional '.' / '+' prefix covers the
            # word-prefixed string forms ."..." and +"...".
            (r'[\.+]?\"', String, 'string'),

            # numbers
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'0b[01]+', Number.Bin),
            # NOTE(review): the optional fraction part here matches literal
            # double quotes around the slash ("/"), not a bare '/'. Looks
            # suspicious — confirm against the Fift grammar before changing.
            (r'-?[0-9]+("/"-?[0-9]+)?', Number.Decimal),

            # slices
            (r'b\{[01]+\}', Literal),
            (r'x\{[0-9a-fA-F_]+\}', Literal),

            # byte literal
            (r'B\{[0-9a-fA-F_]+\}', Literal),

            # treat anything as word
            (r'\S+', Name)
        ],
        'string': [
            (r'\\.', String.Escape),
            (r'\"', String, '#pop'),
            (r'[^\"\r\n\\]+', String)
        ],
        'comments': [
            # NOTE(review): Comment.Singleline is a non-standard (auto-created)
            # subtoken of Comment; the conventional token is Comment.Single.
            (r'//.*', Comment.Singleline),
            (r'/\*', Comment.Multiline, 'comment'),
        ],
        # Nested /* ... */ comments: '#push'/'#pop' track the nesting depth.
        'comment': [
            (r'[^/*]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
    }

View File

@@ -0,0 +1,81 @@
"""
pygments.lexers.floscript
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for FloScript
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['FloScriptLexer']
class FloScriptLexer(RegexLexer):
    """
    For FloScript configuration language source code.
    """

    name = 'FloScript'
    url = 'https://github.com/ioflo/ioflo'
    aliases = ['floscript', 'flo']
    filenames = ['*.flo']
    version_added = '2.4'

    # Class-body helper that builds rules for the interior of a string,
    # parameterized on the token type to emit. It runs at class definition
    # time, hence no `self` parameter.
    # NOTE(review): not referenced by the token states below — looks like it
    # was retained from the Python lexer this was modeled on; confirm before
    # removing.
    def innerstring_rules(ttype):
        return [
            # the old style '%s' % (...) string formatting
            (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[E-GXc-giorsux%]', String.Interpol),
            # backslashes, quotes and formatting signs must be parsed one at a time
            (r'[^\\\'"%\n]+', ttype),
            (r'[\'"\\]', ttype),
            # unhandled string formatting sign
            (r'%', ttype),
            # newlines are an error (use "nl" state)
        ]

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            # brackets, braces and separators
            (r'[]{}:(),;[]', Punctuation),
            # explicit line continuation: backslash followed by a newline
            (r'(\\)(\n)', bygroups(Text, Whitespace)),
            (r'\\', Text),
            # preposition/connective words act as operators in FloScript
            (r'(to|by|with|from|per|for|cum|qua|via|as|at|in|of|on|re|is|if|be|into|'
             r'and|not)\b', Operator.Word),
            (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
            # built-in verbs
            (r'(load|init|server|logger|log|loggee|first|over|under|next|done|timeout|'
             r'repeat|native|benter|enter|recur|exit|precur|renter|rexit|print|put|inc|'
             r'copy|set|aux|rear|raze|go|let|do|bid|ready|start|stop|run|abort|use|flo|'
             r'give|take)\b', Name.Builtin),
            # structural keywords
            (r'(frame|framer|house)\b', Keyword),
            ('"', String, 'string'),

            include('name'),
            include('numbers'),
            (r'#.+$', Comment.Single),
        ],
        'string': [
            ('[^"]+', String),
            ('"', String, '#pop'),
        ],
        'numbers': [
            # floats, with optional exponent and imaginary suffix
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
            (r'0[0-7]+j?', Number.Oct),
            (r'0[bB][01]+', Number.Bin),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+j?', Number.Integer)
        ],
        'name': [
            (r'@[\w.]+', Name.Decorator),
            (r'[a-zA-Z_]\w*', Name),
        ],
    }

View File

@@ -0,0 +1,178 @@
"""
pygments.lexers.forth
~~~~~~~~~~~~~~~~~~~~~
Lexer for the Forth language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Text, Comment, Keyword, Name, String, Number, \
Whitespace
__all__ = ['ForthLexer']
class ForthLexer(RegexLexer):
    """
    Lexer for Forth files.

    The bulk of the lexer is one large alternation of standard-wordset
    keywords; rule order within each state is significant, as the first
    matching rule wins.
    """

    name = 'Forth'
    url = 'https://www.forth.com/forth/'
    aliases = ['forth']
    filenames = ['*.frt', '*.fs']
    mimetypes = ['application/x-forth']
    version_added = '2.2'

    # Forth words are case-insensitive; MULTILINE lets `$` anchor the
    # backslash comment rule at each line end.
    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            # All comment types
            (r'\\.*?$', Comment.Single),
            (r'\([\s].*?\)', Comment.Single),
            # defining words. The next word is a new command name
            (r'(:|variable|constant|value|buffer:)(\s+)',
             bygroups(Keyword.Namespace, Whitespace), 'worddef'),
            # strings are rather simple
            (r'([.sc]")(\s+?)', bygroups(String, Whitespace), 'stringdef'),
            # keywords from the various wordsets
            # *** Wordset BLOCK
            (r'(blk|block|buffer|evaluate|flush|load|save-buffers|update|'
             # *** Wordset BLOCK-EXT
             r'empty-buffers|list|refill|scr|thru|'
             # *** Wordset CORE
             r'\#s|\*\/mod|\+loop|\/mod|0<|0=|1\+|1-|2!|'
             r'2\*|2\/|2@|2drop|2dup|2over|2swap|>body|'
             r'>in|>number|>r|\?dup|abort|abort\"|abs|'
             r'accept|align|aligned|allot|and|base|begin|'
             r'bl|c!|c,|c@|cell\+|cells|char|char\+|'
             r'chars|constant|count|cr|create|decimal|'
             r'depth|do|does>|drop|dup|else|emit|environment\?|'
             r'evaluate|execute|exit|fill|find|fm\/mod|'
             r'here|hold|i|if|immediate|invert|j|key|'
             r'leave|literal|loop|lshift|m\*|max|min|'
             r'mod|move|negate|or|over|postpone|quit|'
             r'r>|r@|recurse|repeat|rot|rshift|s\"|s>d|'
             r'sign|sm\/rem|source|space|spaces|state|swap|'
             r'then|type|u\.|u\<|um\*|um\/mod|unloop|until|'
             r'variable|while|word|xor|\[char\]|\[\'\]|'
             r'@|!|\#|<\#|\#>|:|;|\+|-|\*|\/|,|<|>|\|1\+|1-|\.|'
             # *** Wordset CORE-EXT
             r'\.r|0<>|'
             r'0>|2>r|2r>|2r@|:noname|\?do|again|c\"|'
             r'case|compile,|endcase|endof|erase|false|'
             r'hex|marker|nip|of|pad|parse|pick|refill|'
             r'restore-input|roll|save-input|source-id|to|'
             r'true|tuck|u\.r|u>|unused|value|within|'
             r'\[compile\]|'
             # *** Wordset CORE-EXT-obsolescent
             r'\#tib|convert|expect|query|span|'
             r'tib|'
             # *** Wordset DOUBLE
             r'2constant|2literal|2variable|d\+|d-|'
             r'd\.|d\.r|d0<|d0=|d2\*|d2\/|d<|d=|d>s|'
             r'dabs|dmax|dmin|dnegate|m\*\/|m\+|'
             # *** Wordset DOUBLE-EXT
             r'2rot|du<|'
             # *** Wordset EXCEPTION
             r'catch|throw|'
             # *** Wordset EXCEPTION-EXT
             r'abort|abort\"|'
             # *** Wordset FACILITY
             r'at-xy|key\?|page|'
             # *** Wordset FACILITY-EXT
             r'ekey|ekey>char|ekey\?|emit\?|ms|time&date|'
             # *** Wordset FILE
             r'BIN|CLOSE-FILE|CREATE-FILE|DELETE-FILE|FILE-POSITION|'
             r'FILE-SIZE|INCLUDE-FILE|INCLUDED|OPEN-FILE|R\/O|'
             r'R\/W|READ-FILE|READ-LINE|REPOSITION-FILE|RESIZE-FILE|'
             r'S\"|SOURCE-ID|W/O|WRITE-FILE|WRITE-LINE|'
             # *** Wordset FILE-EXT
             r'FILE-STATUS|FLUSH-FILE|REFILL|RENAME-FILE|'
             # *** Wordset FLOAT
             r'>float|d>f|'
             r'f!|f\*|f\+|f-|f\/|f0<|f0=|f<|f>d|f@|'
             r'falign|faligned|fconstant|fdepth|fdrop|fdup|'
             r'fliteral|float\+|floats|floor|fmax|fmin|'
             r'fnegate|fover|frot|fround|fswap|fvariable|'
             r'represent|'
             # *** Wordset FLOAT-EXT
             r'df!|df@|dfalign|dfaligned|dfloat\+|'
             r'dfloats|f\*\*|f\.|fabs|facos|facosh|falog|'
             r'fasin|fasinh|fatan|fatan2|fatanh|fcos|fcosh|'
             r'fe\.|fexp|fexpm1|fln|flnp1|flog|fs\.|fsin|'
             r'fsincos|fsinh|fsqrt|ftan|ftanh|f~|precision|'
             r'set-precision|sf!|sf@|sfalign|sfaligned|sfloat\+|'
             r'sfloats|'
             # *** Wordset LOCAL
             r'\(local\)|to|'
             # *** Wordset LOCAL-EXT
             r'locals\||'
             # *** Wordset MEMORY
             r'allocate|free|resize|'
             # *** Wordset SEARCH
             r'definitions|find|forth-wordlist|get-current|'
             r'get-order|search-wordlist|set-current|set-order|'
             r'wordlist|'
             # *** Wordset SEARCH-EXT
             r'also|forth|only|order|previous|'
             # *** Wordset STRING
             r'-trailing|\/string|blank|cmove|cmove>|compare|'
             r'search|sliteral|'
             # *** Wordset TOOLS
             r'.s|dump|see|words|'
             # *** Wordset TOOLS-EXT
             r';code|'
             r'ahead|assembler|bye|code|cs-pick|cs-roll|'
             r'editor|state|\[else\]|\[if\]|\[then\]|'
             # *** Wordset TOOLS-EXT-obsolescent
             r'forget|'
             # Forth 2012
             r'defer|defer@|defer!|action-of|begin-structure|field:|buffer:|'
             r'parse-name|buffer:|traverse-wordlist|n>r|nr>|2value|fvalue|'
             r'name>interpret|name>compile|name>string|'
             # (?!\S) ensures the keyword is not merely a prefix of a
             # longer word
             r'cfield:|end-structure)(?!\S)', Keyword),

            # Numbers
            (r'(\$[0-9A-F]+)', Number.Hex),
            (r'(\#|%|&|\-|\+)?[0-9]+', Number.Integer),
            (r'(\#|%|&|\-|\+)?[0-9.]+', Keyword.Type),
            # amforth specific
            (r'(@i|!i|@e|!e|pause|noop|turnkey|sleep|'
             r'itype|icompare|sp@|sp!|rp@|rp!|up@|up!|'
             r'>a|a>|a@|a!|a@+|a@-|>b|b>|b@|b!|b@+|b@-|'
             r'find-name|1ms|'
             r'sp0|rp0|\(evaluate\)|int-trap|int!)(?!\S)',
             Name.Constant),
            # a proposal
            (r'(do-recognizer|r:fail|recognizer:|get-recognizers|'
             r'set-recognizers|r:float|r>comp|r>int|r>post|'
             r'r:name|r:word|r:dnum|r:num|recognizer|forth-recognizer|'
             r'rec:num|rec:float|rec:word)(?!\S)', Name.Decorator),
            # defining words. The next word is a new command name
            # NOTE(review): uses Text for the whitespace group, unlike the
            # analogous rule above which uses Whitespace — confirm whether
            # this is intentional.
            (r'(Evalue|Rvalue|Uvalue|Edefer|Rdefer|Udefer)(\s+)',
             bygroups(Keyword.Namespace, Text), 'worddef'),

            (r'\S+', Name.Function),  # Anything else is executed
        ],
        'worddef': [
            # consume exactly one word (the name being defined), then return
            (r'\S+', Name.Class, '#pop'),
        ],
        'stringdef': [
            (r'[^"]+', String, '#pop'),
        ],
    }

    def analyse_text(text):
        """Forth uses : COMMAND ; quite a lot in a single line, so we're trying
        to find that."""
        # Returns None implicitly when the pattern is absent (no confidence).
        if re.search('\n:[^\n]+;\n', text):
            return 0.3

View File

@@ -0,0 +1,212 @@
"""
pygments.lexers.fortran
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Fortran languages.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, words, using, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['FortranLexer', 'FortranFixedLexer']
class FortranLexer(RegexLexer):
    """
    Lexer for FORTRAN 90 code.

    Free-form source only; fixed-form (column-oriented) source is handled
    by :class:`FortranFixedLexer`, which delegates per-line lexing back to
    this class.
    """

    name = 'Fortran'
    url = 'https://fortran-lang.org/'
    aliases = ['fortran', 'f90']
    filenames = ['*.f03', '*.f90', '*.F03', '*.F90']
    mimetypes = ['text/x-fortran']
    version_added = '0.10'

    # Fortran is case-insensitive.
    flags = re.IGNORECASE | re.MULTILINE

    # Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION
    # Operators: **, *, +, -, /, <, >, <=, >=, ==, /=
    # Logical (?): NOT, AND, OR, EQV, NEQV
    # Builtins:
    # http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html

    tokens = {
        'root': [
            (r'^#.*\n', Comment.Preproc),
            (r'!.*\n', Comment),
            include('strings'),
            include('core'),
            (r'[a-z][\w$]*', Name),
            include('nums'),
            (r'[\s]+', Text.Whitespace),
        ],
        'core': [
            # Statements
            # Two-word statements handled first so both parts get Keyword.
            (r'\b(DO)(\s+)(CONCURRENT)\b', bygroups(Keyword, Text.Whitespace, Keyword)),
            (r'\b(GO)(\s*)(TO)\b', bygroups(Keyword, Text.Whitespace, Keyword)),

            (words((
                'ABSTRACT', 'ACCEPT', 'ALL', 'ALLSTOP', 'ALLOCATABLE', 'ALLOCATE',
                'ARRAY', 'ASSIGN', 'ASSOCIATE', 'ASYNCHRONOUS', 'BACKSPACE', 'BIND',
                'BLOCK', 'BLOCKDATA', 'BYTE', 'CALL', 'CASE', 'CLASS', 'CLOSE',
                'CODIMENSION', 'COMMON', 'CONTIGUOUS', 'CONTAINS',
                'CONTINUE', 'CRITICAL', 'CYCLE', 'DATA', 'DEALLOCATE', 'DECODE',
                'DEFERRED', 'DIMENSION', 'DO', 'ELEMENTAL', 'ELSE', 'ELSEIF', 'ENCODE',
                'END', 'ENDASSOCIATE', 'ENDBLOCK', 'ENDDO', 'ENDENUM', 'ENDFORALL',
                'ENDFUNCTION', 'ENDIF', 'ENDINTERFACE', 'ENDMODULE', 'ENDPROGRAM',
                'ENDSELECT', 'ENDSUBMODULE', 'ENDSUBROUTINE', 'ENDTYPE', 'ENDWHERE',
                'ENTRY', 'ENUM', 'ENUMERATOR', 'EQUIVALENCE', 'ERROR STOP', 'EXIT',
                'EXTENDS', 'EXTERNAL', 'EXTRINSIC', 'FILE', 'FINAL', 'FORALL', 'FORMAT',
                'FUNCTION', 'GENERIC', 'IF', 'IMAGES', 'IMPLICIT',
                'IMPORT', 'IMPURE', 'INCLUDE', 'INQUIRE', 'INTENT', 'INTERFACE',
                'INTRINSIC', 'IS', 'LOCK', 'MEMORY', 'MODULE', 'NAMELIST', 'NULLIFY',
                'NONE', 'NON_INTRINSIC', 'NON_OVERRIDABLE', 'NOPASS', 'ONLY', 'OPEN',
                'OPTIONAL', 'OPTIONS', 'PARAMETER', 'PASS', 'PAUSE', 'POINTER', 'PRINT',
                'PRIVATE', 'PROGRAM', 'PROCEDURE', 'PROTECTED', 'PUBLIC', 'PURE', 'READ',
                'RECURSIVE', 'RESULT', 'RETURN', 'REWIND', 'SAVE', 'SELECT', 'SEQUENCE',
                'STOP', 'SUBMODULE', 'SUBROUTINE', 'SYNC', 'SYNCALL', 'SYNCIMAGES',
                'SYNCMEMORY', 'TARGET', 'THEN', 'TYPE', 'UNLOCK', 'USE', 'VALUE',
                'VOLATILE', 'WHERE', 'WRITE', 'WHILE'), prefix=r'\b', suffix=r'\s*\b'),
             Keyword),

            # Data Types
            (words((
                'CHARACTER', 'COMPLEX', 'DOUBLE PRECISION', 'DOUBLE COMPLEX', 'INTEGER',
                'LOGICAL', 'REAL', 'C_INT', 'C_SHORT', 'C_LONG', 'C_LONG_LONG',
                'C_SIGNED_CHAR', 'C_SIZE_T', 'C_INT8_T', 'C_INT16_T', 'C_INT32_T',
                'C_INT64_T', 'C_INT_LEAST8_T', 'C_INT_LEAST16_T', 'C_INT_LEAST32_T',
                'C_INT_LEAST64_T', 'C_INT_FAST8_T', 'C_INT_FAST16_T', 'C_INT_FAST32_T',
                'C_INT_FAST64_T', 'C_INTMAX_T', 'C_INTPTR_T', 'C_FLOAT', 'C_DOUBLE',
                'C_LONG_DOUBLE', 'C_FLOAT_COMPLEX', 'C_DOUBLE_COMPLEX',
                'C_LONG_DOUBLE_COMPLEX', 'C_BOOL', 'C_CHAR', 'C_PTR', 'C_FUNPTR'),
                   prefix=r'\b', suffix=r'\s*\b'),
             Keyword.Type),

            # Operators
            (r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator),

            (r'(::)', Keyword.Declaration),

            (r'[()\[\],:&%;.]', Punctuation),
            # Intrinsics
            (words((
                'Abort', 'Abs', 'Access', 'AChar', 'ACos', 'ACosH', 'AdjustL',
                'AdjustR', 'AImag', 'AInt', 'Alarm', 'All', 'Allocated', 'ALog',
                'AMax', 'AMin', 'AMod', 'And', 'ANInt', 'Any', 'ASin', 'ASinH',
                'Associated', 'ATan', 'ATanH', 'Atomic_Define', 'Atomic_Ref',
                'BesJ', 'BesJN', 'Bessel_J0', 'Bessel_J1', 'Bessel_JN', 'Bessel_Y0',
                'Bessel_Y1', 'Bessel_YN', 'BesY', 'BesYN', 'BGE', 'BGT', 'BLE',
                'BLT', 'Bit_Size', 'BTest', 'CAbs', 'CCos', 'Ceiling', 'CExp',
                'Char', 'ChDir', 'ChMod', 'CLog', 'Cmplx', 'Command_Argument_Count',
                'Complex', 'Conjg', 'Cos', 'CosH', 'Count', 'CPU_Time', 'CShift',
                'CSin', 'CSqRt', 'CTime', 'C_Loc', 'C_Associated',
                'C_Null_Ptr', 'C_Null_Funptr', 'C_F_Pointer', 'C_F_ProcPointer',
                'C_Null_Char', 'C_Alert', 'C_Backspace', 'C_Form_Feed', 'C_FunLoc',
                'C_Sizeof', 'C_New_Line', 'C_Carriage_Return',
                'C_Horizontal_Tab', 'C_Vertical_Tab', 'DAbs', 'DACos', 'DASin',
                'DATan', 'Date_and_Time', 'DbesJ', 'DbesJN', 'DbesY',
                'DbesYN', 'Dble', 'DCos', 'DCosH', 'DDiM', 'DErF',
                'DErFC', 'DExp', 'Digits', 'DiM', 'DInt', 'DLog', 'DMax',
                'DMin', 'DMod', 'DNInt', 'Dot_Product', 'DProd', 'DSign', 'DSinH',
                'DShiftL', 'DShiftR', 'DSin', 'DSqRt', 'DTanH', 'DTan', 'DTime',
                'EOShift', 'Epsilon', 'ErF', 'ErFC', 'ErFC_Scaled', 'ETime',
                'Execute_Command_Line', 'Exit', 'Exp', 'Exponent', 'Extends_Type_Of',
                'FDate', 'FGet', 'FGetC', 'FindLoc', 'Float', 'Floor', 'Flush',
                'FNum', 'FPutC', 'FPut', 'Fraction', 'FSeek', 'FStat', 'FTell',
                'Gamma', 'GError', 'GetArg', 'Get_Command', 'Get_Command_Argument',
                'Get_Environment_Variable', 'GetCWD', 'GetEnv', 'GetGId', 'GetLog',
                'GetPId', 'GetUId', 'GMTime', 'HostNm', 'Huge', 'Hypot', 'IAbs',
                'IAChar', 'IAll', 'IAnd', 'IAny', 'IArgC', 'IBClr', 'IBits',
                'IBSet', 'IChar', 'IDate', 'IDiM', 'IDInt', 'IDNInt', 'IEOr',
                'IErrNo', 'IFix', 'Imag', 'ImagPart', 'Image_Index', 'Index',
                'Int', 'IOr', 'IParity', 'IRand', 'IsaTty', 'IShft', 'IShftC',
                'ISign', 'Iso_C_Binding', 'Is_Contiguous', 'Is_Iostat_End',
                'Is_Iostat_Eor', 'ITime', 'Kill', 'Kind', 'LBound', 'LCoBound',
                'Len', 'Len_Trim', 'LGe', 'LGt', 'Link', 'LLe', 'LLt', 'LnBlnk',
                'Loc', 'Log', 'Log_Gamma', 'Logical', 'Long', 'LShift', 'LStat',
                'LTime', 'MaskL', 'MaskR', 'MatMul', 'Max', 'MaxExponent',
                'MaxLoc', 'MaxVal', 'MClock', 'Merge', 'Merge_Bits', 'Move_Alloc',
                'Min', 'MinExponent', 'MinLoc', 'MinVal', 'Mod', 'Modulo', 'MvBits',
                'Nearest', 'New_Line', 'NInt', 'Norm2', 'Not', 'Null', 'Num_Images',
                'Or', 'Pack', 'Parity', 'PError', 'Precision', 'Present', 'Product',
                'Radix', 'Rand', 'Random_Number', 'Random_Seed', 'Range', 'Real',
                'RealPart', 'Rename', 'Repeat', 'Reshape', 'RRSpacing', 'RShift',
                'Same_Type_As', 'Scale', 'Scan', 'Second', 'Selected_Char_Kind',
                'Selected_Int_Kind', 'Selected_Real_Kind', 'Set_Exponent', 'Shape',
                'ShiftA', 'ShiftL', 'ShiftR', 'Short', 'Sign', 'Signal', 'SinH',
                'Sin', 'Sleep', 'Sngl', 'Spacing', 'Spread', 'SqRt', 'SRand',
                'Stat', 'Storage_Size', 'Sum', 'SymLnk', 'System', 'System_Clock',
                'Tan', 'TanH', 'Time', 'This_Image', 'Tiny', 'TrailZ', 'Transfer',
                'Transpose', 'Trim', 'TtyNam', 'UBound', 'UCoBound', 'UMask',
                'Unlink', 'Unpack', 'Verify', 'XOr', 'ZAbs', 'ZCos', 'ZExp',
                'ZLog', 'ZSin', 'ZSqRt'), prefix=r'\b', suffix=r'\s*\b'),
             Name.Builtin),

            # Booleans
            (r'\.(true|false)\.', Name.Builtin),
            # Comparing Operators
            (r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word),
        ],

        'strings': [
            (r'"(\\[0-7]+|\\[^0-7]|[^"\\])*"', String.Double),
            (r"'(\\[0-7]+|\\[^0-7]|[^'\\])*'", String.Single),
        ],

        'nums': [
            # integers with optional kind suffix, e.g. 42_int64
            (r'\d+(?![.e])(_([1-9]|[a-z]\w*))?', Number.Integer),
            # real literals in the three possible shapes, with optional
            # exponent and kind suffix
            (r'[+-]?\d*\.\d+([ed][-+]?\d+)?(_([1-9]|[a-z]\w*))?', Number.Float),
            (r'[+-]?\d+\.\d*([ed][-+]?\d+)?(_([1-9]|[a-z]\w*))?', Number.Float),
            (r'[+-]?\d+(\.\d*)?[ed][-+]?\d+(_([1-9]|[a-z]\w*))?', Number.Float),
        ],
    }
class FortranFixedLexer(RegexLexer):
    """
    Lexer for fixed format Fortran.

    Fixed-form layout is column based: the first 5 characters are the
    statement label field, character 6 is the continuation marker, and the
    code proper is lexed by delegating to :class:`FortranLexer`.
    """

    name = 'FortranFixed'
    aliases = ['fortranfixed']
    filenames = ['*.f', '*.F']
    url = 'https://fortran-lang.org/'
    version_added = '2.1'

    flags = re.IGNORECASE

    def _lex_fortran(self, match, ctx=None):
        """Lex a line just as free form fortran without line break."""
        lexer = FortranLexer()
        text = match.group(0) + "\n"
        for index, token, value in lexer.get_tokens_unprocessed(text):
            # Strip the artificial newline added above before re-emitting.
            value = value.replace('\n', '')
            if value != '':
                yield index, token, value

    tokens = {
        'root': [
            # 'C' or '*' in column 1 marks a full-line comment.
            (r'[C*].*\n', Comment),
            (r'#.*\n', Comment.Preproc),
            (r' {0,4}!.*\n', Comment),
            # First 5 columns: label field, then inspect the continuation
            # character in column 6.
            (r'(.{5})', Name.Label, 'cont-char'),
            (r'.*\n', using(FortranLexer)),
        ],
        'cont-char': [
            (' ', Text, 'code'),      # blank: not a continuation line
            ('0', Comment, 'code'),   # '0' also means "not continued"
            ('.', Generic.Strong, 'code'),  # anything else: continuation marker
        ],
        'code': [
            # Up to 66 code characters (columns 7-72) go to the free-form
            # lexer; anything past column 72 is treated as comment.
            (r'(.{66})(.*)(\n)',
             bygroups(_lex_fortran, Comment, Text.Whitespace), 'root'),
            (r'(.*)(\n)', bygroups(_lex_fortran, Text.Whitespace), 'root'),
            default('root'),
        ]
    }

View File

@@ -0,0 +1,427 @@
"""
pygments.lexers.foxpro
~~~~~~~~~~~~~~~~~~~~~~
Simple lexer for Microsoft Visual FoxPro source code.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String
__all__ = ['FoxProLexer']
class FoxProLexer(RegexLexer):
"""Lexer for Microsoft Visual FoxPro language.
FoxPro syntax allows to shorten all keywords and function names
to 4 characters. Shortened forms are not recognized by this lexer.
"""
name = 'FoxPro'
aliases = ['foxpro', 'vfp', 'clipper', 'xbase']
filenames = ['*.PRG', '*.prg']
version_added = '1.6'
mimetype = []
url = 'https://learn.microsoft.com/en-us/previous-versions/visualstudio/foxpro'
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r';\s*\n', Punctuation), # consume newline
(r'(^|\n)\s*', Text, 'newline'),
# Square brackets may be used for array indices
# and for string literal. Look for arrays
# before matching string literals.
(r'(?<=\w)\[[0-9, ]+\]', Text),
(r'\'[^\'\n]*\'|"[^"\n]*"|\[[^]*]\]', String),
(r'(^\s*\*|&&|&amp;&amp;).*?\n', Comment.Single),
(r'(ABS|ACLASS|ACOPY|ACOS|ADATABASES|ADBOBJECTS|ADDBS|'
r'ADDPROPERTY|ADEL|ADIR|ADLLS|ADOCKSTATE|AELEMENT|AERROR|'
r'AEVENTS|AFIELDS|AFONT|AGETCLASS|AGETFILEVERSION|AINS|'
r'AINSTANCE|ALANGUAGE|ALEN|ALIAS|ALINES|ALLTRIM|'
r'AMEMBERS|AMOUSEOBJ|ANETRESOURCES|APRINTERS|APROCINFO|'
r'ASC|ASCAN|ASELOBJ|ASESSIONS|ASIN|ASORT|ASQLHANDLES|'
r'ASTACKINFO|ASUBSCRIPT|AT|AT_C|ATAGINFO|ATAN|ATC|ATCC|'
r'ATCLINE|ATLINE|ATN2|AUSED|AVCXCLASSES|BAR|BARCOUNT|'
r'BARPROMPT|BETWEEN|BINDEVENT|BINTOC|BITAND|BITCLEAR|'
r'BITLSHIFT|BITNOT|BITOR|BITRSHIFT|BITSET|BITTEST|BITXOR|'
r'BOF|CANDIDATE|CAPSLOCK|CAST|CDOW|CDX|CEILING|CHR|CHRSAW|'
r'CHRTRAN|CHRTRANC|CLEARRESULTSET|CMONTH|CNTBAR|CNTPAD|COL|'
r'COM|Functions|COMARRAY|COMCLASSINFO|COMPOBJ|COMPROP|'
r'COMRETURNERROR|COS|CPCONVERT|CPCURRENT|CPDBF|CREATEBINARY|'
r'CREATEOBJECT|CREATEOBJECTEX|CREATEOFFLINE|CTOBIN|CTOD|'
r'CTOT|CURDIR|CURSORGETPROP|CURSORSETPROP|CURSORTOXML|'
r'CURVAL|DATE|DATETIME|DAY|DBC|DBF|DBGETPROP|DBSETPROP|'
r'DBUSED|DDEAbortTrans|DDEAdvise|DDEEnabled|DDEExecute|'
r'DDEInitiate|DDELastError|DDEPoke|DDERequest|DDESetOption|'
r'DDESetService|DDESetTopic|DDETerminate|DEFAULTEXT|'
r'DELETED|DESCENDING|DIFFERENCE|DIRECTORY|DISKSPACE|'
r'DisplayPath|DMY|DODEFAULT|DOW|DRIVETYPE|DROPOFFLINE|'
r'DTOC|DTOR|DTOS|DTOT|EDITSOURCE|EMPTY|EOF|ERROR|EVAL(UATE)?|'
r'EVENTHANDLER|EVL|EXECSCRIPT|EXP|FCHSIZE|FCLOSE|FCOUNT|'
r'FCREATE|FDATE|FEOF|FERROR|FFLUSH|FGETS|FIELD|FILE|'
r'FILETOSTR|FILTER|FKLABEL|FKMAX|FLDLIST|FLOCK|FLOOR|'
r'FONTMETRIC|FOPEN|FOR|FORCEEXT|FORCEPATH|FOUND|FPUTS|'
r'FREAD|FSEEK|FSIZE|FTIME|FULLPATH|FV|FWRITE|'
r'GETAUTOINCVALUE|GETBAR|GETCOLOR|GETCP|GETDIR|GETENV|'
r'GETFILE|GETFLDSTATE|GETFONT|GETINTERFACE|'
r'GETNEXTMODIFIED|GETOBJECT|GETPAD|GETPEM|GETPICT|'
r'GETPRINTER|GETRESULTSET|GETWORDCOUNT|GETWORDNUM|'
r'GETCURSORADAPTER|GOMONTH|HEADER|HOME|HOUR|ICASE|'
r'IDXCOLLATE|IIF|IMESTATUS|INDBC|INDEXSEEK|INKEY|INLIST|'
r'INPUTBOX|INSMODE|INT|ISALPHA|ISBLANK|ISCOLOR|ISDIGIT|'
r'ISEXCLUSIVE|ISFLOCKED|ISLEADBYTE|ISLOWER|ISMEMOFETCHED|'
r'ISMOUSE|ISNULL|ISPEN|ISREADONLY|ISRLOCKED|'
r'ISTRANSACTABLE|ISUPPER|JUSTDRIVE|JUSTEXT|JUSTFNAME|'
r'JUSTPATH|JUSTSTEM|KEY|KEYMATCH|LASTKEY|LEFT|LEFTC|LEN|'
r'LENC|LIKE|LIKEC|LINENO|LOADPICTURE|LOCFILE|LOCK|LOG|'
r'LOG10|LOOKUP|LOWER|LTRIM|LUPDATE|MAKETRANSACTABLE|MAX|'
r'MCOL|MDOWN|MDX|MDY|MEMLINES|MEMORY|MENU|MESSAGE|'
r'MESSAGEBOX|MIN|MINUTE|MLINE|MOD|MONTH|MRKBAR|MRKPAD|'
r'MROW|MTON|MWINDOW|NDX|NEWOBJECT|NORMALIZE|NTOM|NUMLOCK|'
r'NVL|OBJNUM|OBJTOCLIENT|OBJVAR|OCCURS|OEMTOANSI|OLDVAL|'
r'ON|ORDER|OS|PAD|PADL|PARAMETERS|PAYMENT|PCOL|PCOUNT|'
r'PEMSTATUS|PI|POPUP|PRIMARY|PRINTSTATUS|PRMBAR|PRMPAD|'
r'PROGRAM|PROMPT|PROPER|PROW|PRTINFO|PUTFILE|PV|QUARTER|'
r'RAISEEVENT|RAND|RAT|RATC|RATLINE|RDLEVEL|READKEY|RECCOUNT|'
r'RECNO|RECSIZE|REFRESH|RELATION|REPLICATE|REQUERY|RGB|'
r'RGBSCHEME|RIGHT|RIGHTC|RLOCK|ROUND|ROW|RTOD|RTRIM|'
r'SAVEPICTURE|SCHEME|SCOLS|SEC|SECONDS|SEEK|SELECT|SET|'
r'SETFLDSTATE|SETRESULTSET|SIGN|SIN|SKPBAR|SKPPAD|SOUNDEX|'
r'SPACE|SQLCANCEL|SQLCOLUMNS|SQLCOMMIT|SQLCONNECT|'
r'SQLDISCONNECT|SQLEXEC|SQLGETPROP|SQLIDLEDISCONNECT|'
r'SQLMORERESULTS|SQLPREPARE|SQLROLLBACK|SQLSETPROP|'
r'SQLSTRINGCONNECT|SQLTABLES|SQRT|SROWS|STR|STRCONV|'
r'STREXTRACT|STRTOFILE|STRTRAN|STUFF|STUFFC|SUBSTR|'
r'SUBSTRC|SYS|SYSMETRIC|TABLEREVERT|TABLEUPDATE|TAG|'
r'TAGCOUNT|TAGNO|TAN|TARGET|TEXTMERGE|TIME|TRANSFORM|'
r'TRIM|TTOC|TTOD|TXNLEVEL|TXTWIDTH|TYPE|UNBINDEVENTS|'
r'UNIQUE|UPDATED|UPPER|USED|VAL|VARREAD|VARTYPE|VERSION|'
r'WBORDER|WCHILD|WCOLS|WDOCKABLE|WEEK|WEXIST|WFONT|WLAST|'
r'WLCOL|WLROW|WMAXIMUM|WMINIMUM|WONTOP|WOUTPUT|WPARENT|'
r'WREAD|WROWS|WTITLE|WVISIBLE|XMLTOCURSOR|XMLUPDATEGRAM|'
r'YEAR)(?=\s*\()', Name.Function),
(r'_ALIGNMENT|_ASCIICOLS|_ASCIIROWS|_ASSIST|_BEAUTIFY|_BOX|'
r'_BROWSER|_BUILDER|_CALCMEM|_CALCVALUE|_CLIPTEXT|_CONVERTER|'
r'_COVERAGE|_CUROBJ|_DBLCLICK|_DIARYDATE|_DOS|_FOXDOC|_FOXREF|'
r'_GALLERY|_GENGRAPH|_GENHTML|_GENMENU|_GENPD|_GENSCRN|'
r'_GENXTAB|_GETEXPR|_INCLUDE|_INCSEEK|_INDENT|_LMARGIN|_MAC|'
r'_MENUDESIGNER|_MLINE|_PADVANCE|_PAGENO|_PAGETOTAL|_PBPAGE|'
r'_PCOLNO|_PCOPIES|_PDRIVER|_PDSETUP|_PECODE|_PEJECT|_PEPAGE|'
r'_PLENGTH|_PLINENO|_PLOFFSET|_PPITCH|_PQUALITY|_PRETEXT|'
r'_PSCODE|_PSPACING|_PWAIT|_RMARGIN|_REPORTBUILDER|'
r'_REPORTOUTPUT|_REPORTPREVIEW|_SAMPLES|_SCCTEXT|_SCREEN|'
r'_SHELL|_SPELLCHK|_STARTUP|_TABS|_TALLY|_TASKPANE|_TEXT|'
r'_THROTTLE|_TOOLBOX|_TOOLTIPTIMEOUT|_TRANSPORT|_TRIGGERLEVEL|'
r'_UNIX|_VFP|_WINDOWS|_WIZARD|_WRAP', Keyword.Pseudo),
(r'THISFORMSET|THISFORM|THIS', Name.Builtin),
(r'Application|CheckBox|Collection|Column|ComboBox|'
r'CommandButton|CommandGroup|Container|Control|CursorAdapter|'
r'Cursor|Custom|DataEnvironment|DataObject|EditBox|'
r'Empty|Exception|Fields|Files|File|FormSet|Form|FoxCode|'
r'Grid|Header|Hyperlink|Image|Label|Line|ListBox|Objects|'
r'OptionButton|OptionGroup|PageFrame|Page|ProjectHook|Projects|'
r'Project|Relation|ReportListener|Separator|Servers|Server|'
r'Session|Shape|Spinner|Tables|TextBox|Timer|ToolBar|'
r'XMLAdapter|XMLField|XMLTable', Name.Class),
(r'm\.[a-z_]\w*', Name.Variable),
(r'\.(F|T|AND|OR|NOT|NULL)\.|\b(AND|OR|NOT|NULL)\b', Operator.Word),
(r'\.(ActiveColumn|ActiveControl|ActiveForm|ActivePage|'
r'ActiveProject|ActiveRow|AddLineFeeds|ADOCodePage|Alias|'
r'Alignment|Align|AllowAddNew|AllowAutoColumnFit|'
r'AllowCellSelection|AllowDelete|AllowHeaderSizing|'
r'AllowInsert|AllowModalMessages|AllowOutput|AllowRowSizing|'
r'AllowSimultaneousFetch|AllowTabs|AllowUpdate|'
r'AlwaysOnBottom|AlwaysOnTop|Anchor|Application|'
r'AutoActivate|AutoCenter|AutoCloseTables|AutoComplete|'
r'AutoCompSource|AutoCompTable|AutoHideScrollBar|'
r'AutoIncrement|AutoOpenTables|AutoRelease|AutoSize|'
r'AutoVerbMenu|AutoYield|BackColor|ForeColor|BackStyle|'
r'BaseClass|BatchUpdateCount|BindControls|BorderColor|'
r'BorderStyle|BorderWidth|BoundColumn|BoundTo|Bound|'
r'BreakOnError|BufferModeOverride|BufferMode|'
r'BuildDateTime|ButtonCount|Buttons|Cancel|Caption|'
r'Centered|Century|ChildAlias|ChildOrder|ChildTable|'
r'ClassLibrary|Class|ClipControls|Closable|CLSID|CodePage|'
r'ColorScheme|ColorSource|ColumnCount|ColumnLines|'
r'ColumnOrder|Columns|ColumnWidths|CommandClauses|'
r'Comment|CompareMemo|ConflictCheckCmd|ConflictCheckType|'
r'ContinuousScroll|ControlBox|ControlCount|Controls|'
r'ControlSource|ConversionFunc|Count|CurrentControl|'
r'CurrentDataSession|CurrentPass|CurrentX|CurrentY|'
r'CursorSchema|CursorSource|CursorStatus|Curvature|'
r'Database|DataSessionID|DataSession|DataSourceType|'
r'DataSource|DataType|DateFormat|DateMark|Debug|'
r'DeclareXMLPrefix|DEClassLibrary|DEClass|DefaultFilePath|'
r'Default|DefOLELCID|DeleteCmdDataSourceType|DeleteCmdDataSource|'
r'DeleteCmd|DeleteMark|Description|Desktop|'
r'Details|DisabledBackColor|DisabledForeColor|'
r'DisabledItemBackColor|DisabledItemForeColor|'
r'DisabledPicture|DisableEncode|DisplayCount|'
r'DisplayValue|Dockable|Docked|DockPosition|'
r'DocumentFile|DownPicture|DragIcon|DragMode|DrawMode|'
r'DrawStyle|DrawWidth|DynamicAlignment|DynamicBackColor|'
r'DynamicForeColor|DynamicCurrentControl|DynamicFontBold|'
r'DynamicFontItalic|DynamicFontStrikethru|'
r'DynamicFontUnderline|DynamicFontName|DynamicFontOutline|'
r'DynamicFontShadow|DynamicFontSize|DynamicInputMask|'
r'DynamicLineHeight|EditorOptions|Enabled|'
r'EnableHyperlinks|Encrypted|ErrorNo|Exclude|Exclusive|'
r'FetchAsNeeded|FetchMemoCmdList|FetchMemoDataSourceType|'
r'FetchMemoDataSource|FetchMemo|FetchSize|'
r'FileClassLibrary|FileClass|FillColor|FillStyle|Filter|'
r'FirstElement|FirstNestedTable|Flags|FontBold|FontItalic|'
r'FontStrikethru|FontUnderline|FontCharSet|FontCondense|'
r'FontExtend|FontName|FontOutline|FontShadow|FontSize|'
r'ForceCloseTag|Format|FormCount|FormattedOutput|Forms|'
r'FractionDigits|FRXDataSession|FullName|GDIPlusGraphics|'
r'GridLineColor|GridLines|GridLineWidth|HalfHeightCaption|'
r'HeaderClassLibrary|HeaderClass|HeaderHeight|Height|'
r'HelpContextID|HideSelection|HighlightBackColor|'
r'HighlightForeColor|HighlightStyle|HighlightRowLineWidth|'
r'HighlightRow|Highlight|HomeDir|Hours|HostName|'
r'HScrollSmallChange|hWnd|Icon|IncrementalSearch|Increment|'
r'InitialSelectedAlias|InputMask|InsertCmdDataSourceType|'
r'InsertCmdDataSource|InsertCmdRefreshCmd|'
r'InsertCmdRefreshFieldList|InsertCmdRefreshKeyFieldList|'
r'InsertCmd|Instancing|IntegralHeight|'
r'Interval|IMEMode|IsAttribute|IsBase64|IsBinary|IsNull|'
r'IsDiffGram|IsLoaded|ItemBackColor,|ItemData|ItemIDData|'
r'ItemTips|IXMLDOMElement|KeyboardHighValue|KeyboardLowValue|'
r'Keyfield|KeyFieldList|KeyPreview|KeySort|LanguageOptions|'
r'LeftColumn|Left|LineContents|LineNo|LineSlant|LinkMaster|'
r'ListCount|ListenerType|ListIndex|ListItemID|ListItem|'
r'List|LockColumnsLeft|LockColumns|LockScreen|MacDesktop|'
r'MainFile|MapN19_4ToCurrency|MapBinary|MapVarchar|Margin|'
r'MaxButton|MaxHeight|MaxLeft|MaxLength|MaxRecords|MaxTop|'
r'MaxWidth|MDIForm|MemberClassLibrary|MemberClass|'
r'MemoWindow|Message|MinButton|MinHeight|MinWidth|'
r'MouseIcon|MousePointer|Movable|MoverBars|MultiSelect|'
r'Name|NestedInto|NewIndex|NewItemID|NextSiblingTable|'
r'NoCpTrans|NoDataOnLoad|NoData|NullDisplay|'
r'NumberOfElements|Object|OLEClass|OLEDragMode|'
r'OLEDragPicture|OLEDropEffects|OLEDropHasData|'
r'OLEDropMode|OLEDropTextInsertion|OLELCID|'
r'OLERequestPendingTimeout|OLEServerBusyRaiseError|'
r'OLEServerBusyTimeout|OLETypeAllowed|OneToMany|'
r'OpenViews|OpenWindow|Optimize|OrderDirection|Order|'
r'OutputPageCount|OutputType|PageCount|PageHeight|'
r'PageNo|PageOrder|Pages|PageTotal|PageWidth|'
r'PanelLink|Panel|ParentAlias|ParentClass|ParentTable|'
r'Parent|Partition|PasswordChar|PictureMargin|'
r'PicturePosition|PictureSpacing|PictureSelectionDisplay|'
r'PictureVal|Picture|Prepared|'
r'PolyPoints|PreserveWhiteSpace|PreviewContainer|'
r'PrintJobName|Procedure|PROCESSID|ProgID|ProjectHookClass|'
r'ProjectHookLibrary|ProjectHook|QuietMode|'
r'ReadCycle|ReadLock|ReadMouse|ReadObject|ReadOnly|'
r'ReadSave|ReadTimeout|RecordMark|RecordSourceType|'
r'RecordSource|RefreshAlias|'
r'RefreshCmdDataSourceType|RefreshCmdDataSource|RefreshCmd|'
r'RefreshIgnoreFieldList|RefreshTimeStamp|RelationalExpr|'
r'RelativeColumn|RelativeRow|ReleaseType|Resizable|'
r'RespectCursorCP|RespectNesting|RightToLeft|RotateFlip|'
r'Rotation|RowColChange|RowHeight|RowSourceType|'
r'RowSource|ScaleMode|SCCProvider|SCCStatus|ScrollBars|'
r'Seconds|SelectCmd|SelectedID|'
r'SelectedItemBackColor|SelectedItemForeColor|Selected|'
r'SelectionNamespaces|SelectOnEntry|SelLength|SelStart|'
r'SelText|SendGDIPlusImage|SendUpdates|ServerClassLibrary|'
r'ServerClass|ServerHelpFile|ServerName|'
r'ServerProject|ShowTips|ShowInTaskbar|ShowWindow|'
r'Sizable|SizeBox|SOM|Sorted|Sparse|SpecialEffect|'
r'SpinnerHighValue|SpinnerLowValue|SplitBar|StackLevel|'
r'StartMode|StatusBarText|StatusBar|Stretch|StrictDateEntry|'
r'Style|TabIndex|Tables|TabOrientation|Tabs|TabStop|'
r'TabStretch|TabStyle|Tag|TerminateRead|Text|Themes|'
r'ThreadID|TimestampFieldList|TitleBar|ToolTipText|'
r'TopIndex|TopItemID|Top|TwoPassProcess|TypeLibCLSID|'
r'TypeLibDesc|TypeLibName|Type|Unicode|UpdatableFieldList|'
r'UpdateCmdDataSourceType|UpdateCmdDataSource|'
r'UpdateCmdRefreshCmd|UpdateCmdRefreshFieldList|'
r'UpdateCmdRefreshKeyFieldList|UpdateCmd|'
r'UpdateGramSchemaLocation|UpdateGram|UpdateNameList|UpdateType|'
r'UseCodePage|UseCursorSchema|UseDeDataSource|UseMemoSize|'
r'UserValue|UseTransactions|UTF8Encoded|Value|VersionComments|'
r'VersionCompany|VersionCopyright|VersionDescription|'
r'VersionNumber|VersionProduct|VersionTrademarks|Version|'
r'VFPXMLProgID|ViewPortHeight|ViewPortLeft|'
r'ViewPortTop|ViewPortWidth|VScrollSmallChange|View|Visible|'
r'VisualEffect|WhatsThisButton|WhatsThisHelpID|WhatsThisHelp|'
r'WhereType|Width|WindowList|WindowState|WindowType|WordWrap|'
r'WrapCharInCDATA|WrapInCDATA|WrapMemoInCDATA|XMLAdapter|'
r'XMLConstraints|XMLNameIsXPath|XMLNamespace|XMLName|'
r'XMLPrefix|XMLSchemaLocation|XMLTable|XMLType|'
r'XSDfractionDigits|XSDmaxLength|XSDtotalDigits|'
r'XSDtype|ZoomBox)', Name.Attribute),
(r'\.(ActivateCell|AddColumn|AddItem|AddListItem|AddObject|'
r'AddProperty|AddTableSchema|AddToSCC|Add|'
r'ApplyDiffgram|Attach|AutoFit|AutoOpen|Box|Build|'
r'CancelReport|ChangesToCursor|CheckIn|CheckOut|Circle|'
r'CleanUp|ClearData|ClearStatus|Clear|CloneObject|CloseTables|'
r'Close|Cls|CursorAttach|CursorDetach|CursorFill|'
r'CursorRefresh|DataToClip|DelayedMemoFetch|DeleteColumn|'
r'Dock|DoMessage|DoScroll|DoStatus|DoVerb|Drag|Draw|Eval|'
r'GetData|GetDockState|GetFormat|GetKey|GetLatestVersion|'
r'GetPageHeight|GetPageWidth|Help|Hide|IncludePageInOutput|'
r'IndexToItemID|ItemIDToIndex|Item|LoadXML|Line|Modify|'
r'MoveItem|Move|Nest|OLEDrag|OnPreviewClose|OutputPage|'
r'Point|Print|PSet|Quit|ReadExpression|ReadMethod|'
r'RecordRefresh|Refresh|ReleaseXML|Release|RemoveFromSCC|'
r'RemoveItem|RemoveListItem|RemoveObject|Remove|'
r'Render|Requery|RequestData|ResetToDefault|Reset|Run|'
r'SaveAsClass|SaveAs|SetAll|SetData|SetFocus|SetFormat|'
r'SetMain|SetVar|SetViewPort|ShowWhatsThis|Show|'
r'SupportsListenerType|TextHeight|TextWidth|ToCursor|'
r'ToXML|UndoCheckOut|Unnest|UpdateStatus|WhatsThisMode|'
r'WriteExpression|WriteMethod|ZOrder)', Name.Function),
(r'\.(Activate|AdjustObjectSize|AfterBand|AfterBuild|'
r'AfterCloseTables|AfterCursorAttach|AfterCursorClose|'
r'AfterCursorDetach|AfterCursorFill|AfterCursorRefresh|'
r'AfterCursorUpdate|AfterDelete|AfterInsert|'
r'AfterRecordRefresh|AfterUpdate|AfterDock|AfterReport|'
r'AfterRowColChange|BeforeBand|BeforeCursorAttach|'
r'BeforeCursorClose|BeforeCursorDetach|BeforeCursorFill|'
r'BeforeCursorRefresh|BeforeCursorUpdate|BeforeDelete|'
r'BeforeInsert|BeforeDock|BeforeOpenTables|'
r'BeforeRecordRefresh|BeforeReport|BeforeRowColChange|'
r'BeforeUpdate|Click|dbc_Activate|dbc_AfterAddTable|'
r'dbc_AfterAppendProc|dbc_AfterCloseTable|dbc_AfterCopyProc|'
r'dbc_AfterCreateConnection|dbc_AfterCreateOffline|'
r'dbc_AfterCreateTable|dbc_AfterCreateView|dbc_AfterDBGetProp|'
r'dbc_AfterDBSetProp|dbc_AfterDeleteConnection|'
r'dbc_AfterDropOffline|dbc_AfterDropTable|'
r'dbc_AfterModifyConnection|dbc_AfterModifyProc|'
r'dbc_AfterModifyTable|dbc_AfterModifyView|dbc_AfterOpenTable|'
r'dbc_AfterRemoveTable|dbc_AfterRenameConnection|'
r'dbc_AfterRenameTable|dbc_AfterRenameView|'
r'dbc_AfterValidateData|dbc_BeforeAddTable|'
r'dbc_BeforeAppendProc|dbc_BeforeCloseTable|'
r'dbc_BeforeCopyProc|dbc_BeforeCreateConnection|'
r'dbc_BeforeCreateOffline|dbc_BeforeCreateTable|'
r'dbc_BeforeCreateView|dbc_BeforeDBGetProp|'
r'dbc_BeforeDBSetProp|dbc_BeforeDeleteConnection|'
r'dbc_BeforeDropOffline|dbc_BeforeDropTable|'
r'dbc_BeforeModifyConnection|dbc_BeforeModifyProc|'
r'dbc_BeforeModifyTable|dbc_BeforeModifyView|'
r'dbc_BeforeOpenTable|dbc_BeforeRemoveTable|'
r'dbc_BeforeRenameConnection|dbc_BeforeRenameTable|'
r'dbc_BeforeRenameView|dbc_BeforeValidateData|'
r'dbc_CloseData|dbc_Deactivate|dbc_ModifyData|dbc_OpenData|'
r'dbc_PackData|DblClick|Deactivate|Deleted|Destroy|DoCmd|'
r'DownClick|DragDrop|DragOver|DropDown|ErrorMessage|Error|'
r'EvaluateContents|GotFocus|Init|InteractiveChange|KeyPress|'
r'LoadReport|Load|LostFocus|Message|MiddleClick|MouseDown|'
r'MouseEnter|MouseLeave|MouseMove|MouseUp|MouseWheel|Moved|'
r'OLECompleteDrag|OLEDragOver|OLEGiveFeedback|OLESetData|'
r'OLEStartDrag|OnMoveItem|Paint|ProgrammaticChange|'
r'QueryAddFile|QueryModifyFile|QueryNewFile|QueryRemoveFile|'
r'QueryRunFile|QueryUnload|RangeHigh|RangeLow|ReadActivate|'
r'ReadDeactivate|ReadShow|ReadValid|ReadWhen|Resize|'
r'RightClick|SCCInit|SCCDestroy|Scrolled|Timer|UIEnable|'
r'UnDock|UnloadReport|Unload|UpClick|Valid|When)', Name.Function),
(r'\s+', Text),
# everything else is not colored
(r'.', Text),
],
'newline': [
(r'\*.*?$', Comment.Single, '#pop'),
(r'(ACCEPT|ACTIVATE\s*MENU|ACTIVATE\s*POPUP|ACTIVATE\s*SCREEN|'
r'ACTIVATE\s*WINDOW|APPEND|APPEND\s*FROM|APPEND\s*FROM\s*ARRAY|'
r'APPEND\s*GENERAL|APPEND\s*MEMO|ASSIST|AVERAGE|BLANK|BROWSE|'
r'BUILD\s*APP|BUILD\s*EXE|BUILD\s*PROJECT|CALCULATE|CALL|'
r'CANCEL|CHANGE|CLEAR|CLOSE|CLOSE\s*MEMO|COMPILE|CONTINUE|'
r'COPY\s*FILE|COPY\s*INDEXES|COPY\s*MEMO|COPY\s*STRUCTURE|'
r'COPY\s*STRUCTURE\s*EXTENDED|COPY\s*TAG|COPY\s*TO|'
r'COPY\s*TO\s*ARRAY|COUNT|CREATE|CREATE\s*COLOR\s*SET|'
r'CREATE\s*CURSOR|CREATE\s*FROM|CREATE\s*LABEL|CREATE\s*MENU|'
r'CREATE\s*PROJECT|CREATE\s*QUERY|CREATE\s*REPORT|'
r'CREATE\s*SCREEN|CREATE\s*TABLE|CREATE\s*VIEW|DDE|'
r'DEACTIVATE\s*MENU|DEACTIVATE\s*POPUP|DEACTIVATE\s*WINDOW|'
r'DECLARE|DEFINE\s*BAR|DEFINE\s*BOX|DEFINE\s*MENU|'
r'DEFINE\s*PAD|DEFINE\s*POPUP|DEFINE\s*WINDOW|DELETE|'
r'DELETE\s*FILE|DELETE\s*TAG|DIMENSION|DIRECTORY|DISPLAY|'
r'DISPLAY\s*FILES|DISPLAY\s*MEMORY|DISPLAY\s*STATUS|'
r'DISPLAY\s*STRUCTURE|DO|EDIT|EJECT|EJECT\s*PAGE|ERASE|'
r'EXIT|EXPORT|EXTERNAL|FILER|FIND|FLUSH|FUNCTION|GATHER|'
r'GETEXPR|GO|GOTO|HELP|HIDE\s*MENU|HIDE\s*POPUP|'
r'HIDE\s*WINDOW|IMPORT|INDEX|INPUT|INSERT|JOIN|KEYBOARD|'
r'LABEL|LIST|LOAD|LOCATE|LOOP|MENU|MENU\s*TO|MODIFY\s*COMMAND|'
r'MODIFY\s*FILE|MODIFY\s*GENERAL|MODIFY\s*LABEL|MODIFY\s*MEMO|'
r'MODIFY\s*MENU|MODIFY\s*PROJECT|MODIFY\s*QUERY|'
r'MODIFY\s*REPORT|MODIFY\s*SCREEN|MODIFY\s*STRUCTURE|'
r'MODIFY\s*WINDOW|MOVE\s*POPUP|MOVE\s*WINDOW|NOTE|'
r'ON\s*APLABOUT|ON\s*BAR|ON\s*ERROR|ON\s*ESCAPE|'
r'ON\s*EXIT\s*BAR|ON\s*EXIT\s*MENU|ON\s*EXIT\s*PAD|'
r'ON\s*EXIT\s*POPUP|ON\s*KEY|ON\s*KEY\s*=|ON\s*KEY\s*LABEL|'
r'ON\s*MACHELP|ON\s*PAD|ON\s*PAGE|ON\s*READERROR|'
r'ON\s*SELECTION\s*BAR|ON\s*SELECTION\s*MENU|'
r'ON\s*SELECTION\s*PAD|ON\s*SELECTION\s*POPUP|ON\s*SHUTDOWN|'
r'PACK|PARAMETERS|PLAY\s*MACRO|POP\s*KEY|POP\s*MENU|'
r'POP\s*POPUP|PRIVATE|PROCEDURE|PUBLIC|PUSH\s*KEY|'
r'PUSH\s*MENU|PUSH\s*POPUP|QUIT|READ|READ\s*MENU|RECALL|'
r'REINDEX|RELEASE|RELEASE\s*MODULE|RENAME|REPLACE|'
r'REPLACE\s*FROM\s*ARRAY|REPORT|RESTORE\s*FROM|'
r'RESTORE\s*MACROS|RESTORE\s*SCREEN|RESTORE\s*WINDOW|'
r'RESUME|RETRY|RETURN|RUN|RUN\s*\/N"|RUNSCRIPT|'
r'SAVE\s*MACROS|SAVE\s*SCREEN|SAVE\s*TO|SAVE\s*WINDOWS|'
r'SCATTER|SCROLL|SEEK|SELECT|SET|SET\s*ALTERNATE|'
r'SET\s*ANSI|SET\s*APLABOUT|SET\s*AUTOSAVE|SET\s*BELL|'
r'SET\s*BLINK|SET\s*BLOCKSIZE|SET\s*BORDER|SET\s*BRSTATUS|'
r'SET\s*CARRY|SET\s*CENTURY|SET\s*CLEAR|SET\s*CLOCK|'
r'SET\s*COLLATE|SET\s*COLOR\s*OF|SET\s*COLOR\s*OF\s*SCHEME|'
r'SET\s*COLOR\s*SET|SET\s*COLOR\s*TO|SET\s*COMPATIBLE|'
r'SET\s*CONFIRM|SET\s*CONSOLE|SET\s*CURRENCY|SET\s*CURSOR|'
r'SET\s*DATE|SET\s*DEBUG|SET\s*DECIMALS|SET\s*DEFAULT|'
r'SET\s*DELETED|SET\s*DELIMITERS|SET\s*DEVELOPMENT|'
r'SET\s*DEVICE|SET\s*DISPLAY|SET\s*DOHISTORY|SET\s*ECHO|'
r'SET\s*ESCAPE|SET\s*EXACT|SET\s*EXCLUSIVE|SET\s*FIELDS|'
r'SET\s*FILTER|SET\s*FIXED|SET\s*FORMAT|SET\s*FULLPATH|'
r'SET\s*FUNCTION|SET\s*HEADINGS|SET\s*HELP|SET\s*HELPFILTER|'
r'SET\s*HOURS|SET\s*INDEX|SET\s*INTENSITY|SET\s*KEY|'
r'SET\s*KEYCOMP|SET\s*LIBRARY|SET\s*LOCK|SET\s*LOGERRORS|'
r'SET\s*MACDESKTOP|SET\s*MACHELP|SET\s*MACKEY|SET\s*MARGIN|'
r'SET\s*MARK\s*OF|SET\s*MARK\s*TO|SET\s*MEMOWIDTH|'
r'SET\s*MESSAGE|SET\s*MOUSE|SET\s*MULTILOCKS|SET\s*NEAR|'
r'SET\s*NOCPTRANS|SET\s*NOTIFY|SET\s*ODOMETER|SET\s*OPTIMIZE|'
r'SET\s*ORDER|SET\s*PALETTE|SET\s*PATH|SET\s*PDSETUP|'
r'SET\s*POINT|SET\s*PRINTER|SET\s*PROCEDURE|SET\s*READBORDER|'
r'SET\s*REFRESH|SET\s*RELATION|SET\s*RELATION\s*OFF|'
r'SET\s*REPROCESS|SET\s*RESOURCE|SET\s*SAFETY|SET\s*SCOREBOARD|'
r'SET\s*SEPARATOR|SET\s*SHADOWS|SET\s*SKIP|SET\s*SKIP\s*OF|'
r'SET\s*SPACE|SET\s*STATUS|SET\s*STATUS\s*BAR|SET\s*STEP|'
r'SET\s*STICKY|SET\s*SYSMENU|SET\s*TALK|SET\s*TEXTMERGE|'
r'SET\s*TEXTMERGE\s*DELIMITERS|SET\s*TOPIC|SET\s*TRBETWEEN|'
r'SET\s*TYPEAHEAD|SET\s*UDFPARMS|SET\s*UNIQUE|SET\s*VIEW|'
r'SET\s*VOLUME|SET\s*WINDOW\s*OF\s*MEMO|SET\s*XCMDFILE|'
r'SHOW\s*GET|SHOW\s*GETS|SHOW\s*MENU|SHOW\s*OBJECT|'
r'SHOW\s*POPUP|SHOW\s*WINDOW|SIZE\s*POPUP|SKIP|SORT|'
r'STORE|SUM|SUSPEND|TOTAL|TYPE|UNLOCK|UPDATE|USE|WAIT|'
r'ZAP|ZOOM\s*WINDOW|DO\s*CASE|CASE|OTHERWISE|ENDCASE|'
r'DO\s*WHILE|ENDDO|FOR|ENDFOR|NEXT|IF|ELSE|ENDIF|PRINTJOB|'
r'ENDPRINTJOB|SCAN|ENDSCAN|TEXT|ENDTEXT|=)',
Keyword.Reserved, '#pop'),
(r'#\s*(IF|ELIF|ELSE|ENDIF|DEFINE|IFDEF|IFNDEF|INCLUDE)',
Comment.Preproc, '#pop'),
(r'(m\.)?[a-z_]\w*', Name.Variable, '#pop'),
(r'.', Text, '#pop'),
],
}

View File

@@ -0,0 +1,893 @@
"""
pygments.lexers.freefem
~~~~~~~~~~~~~~~~~~~~~~~
Lexer for FreeFem++ language.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.token import Comment, Operator, Keyword, Name
from pygments.lexers.c_cpp import CppLexer
__all__ = ['FreeFemLexer']
class FreeFemLexer(CppLexer):
    """
    For FreeFem++ source.

    This is an extension of the CppLexer, as the FreeFem Language is a superset
    of C++.

    The parent lexer does the actual tokenizing; this class only re-tags
    FreeFem-specific words (see ``get_tokens_unprocessed`` below) using the
    class-level word sets defined here.
    """
    name = 'Freefem'
    url = 'https://freefem.org/'
    aliases = ['freefem']
    filenames = ['*.edp']
    mimetypes = ['text/x-freefem']
    version_added = '2.4'

    # Language operators (re-tagged as Operator tokens)
    operators = {'+', '-', '*', '.*', '/', './', '%', '^', '^-1', ':', '\''}

    # FreeFem built-in types (re-tagged as Keyword.Type)
    types = {'bool', 'border', 'complex', 'dmatrix', 'fespace', 'func', 'gslspline',
             'ifstream', 'int', 'macro', 'matrix', 'mesh', 'mesh3', 'mpiComm',
             'mpiGroup', 'mpiRequest', 'NewMacro', 'EndMacro', 'ofstream', 'Pmmap',
             'problem', 'Psemaphore', 'real', 'solve', 'string', 'varf'}

    # Finite element space names (re-tagged as Name.Class)
    fespaces = {'BDM1', 'BDM1Ortho', 'Edge03d', 'Edge13d', 'Edge23d', 'FEQF', 'HCT',
                'P0', 'P03d', 'P0Edge', 'P1', 'P13d', 'P1b', 'P1b3d', 'P1bl', 'P1bl3d',
                'P1dc', 'P1Edge', 'P1nc', 'P2', 'P23d', 'P2b', 'P2BR', 'P2dc', 'P2Edge',
                'P2h', 'P2Morley', 'P2pnc', 'P3', 'P3dc', 'P3Edge', 'P4', 'P4dc',
                'P4Edge', 'P5Edge', 'RT0', 'RT03d', 'RT0Ortho', 'RT1', 'RT1Ortho',
                'RT2', 'RT2Ortho'}

    # Preprocessor directives (re-tagged as Comment.Preproc)
    preprocessor = {'ENDIFMACRO', 'include', 'IFMACRO', 'load'}

    # Language keywords and predefined variables (re-tagged as Keyword.Reserved)
    keywords = {
        'adj',
        'append',
        'area',
        'ARGV',
        'be',
        'binary',
        'BoundaryEdge',
        'bordermeasure',
        'CG',
        'Cholesky',
        'cin',
        'cout',
        'Crout',
        'default',
        'diag',
        'edgeOrientation',
        'endl',
        'false',
        'ffind',
        'FILE',
        'find',
        'fixed',
        'flush',
        'GMRES',
        'good',
        'hTriangle',
        'im',
        'imax',
        'imin',
        'InternalEdge',
        'l1',
        'l2',
        'label',
        'lenEdge',
        'length',
        'LINE',
        'linfty',
        'LU',
        'm',
        'max',
        'measure',
        'min',
        'mpiAnySource',
        'mpiBAND',
        'mpiBXOR',
        'mpiCommWorld',
        'mpiLAND',
        'mpiLOR',
        'mpiLXOR',
        'mpiMAX',
        'mpiMIN',
        'mpiPROD',
        'mpirank',
        'mpisize',
        'mpiSUM',
        'mpiUndefined',
        'n',
        'N',
        'nbe',
        'ndof',
        'ndofK',
        'noshowbase',
        'noshowpos',
        'notaregion',
        'nt',
        'nTonEdge',
        'nuEdge',
        'nuTriangle',
        'nv',
        'P',
        'pi',
        'precision',
        'qf1pE',
        'qf1pElump',
        'qf1pT',
        'qf1pTlump',
        'qfV1',
        'qfV1lump',
        'qf2pE',
        'qf2pT',
        'qf2pT4P1',
        'qfV2',
        'qf3pE',
        'qf4pE',
        'qf5pE',
        'qf5pT',
        'qfV5',
        'qf7pT',
        'qf9pT',
        'qfnbpE',
        'quantile',
        're',
        'region',
        'rfind',
        'scientific',
        'searchMethod',
        'setw',
        'showbase',
        'showpos',
        'sparsesolver',
        'sum',
        'tellp',
        'true',
        'UMFPACK',
        'unused',
        'whoinElement',
        'verbosity',
        'version',
        'volume',
        'x',
        'y',
        'z'
    }

    # Functions and classes shipped with the language (re-tagged as Name.Function)
    functions = {
        'abs',
        'acos',
        'acosh',
        'adaptmesh',
        'adj',
        'AffineCG',
        'AffineGMRES',
        'arg',
        'asin',
        'asinh',
        'assert',
        'atan',
        'atan2',
        'atanh',
        'atof',
        'atoi',
        'BFGS',
        'broadcast',
        'buildlayers',
        'buildmesh',
        'ceil',
        'chi',
        'complexEigenValue',
        'copysign',
        'change',
        'checkmovemesh',
        'clock',
        'cmaes',
        'conj',
        'convect',
        'cos',
        'cosh',
        'cube',
        'd',
        'dd',
        'dfft',
        'diffnp',
        'diffpos',
        'dimKrylov',
        'dist',
        'dumptable',
        'dx',
        'dxx',
        'dxy',
        'dxz',
        'dy',
        'dyx',
        'dyy',
        'dyz',
        'dz',
        'dzx',
        'dzy',
        'dzz',
        'EigenValue',
        'emptymesh',
        'erf',
        'erfc',
        'exec',
        'exit',
        'exp',
        'fdim',
        'floor',
        'fmax',
        'fmin',
        'fmod',
        'freeyams',
        'getARGV',
        'getline',
        'gmshload',
        'gmshload3',
        'gslcdfugaussianP',
        'gslcdfugaussianQ',
        'gslcdfugaussianPinv',
        'gslcdfugaussianQinv',
        'gslcdfgaussianP',
        'gslcdfgaussianQ',
        'gslcdfgaussianPinv',
        'gslcdfgaussianQinv',
        'gslcdfgammaP',
        'gslcdfgammaQ',
        'gslcdfgammaPinv',
        'gslcdfgammaQinv',
        'gslcdfcauchyP',
        'gslcdfcauchyQ',
        'gslcdfcauchyPinv',
        'gslcdfcauchyQinv',
        'gslcdflaplaceP',
        'gslcdflaplaceQ',
        'gslcdflaplacePinv',
        'gslcdflaplaceQinv',
        'gslcdfrayleighP',
        'gslcdfrayleighQ',
        'gslcdfrayleighPinv',
        'gslcdfrayleighQinv',
        'gslcdfchisqP',
        'gslcdfchisqQ',
        'gslcdfchisqPinv',
        'gslcdfchisqQinv',
        'gslcdfexponentialP',
        'gslcdfexponentialQ',
        'gslcdfexponentialPinv',
        'gslcdfexponentialQinv',
        'gslcdfexppowP',
        'gslcdfexppowQ',
        'gslcdftdistP',
        'gslcdftdistQ',
        'gslcdftdistPinv',
        'gslcdftdistQinv',
        'gslcdffdistP',
        'gslcdffdistQ',
        'gslcdffdistPinv',
        'gslcdffdistQinv',
        'gslcdfbetaP',
        'gslcdfbetaQ',
        'gslcdfbetaPinv',
        'gslcdfbetaQinv',
        'gslcdfflatP',
        'gslcdfflatQ',
        'gslcdfflatPinv',
        'gslcdfflatQinv',
        'gslcdflognormalP',
        'gslcdflognormalQ',
        'gslcdflognormalPinv',
        'gslcdflognormalQinv',
        'gslcdfgumbel1P',
        'gslcdfgumbel1Q',
        'gslcdfgumbel1Pinv',
        'gslcdfgumbel1Qinv',
        'gslcdfgumbel2P',
        'gslcdfgumbel2Q',
        'gslcdfgumbel2Pinv',
        'gslcdfgumbel2Qinv',
        'gslcdfweibullP',
        'gslcdfweibullQ',
        'gslcdfweibullPinv',
        'gslcdfweibullQinv',
        'gslcdfparetoP',
        'gslcdfparetoQ',
        'gslcdfparetoPinv',
        'gslcdfparetoQinv',
        'gslcdflogisticP',
        'gslcdflogisticQ',
        'gslcdflogisticPinv',
        'gslcdflogisticQinv',
        'gslcdfbinomialP',
        'gslcdfbinomialQ',
        'gslcdfpoissonP',
        'gslcdfpoissonQ',
        'gslcdfgeometricP',
        'gslcdfgeometricQ',
        'gslcdfnegativebinomialP',
        'gslcdfnegativebinomialQ',
        'gslcdfpascalP',
        'gslcdfpascalQ',
        'gslinterpakima',
        'gslinterpakimaperiodic',
        'gslinterpcsplineperiodic',
        'gslinterpcspline',
        'gslinterpsteffen',
        'gslinterplinear',
        'gslinterppolynomial',
        'gslranbernoullipdf',
        'gslranbeta',
        'gslranbetapdf',
        'gslranbinomialpdf',
        'gslranexponential',
        'gslranexponentialpdf',
        'gslranexppow',
        'gslranexppowpdf',
        'gslrancauchy',
        'gslrancauchypdf',
        'gslranchisq',
        'gslranchisqpdf',
        'gslranerlang',
        'gslranerlangpdf',
        'gslranfdist',
        'gslranfdistpdf',
        'gslranflat',
        'gslranflatpdf',
        'gslrangamma',
        'gslrangammaint',
        'gslrangammapdf',
        'gslrangammamt',
        'gslrangammaknuth',
        'gslrangaussian',
        'gslrangaussianratiomethod',
        'gslrangaussianziggurat',
        'gslrangaussianpdf',
        'gslranugaussian',
        'gslranugaussianratiomethod',
        'gslranugaussianpdf',
        'gslrangaussiantail',
        'gslrangaussiantailpdf',
        'gslranugaussiantail',
        'gslranugaussiantailpdf',
        'gslranlandau',
        'gslranlandaupdf',
        'gslrangeometricpdf',
        'gslrangumbel1',
        'gslrangumbel1pdf',
        'gslrangumbel2',
        'gslrangumbel2pdf',
        'gslranlogistic',
        'gslranlogisticpdf',
        'gslranlognormal',
        'gslranlognormalpdf',
        'gslranlogarithmicpdf',
        'gslrannegativebinomialpdf',
        'gslranpascalpdf',
        'gslranpareto',
        'gslranparetopdf',
        'gslranpoissonpdf',
        'gslranrayleigh',
        'gslranrayleighpdf',
        'gslranrayleightail',
        'gslranrayleightailpdf',
        'gslrantdist',
        'gslrantdistpdf',
        'gslranlaplace',
        'gslranlaplacepdf',
        'gslranlevy',
        'gslranweibull',
        'gslranweibullpdf',
        'gslsfairyAi',
        'gslsfairyBi',
        'gslsfairyAiscaled',
        'gslsfairyBiscaled',
        'gslsfairyAideriv',
        'gslsfairyBideriv',
        'gslsfairyAiderivscaled',
        'gslsfairyBiderivscaled',
        'gslsfairyzeroAi',
        'gslsfairyzeroBi',
        'gslsfairyzeroAideriv',
        'gslsfairyzeroBideriv',
        'gslsfbesselJ0',
        'gslsfbesselJ1',
        'gslsfbesselJn',
        'gslsfbesselY0',
        'gslsfbesselY1',
        'gslsfbesselYn',
        'gslsfbesselI0',
        'gslsfbesselI1',
        'gslsfbesselIn',
        'gslsfbesselI0scaled',
        'gslsfbesselI1scaled',
        'gslsfbesselInscaled',
        'gslsfbesselK0',
        'gslsfbesselK1',
        'gslsfbesselKn',
        'gslsfbesselK0scaled',
        'gslsfbesselK1scaled',
        'gslsfbesselKnscaled',
        'gslsfbesselj0',
        'gslsfbesselj1',
        'gslsfbesselj2',
        'gslsfbesseljl',
        'gslsfbessely0',
        'gslsfbessely1',
        'gslsfbessely2',
        'gslsfbesselyl',
        'gslsfbesseli0scaled',
        'gslsfbesseli1scaled',
        'gslsfbesseli2scaled',
        'gslsfbesselilscaled',
        'gslsfbesselk0scaled',
        'gslsfbesselk1scaled',
        'gslsfbesselk2scaled',
        'gslsfbesselklscaled',
        'gslsfbesselJnu',
        'gslsfbesselYnu',
        'gslsfbesselInuscaled',
        'gslsfbesselInu',
        'gslsfbesselKnuscaled',
        'gslsfbesselKnu',
        'gslsfbessellnKnu',
        'gslsfbesselzeroJ0',
        'gslsfbesselzeroJ1',
        'gslsfbesselzeroJnu',
        'gslsfclausen',
        'gslsfhydrogenicR1',
        'gslsfdawson',
        'gslsfdebye1',
        'gslsfdebye2',
        'gslsfdebye3',
        'gslsfdebye4',
        'gslsfdebye5',
        'gslsfdebye6',
        'gslsfdilog',
        'gslsfmultiply',
        'gslsfellintKcomp',
        'gslsfellintEcomp',
        'gslsfellintPcomp',
        'gslsfellintDcomp',
        'gslsfellintF',
        'gslsfellintE',
        'gslsfellintRC',
        'gslsferfc',
        'gslsflogerfc',
        'gslsferf',
        'gslsferfZ',
        'gslsferfQ',
        'gslsfhazard',
        'gslsfexp',
        'gslsfexpmult',
        'gslsfexpm1',
        'gslsfexprel',
        'gslsfexprel2',
        'gslsfexpreln',
        'gslsfexpintE1',
        'gslsfexpintE2',
        'gslsfexpintEn',
        'gslsfexpintE1scaled',
        'gslsfexpintE2scaled',
        'gslsfexpintEnscaled',
        'gslsfexpintEi',
        'gslsfexpintEiscaled',
        'gslsfShi',
        'gslsfChi',
        'gslsfexpint3',
        'gslsfSi',
        'gslsfCi',
        'gslsfatanint',
        'gslsffermidiracm1',
        'gslsffermidirac0',
        'gslsffermidirac1',
        'gslsffermidirac2',
        'gslsffermidiracint',
        'gslsffermidiracmhalf',
        'gslsffermidirachalf',
        'gslsffermidirac3half',
        'gslsffermidiracinc0',
        'gslsflngamma',
        'gslsfgamma',
        'gslsfgammastar',
        'gslsfgammainv',
        'gslsftaylorcoeff',
        'gslsffact',
        'gslsfdoublefact',
        'gslsflnfact',
        'gslsflndoublefact',
        'gslsflnchoose',
        'gslsfchoose',
        'gslsflnpoch',
        'gslsfpoch',
        'gslsfpochrel',
        'gslsfgammaincQ',
        'gslsfgammaincP',
        'gslsfgammainc',
        'gslsflnbeta',
        'gslsfbeta',
        'gslsfbetainc',
        'gslsfgegenpoly1',
        'gslsfgegenpoly2',
        'gslsfgegenpoly3',
        'gslsfgegenpolyn',
        'gslsfhyperg0F1',
        'gslsfhyperg1F1int',
        'gslsfhyperg1F1',
        'gslsfhypergUint',
        'gslsfhypergU',
        'gslsfhyperg2F0',
        'gslsflaguerre1',
        'gslsflaguerre2',
        'gslsflaguerre3',
        'gslsflaguerren',
        'gslsflambertW0',
        'gslsflambertWm1',
        'gslsflegendrePl',
        'gslsflegendreP1',
        'gslsflegendreP2',
        'gslsflegendreP3',
        'gslsflegendreQ0',
        'gslsflegendreQ1',
        'gslsflegendreQl',
        'gslsflegendrePlm',
        'gslsflegendresphPlm',
        'gslsflegendrearraysize',
        'gslsfconicalPhalf',
        'gslsfconicalPmhalf',
        'gslsfconicalP0',
        'gslsfconicalP1',
        'gslsfconicalPsphreg',
        'gslsfconicalPcylreg',
        'gslsflegendreH3d0',
        'gslsflegendreH3d1',
        'gslsflegendreH3d',
        'gslsflog',
        'gslsflogabs',
        'gslsflog1plusx',
        'gslsflog1plusxmx',
        'gslsfpowint',
        'gslsfpsiint',
        'gslsfpsi',
        'gslsfpsi1piy',
        'gslsfpsi1int',
        'gslsfpsi1',
        'gslsfpsin',
        'gslsfsynchrotron1',
        'gslsfsynchrotron2',
        'gslsftransport2',
        'gslsftransport3',
        'gslsftransport4',
        'gslsftransport5',
        'gslsfsin',
        'gslsfcos',
        'gslsfhypot',
        'gslsfsinc',
        'gslsflnsinh',
        'gslsflncosh',
        'gslsfanglerestrictsymm',
        'gslsfanglerestrictpos',
        'gslsfzetaint',
        'gslsfzeta',
        'gslsfzetam1',
        'gslsfzetam1int',
        'gslsfhzeta',
        'gslsfetaint',
        'gslsfeta',
        'imag',
        'int1d',
        'int2d',
        'int3d',
        'intalledges',
        'intallfaces',
        'interpolate',
        'invdiff',
        'invdiffnp',
        'invdiffpos',
        'Isend',
        'isInf',
        'isNaN',
        'isoline',
        'Irecv',
        'j0',
        'j1',
        'jn',
        'jump',
        'lgamma',
        'LinearCG',
        'LinearGMRES',
        'log',
        'log10',
        'lrint',
        'lround',
        'max',
        'mean',
        'medit',
        'min',
        'mmg3d',
        'movemesh',
        'movemesh23',
        'mpiAlltoall',
        'mpiAlltoallv',
        'mpiAllgather',
        'mpiAllgatherv',
        'mpiAllReduce',
        'mpiBarrier',
        'mpiGather',
        'mpiGatherv',
        'mpiRank',
        'mpiReduce',
        'mpiScatter',
        'mpiScatterv',
        'mpiSize',
        'mpiWait',
        'mpiWaitAny',
        'mpiWtick',
        'mpiWtime',
        'mshmet',
        'NaN',
        'NLCG',
        'on',
        'plot',
        'polar',
        'Post',
        'pow',
        'processor',
        'processorblock',
        'projection',
        'randinit',
        'randint31',
        'randint32',
        'random',
        'randreal1',
        'randreal2',
        'randreal3',
        'randres53',
        'Read',
        'readmesh',
        'readmesh3',
        'Recv',
        'rint',
        'round',
        'savemesh',
        'savesol',
        'savevtk',
        'seekg',
        'Sent',
        'set',
        'sign',
        'signbit',
        'sin',
        'sinh',
        'sort',
        'splitComm',
        'splitmesh',
        'sqrt',
        'square',
        'srandom',
        'srandomdev',
        'Stringification',
        'swap',
        'system',
        'tan',
        'tanh',
        'tellg',
        'tetg',
        'tetgconvexhull',
        'tetgreconstruction',
        'tetgtransfo',
        'tgamma',
        'triangulate',
        'trunc',
        'Wait',
        'Write',
        'y0',
        'y1',
        'yn'
    }

    # Named function parameters (re-tagged as Keyword.Pseudo)
    parameters = {
        'A',
        'A1',
        'abserror',
        'absolute',
        'aniso',
        'aspectratio',
        'B',
        'B1',
        'bb',
        'beginend',
        'bin',
        'boundary',
        'bw',
        'close',
        'cmm',
        'coef',
        'composante',
        'cutoff',
        'datafilename',
        'dataname',
        'dim',
        'distmax',
        'displacement',
        'doptions',
        'dparams',
        'eps',
        'err',
        'errg',
        'facemerge',
        'facetcl',
        'factorize',
        'file',
        'fill',
        'fixedborder',
        'flabel',
        'flags',
        'floatmesh',
        'floatsol',
        'fregion',
        'gradation',
        'grey',
        'hmax',
        'hmin',
        'holelist',
        'hsv',
        'init',
        'inquire',
        'inside',
        'IsMetric',
        'iso',
        'ivalue',
        'keepbackvertices',
        'label',
        'labeldown',
        'labelmid',
        'labelup',
        'levelset',
        'loptions',
        'lparams',
        'maxit',
        'maxsubdiv',
        'meditff',
        'mem',
        'memory',
        'metric',
        'mode',
        'nbarrow',
        'nbiso',
        'nbiter',
        'nbjacoby',
        'nboffacetcl',
        'nbofholes',
        'nbofregions',
        'nbregul',
        'nbsmooth',
        'nbvx',
        'ncv',
        'nev',
        'nomeshgeneration',
        'normalization',
        'omega',
        'op',
        'optimize',
        'option',
        'options',
        'order',
        'orientation',
        'periodic',
        'power',
        'precon',
        'prev',
        'ps',
        'ptmerge',
        'qfe',
        'qforder',
        'qft',
        'qfV',
        'ratio',
        'rawvector',
        'reffacelow',
        'reffacemid',
        'reffaceup',
        'refnum',
        'reftet',
        'reftri',
        'region',
        'regionlist',
        'renumv',
        'rescaling',
        'ridgeangle',
        'save',
        'sigma',
        'sizeofvolume',
        'smoothing',
        'solver',
        'sparams',
        'split',
        'splitin2',
        'splitpbedge',
        'stop',
        'strategy',
        'swap',
        'switch',
        'sym',
        't',
        'tgv',
        'thetamax',
        'tol',
        'tolpivot',
        'tolpivotsym',
        'transfo',
        'U2Vc',
        'value',
        'varrow',
        'vector',
        'veps',
        'viso',
        'wait',
        'width',
        'withsurfacemesh',
        'WindowIndex',
        'which',
        'zbound'
    }

    # Deprecated spellings.
    # NOTE(review): this set is declared but never consulted in
    # get_tokens_unprocessed below, so 'fixeborder' currently receives no
    # special highlighting — confirm whether that is intentional.
    deprecated = {'fixeborder'}

    # C++ keywords that are valid identifiers in FreeFem; re-tagged as plain
    # Name so the inherited CppLexer does not highlight them.
    suppress_highlight = {
        'alignof',
        'asm',
        'constexpr',
        'decltype',
        'div',
        'double',
        'grad',
        'mutable',
        'namespace',
        'noexcept',
        'restrict',
        'static_assert',
        'template',
        'this',
        'thread_local',
        'typeid',
        'typename',
        'using'
    }

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """Re-tag tokens produced by the parent CppLexer.

        Each (index, token, value) triple from CppLexer is re-classified when
        its text matches one of the word sets above; the first matching set
        wins (the elif order below is therefore significant). Anything not
        matched passes through with its original C++ token type.
        """
        for index, token, value in CppLexer.get_tokens_unprocessed(self, text, stack):
            if value in self.operators:
                yield index, Operator, value
            elif value in self.types:
                yield index, Keyword.Type, value
            elif value in self.fespaces:
                yield index, Name.Class, value
            elif value in self.preprocessor:
                yield index, Comment.Preproc, value
            elif value in self.keywords:
                yield index, Keyword.Reserved, value
            elif value in self.functions:
                yield index, Name.Function, value
            elif value in self.parameters:
                yield index, Keyword.Pseudo, value
            elif value in self.suppress_highlight:
                yield index, Name, value
            else:
                yield index, token, value

View File

@@ -0,0 +1,110 @@
"""
pygments.lexers.func
~~~~~~~~~~~~~~~~~~~~
Lexers for FunC.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Whitespace, Punctuation
__all__ = ['FuncLexer']
class FuncLexer(RegexLexer):
    """
    For FunC source code.
    """
    name = 'FunC'
    aliases = ['func', 'fc']
    filenames = ['*.fc', '*.func']
    url = 'https://docs.ton.org/develop/func/overview'
    version_added = ''

    # A FunC identifier:
    # 1. does not start with " (that would be a string literal);
    # 2. may be fully backtick-quoted: `...`, containing any character but `;
    # 3. may start with _, { or } only when more than one character follows;
    # 4. otherwise is any run of characters excluding the separators
    #    ; , [ ] ( ) ~ . and whitespace.
    identifier = r'(?!")(`([^`]+)`|((?=_)_|(?=\{)\{|(?=\})\}|(?![_`{}]))([^;,\[\]\(\)\s~.]+))'

    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            include('keywords'),
            include('strings'),
            include('directives'),
            include('numeric'),
            include('comments'),
            include('storage'),
            include('functions'),
            include('variables'),
            (r'[.;(),\[\]~{}]', Punctuation)
        ],
        'keywords': [
            # FunC operators are only operators when surrounded by whitespace,
            # hence the lookbehind/lookahead guards.
            (words((
                '<=>', '>=', '<=', '!=', '==', '^>>', '~>>',
                '>>', '<<', '/%', '^%', '~%', '^/', '~/', '+=',
                '-=', '*=', '/=', '~/=', '^/=', '%=', '^%=', '<<=',
                '>>=', '~>>=', '^>>=', '&=', '|=', '^=', '^', '=',
                '~', '/', '%', '-', '*', '+', '>',
                '<', '&', '|', ':', '?'), prefix=r'(?<=\s)', suffix=r'(?=\s)'),
             Operator),
            (words((
                'if', 'ifnot',
                'else', 'elseif', 'elseifnot',
                'while', 'do', 'until', 'repeat',
                'return', 'impure', 'method_id',
                'forall', 'asm', 'inline', 'inline_ref'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            (words(('true', 'false'), prefix=r'\b', suffix=r'\b'), Keyword.Constant),
        ],
        'directives': [
            (r'#include|#pragma', Keyword, 'directive'),
        ],
        'directive': [
            include('strings'),  # e.g. #include "stdlib.fc"
            (r'\s+', Whitespace),
            (r'version|not-version', Keyword),
            # Version constraint such as >=0.4.4. The dots are escaped so
            # that only literal dotted version numbers match (the previous
            # unescaped '.' matched any character).
            (r'(>=|<=|=|>|<|\^)?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?', Number),
            (r';', Text, '#pop')
        ],
        'strings': [
            # Optional single-letter suffix selects the string literal type.
            (r'\"([^\n\"]+)\"[Hhcusa]?', String),
        ],
        'numeric': [
            # Decimal (with _ separators), hex, or binary; must be followed
            # by a separator so identifier prefixes are not split.
            (r'\b(-?(?!_)([\d_]+|0x[\d_a-fA-F]+)|0b[1_0]+)(?<!_)(?=[\s\)\],;])', Number)
        ],
        'comments': [
            (r';;([^\n]*)', Comment.Singleline),
            (r'\{-', Comment.Multiline, 'comment'),
        ],
        'comment': [
            # {- ... -} comments nest, hence the #push/#pop pair.
            (r'[^-}{]+', Comment.Multiline),
            (r'\{-', Comment.Multiline, '#push'),
            (r'-\}', Comment.Multiline, '#pop'),
            (r'[-}{]', Comment.Multiline),
        ],
        'storage': [
            (words((
                'var', 'int', 'slice', 'tuple',
                'cell', 'builder', 'cont', '_'),
                prefix=r'\b', suffix=r'(?=[\s\(\),\[\]])'),
             Keyword.Type),
            (words(('global', 'const'), prefix=r'\b', suffix=r'\b'), Keyword.Constant),
        ],
        'variables': [
            (identifier, Name.Variable),
        ],
        'functions': [
            # An identifier immediately followed by ( is a function name.
            (identifier + r'(?=[\(])', Name.Function),
        ]
    }

View File

@@ -0,0 +1,21 @@
"""
pygments.lexers.functional
~~~~~~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# ruff: noqa: F401
from pygments.lexers.lisp import SchemeLexer, CommonLispLexer, RacketLexer, \
NewLispLexer, ShenLexer
from pygments.lexers.haskell import HaskellLexer, LiterateHaskellLexer, \
KokaLexer
from pygments.lexers.theorem import CoqLexer
from pygments.lexers.erlang import ErlangLexer, ErlangShellLexer, \
ElixirConsoleLexer, ElixirLexer
from pygments.lexers.ml import SMLLexer, OcamlLexer, OpaLexer
__all__ = []

Some files were not shown because too many files have changed in this diff Show More