# -*- coding: utf-8 -*- """ pygments.lexers.markup ~~~~~~~~~~~~~~~~~~~~~~ Lexers for non-HTML markup languages. :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexers.html import HtmlLexer, XmlLexer from pygments.lexers.javascript import JavascriptLexer from pygments.lexers.css import CssLexer from pygments.lexer import RegexLexer, DelegatingLexer, include, bygroups, \ using, this, do_insertions, default, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Generic, Other from pygments.util import get_bool_opt, ClassNotFound __all__ = ['BBCodeLexer', 'MoinWikiLexer', 'RstLexer', 'TexLexer', 'GroffLexer', 'MozPreprocHashLexer', 'MozPreprocPercentLexer', 'MozPreprocXulLexer', 'MozPreprocJavascriptLexer', 'MozPreprocCssLexer', 'MarkdownLexer'] class BBCodeLexer(RegexLexer): """ A lexer that highlights BBCode(-like) syntax. .. versionadded:: 0.6 """ name = 'BBCode' aliases = ['bbcode'] mimetypes = ['text/x-bbcode'] tokens = { 'root': [ (r'[^[]+', Text), # tag/end tag begin (r'\[/?\w+', Keyword, 'tag'), # stray bracket (r'\[', Text), ], 'tag': [ (r'\s+', Text), # attribute with value (r'(\w+)(=)("?[^\s"\]]+"?)', bygroups(Name.Attribute, Operator, String)), # tag argument (a la [color=green]) (r'(=)("?[^\s"\]]+"?)', bygroups(Operator, String)), # tag end (r'\]', Keyword, '#pop'), ], } class MoinWikiLexer(RegexLexer): """ For MoinMoin (and Trac) Wiki markup. .. versionadded:: 0.7 """ name = 'MoinMoin/Trac Wiki markup' aliases = ['trac-wiki', 'moin'] filenames = [] mimetypes = ['text/x-trac-wiki'] flags = re.MULTILINE | re.IGNORECASE tokens = { 'root': [ (r'^#.*$', Comment), (r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next # Titles (r'^(=+)([^=]+)(=+)(\s*#.+)?$', bygroups(Generic.Heading, using(this), Generic.Heading, String)), # Literal code blocks, with optional shebang (r'(\{\{\{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'), (r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting # Lists (r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)), (r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)), # Other Formatting (r'\[\[\w+.*?\]\]', Keyword), # Macro (r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])', bygroups(Keyword, String, Keyword)), # Link (r'^----+$', Keyword), # Horizontal rules (r'[^\n\'\[{!_~^,|]+', Text), (r'\n', Text), (r'.', Text), ], 'codeblock': [ (r'\}\}\}', Name.Builtin, '#pop'), # these blocks are allowed to be nested in Trac, but not MoinMoin (r'\{\{\{', Text, '#push'), (r'[^{}]+', Comment.Preproc), # slurp boring text (r'.', Comment.Preproc), # allow loose { or } ], } class RstLexer(RegexLexer): """ For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup. .. versionadded:: 0.7 Additional options accepted: `handlecodeblocks` Highlight the contents of ``.. sourcecode:: language``, ``.. code:: language`` and ``.. code-block:: language`` directives with a lexer for the given language (default: ``True``). .. versionadded:: 0.8 """ name = 'reStructuredText' aliases = ['rst', 'rest', 'restructuredtext'] filenames = ['*.rst', '*.rest'] mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"] flags = re.MULTILINE def _handle_sourcecode(self, match): from pygments.lexers import get_lexer_by_name # section header yield match.start(1), Punctuation, match.group(1) yield match.start(2), Text, match.group(2) yield match.start(3), Operator.Word, match.group(3) yield match.start(4), Punctuation, match.group(4) yield match.start(5), Text, match.group(5) yield match.start(6), Keyword, match.group(6) yield match.start(7), Text, match.group(7) # lookup lexer if wanted and existing lexer = None if self.handlecodeblocks: try: lexer = get_lexer_by_name(match.group(6).strip()) except ClassNotFound: pass indention = match.group(8) indention_size = len(indention) code = (indention + match.group(9) + match.group(10) + match.group(11)) # no lexer for this language. handle it like it was a code block if lexer is None: yield match.start(8), String, code return # highlight the lines with the lexer. ins = [] codelines = code.splitlines(True) code = '' for line in codelines: if len(line) > indention_size: ins.append((len(code), [(0, Text, line[:indention_size])])) code += line[indention_size:] else: code += line for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)): yield item # from docutils.parsers.rst.states closers = u'\'")]}>\u2019\u201d\xbb!?' unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0' end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))' % (re.escape(unicode_delimiters), re.escape(closers))) tokens = { 'root': [ # Heading with overline (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)' r'(.+)(\n)(\1)(\n)', bygroups(Generic.Heading, Text, Generic.Heading, Text, Generic.Heading, Text)), # Plain heading (r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|' r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)', bygroups(Generic.Heading, Text, Generic.Heading, Text)), # Bulleted lists (r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)', bygroups(Text, Number, using(this, state='inline'))), # Numbered lists (r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)', bygroups(Text, Number, using(this, state='inline'))), (r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)', bygroups(Text, Number, using(this, state='inline'))), # Numbered, but keep words at BOL from becoming lists (r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)', bygroups(Text, Number, using(this, state='inline'))), (r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)', bygroups(Text, Number, using(this, state='inline'))), # Line blocks (r'^(\s*)(\|)( .+\n(?:\| .+\n)*)', bygroups(Text, Operator, using(this, state='inline'))), # Sourcecode directives (r'^( *\.\.)(\s*)((?:source)?code(?:-block)?)(::)([ \t]*)([^\n]+)' r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)', _handle_sourcecode), # A directive (r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))', bygroups(Punctuation, Text, Operator.Word, Punctuation, Text, using(this, state='inline'))), # A reference target (r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$', bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))), # A footnote/citation target (r'^( *\.\.)(\s*)(\[.+\])(.*?)$', bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))), # A substitution def (r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))', bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word, Punctuation, Text, using(this, state='inline'))), # Comments (r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc), # Field list (r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)), (r'^( *)(:.*?:)([ \t]+)(.*?)$', bygroups(Text, Name.Class, Text, Name.Function)), # Definition list (r'^(\S.*(?<!::)\n)((?:(?: +.*)\n)+)', bygroups(using(this, state='inline'), using(this, state='inline'))), # Code blocks (r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*|)\n)+)', bygroups(String.Escape, Text, String, String, Text, String)), include('inline'), ], 'inline': [ (r'\\.', Text), # escape (r'``', String, 'literal'), # code (r'(`.+?)(<.+?>)(`__?)', # reference with inline target bygroups(String, String.Interpol, String)), (r'`.+?`__?', String), # reference (r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?', bygroups(Name.Variable, Name.Attribute)), # role (r'(:[a-zA-Z0-9:-]+?:)(`.+?`)', bygroups(Name.Attribute, Name.Variable)), # role (content first) (r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis (r'\*.+?\*', Generic.Emph), # Emphasis (r'\[.*?\]_', String), # Footnote or citation (r'<.+?>', Name.Tag), # Hyperlink (r'[^\\\n\[*`:]+', Text), (r'.', Text), ], 'literal': [ (r'[^`]+', String), (r'``' + end_string_suffix, String, '#pop'), (r'`', String), ] } def __init__(self, **options): self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True) RegexLexer.__init__(self, **options) def analyse_text(text): if text[:2] == '..' and text[2:3] != '.': return 0.3 p1 = text.find("\n") p2 = text.find("\n", p1 + 1) if (p2 > -1 and # has two lines p1 * 2 + 1 == p2 and # they are the same length text[p1+1] in '-=' and # the next line both starts and ends with text[p1+1] == text[p2-1]): # ...a sufficiently high header return 0.5 class TexLexer(RegexLexer): """ Lexer for the TeX and LaTeX typesetting languages. """ name = 'TeX' aliases = ['tex', 'latex'] filenames = ['*.tex', '*.aux', '*.toc'] mimetypes = ['text/x-tex', 'text/x-latex'] tokens = { 'general': [ (r'%.*?\n', Comment), (r'[{}]', Name.Builtin), (r'[&_^]', Name.Builtin), ], 'root': [ (r'\\\[', String.Backtick, 'displaymath'), (r'\\\(', String, 'inlinemath'), (r'\$\$', String.Backtick, 'displaymath'), (r'\$', String, 'inlinemath'), (r'\\([a-zA-Z]+|.)', Keyword, 'command'), (r'\\$', Keyword), include('general'), (r'[^\\$%&_^{}]+', Text), ], 'math': [ (r'\\([a-zA-Z]+|.)', Name.Variable), include('general'), (r'[0-9]+', Number), (r'[-=!+*/()\[\]]', Operator), (r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin), ], 'inlinemath': [ (r'\\\)', String, '#pop'), (r'\$', String, '#pop'), include('math'), ], 'displaymath': [ (r'\\\]', String, '#pop'), (r'\$\$', String, '#pop'), (r'\$', Name.Builtin), include('math'), ], 'command': [ (r'\[.*?\]', Name.Attribute), (r'\*', Keyword), default('#pop'), ], } def analyse_text(text): for start in ("\\documentclass", "\\input", "\\documentstyle", "\\relax"): if text[:len(start)] == start: return True class GroffLexer(RegexLexer): """ Lexer for the (g)roff typesetting language, supporting groff extensions. Mainly useful for highlighting manpage sources. .. versionadded:: 0.6 """ name = 'Groff' aliases = ['groff', 'nroff', 'man'] filenames = ['*.[1234567]', '*.man'] mimetypes = ['application/x-troff', 'text/troff'] tokens = { 'root': [ (r'(\.)(\w+)', bygroups(Text, Keyword), 'request'), (r'\.', Punctuation, 'request'), # Regular characters, slurp till we find a backslash or newline (r'[^\\\n]+', Text, 'textline'), default('textline'), ], 'textline': [ include('escapes'), (r'[^\\\n]+', Text), (r'\n', Text, '#pop'), ], 'escapes': [ # groff has many ways to write escapes. (r'\\"[^\n]*', Comment), (r'\\[fn]\w', String.Escape), (r'\\\(.{2}', String.Escape), (r'\\.\[.*\]', String.Escape), (r'\\.', String.Escape), (r'\\\n', Text, 'request'), ], 'request': [ (r'\n', Text, '#pop'), include('escapes'), (r'"[^\n"]+"', String.Double), (r'\d+', Number), (r'\S+', String), (r'\s+', Text), ], } def analyse_text(text): if text[:1] != '.': return False if text[:3] == '.\\"': return True if text[:4] == '.TH ': return True if text[1:3].isalnum() and text[3].isspace(): return 0.9 class MozPreprocHashLexer(RegexLexer): """ Lexer for Mozilla Preprocessor files (with '#' as the marker). Other data is left untouched. .. versionadded:: 2.0 """ name = 'mozhashpreproc' aliases = [name] filenames = [] mimetypes = [] tokens = { 'root': [ (r'^#', Comment.Preproc, ('expr', 'exprstart')), (r'.+', Other), ], 'exprstart': [ (r'(literal)(.*)', bygroups(Comment.Preproc, Text), '#pop:2'), (words(( 'define', 'undef', 'if', 'ifdef', 'ifndef', 'else', 'elif', 'elifdef', 'elifndef', 'endif', 'expand', 'filter', 'unfilter', 'include', 'includesubst', 'error')), Comment.Preproc, '#pop'), ], 'expr': [ (words(('!', '!=', '==', '&&', '||')), Operator), (r'(defined)(\()', bygroups(Keyword, Punctuation)), (r'\)', Punctuation), (r'[0-9]+', Number.Decimal), (r'__\w+?__', Name.Variable), (r'@\w+?@', Name.Class), (r'\w+', Name), (r'\n', Text, '#pop'), (r'\s+', Text), (r'\S', Punctuation), ], } class MozPreprocPercentLexer(MozPreprocHashLexer): """ Lexer for Mozilla Preprocessor files (with '%' as the marker). Other data is left untouched. .. versionadded:: 2.0 """ name = 'mozpercentpreproc' aliases = [name] filenames = [] mimetypes = [] tokens = { 'root': [ (r'^%', Comment.Preproc, ('expr', 'exprstart')), (r'.+', Other), ], } class MozPreprocXulLexer(DelegatingLexer): """ Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the `XmlLexer`. .. versionadded:: 2.0 """ name = "XUL+mozpreproc" aliases = ['xul+mozpreproc'] filenames = ['*.xul.in'] mimetypes = [] def __init__(self, **options): super(MozPreprocXulLexer, self).__init__( XmlLexer, MozPreprocHashLexer, **options) class MozPreprocJavascriptLexer(DelegatingLexer): """ Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the `JavascriptLexer`. .. versionadded:: 2.0 """ name = "Javascript+mozpreproc" aliases = ['javascript+mozpreproc'] filenames = ['*.js.in'] mimetypes = [] def __init__(self, **options): super(MozPreprocJavascriptLexer, self).__init__( JavascriptLexer, MozPreprocHashLexer, **options) class MozPreprocCssLexer(DelegatingLexer): """ Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the `CssLexer`. .. versionadded:: 2.0 """ name = "CSS+mozpreproc" aliases = ['css+mozpreproc'] filenames = ['*.css.in'] mimetypes = [] def __init__(self, **options): super(MozPreprocCssLexer, self).__init__( CssLexer, MozPreprocPercentLexer, **options) class MarkdownLexer(RegexLexer): """ For `Markdown <https://help.github.com/categories/writing-on-github/>`_ markup. .. versionadded:: 2.2 """ name = 'markdown' aliases = ['md'] filenames = ['*.md'] mimetypes = ["text/x-markdown"] flags = re.MULTILINE def _handle_codeblock(self, match): """ match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks """ from pygments.lexers import get_lexer_by_name # section header yield match.start(1), String , match.group(1) yield match.start(2), String , match.group(2) yield match.start(3), Text , match.group(3) # lookup lexer if wanted and existing lexer = None if self.handlecodeblocks: try: lexer = get_lexer_by_name( match.group(2).strip() ) except ClassNotFound: pass code = match.group(4) # no lexer for this language. handle it like it was a code block if lexer is None: yield match.start(4), String, code return for item in do_insertions([], lexer.get_tokens_unprocessed(code)): yield item yield match.start(5), String , match.group(5) tokens = { 'root': [ # heading with pound prefix (r'^(#)([^#].+\n)', bygroups(Generic.Heading, Text)), (r'^(#{2,6})(.+\n)', bygroups(Generic.Subheading, Text)), # task list (r'^(\s*)([*-] )(\[[ xX]\])( .+\n)', bygroups(Text, Keyword, Keyword, using(this, state='inline'))), # bulleted lists (r'^(\s*)([*-])(\s)(.+\n)', bygroups(Text, Keyword, Text, using(this, state='inline'))), # numbered lists (r'^(\s*)([0-9]+\.)( .+\n)', bygroups(Text, Keyword, using(this, state='inline'))), # quote (r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)), # text block (r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)), # code block with language (r'^(```)(\w+)(\n)([\w\W]*?)(^```$)', _handle_codeblock), include('inline'), ], 'inline': [ # escape (r'\\.', Text), # italics (r'(\s)([*_][^*_]+[*_])(\W|\n)', bygroups(Text, Generic.Emph, Text)), # bold # warning: the following rule eats internal tags. eg. **foo _bar_ baz** bar is not italics (r'(\s)((\*\*|__).*\3)((?=\W|\n))', bygroups(Text, Generic.Strong, None, Text)), # "proper way" (r'(\s)([*_]{2}[^*_]+[*_]{2})((?=\W|\n))', bygroups(Text, Generic.Strong, Text)), # strikethrough (r'(\s)(~~[^~]+~~)((?=\W|\n))', bygroups(Text, Generic.Deleted, Text)), # inline code (r'`[^`]+`', String.Backtick), # mentions and topics (twitter and github stuff) (r'[@#][\w/:]+', Name.Entity), # (image?) links eg: ![Image of Yaktocat](https://octodex.github.com/images/yaktocat.png) (r'(!?\[)([^]]+)(\])(\()([^)]+)(\))', bygroups(Text, Name.Tag, Text, Text, Name.Attribute, Text)), # general text, must come last! (r'[^\\\s]+', Text), (r'.', Text), ], } def __init__(self, **options): self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True) RegexLexer.__init__(self, **options)
Name | Type | Size | Permission | Actions |
---|---|---|---|---|
__pycache__ | Folder | 0755 |
|
|
__init__.py | File | 10.65 KB | 0644 |
|
_asy_builtins.py | File | 26.68 KB | 0644 |
|
_cl_builtins.py | File | 13.72 KB | 0644 |
|
_cocoa_builtins.py | File | 39.04 KB | 0644 |
|
_csound_builtins.py | File | 21.14 KB | 0644 |
|
_lasso_builtins.py | File | 131.38 KB | 0644 |
|
_lua_builtins.py | File | 8.14 KB | 0644 |
|
_mapping.py | File | 53.43 KB | 0644 |
|
_mql_builtins.py | File | 24.16 KB | 0644 |
|
_openedge_builtins.py | File | 47.23 KB | 0644 |
|
_php_builtins.py | File | 150.75 KB | 0644 |
|
_postgres_builtins.py | File | 10.95 KB | 0644 |
|
_scilab_builtins.py | File | 51.18 KB | 0644 |
|
_sourcemod_builtins.py | File | 26.48 KB | 0644 |
|
_stan_builtins.py | File | 9.88 KB | 0644 |
|
_stata_builtins.py | File | 24.55 KB | 0644 |
|
_tsql_builtins.py | File | 15.12 KB | 0644 |
|
_vim_builtins.py | File | 55.75 KB | 0644 |
|
actionscript.py | File | 10.92 KB | 0644 |
|
agile.py | File | 900 B | 0644 |
|
algebra.py | File | 7.03 KB | 0644 |
|
ambient.py | File | 2.5 KB | 0644 |
|
ampl.py | File | 4.02 KB | 0644 |
|
apl.py | File | 3.09 KB | 0644 |
|
archetype.py | File | 10.87 KB | 0644 |
|
asm.py | File | 24.67 KB | 0644 |
|
automation.py | File | 19.19 KB | 0644 |
|
basic.py | File | 19.83 KB | 0644 |
|
bibtex.py | File | 4.61 KB | 0644 |
|
business.py | File | 27.02 KB | 0644 |
|
c_cpp.py | File | 10.28 KB | 0644 |
|
c_like.py | File | 23.56 KB | 0644 |
|
capnproto.py | File | 2.14 KB | 0644 |
|
chapel.py | File | 3.43 KB | 0644 |
|
clean.py | File | 10.16 KB | 0644 |
|
compiled.py | File | 1.35 KB | 0644 |
|
configs.py | File | 27.6 KB | 0644 |
|
console.py | File | 4.02 KB | 0644 |
|
crystal.py | File | 16.45 KB | 0644 |
|
csound.py | File | 12.25 KB | 0644 |
|
css.py | File | 30.77 KB | 0644 |
|
d.py | File | 9.31 KB | 0644 |
|
dalvik.py | File | 4.32 KB | 0644 |
|
data.py | File | 18.33 KB | 0644 |
|
diff.py | File | 4.76 KB | 0644 |
|
dotnet.py | File | 27.02 KB | 0644 |
|
dsls.py | File | 32.55 KB | 0644 |
|
dylan.py | File | 10.18 KB | 0644 |
|
ecl.py | File | 5.74 KB | 0644 |
|
eiffel.py | File | 2.42 KB | 0644 |
|
elm.py | File | 2.93 KB | 0644 |
|
erlang.py | File | 18.49 KB | 0644 |
|
esoteric.py | File | 9.27 KB | 0644 |
|
ezhil.py | File | 2.95 KB | 0644 |
|
factor.py | File | 17.44 KB | 0644 |
|
fantom.py | File | 9.75 KB | 0644 |
|
felix.py | File | 9.19 KB | 0644 |
|
forth.py | File | 6.98 KB | 0644 |
|
fortran.py | File | 9.54 KB | 0644 |
|
foxpro.py | File | 25.62 KB | 0644 |
|
functional.py | File | 698 B | 0644 |
|
go.py | File | 3.61 KB | 0644 |
|
grammar_notation.py | File | 6.18 KB | 0644 |
|
graph.py | File | 2.31 KB | 0644 |
|
graphics.py | File | 25.23 KB | 0644 |
|
haskell.py | File | 30.49 KB | 0644 |
|
haxe.py | File | 30.23 KB | 0644 |
|
hdl.py | File | 18.26 KB | 0644 |
|
hexdump.py | File | 3.42 KB | 0644 |
|
html.py | File | 18.82 KB | 0644 |
|
idl.py | File | 14.63 KB | 0644 |
|
igor.py | File | 19.53 KB | 0644 |
|
inferno.py | File | 3.04 KB | 0644 |
|
installers.py | File | 12.56 KB | 0644 |
|
int_fiction.py | File | 54.47 KB | 0644 |
|
iolang.py | File | 1.86 KB | 0644 |
|
j.py | File | 4.42 KB | 0644 |
|
javascript.py | File | 58.72 KB | 0644 |
|
julia.py | File | 13.76 KB | 0644 |
|
jvm.py | File | 65.18 KB | 0644 |
|
lisp.py | File | 137.38 KB | 0644 |
|
make.py | File | 7.16 KB | 0644 |
|
markup.py | File | 19.97 KB | 0644 |
|
math.py | File | 700 B | 0644 |
|
matlab.py | File | 28.47 KB | 0644 |
|
ml.py | File | 27.23 KB | 0644 |
|
modeling.py | File | 12.53 KB | 0644 |
|
modula2.py | File | 51.33 KB | 0644 |
|
monte.py | File | 6.16 KB | 0644 |
|
ncl.py | File | 62.49 KB | 0644 |
|
nimrod.py | File | 5.05 KB | 0644 |
|
nit.py | File | 2.68 KB | 0644 |
|
nix.py | File | 3.94 KB | 0644 |
|
oberon.py | File | 3.65 KB | 0644 |
|
objective.py | File | 22.22 KB | 0644 |
|
ooc.py | File | 2.93 KB | 0644 |
|
other.py | File | 1.73 KB | 0644 |
|
parasail.py | File | 2.67 KB | 0644 |
|
parsers.py | File | 26.94 KB | 0644 |
|
pascal.py | File | 31.88 KB | 0644 |
|
pawn.py | File | 7.9 KB | 0644 |
|
perl.py | File | 31.26 KB | 0644 |
|
php.py | File | 10.48 KB | 0644 |
|
praat.py | File | 12.26 KB | 0644 |
|
prolog.py | File | 11.78 KB | 0644 |
|
python.py | File | 41.39 KB | 0644 |
|
qvt.py | File | 5.97 KB | 0644 |
|
r.py | File | 23.2 KB | 0644 |
|
rdf.py | File | 9.18 KB | 0644 |
|
rebol.py | File | 18.18 KB | 0644 |
|
resource.py | File | 2.86 KB | 0644 |
|
rnc.py | File | 1.94 KB | 0644 |
|
roboconf.py | File | 2.02 KB | 0644 |
|
robotframework.py | File | 18.3 KB | 0644 |
|
ruby.py | File | 21.62 KB | 0644 |
|
rust.py | File | 7.51 KB | 0644 |
|
sas.py | File | 9.23 KB | 0644 |
|
scripting.py | File | 66.17 KB | 0644 |
|
shell.py | File | 30.69 KB | 0644 |
|
smalltalk.py | File | 7.05 KB | 0644 |
|
smv.py | File | 2.74 KB | 0644 |
|
snobol.py | File | 2.69 KB | 0644 |
|
special.py | File | 3.08 KB | 0644 |
|
sql.py | File | 28.75 KB | 0644 |
|
stata.py | File | 3.54 KB | 0644 |
|
supercollider.py | File | 3.43 KB | 0644 |
|
tcl.py | File | 5.27 KB | 0644 |
|
templates.py | File | 71.73 KB | 0644 |
|
testing.py | File | 10.5 KB | 0644 |
|
text.py | File | 977 B | 0644 |
|
textedit.py | File | 5.92 KB | 0644 |
|
textfmts.py | File | 10.6 KB | 0644 |
|
theorem.py | File | 18.59 KB | 0644 |
|
trafficscript.py | File | 1.51 KB | 0644 |
|
typoscript.py | File | 8.2 KB | 0644 |
|
urbi.py | File | 5.62 KB | 0644 |
|
varnish.py | File | 7.1 KB | 0644 |
|
verification.py | File | 3.62 KB | 0644 |
|
web.py | File | 918 B | 0644 |
|
webmisc.py | File | 38.96 KB | 0644 |
|
whiley.py | File | 3.92 KB | 0644 |
|
x10.py | File | 1.92 KB | 0644 |
|