mirror of
https://github.com/facebook/sapling.git
synced 2024-10-07 15:27:13 +03:00
73dce8b9d6
When highlight extension encountered files that pygments didn't recognize, it used to fall back to text lexer. Also, pygments uses TextLexer for .txt files. This lexer is noop by design. On bigger files, however, doing the noop highlighting resulted in noticeable extra CPU work and memory usage: to show a 1 MB text file, hgweb required about 0.7s more (on top of ~3.8s, Q8400) and consumed about 100 MB of RAM more (on top of ~150 MB). Let's just exit the function when it's clear that nothing will be highlighted. Due to how this pygmentize function works (it modifies the template in-place), we can just return from it and everything else will work as if highlight extension wasn't enabled.
70 lines
2.3 KiB
Python
70 lines
2.3 KiB
Python
# highlight.py - highlight extension implementation file
#
# Copyright 2007-2009 Adam Hupp <adam@hupp.org> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
#
# The original module was split in an interface and an implementation
# file to defer pygments loading and speedup extension setup.

|
from mercurial import demandimport

# NOTE(review): pygments' packaging machinery appears to clash with
# mercurial's demand-loading, so these modules are exempted from it --
# this must run before the pygments imports below.
demandimport.ignore.extend(['pkgutil', 'pkg_resources', '__main__'])

from mercurial import util, encoding

from pygments import highlight
from pygments.util import ClassNotFound
from pygments.lexers import guess_lexer, guess_lexer_for_filename, TextLexer
from pygments.formatters import HtmlFormatter

# Stylesheet link appended to the hgweb header template by pygmentize().
SYNTAX_CSS = ('\n<link rel="stylesheet" href="{url}highlightcss" '
              'type="text/css" />')

|
def pygmentize(field, fctx, style, tmpl):
    """Syntax-highlight *field* of *tmpl* for file context *fctx*.

    Mutates the template in place: ensures the highlight stylesheet link
    is present in the cached header, installs a 'colorize' filter fed by
    the highlighted lines, and rewires *field* to use it.  Returns early
    (leaving everything but the header untouched) for binary files and
    for files pygments cannot, or would trivially, highlight.
    """
    # Ensure the syntax-highlighting stylesheet link is in the header.
    header = tmpl.load('header')
    if SYNTAX_CSS not in header:
        tmpl.cache['header'] = header + SYNTAX_CSS

    data = fctx.data()
    if util.binary(data):
        return

    # Drop control characters that unicode.splitlines() treats as line
    # breaks but str.splitlines() does not ("reasons").
    for ctrl in "\x0c\x1c\x1d\x1e":
        data = data.replace(ctrl, '')

    # Pygments is best used with Unicode strings:
    # <http://pygments.org/docs/unicode/>
    data = data.decode(encoding.encoding, 'replace')

    # Multi-line strings force whole-file highlighting (no line-by-line);
    # only a prefix is needed for lexer guessing, though.
    sample = data[:1024]
    try:
        lexer = guess_lexer_for_filename(fctx.path(), sample, stripnl=False)
    except (ClassNotFound, ValueError):
        try:
            lexer = guess_lexer(sample, stripnl=False)
        except (ClassNotFound, ValueError):
            # Unknown file type: don't highlight.
            return

    if isinstance(lexer, TextLexer):
        # TextLexer is a noop by design; skip the wasted CPU and memory.
        return

    colorized = highlight(data, lexer,
                          HtmlFormatter(nowrap=True, style=style))
    lines = iter(colorized.splitlines())
    # The filter hands out one highlighted line per call, lazily
    # re-encoded to the local encoding.
    tmpl.filters['colorize'] = (
        lambda x: lines.next().encode(encoding.encoding, 'replace'))

    tmpl.cache[field] = tmpl.cache[field].replace('line|escape',
                                                  'line|colorize')