| author | milde <milde@929543f6-e4f2-0310-98a6-ba3bd3dd1d04> | 2017-08-11 14:05:30 +0000 |
|---|---|---|
| committer | milde <milde@929543f6-e4f2-0310-98a6-ba3bd3dd1d04> | 2017-08-11 14:05:30 +0000 |
| commit | 1a25c166fc8f480ae6b476cf2e0a24a478548419 (patch) | |
| tree | 7963edee5a2984763b172c3b0a802bdada7f9456 /docutils/utils/code_analyzer.py | |
| parent | 33a5eed8e3654b3fbf6bdd3695e8090f63dcee8e (diff) | |
| download | docutils-1a25c166fc8f480ae6b476cf2e0a24a478548419.tar.gz | |
Drop compatibility code for Python 2.4 and 2.5.
git-svn-id: https://svn.code.sf.net/p/docutils/code/trunk/docutils@8163 929543f6-e4f2-0310-98a6-ba3bd3dd1d04
Diffstat (limited to 'docutils/utils/code_analyzer.py')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | docutils/utils/code_analyzer.py | 13 |

1 file changed, 8 insertions, 5 deletions
```diff
diff --git a/docutils/utils/code_analyzer.py b/docutils/utils/code_analyzer.py
index 5df203e54..7e4197924 100644
--- a/docutils/utils/code_analyzer.py
+++ b/docutils/utils/code_analyzer.py
@@ -22,7 +22,7 @@ unstyled_tokens = ['token', # Token (base token type)
                    ''] # short name for Token and Text
 # (Add, e.g., Token.Punctuation with ``unstyled_tokens += 'punctuation'``.)
 
-class LexerError(ApplicationError):
+class LexerError(ApplicationError): pass
 
 class Lexer(object):
@@ -64,11 +64,14 @@ class Lexer(object):
         except pygments.util.ClassNotFound:
             raise LexerError('Cannot analyze code. '
                              'No Pygments lexer found for "%s".' % language)
+        # self.lexer.add_filter('tokenmerge')
+        # Since version 1.2. (released Jan 01, 2010) Pygments has a
+        # TokenMergeFilter. # ``self.merge(tokens)`` in __iter__ could
+        # be replaced by ``self.lexer.add_filter('tokenmerge')`` in __init__.
+        # However, `merge` below also strips a final newline added by pygments.
+        #
+        # self.lexer.add_filter('tokenmerge')
 
-        # Since version 1.2. (released Jan 01, 2010) Pygments has a
-        # TokenMergeFilter. However, this requires Python >= 2.4. When Docutils
-        # requires same minimal version, ``self.merge(tokens)`` in __iter__ can
-        # be replaced by ``self.lexer.add_filter('tokenmerge')`` in __init__.
 
     def merge(self, tokens):
         """Merge subsequent tokens of same token-type.
```
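The rewritten comment is the substance of this hunk: Pygments has shipped a `TokenMergeFilter` (registered under the name `'tokenmerge'`) since version 1.2, so `self.merge(tokens)` in `__iter__` could in principle be replaced by `self.lexer.add_filter('tokenmerge')` in `__init__`; the reason it is not is that `merge` also strips the final newline Pygments appends to the analyzed code. The following is a minimal sketch of that trade-off, not part of the commit (the sample `code` string and the standalone `merge` helper are illustrative; it assumes Pygments is installed):

```python
import pygments
from pygments.lexers import get_lexer_by_name

code = 'print("hello")'  # note: no trailing newline in the input

# Route 1: Pygments' built-in filter (registered name 'tokenmerge').
# It merges runs of same-typed tokens, but the final newline that
# Pygments appends to the analyzed code is still present.
lexer = get_lexer_by_name('python')
lexer.add_filter('tokenmerge')          # TokenMergeFilter, Pygments >= 1.2
filtered = list(pygments.lex(code, lexer))
assert filtered[-1][1].endswith('\n')   # Pygments' added newline survives

# Route 2: a hand-rolled merge, roughly what docutils' Lexer.merge does:
# merge runs of same-typed tokens *and* strip that final newline.
def merge(tokens):
    tokens = iter(tokens)
    lasttype, lastval = next(tokens)
    for ttype, value in tokens:
        if ttype is lasttype:           # same token type: extend current run
            lastval += value
        else:                           # type changed: emit the finished run
            yield lasttype, lastval
            lasttype, lastval = ttype, value
    if lastval.endswith('\n'):          # strip the newline Pygments added
        lastval = lastval[:-1]
    if lastval:                         # drop the token if nothing is left
        yield lasttype, lastval

merged = list(merge(pygments.lex(code, get_lexer_by_name('python'))))
assert not merged[-1][1].endswith('\n')
```

Both routes produce the same merged token stream except for that trailing newline, which is why the commit keeps docutils' own `merge` and leaves `add_filter('tokenmerge')` commented out.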
