Diffstat (limited to 'sphinx/util')
-rw-r--r--  sphinx/util/__init__.py      75
-rw-r--r--  sphinx/util/docstrings.py    32
-rw-r--r--  sphinx/util/jsdump.py         4
-rw-r--r--  sphinx/util/jsonimpl.py       2
-rw-r--r--  sphinx/util/matching.py      11
-rw-r--r--  sphinx/util/nodes.py         57
-rw-r--r--  sphinx/util/osutil.py        16
-rw-r--r--  sphinx/util/png.py           14
-rw-r--r--  sphinx/util/pycompat.py     101
-rw-r--r--  sphinx/util/websupport.py    12
10 files changed, 259 insertions, 65 deletions
diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py
index 621c45ae..d51f0208 100644
--- a/sphinx/util/__init__.py
+++ b/sphinx/util/__init__.py
@@ -18,6 +18,8 @@ import tempfile
import posixpath
import traceback
from os import path
+from codecs import open
+from collections import deque
import docutils
from docutils.utils import relative_path
@@ -48,8 +50,7 @@ def docname_join(basedocname, docname):
def get_matching_files(dirname, exclude_matchers=()):
- """
- Get all file names in a directory, recursively.
+ """Get all file names in a directory, recursively.
Exclude files and dirs matching some matcher in *exclude_matchers*.
"""
@@ -75,9 +76,8 @@ def get_matching_files(dirname, exclude_matchers=()):
def get_matching_docs(dirname, suffix, exclude_matchers=()):
- """
- Get all file names (without suffix) matching a suffix in a
- directory, recursively.
+ """Get all file names (without suffix) matching a suffix in a directory,
+ recursively.
Exclude files and dirs matching a pattern in *exclude_patterns*.
"""
@@ -140,8 +140,8 @@ def copy_static_entry(source, targetdir, builder, context={},
target = path.join(targetdir, path.basename(source))
if source.lower().endswith('_t') and builder.templates:
# templated!
- fsrc = open(source, 'rb')
- fdst = open(target[:-2], 'wb')
+ fsrc = open(source, 'r', encoding='utf-8')
+ fdst = open(target[:-2], 'w', encoding='utf-8')
fdst.write(builder.templates.render_string(fsrc.read(), context))
fsrc.close()
fdst.close()
@@ -162,19 +162,24 @@ def copy_static_entry(source, targetdir, builder, context={},
shutil.copytree(source, target)
+_DEBUG_HEADER = '''\
+# Sphinx version: %s
+# Python version: %s
+# Docutils version: %s %s
+# Jinja2 version: %s
+'''
+
def save_traceback():
- """
- Save the current exception's traceback in a temporary file.
- """
+ """Save the current exception's traceback in a temporary file."""
import platform
exc = traceback.format_exc()
fd, path = tempfile.mkstemp('.log', 'sphinx-err-')
- os.write(fd, '# Sphinx version: %s\n' % sphinx.__version__)
- os.write(fd, '# Python version: %s\n' % platform.python_version())
- os.write(fd, '# Docutils version: %s %s\n' % (docutils.__version__,
- docutils.__version_details__))
- os.write(fd, '# Jinja2 version: %s\n' % jinja2.__version__)
- os.write(fd, exc)
+ os.write(fd, (_DEBUG_HEADER %
+ (sphinx.__version__,
+ platform.python_version(),
+ docutils.__version__, docutils.__version_details__,
+ jinja2.__version__)).encode('utf-8'))
+ os.write(fd, exc.encode('utf-8'))
os.close(fd)
return path
@@ -227,8 +232,7 @@ class Tee(object):
def parselinenos(spec, total):
- """
- Parse a line number spec (such as "1,2,4-6") and return a list of
+ """Parse a line number spec (such as "1,2,4-6") and return a list of
wanted line numbers.
"""
items = list()
@@ -282,9 +286,7 @@ def rpartition(s, t):
def format_exception_cut_frames(x=1):
- """
- Format an exception with traceback, but only the last x frames.
- """
+ """Format an exception with traceback, but only the last x frames."""
typ, val, tb = sys.exc_info()
#res = ['Traceback (most recent call last):\n']
res = []
@@ -292,3 +294,34 @@ def format_exception_cut_frames(x=1):
res += tbres[-x:]
res += traceback.format_exception_only(typ, val)
return ''.join(res)
+
+
+class PeekableIterator(object):
+ """
+ An iterator which wraps any iterable and makes it possible to peek to see
+ what's the next item.
+ """
+ def __init__(self, iterable):
+ self.remaining = deque()
+ self._iterator = iter(iterable)
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ """Return the next item from the iterator."""
+ if self.remaining:
+ return self.remaining.popleft()
+ return self._iterator.next()
+
+ def push(self, item):
+ """Push the `item` on the internal stack, it will be returned on the
+ next :meth:`next` call.
+ """
+ self.remaining.append(item)
+
+ def peek(self):
+ """Return the next item without changing the state of the iterator."""
+ item = self.next()
+ self.push(item)
+ return item
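
Note: a minimal usage sketch of the PeekableIterator added above (assuming it is imported from sphinx.util; it follows the Python 2 iterator protocol, hence .next()):

    it = PeekableIterator([1, 2, 3])
    assert it.peek() == 1   # look ahead without consuming the item
    assert it.next() == 1   # now actually consume it
    it.push(99)             # pushed items are returned before the wrapped iterable
    assert it.next() == 99
    assert it.next() == 2
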
diff --git a/sphinx/util/docstrings.py b/sphinx/util/docstrings.py
index fca79c3f..ba81bf00 100644
--- a/sphinx/util/docstrings.py
+++ b/sphinx/util/docstrings.py
@@ -12,26 +12,29 @@
import sys
-def prepare_docstring(s):
- """
- Convert a docstring into lines of parseable reST. Return it as a list of
- lines usable for inserting into a docutils ViewList (used as argument
- of nested_parse().) An empty line is added to act as a separator between
- this docstring and following content.
+def prepare_docstring(s, ignore=1):
+ """Convert a docstring into lines of parseable reST. Remove common leading
+ indentation, where the indentation of a given number of lines (usually just
+ one) is ignored.
+
+ Return the docstring as a list of lines usable for inserting into a docutils
+ ViewList (used as argument of nested_parse().) An empty line is added to
+ act as a separator between this docstring and following content.
"""
lines = s.expandtabs().splitlines()
- # Find minimum indentation of any non-blank lines after first line.
+ # Find minimum indentation of any non-blank lines after ignored lines.
margin = sys.maxint
- for line in lines[1:]:
+ for line in lines[ignore:]:
content = len(line.lstrip())
if content:
indent = len(line) - content
margin = min(margin, indent)
- # Remove indentation.
- if lines:
- lines[0] = lines[0].lstrip()
+ # Remove indentation from ignored lines.
+ for i in range(ignore):
+ if i < len(lines):
+ lines[i] = lines[i].lstrip()
if margin < sys.maxint:
- for i in range(1, len(lines)): lines[i] = lines[i][margin:]
+ for i in range(ignore, len(lines)): lines[i] = lines[i][margin:]
# Remove any leading blank lines.
while lines and not lines[0]:
lines.pop(0)
@@ -42,9 +45,8 @@ def prepare_docstring(s):
def prepare_commentdoc(s):
- """
- Extract documentation comment lines (starting with #:) and return them as a
- list of lines. Returns an empty list if there is no documentation.
+ """Extract documentation comment lines (starting with #:) and return them
+ as a list of lines. Returns an empty list if there is no documentation.
"""
result = []
lines = [line.strip() for line in s.expandtabs().splitlines()]
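
Note: a hedged sketch of the new *ignore* argument to prepare_docstring: the first *ignore* lines are only lstrip()-ped, and the common indentation is computed from the remaining lines only (illustrative input, not taken from the patch):

    doc = 'Summary line.\n\n    body line one\n    body line two\n'
    prepare_docstring(doc)
    # roughly: ['Summary line.', '', 'body line one', 'body line two', ...]
    prepare_docstring(doc, ignore=2)   # treat the first two lines as unindented
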
diff --git a/sphinx/util/jsdump.py b/sphinx/util/jsdump.py
index a0f6d0e3..191e2419 100644
--- a/sphinx/util/jsdump.py
+++ b/sphinx/util/jsdump.py
@@ -12,6 +12,8 @@
import re
+from sphinx.util.pycompat import u
+
_str_re = re.compile(r'"(\\\\|\\"|[^"])*"')
_int_re = re.compile(r'\d+')
_name_re = re.compile(r'[a-zA-Z]\w*')
@@ -50,7 +52,7 @@ def encode_string(s):
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
def decode_string(s):
- return ESCAPED.sub(lambda m: eval('u"'+m.group()+'"'), s)
+ return ESCAPED.sub(lambda m: eval(u + '"' + m.group() + '"'), s)
reswords = set("""\
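
Note: the decode_string change above builds the string-literal prefix from pycompat's u, so the same eval() yields a unicode literal on Python 2 (where u == 'u') and a plain str literal on Python 3 (where u == ''). A minimal sketch of the idea:

    u = 'u'                                 # Python 2; u = '' on Python 3
    print(eval(u + '"' + r'\u00e9' + '"'))  # evaluates u"\u00e9" -> é
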
diff --git a/sphinx/util/jsonimpl.py b/sphinx/util/jsonimpl.py
index b1982d9f..cf5ddb42 100644
--- a/sphinx/util/jsonimpl.py
+++ b/sphinx/util/jsonimpl.py
@@ -13,7 +13,7 @@ import UserString
try:
import json
- # json-py's json module has not JSONEncoder; this will raise AttributeError
+ # json-py's json module has no JSONEncoder; this will raise AttributeError
# if json-py is imported instead of the built-in json module
JSONEncoder = json.JSONEncoder
except (ImportError, AttributeError):
diff --git a/sphinx/util/matching.py b/sphinx/util/matching.py
index d358a2a0..fa6cb7e0 100644
--- a/sphinx/util/matching.py
+++ b/sphinx/util/matching.py
@@ -13,8 +13,7 @@ import re
def _translate_pattern(pat):
- """
- Translate a shell-style glob pattern to a regular expression.
+ """Translate a shell-style glob pattern to a regular expression.
Adapted from the fnmatch module, but enhanced so that single stars don't
match slashes.
@@ -65,16 +64,14 @@ def compile_matchers(patterns):
_pat_cache = {}
def patmatch(name, pat):
- """
- Return if name matches pat. Adapted from fnmatch module.
- """
+ """Return if name matches pat. Adapted from fnmatch module."""
if pat not in _pat_cache:
_pat_cache[pat] = re.compile(_translate_pattern(pat))
return _pat_cache[pat].match(name)
def patfilter(names, pat):
- """
- Return the subset of the list NAMES that match PAT.
+ """Return the subset of the list NAMES that match PAT.
+
Adapted from fnmatch module.
"""
if pat not in _pat_cache:
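
Note: a short, hedged example of the enhanced glob matching (assuming sphinx.util.matching is importable): a single '*' no longer crosses path separators, unlike plain fnmatch.

    from sphinx.util.matching import patmatch, patfilter

    bool(patmatch('index.rst', '*.rst'))           # True
    bool(patmatch('subdir/index.rst', '*.rst'))    # False: '*' stops at '/'
    patfilter(['a.rst', 'b.txt', 'sub/c.rst'], '*.rst')   # ['a.rst']
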
diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py
index fd6a2f83..09ab8b88 100644
--- a/sphinx/util/nodes.py
+++ b/sphinx/util/nodes.py
@@ -10,11 +10,12 @@
"""
import re
-import types
from docutils import nodes
from sphinx import addnodes
+from sphinx.locale import pairindextypes
+from sphinx.util.pycompat import class_types
# \x00 means the "<" was backslash-escaped
@@ -22,7 +23,28 @@ explicit_title_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)
caption_ref_re = explicit_title_re # b/w compat alias
+def extract_messages(doctree):
+ """Extract translatable messages from a document tree."""
+ for node in doctree.traverse(nodes.TextElement):
+ if isinstance(node, (nodes.Invisible, nodes.Inline)):
+ continue
+ # <field_name>orphan</field_name>
+ # XXX ignore all metadata (== docinfo)
+ if isinstance(node, nodes.field_name) and node.children[0] == 'orphan':
+ continue
+ msg = node.rawsource.replace('\n', ' ').strip()
+ # XXX nodes rendering empty are likely a bug in sphinx.addnodes
+ if msg:
+ yield node, msg
+
+
def nested_parse_with_titles(state, content, node):
+ """Version of state.nested_parse() that allows titles and does not require
+ titles to have the same decoration as the calling document.
+
+ This is useful when the parsed content comes from a completely different
+ context, such as docstrings.
+ """
# hack around title style bookkeeping
surrounding_title_styles = state.memo.title_styles
surrounding_section_level = state.memo.section_level
@@ -51,6 +73,37 @@ def split_explicit_title(text):
return False, text, text
+indextypes = [
+ 'single', 'pair', 'double', 'triple',
+]
+
+def process_index_entry(entry, targetid):
+ indexentries = []
+ entry = entry.strip()
+ for type in pairindextypes:
+ if entry.startswith(type+':'):
+ value = entry[len(type)+1:].strip()
+ value = pairindextypes[type] + '; ' + value
+ indexentries.append(('pair', value, targetid, value))
+ break
+ else:
+ for type in indextypes:
+ if entry.startswith(type+':'):
+ value = entry[len(type)+1:].strip()
+ if type == 'double':
+ type = 'pair'
+ indexentries.append((type, value, targetid, value))
+ break
+ # shorthand notation for single entries
+ else:
+ for value in entry.split(','):
+ value = value.strip()
+ if not value:
+ continue
+ indexentries.append(('single', value, targetid, value))
+ return indexentries
+
+
def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc):
"""Inline all toctrees in the *tree*.
@@ -115,7 +168,7 @@ def _new_traverse(self, condition=None,
if include_self and descend and not siblings and not ascend:
if condition is None:
return self._all_traverse([])
- elif isinstance(condition, (types.ClassType, type)):
+ elif isinstance(condition, class_types):
return self._fast_traverse(condition, [])
return self._old_traverse(condition, include_self,
descend, siblings, ascend)
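
Note: hedged examples of what the new process_index_entry returns, based on the function body above (targetid is whatever anchor name the caller generated):

    process_index_entry('single: installation', 'index-0')
    # [('single', 'installation', 'index-0', 'installation')]
    process_index_entry('pair: builder; HTML', 'index-1')
    # [('pair', 'builder; HTML', 'index-1', 'builder; HTML')]
    process_index_entry('foo, bar', 'index-2')   # shorthand for single entries
    # [('single', 'foo', 'index-2', 'foo'), ('single', 'bar', 'index-2', 'bar')]
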
diff --git a/sphinx/util/osutil.py b/sphinx/util/osutil.py
index 6aeb2f4f..487a5afc 100644
--- a/sphinx/util/osutil.py
+++ b/sphinx/util/osutil.py
@@ -11,6 +11,7 @@
import os
import re
+import sys
import time
import errno
import shutil
@@ -58,8 +59,8 @@ def ensuredir(path):
def walk(top, topdown=True, followlinks=False):
- """
- Backport of os.walk from 2.6, where the followlinks argument was added.
+ """Backport of os.walk from 2.6, where the *followlinks* argument was
+ added.
"""
names = os.listdir(top)
@@ -124,7 +125,10 @@ no_fn_re = re.compile(r'[^a-zA-Z0-9_-]')
def make_filename(string):
return no_fn_re.sub('', string)
-
-def ustrftime(format, *args):
- # strftime for unicode strings
- return time.strftime(unicode(format).encode('utf-8'), *args).decode('utf-8')
+if sys.version_info < (3, 0):
+ def ustrftime(format, *args):
+ # strftime for unicode strings
+ return time.strftime(unicode(format).encode('utf-8'), *args) \
+ .decode('utf-8')
+else:
+ ustrftime = time.strftime
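
Note: an illustrative call of ustrftime: on Python 2 the format string is encoded to UTF-8 before strftime and the result decoded again, so non-ASCII format strings survive; on Python 3 it is simply time.strftime.

    import time
    print(ustrftime(u'%Y-%m-%d %H:%M', time.localtime()))
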
diff --git a/sphinx/util/png.py b/sphinx/util/png.py
index 5450bccf..50c72efd 100644
--- a/sphinx/util/png.py
+++ b/sphinx/util/png.py
@@ -12,18 +12,18 @@
import struct
import binascii
+from sphinx.util.pycompat import b
+
LEN_IEND = 12
LEN_DEPTH = 22
DEPTH_CHUNK_LEN = struct.pack('!i', 10)
-DEPTH_CHUNK_START = 'tEXtDepth\x00'
-IEND_CHUNK = '\x00\x00\x00\x00IEND\xAE\x42\x60\x82'
+DEPTH_CHUNK_START = b('tEXtDepth\x00')
+IEND_CHUNK = b('\x00\x00\x00\x00IEND\xAE\x42\x60\x82')
def read_png_depth(filename):
- """
- Read the special tEXt chunk indicating the depth from a PNG file.
- """
+ """Read the special tEXt chunk indicating the depth from a PNG file."""
result = None
f = open(filename, 'rb')
try:
@@ -39,8 +39,8 @@ def read_png_depth(filename):
def write_png_depth(filename, depth):
- """
- Write the special tEXt chunk indicating the depth to a PNG file.
+ """Write the special tEXt chunk indicating the depth to a PNG file.
+
The chunk is placed immediately before the special IEND chunk.
"""
data = struct.pack('!i', depth)
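
Note: a hedged round-trip sketch for the two helpers (assuming 'formula.png' is an existing PNG, e.g. one produced by the math extension): write_png_depth() inserts the private tEXt depth chunk just before IEND, and read_png_depth() recovers it.

    write_png_depth('formula.png', 21)
    assert read_png_depth('formula.png') == 21
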
diff --git a/sphinx/util/pycompat.py b/sphinx/util/pycompat.py
index bbaf4e23..a95c9332 100644
--- a/sphinx/util/pycompat.py
+++ b/sphinx/util/pycompat.py
@@ -13,11 +13,104 @@ import sys
import codecs
import encodings
-
-try:
+# ------------------------------------------------------------------------------
+# Python 2/3 compatibility
+
+if sys.version_info >= (3, 0):
+ # Python 3
+ class_types = (type,)
+ # the ubiquitous "bytes" helper functions
+ def b(s):
+ return s.encode('utf-8')
+ bytes = bytes
+ # prefix for Unicode strings
+ u = ''
+ # support for running 2to3 over config files
+ def convert_with_2to3(filepath):
+ from lib2to3.refactor import RefactoringTool, get_fixers_from_package
+ from lib2to3.pgen2.parse import ParseError
+ fixers = get_fixers_from_package('lib2to3.fixes')
+ refactoring_tool = RefactoringTool(fixers)
+ source = refactoring_tool._read_python_source(filepath)[0]
+ try:
+ tree = refactoring_tool.refactor_string(source, 'conf.py')
+ except ParseError, err:
+ # do not propagate lib2to3 exceptions
+ lineno, offset = err.context[1]
+ # try to match ParseError details with SyntaxError details
+ raise SyntaxError(err.msg, (filepath, lineno, offset, err.value))
+ return unicode(tree)
+
+else:
+ # Python 2
+ from types import ClassType
+ class_types = (type, ClassType)
+ b = str
+ bytes = str
+ u = 'u'
+ # no need to refactor on 2.x versions
+ convert_with_2to3 = None
+
+
+# ------------------------------------------------------------------------------
+# Missing builtins and itertools in Python < 2.6
+
+if sys.version_info >= (2, 6):
+ # Python >= 2.6
+ next = next
+
+ from itertools import product
+ try:
+ from itertools import zip_longest # Python 3 name
+ except ImportError:
+ from itertools import izip_longest as zip_longest
+
+else:
+ # Python < 2.6
+ from itertools import izip, repeat, chain
+
+ # this is on Python 2, where the method is called "next" (it is refactored
+ # to __next__ by 2to3, but in that case never executed)
+ def next(iterator):
+ return iterator.next()
+
+ # These replacement functions have been taken from the Python 2.6
+ # itertools documentation.
+ def product(*args, **kwargs):
+ pools = map(tuple, args) * kwargs.get('repeat', 1)
+ result = [[]]
+ for pool in pools:
+ result = [x + [y] for x in result for y in pool]
+ for prod in result:
+ yield tuple(prod)
+
+ def zip_longest(*args, **kwds):
+ # zip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-
+ fillvalue = kwds.get('fillvalue')
+ def sentinel(counter = ([fillvalue]*(len(args)-1)).pop):
+ yield counter() # yields the fillvalue, or raises IndexError
+ fillers = repeat(fillvalue)
+ iters = [chain(it, sentinel(), fillers) for it in args]
+ try:
+ for tup in izip(*iters):
+ yield tup
+ except IndexError:
+ pass
+
+
+# ------------------------------------------------------------------------------
+# Missing builtins and codecs in Python < 2.5
+
+if sys.version_info >= (2, 5):
+ # Python >= 2.5
+ base_exception = BaseException
any = any
all = all
-except NameError:
+
+else:
+ # Python 2.4
+ base_exception = Exception
+
def all(gen):
for i in gen:
if not i:
@@ -30,8 +123,6 @@ except NameError:
return True
return False
-
-if sys.version_info < (2, 5):
# Python 2.4 doesn't know the utf-8-sig encoding, so deliver it here
def my_search_function(encoding):
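
Note: the pre-2.6 fallbacks above are taken from the itertools documentation; a quick check that they behave like their stdlib counterparts:

    list(product('ab', range(2)))
    # [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
    list(zip_longest('ABCD', 'xy', fillvalue='-'))
    # [('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')]
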
diff --git a/sphinx/util/websupport.py b/sphinx/util/websupport.py
new file mode 100644
index 00000000..d9b47213
--- /dev/null
+++ b/sphinx/util/websupport.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.util.websupport
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+def is_commentable(node):
+ #return node.__class__.__name__ in ('paragraph', 'literal_block')
+ return node.__class__.__name__ == 'paragraph'
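
Note: a hedged sketch of how the predicate might be used by the web support builder (doctree here stands for any parsed docutils document):

    from docutils import nodes
    commentable_nodes = [node for node in doctree.traverse(nodes.Element)
                         if is_commentable(node)]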