summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.hgignore2
-rw-r--r--AUTHORS4
-rw-r--r--CHANGES19
-rw-r--r--MANIFEST.in2
-rw-r--r--Makefile53
-rw-r--r--README12
-rw-r--r--custom_fixers/__init__.py0
-rw-r--r--custom_fixers/fix_alt_unicode.py12
-rw-r--r--distribute_setup.py485
-rw-r--r--doc/Makefile6
-rw-r--r--doc/builders.rst12
-rw-r--r--doc/config.rst1
-rw-r--r--doc/contents.rst2
-rw-r--r--doc/ext/graphviz.rst11
-rw-r--r--doc/intl.rst13
-rw-r--r--doc/intro.rst12
-rw-r--r--doc/markup/inline.rst1
-rw-r--r--doc/markup/misc.rst79
-rw-r--r--doc/markup/para.rst62
-rw-r--r--doc/markup/toctree.rst14
-rw-r--r--doc/web/api.rst65
-rw-r--r--doc/web/quickstart.rst261
-rw-r--r--doc/web/searchadapters.rst45
-rw-r--r--doc/web/storagebackends.rst46
-rw-r--r--doc/websupport.rst16
-rw-r--r--ez_setup.py276
-rw-r--r--setup.py11
-rw-r--r--sphinx/__init__.py25
-rw-r--r--sphinx/addnodes.py195
-rw-r--r--sphinx/application.py22
-rw-r--r--sphinx/builders/__init__.py46
-rw-r--r--sphinx/builders/devhelp.py1
-rw-r--r--sphinx/builders/epub.py32
-rw-r--r--sphinx/builders/html.py64
-rw-r--r--sphinx/builders/htmlhelp.py3
-rw-r--r--sphinx/builders/intl.py110
-rw-r--r--sphinx/builders/qthelp.py14
-rw-r--r--sphinx/builders/versioning.py72
-rw-r--r--sphinx/builders/websupport.py161
-rw-r--r--sphinx/config.py42
-rw-r--r--sphinx/directives/code.py24
-rw-r--r--sphinx/directives/other.py50
-rw-r--r--sphinx/domains/__init__.py29
-rw-r--r--sphinx/domains/cpp.py29
-rw-r--r--sphinx/domains/javascript.py2
-rw-r--r--sphinx/domains/python.py22
-rw-r--r--sphinx/domains/rst.py7
-rw-r--r--sphinx/environment.py228
-rw-r--r--sphinx/ext/autodoc.py162
-rw-r--r--sphinx/ext/autosummary/__init__.py22
-rw-r--r--sphinx/ext/autosummary/generate.py13
-rw-r--r--sphinx/ext/coverage.py5
-rw-r--r--sphinx/ext/doctest.py6
-rw-r--r--sphinx/ext/graphviz.py38
-rw-r--r--sphinx/ext/inheritance_diagram.py16
-rw-r--r--sphinx/ext/intersphinx.py26
-rw-r--r--sphinx/ext/oldcmarkup.py1
-rw-r--r--sphinx/ext/pngmath.py6
-rw-r--r--sphinx/ext/viewcode.py8
-rw-r--r--sphinx/highlighting.py4
-rw-r--r--sphinx/jinja2glue.py6
-rw-r--r--sphinx/locale/__init__.py40
-rw-r--r--sphinx/locale/sv/LC_MESSAGES/sphinx.js1
-rw-r--r--sphinx/locale/sv/LC_MESSAGES/sphinx.mobin0 -> 9509 bytes
-rw-r--r--sphinx/locale/sv/LC_MESSAGES/sphinx.po797
-rw-r--r--sphinx/pycode/__init__.py6
-rw-r--r--sphinx/pycode/nodes.py2
-rw-r--r--sphinx/pycode/pgen2/literals.py2
-rw-r--r--sphinx/pycode/pgen2/tokenize.py4
-rw-r--r--sphinx/quickstart.py88
-rw-r--r--sphinx/roles.py35
-rw-r--r--sphinx/setup_command.py3
-rw-r--r--sphinx/themes/basic/searchresults.html36
-rw-r--r--sphinx/themes/basic/static/ajax-loader.gifbin0 -> 673 bytes
-rw-r--r--sphinx/themes/basic/static/comment-bright.pngbin0 -> 3500 bytes
-rw-r--r--sphinx/themes/basic/static/comment-close.pngbin0 -> 3578 bytes
-rw-r--r--sphinx/themes/basic/static/comment.pngbin0 -> 3445 bytes
-rw-r--r--sphinx/themes/basic/static/down-pressed.pngbin0 -> 368 bytes
-rw-r--r--sphinx/themes/basic/static/down.pngbin0 -> 363 bytes
-rw-r--r--sphinx/themes/basic/static/up-pressed.pngbin0 -> 372 bytes
-rw-r--r--sphinx/themes/basic/static/up.pngbin0 -> 363 bytes
-rw-r--r--sphinx/themes/basic/static/websupport.js762
-rw-r--r--sphinx/theming.py14
-rw-r--r--sphinx/util/__init__.py72
-rw-r--r--sphinx/util/docstrings.py15
-rw-r--r--sphinx/util/jsonimpl.py2
-rw-r--r--sphinx/util/matching.py11
-rw-r--r--sphinx/util/nodes.py57
-rw-r--r--sphinx/util/osutil.py16
-rw-r--r--sphinx/util/png.py14
-rw-r--r--sphinx/util/pycompat.py98
-rw-r--r--sphinx/util/websupport.py11
-rw-r--r--sphinx/versioning.py128
-rw-r--r--sphinx/websupport/__init__.py414
-rw-r--r--sphinx/websupport/errors.py33
-rw-r--r--sphinx/websupport/search/__init__.py121
-rw-r--r--sphinx/websupport/search/nullsearch.py24
-rw-r--r--sphinx/websupport/search/whooshsearch.py59
-rw-r--r--sphinx/websupport/search/xapiansearch.py81
-rw-r--r--sphinx/websupport/storage/__init__.py123
-rw-r--r--sphinx/websupport/storage/differ.py79
-rw-r--r--sphinx/websupport/storage/sqlalchemy_db.py205
-rw-r--r--sphinx/websupport/storage/sqlalchemystorage.py174
-rw-r--r--sphinx/writers/html.py6
-rw-r--r--sphinx/writers/latex.py24
-rw-r--r--sphinx/writers/text.py2
-rw-r--r--sphinx/writers/websupport.py46
-rw-r--r--tests/etree13/ElementTree.py14
-rw-r--r--tests/path.py1021
-rw-r--r--tests/root/bom.po12
-rw-r--r--tests/root/conf.py2
-rw-r--r--tests/root/contents.txt1
-rw-r--r--tests/root/doctest.txt41
-rw-r--r--tests/root/literal.inc2
-rw-r--r--tests/root/markup.txt2
-rw-r--r--tests/root/versioning/added.txt20
-rw-r--r--tests/root/versioning/deleted.txt12
-rw-r--r--tests/root/versioning/deleted_end.txt11
-rw-r--r--tests/root/versioning/index.txt13
-rw-r--r--tests/root/versioning/insert.txt18
-rw-r--r--tests/root/versioning/insert_beginning.txt18
-rw-r--r--tests/root/versioning/insert_similar.txt17
-rw-r--r--tests/root/versioning/modified.txt17
-rw-r--r--tests/root/versioning/original.txt15
-rwxr-xr-xtests/run.py16
-rw-r--r--tests/test_application.py2
-rw-r--r--tests/test_autosummary.py4
-rw-r--r--tests/test_build_gettext.py110
-rw-r--r--tests/test_build_html.py10
-rw-r--r--tests/test_build_latex.py3
-rw-r--r--tests/test_config.py17
-rw-r--r--tests/test_coverage.py2
-rw-r--r--tests/test_env.py10
-rw-r--r--tests/test_intersphinx.py17
-rw-r--r--tests/test_markup.py3
-rw-r--r--tests/test_quickstart.py31
-rw-r--r--tests/test_search.py3
-rw-r--r--tests/test_searchadapters.py79
-rw-r--r--tests/test_versioning.py114
-rw-r--r--tests/test_websupport.py279
-rw-r--r--tests/util.py48
-rwxr-xr-xutils/check_sources.py115
-rw-r--r--utils/convert.py43
-rwxr-xr-xutils/reindent.py80
144 files changed, 6826 insertions, 2014 deletions
diff --git a/.hgignore b/.hgignore
index 70ea36a5..b68cccf9 100644
--- a/.hgignore
+++ b/.hgignore
@@ -15,3 +15,5 @@
^env/
\.DS_Store$
~$
+^utils/.*3\.py$
+^distribute-
diff --git a/AUTHORS b/AUTHORS
index 10120c7b..2ee77739 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -15,12 +15,14 @@ Other contributors, listed alphabetically, are:
* Martin Hans -- autodoc improvements
* Dave Kuhlman -- original LaTeX writer
* Thomas Lamb -- linkcheck builder
+* Robert Lehmann -- gettext builder (GSOC project)
* Dan MacKinlay -- metadata fixes
* Martin Mahner -- nature theme
* Will Maier -- directory HTML builder
+* Jacob Mason -- websupport library (GSOC project)
* Roland Meister -- epub builder
* Ezio Melotti -- collapsible sidebar JavaScript
-* Daniel Neuhäuser -- JavaScript domain
+* Daniel Neuhäuser -- JavaScript domain, Python 3 support (GSOC)
* Christopher Perkins -- autosummary integration
* Benjamin Peterson -- unittests
* T. Powers -- HTML output improvements
diff --git a/CHANGES b/CHANGES
index 65967f5b..ce46eb0e 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,3 +1,22 @@
+Release 1.1 (in development)
+============================
+
+* Added Python 3.x support.
+
+* Added i18n support for content, a ``gettext`` builder and
+ related utilities.
+
+* Added the ``websupport`` library.
+
+* #460: Allow limiting the depth of section numbers for HTML.
+
+* #138: Add an ``index`` role, to make inline index entries.
+
+* #443: Allow referencing external graphviz files.
+
+* #221: Add Swedish locale.
+
+
Release 1.0.4 (in development)
==============================
diff --git a/MANIFEST.in b/MANIFEST.in
index 25cbc334..cfc44c17 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,7 +7,7 @@ include TODO
include babel.cfg
include Makefile
-include ez_setup.py
+include distribute_setup.py
include sphinx-autogen.py
include sphinx-build.py
include sphinx-quickstart.py
diff --git a/Makefile b/Makefile
index 7057a715..09aa3c96 100644
--- a/Makefile
+++ b/Makefile
@@ -1,36 +1,63 @@
PYTHON ?= python
-export PYTHONPATH = $(shell echo "$$PYTHONPATH"):./sphinx
+.PHONY: all check clean clean-pyc clean-patchfiles clean-backupfiles \
+ clean-generated pylint reindent test covertest build convert-utils
-.PHONY: all check clean clean-pyc clean-patchfiles pylint reindent test
+DONT_CHECK = -i build -i dist -i sphinx/style/jquery.js \
+ -i sphinx/pycode/pgen2 -i sphinx/util/smartypants.py \
+ -i .ropeproject -i doc/_build -i tests/path.py \
+ -i tests/coverage.py -i env -i utils/convert.py \
+ -i utils/reindent3.py -i utils/check_sources3.py -i .tox
-all: clean-pyc check test
+all: clean-pyc clean-backupfiles check test
+ifeq ($(PYTHON), python3)
+check: convert-utils
+ @$(PYTHON) utils/check_sources3.py $(DONT_CHECK) .
+else
check:
- @$(PYTHON) utils/check_sources.py -i build -i dist -i sphinx/style/jquery.js \
- -i sphinx/pycode/pgen2 -i sphinx/util/smartypants.py -i .ropeproject \
- -i doc/_build -i ez_setup.py -i tests/path.py -i tests/coverage.py \
- -i env -i .tox .
+ @$(PYTHON) utils/check_sources.py $(DONT_CHECK) .
+endif
-clean: clean-pyc clean-patchfiles
+clean: clean-pyc clean-patchfiles clean-backupfiles clean-generated
clean-pyc:
find . -name '*.pyc' -exec rm -f {} +
find . -name '*.pyo' -exec rm -f {} +
- find . -name '*~' -exec rm -f {} +
clean-patchfiles:
find . -name '*.orig' -exec rm -f {} +
find . -name '*.rej' -exec rm -f {} +
+clean-backupfiles:
+ find . -name '*~' -exec rm -f {} +
+ find . -name '*.bak' -exec rm -f {} +
+
+clean-generated:
+ rm -f utils/*3.py*
+
pylint:
@pylint --rcfile utils/pylintrc sphinx
+ifeq ($(PYTHON), python3)
+reindent: convert-utils
+ @$(PYTHON) utils/reindent3.py -r -n .
+else
reindent:
- @$(PYTHON) utils/reindent.py -r -B .
+ @$(PYTHON) utils/reindent.py -r -n .
+endif
-test:
+test: build
@cd tests; $(PYTHON) run.py -d -m '^[tT]est' $(TEST)
-covertest:
- @cd tests; $(PYTHON) run.py -d -m '^[tT]est' --with-coverage --cover-package=sphinx $(TEST)
+covertest: build
+ @cd tests; $(PYTHON) run.py -d -m '^[tT]est' --with-coverage \
+ --cover-package=sphinx $(TEST)
+
+build:
+ @$(PYTHON) setup.py build
+
+ifeq ($(PYTHON), python3)
+convert-utils:
+ @python3 utils/convert.py -i utils/convert.py utils/
+endif
diff --git a/README b/README
index bb2dea9d..e31d6b93 100644
--- a/README
+++ b/README
@@ -26,6 +26,18 @@ Then, direct your browser to ``_build/html/index.html``.
Or read them online at <http://sphinx.pocoo.org/>.
+Testing
+=======
+
+To run the tests with the interpreter available as ``python``, use::
+
+ make test
+
+If you want to use a different interpreter, e.g. ``python3``, use::
+
+ PYTHON=python3 make test
+
+
Contributing
============
diff --git a/custom_fixers/__init__.py b/custom_fixers/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/custom_fixers/__init__.py
diff --git a/custom_fixers/fix_alt_unicode.py b/custom_fixers/fix_alt_unicode.py
new file mode 100644
index 00000000..55175e90
--- /dev/null
+++ b/custom_fixers/fix_alt_unicode.py
@@ -0,0 +1,12 @@
+from lib2to3.fixer_base import BaseFix
+from lib2to3.fixer_util import Name
+
+class FixAltUnicode(BaseFix):
+ PATTERN = """
+ func=funcdef< 'def' name='__unicode__'
+ parameters< '(' NAME ')' > any+ >
+ """
+
+ def transform(self, node, results):
+ name = results['name']
+ name.replace(Name('__str__', prefix=name.prefix))
diff --git a/distribute_setup.py b/distribute_setup.py
new file mode 100644
index 00000000..37117b34
--- /dev/null
+++ b/distribute_setup.py
@@ -0,0 +1,485 @@
+#!python
+"""Bootstrap distribute installation
+
+If you want to use setuptools in your package's setup.py, just include this
+file in the same directory with it, and add this to the top of your setup.py::
+
+ from distribute_setup import use_setuptools
+ use_setuptools()
+
+If you want to require a specific version of setuptools, set a download
+mirror, or use an alternate download directory, you can do so by supplying
+the appropriate options to ``use_setuptools()``.
+
+This file can also be run as a script to install or upgrade setuptools.
+"""
+import os
+import sys
+import time
+import fnmatch
+import tempfile
+import tarfile
+from distutils import log
+
+try:
+ from site import USER_SITE
+except ImportError:
+ USER_SITE = None
+
+try:
+ import subprocess
+
+ def _python_cmd(*args):
+ args = (sys.executable,) + args
+ return subprocess.call(args) == 0
+
+except ImportError:
+ # will be used for python 2.3
+ def _python_cmd(*args):
+ args = (sys.executable,) + args
+ # quoting arguments if windows
+ if sys.platform == 'win32':
+ def quote(arg):
+ if ' ' in arg:
+ return '"%s"' % arg
+ return arg
+ args = [quote(arg) for arg in args]
+ return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
+
+DEFAULT_VERSION = "0.6.13"
+DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
+SETUPTOOLS_FAKED_VERSION = "0.6c11"
+
+SETUPTOOLS_PKG_INFO = """\
+Metadata-Version: 1.0
+Name: setuptools
+Version: %s
+Summary: xxxx
+Home-page: xxx
+Author: xxx
+Author-email: xxx
+License: xxx
+Description: xxx
+""" % SETUPTOOLS_FAKED_VERSION
+
+
+def _install(tarball):
+ # extracting the tarball
+ tmpdir = tempfile.mkdtemp()
+ log.warn('Extracting in %s', tmpdir)
+ old_wd = os.getcwd()
+ try:
+ os.chdir(tmpdir)
+ tar = tarfile.open(tarball)
+ _extractall(tar)
+ tar.close()
+
+ # going in the directory
+ subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+ os.chdir(subdir)
+ log.warn('Now working in %s', subdir)
+
+ # installing
+ log.warn('Installing Distribute')
+ if not _python_cmd('setup.py', 'install'):
+ log.warn('Something went wrong during the installation.')
+ log.warn('See the error message above.')
+ finally:
+ os.chdir(old_wd)
+
+
+def _build_egg(egg, tarball, to_dir):
+ # extracting the tarball
+ tmpdir = tempfile.mkdtemp()
+ log.warn('Extracting in %s', tmpdir)
+ old_wd = os.getcwd()
+ try:
+ os.chdir(tmpdir)
+ tar = tarfile.open(tarball)
+ _extractall(tar)
+ tar.close()
+
+ # going in the directory
+ subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+ os.chdir(subdir)
+ log.warn('Now working in %s', subdir)
+
+ # building an egg
+ log.warn('Building a Distribute egg in %s', to_dir)
+ _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
+
+ finally:
+ os.chdir(old_wd)
+ # returning the result
+ log.warn(egg)
+ if not os.path.exists(egg):
+ raise IOError('Could not build the egg.')
+
+
+def _do_download(version, download_base, to_dir, download_delay):
+ egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
+ % (version, sys.version_info[0], sys.version_info[1]))
+ if not os.path.exists(egg):
+ tarball = download_setuptools(version, download_base,
+ to_dir, download_delay)
+ _build_egg(egg, tarball, to_dir)
+ sys.path.insert(0, egg)
+ import setuptools
+ setuptools.bootstrap_install_from = egg
+
+
+def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+ to_dir=os.curdir, download_delay=15, no_fake=True):
+ # making sure we use the absolute path
+ to_dir = os.path.abspath(to_dir)
+ was_imported = 'pkg_resources' in sys.modules or \
+ 'setuptools' in sys.modules
+ try:
+ try:
+ import pkg_resources
+ if not hasattr(pkg_resources, '_distribute'):
+ if not no_fake:
+ _fake_setuptools()
+ raise ImportError
+ except ImportError:
+ return _do_download(version, download_base, to_dir, download_delay)
+ try:
+ pkg_resources.require("distribute>="+version)
+ return
+ except pkg_resources.VersionConflict:
+ e = sys.exc_info()[1]
+ if was_imported:
+ sys.stderr.write(
+ "The required version of distribute (>=%s) is not available,\n"
+ "and can't be installed while this script is running. Please\n"
+ "install a more recent version first, using\n"
+ "'easy_install -U distribute'."
+ "\n\n(Currently using %r)\n" % (version, e.args[0]))
+ sys.exit(2)
+ else:
+ del pkg_resources, sys.modules['pkg_resources'] # reload ok
+ return _do_download(version, download_base, to_dir,
+ download_delay)
+ except pkg_resources.DistributionNotFound:
+ return _do_download(version, download_base, to_dir,
+ download_delay)
+ finally:
+ if not no_fake:
+ _create_fake_setuptools_pkg_info(to_dir)
+
+def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+ to_dir=os.curdir, delay=15):
+ """Download distribute from a specified location and return its filename
+
+ `version` should be a valid distribute version number that is available
+ as an egg for download under the `download_base` URL (which should end
+ with a '/'). `to_dir` is the directory where the egg will be downloaded.
+ `delay` is the number of seconds to pause before an actual download
+ attempt.
+ """
+ # making sure we use the absolute path
+ to_dir = os.path.abspath(to_dir)
+ try:
+ from urllib.request import urlopen
+ except ImportError:
+ from urllib2 import urlopen
+ tgz_name = "distribute-%s.tar.gz" % version
+ url = download_base + tgz_name
+ saveto = os.path.join(to_dir, tgz_name)
+ src = dst = None
+ if not os.path.exists(saveto): # Avoid repeated downloads
+ try:
+ log.warn("Downloading %s", url)
+ src = urlopen(url)
+ # Read/write all in one block, so we don't create a corrupt file
+ # if the download is interrupted.
+ data = src.read()
+ dst = open(saveto, "wb")
+ dst.write(data)
+ finally:
+ if src:
+ src.close()
+ if dst:
+ dst.close()
+ return os.path.realpath(saveto)
+
+def _no_sandbox(function):
+ def __no_sandbox(*args, **kw):
+ try:
+ from setuptools.sandbox import DirectorySandbox
+ if not hasattr(DirectorySandbox, '_old'):
+ def violation(*args):
+ pass
+ DirectorySandbox._old = DirectorySandbox._violation
+ DirectorySandbox._violation = violation
+ patched = True
+ else:
+ patched = False
+ except ImportError:
+ patched = False
+
+ try:
+ return function(*args, **kw)
+ finally:
+ if patched:
+ DirectorySandbox._violation = DirectorySandbox._old
+ del DirectorySandbox._old
+
+ return __no_sandbox
+
+def _patch_file(path, content):
+ """Will backup the file then patch it"""
+ existing_content = open(path).read()
+ if existing_content == content:
+ # already patched
+ log.warn('Already patched.')
+ return False
+ log.warn('Patching...')
+ _rename_path(path)
+ f = open(path, 'w')
+ try:
+ f.write(content)
+ finally:
+ f.close()
+ return True
+
+_patch_file = _no_sandbox(_patch_file)
+
+def _same_content(path, content):
+ return open(path).read() == content
+
+def _rename_path(path):
+ new_name = path + '.OLD.%s' % time.time()
+ log.warn('Renaming %s into %s', path, new_name)
+ os.rename(path, new_name)
+ return new_name
+
+def _remove_flat_installation(placeholder):
+ if not os.path.isdir(placeholder):
+ log.warn('Unkown installation at %s', placeholder)
+ return False
+ found = False
+ for file in os.listdir(placeholder):
+ if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
+ found = True
+ break
+ if not found:
+ log.warn('Could not locate setuptools*.egg-info')
+ return
+
+ log.warn('Removing elements out of the way...')
+ pkg_info = os.path.join(placeholder, file)
+ if os.path.isdir(pkg_info):
+ patched = _patch_egg_dir(pkg_info)
+ else:
+ patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
+
+ if not patched:
+ log.warn('%s already patched.', pkg_info)
+ return False
+ # now let's move the files out of the way
+ for element in ('setuptools', 'pkg_resources.py', 'site.py'):
+ element = os.path.join(placeholder, element)
+ if os.path.exists(element):
+ _rename_path(element)
+ else:
+ log.warn('Could not find the %s element of the '
+ 'Setuptools distribution', element)
+ return True
+
+_remove_flat_installation = _no_sandbox(_remove_flat_installation)
+
+def _after_install(dist):
+ log.warn('After install bootstrap.')
+ placeholder = dist.get_command_obj('install').install_purelib
+ _create_fake_setuptools_pkg_info(placeholder)
+
+def _create_fake_setuptools_pkg_info(placeholder):
+ if not placeholder or not os.path.exists(placeholder):
+ log.warn('Could not find the install location')
+ return
+ pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
+ setuptools_file = 'setuptools-%s-py%s.egg-info' % \
+ (SETUPTOOLS_FAKED_VERSION, pyver)
+ pkg_info = os.path.join(placeholder, setuptools_file)
+ if os.path.exists(pkg_info):
+ log.warn('%s already exists', pkg_info)
+ return
+
+ log.warn('Creating %s', pkg_info)
+ f = open(pkg_info, 'w')
+ try:
+ f.write(SETUPTOOLS_PKG_INFO)
+ finally:
+ f.close()
+
+ pth_file = os.path.join(placeholder, 'setuptools.pth')
+ log.warn('Creating %s', pth_file)
+ f = open(pth_file, 'w')
+ try:
+ f.write(os.path.join(os.curdir, setuptools_file))
+ finally:
+ f.close()
+
+_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
+
+def _patch_egg_dir(path):
+ # let's check if it's already patched
+ pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
+ if os.path.exists(pkg_info):
+ if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
+ log.warn('%s already patched.', pkg_info)
+ return False
+ _rename_path(path)
+ os.mkdir(path)
+ os.mkdir(os.path.join(path, 'EGG-INFO'))
+ pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
+ f = open(pkg_info, 'w')
+ try:
+ f.write(SETUPTOOLS_PKG_INFO)
+ finally:
+ f.close()
+ return True
+
+_patch_egg_dir = _no_sandbox(_patch_egg_dir)
+
+def _before_install():
+ log.warn('Before install bootstrap.')
+ _fake_setuptools()
+
+
+def _under_prefix(location):
+ if 'install' not in sys.argv:
+ return True
+ args = sys.argv[sys.argv.index('install')+1:]
+ for index, arg in enumerate(args):
+ for option in ('--root', '--prefix'):
+ if arg.startswith('%s=' % option):
+ top_dir = arg.split('root=')[-1]
+ return location.startswith(top_dir)
+ elif arg == option:
+ if len(args) > index:
+ top_dir = args[index+1]
+ return location.startswith(top_dir)
+ if arg == '--user' and USER_SITE is not None:
+ return location.startswith(USER_SITE)
+ return True
+
+
+def _fake_setuptools():
+ log.warn('Scanning installed packages')
+ try:
+ import pkg_resources
+ except ImportError:
+ # we're cool
+ log.warn('Setuptools or Distribute does not seem to be installed.')
+ return
+ ws = pkg_resources.working_set
+ try:
+ setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
+ replacement=False))
+ except TypeError:
+ # old distribute API
+ setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
+
+ if setuptools_dist is None:
+ log.warn('No setuptools distribution found')
+ return
+ # detecting if it was already faked
+ setuptools_location = setuptools_dist.location
+ log.warn('Setuptools installation detected at %s', setuptools_location)
+
+ # if --root or --preix was provided, and if
+ # setuptools is not located in them, we don't patch it
+ if not _under_prefix(setuptools_location):
+ log.warn('Not patching, --root or --prefix is installing Distribute'
+ ' in another location')
+ return
+
+ # let's see if its an egg
+ if not setuptools_location.endswith('.egg'):
+ log.warn('Non-egg installation')
+ res = _remove_flat_installation(setuptools_location)
+ if not res:
+ return
+ else:
+ log.warn('Egg installation')
+ pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
+ if (os.path.exists(pkg_info) and
+ _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
+ log.warn('Already patched.')
+ return
+ log.warn('Patching...')
+ # let's create a fake egg replacing setuptools one
+ res = _patch_egg_dir(setuptools_location)
+ if not res:
+ return
+ log.warn('Patched done.')
+ _relaunch()
+
+
+def _relaunch():
+ log.warn('Relaunching...')
+ # we have to relaunch the process
+ # pip marker to avoid a relaunch bug
+ if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
+ sys.argv[0] = 'setup.py'
+ args = [sys.executable] + sys.argv
+ sys.exit(subprocess.call(args))
+
+
+def _extractall(self, path=".", members=None):
+ """Extract all members from the archive to the current working
+ directory and set owner, modification time and permissions on
+ directories afterwards. `path' specifies a different directory
+ to extract to. `members' is optional and must be a subset of the
+ list returned by getmembers().
+ """
+ import copy
+ import operator
+ from tarfile import ExtractError
+ directories = []
+
+ if members is None:
+ members = self
+
+ for tarinfo in members:
+ if tarinfo.isdir():
+ # Extract directories with a safe mode.
+ directories.append(tarinfo)
+ tarinfo = copy.copy(tarinfo)
+ tarinfo.mode = 448 # decimal for oct 0700
+ self.extract(tarinfo, path)
+
+ # Reverse sort directories.
+ if sys.version_info < (2, 4):
+ def sorter(dir1, dir2):
+ return cmp(dir1.name, dir2.name)
+ directories.sort(sorter)
+ directories.reverse()
+ else:
+ directories.sort(key=operator.attrgetter('name'), reverse=True)
+
+ # Set correct owner, mtime and filemode on directories.
+ for tarinfo in directories:
+ dirpath = os.path.join(path, tarinfo.name)
+ try:
+ self.chown(tarinfo, dirpath)
+ self.utime(tarinfo, dirpath)
+ self.chmod(tarinfo, dirpath)
+ except ExtractError:
+ e = sys.exc_info()[1]
+ if self.errorlevel > 1:
+ raise
+ else:
+ self._dbg(1, "tarfile: %s" % e)
+
+
+def main(argv, version=DEFAULT_VERSION):
+ """Install or upgrade setuptools and EasyInstall"""
+ tarball = download_setuptools()
+ _install(tarball)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/doc/Makefile b/doc/Makefile
index 90fb5af2..aa3ecb61 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -29,6 +29,7 @@ help:
@echo " epub to make an epub file"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run pdflatex"
+ @echo " gettext to make PO message catalogs"
@echo " changes to make an overview over all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@@ -112,6 +113,11 @@ latexpdf:
make -C _build/latex all-pdf
@echo "pdflatex finished; the PDF files are in _build/latex."
+gettext:
+ $(SPHINXBUILD) -b gettext $(ALLSPHINXOPTS) _build/locale
+ @echo
+ @echo "Build finished. The message catalogs are in _build/locale."
+
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes
@echo
diff --git a/doc/builders.rst b/doc/builders.rst
index 80203e75..7d182d06 100644
--- a/doc/builders.rst
+++ b/doc/builders.rst
@@ -220,6 +220,18 @@ Note that a direct PDF builder using ReportLab is available in `rst2pdf
.. versionadded:: 0.5
+.. module:: sphinx.builders.intl
+.. class:: MessageCatalogBuilder
+
+ This builder produces a message catalog file for each reST file or
+ subdirectory.
+
+ See the documentation on :ref:`internationalization <intl>` for further reference.
+
+ Its name is ``gettext``.
+
+ .. versionadded:: 1.1
+
.. module:: sphinx.builders.changes
.. class:: ChangesBuilder
diff --git a/doc/config.rst b/doc/config.rst
index e0fbeb46..3115c401 100644
--- a/doc/config.rst
+++ b/doc/config.rst
@@ -300,6 +300,7 @@ Project information
* ``pt_BR`` -- Brazilian Portuguese
* ``ru`` -- Russian
* ``sl`` -- Slovenian
+ * ``sv`` -- Swedish
* ``tr`` -- Turkish
* ``uk_UA`` -- Ukrainian
* ``zh_CN`` -- Simplified Chinese
diff --git a/doc/contents.rst b/doc/contents.rst
index 079f93f2..3bbc2835 100644
--- a/doc/contents.rst
+++ b/doc/contents.rst
@@ -14,9 +14,11 @@ Sphinx documentation contents
domains
builders
config
+ intl
theming
templating
extensions
+ websupport
faq
glossary
diff --git a/doc/ext/graphviz.rst b/doc/ext/graphviz.rst
index 3741cec6..de6e03e2 100644
--- a/doc/ext/graphviz.rst
+++ b/doc/ext/graphviz.rst
@@ -29,6 +29,17 @@ It adds these directives:
:confval:`graphviz_output_format`). In LaTeX output, the code will be
rendered to an embeddable PDF file.
+ You can also embed external dot files, by giving the file name as an
+ argument to :rst:dir:`graphviz` and no additional content::
+
+ .. graphviz:: external.dot
+
+ As for all file references in Sphinx, if the filename is absolute, it is
+ taken as relative to the source directory.
+
+ .. versionchanged:: 1.1
+ Added support for external files.
+
.. rst:directive:: graph
diff --git a/doc/intl.rst b/doc/intl.rst
new file mode 100644
index 00000000..b6036767
--- /dev/null
+++ b/doc/intl.rst
@@ -0,0 +1,13 @@
+.. _intl:
+
+Internationalization
+====================
+
+.. versionadded:: 1.1
+
+Complementary to translations provided for Sphinx-generated messages such as
+navigation bars, Sphinx provides mechanisms facilitating *document* translations
+in itself. It relies on the existing configuration values :confval:`language`
+and :confval:`locale_dirs`.
+
+.. XXX write more!
diff --git a/doc/intro.rst b/doc/intro.rst
index 1a39e266..caff141d 100644
--- a/doc/intro.rst
+++ b/doc/intro.rst
@@ -45,15 +45,19 @@ See the :ref:`pertinent section in the FAQ list <usingwith>`.
Prerequisites
-------------
-Sphinx needs at least **Python 2.4** to run, as well as the docutils_ and
-Jinja2_ libraries. Sphinx should work with docutils version 0.5 or some
-(not broken) SVN trunk snapshot. If you like to have source code highlighting
-support, you must also install the Pygments_ library.
+Sphinx needs at least **Python 2.4** or **Python 3.1** to run, as well as the
+docutils_ and Jinja2_ libraries. Sphinx should work with docutils version 0.5
+or some (not broken) SVN trunk snapshot. If you like to have source code
+highlighting support, you must also install the Pygments_ library.
+
+If you use **Python 2.4** you also need uuid_.
.. _reStructuredText: http://docutils.sf.net/rst.html
.. _docutils: http://docutils.sf.net/
.. _Jinja2: http://jinja.pocoo.org/2/
.. _Pygments: http://pygments.org/
+.. The given homepage is only a directory listing so I'm using the pypi site.
+.. _uuid: http://pypi.python.org/pypi/uuid/
Usage
diff --git a/doc/markup/inline.rst b/doc/markup/inline.rst
index 35981edc..78aaea69 100644
--- a/doc/markup/inline.rst
+++ b/doc/markup/inline.rst
@@ -309,6 +309,7 @@ in a different style:
If you don't need the "variable part" indication, use the standard
````code```` instead.
+There is also an :rst:role:`index` role to generate index entries.
The following roles generate external links:
diff --git a/doc/markup/misc.rst b/doc/markup/misc.rst
index 6173589b..44da3aac 100644
--- a/doc/markup/misc.rst
+++ b/doc/markup/misc.rst
@@ -62,6 +62,85 @@ Meta-information markup
:confval:`show_authors` configuration value is True.
+Index-generating markup
+-----------------------
+
+Sphinx automatically creates index entries from all object descriptions (like
+functions, classes or attributes) like discussed in :ref:`domains`.
+
+However, there is also explicit markup available, to make the index more
+comprehensive and enable index entries in documents where information is not
+mainly contained in information units, such as the language reference.
+
+.. rst:directive:: .. index:: <entries>
+
+ This directive contains one or more index entries. Each entry consists of a
+ type and a value, separated by a colon.
+
+ For example::
+
+ .. index::
+ single: execution; context
+ module: __main__
+ module: sys
+ triple: module; search; path
+
+ The execution context
+ ---------------------
+
+ ...
+
+ This directive contains five entries, which will be converted to entries in
+ the generated index which link to the exact location of the index statement
+ (or, in case of offline media, the corresponding page number).
+
+ Since index directives generate cross-reference targets at their location in
+ the source, it makes sense to put them *before* the thing they refer to --
+ e.g. a heading, as in the example above.
+
+ The possible entry types are:
+
+ single
+ Creates a single index entry. Can be made a subentry by separating the
+ subentry text with a semicolon (this notation is also used below to
+ describe what entries are created).
+ pair
+ ``pair: loop; statement`` is a shortcut that creates two index entries,
+ namely ``loop; statement`` and ``statement; loop``.
+ triple
+ Likewise, ``triple: module; search; path`` is a shortcut that creates
+ three index entries, which are ``module; search path``, ``search; path,
+ module`` and ``path; module search``.
+ module, keyword, operator, object, exception, statement, builtin
+ These all create two index entries. For example, ``module: hashlib``
+ creates the entries ``module; hashlib`` and ``hashlib; module``. (These
+ are Python-specific and therefore deprecated.)
+
+ For index directives containing only "single" entries, there is a shorthand
+ notation::
+
+ .. index:: BNF, grammar, syntax, notation
+
+ This creates four index entries.
+
+.. rst:role:: index
+
+ While the :rst:dir:`index` directive is a block-level markup and links to the
+ beginning of the next paragraph, there is also a corresponding role that sets
+ the link target directly where it is used.
+
+ The content of the role can be a simple phrase, which is then kept in the
+ text and used as an index entry. It can also be a combination of text and
+ index entry, styled like with explicit targets of cross-references. In that
+ case, the "target" part can be a full entry as described for the directive
+ above. For example::
+
+ This is a normal reST :index:`paragraph` that contains several
+ :index:`index entries <pair: index; entry>`.
+
+ .. versionadded:: 1.1
+
+
.. _tags:
Including content based on tags
diff --git a/doc/markup/para.rst b/doc/markup/para.rst
index ecc6b4a6..52a5019b 100644
--- a/doc/markup/para.rst
+++ b/doc/markup/para.rst
@@ -144,68 +144,6 @@ For local tables of contents, use the standard reST :dudir:`contents directive
<contents>`.
-Index-generating markup
------------------------
-
-Sphinx automatically creates index entries from all object descriptions (like
-functions, classes or attributes) like discussed in :ref:`domains`.
-
-However, there is also an explicit directive available, to make the index more
-comprehensive and enable index entries in documents where information is not
-mainly contained in information units, such as the language reference.
-
-.. rst:directive:: .. index:: <entries>
-
- This directive contains one or more index entries. Each entry consists of a
- type and a value, separated by a colon.
-
- For example::
-
- .. index::
- single: execution; context
- module: __main__
- module: sys
- triple: module; search; path
-
- The execution context
- ---------------------
-
- ...
-
- This directive contains five entries, which will be converted to entries in
- the generated index which link to the exact location of the index statement
- (or, in case of offline media, the corresponding page number).
-
- Since index directives generate cross-reference targets at their location in
- the source, it makes sense to put them *before* the thing they refer to --
- e.g. a heading, as in the example above.
-
- The possible entry types are:
-
- single
- Creates a single index entry. Can be made a subentry by separating the
- subentry text with a semicolon (this notation is also used below to
- describe what entries are created).
- pair
- ``pair: loop; statement`` is a shortcut that creates two index entries,
- namely ``loop; statement`` and ``statement; loop``.
- triple
- Likewise, ``triple: module; search; path`` is a shortcut that creates
- three index entries, which are ``module; search path``, ``search; path,
- module`` and ``path; module search``.
- module, keyword, operator, object, exception, statement, builtin
- These all create two index entries. For example, ``module: hashlib``
- creates the entries ``module; hashlib`` and ``hashlib; module``. (These
- are Python-specific and therefore deprecated.)
-
- For index directives containing only "single" entries, there is a shorthand
- notation::
-
- .. index:: BNF, grammar, syntax, notation
-
- This creates four index entries.
-
-
Glossary
--------
diff --git a/doc/markup/toctree.rst b/doc/markup/toctree.rst
index 2c0a418a..0b6a46c1 100644
--- a/doc/markup/toctree.rst
+++ b/doc/markup/toctree.rst
@@ -41,6 +41,8 @@ tables of contents. The ``toctree`` directive is the central element.
document, the library index. From this information it generates "next
chapter", "previous chapter" and "parent chapter" links.
+ **Entries**
+
Document titles in the :rst:dir:`toctree` will be automatically read from the
title of the referenced document. If that isn't what you want, you can
specify an explicit title and target using a similar syntax to reST
@@ -59,8 +61,10 @@ tables of contents. The ``toctree`` directive is the central element.
You can also add external links, by giving an HTTP URL instead of a document
name.
+ **Section numbering**
+
If you want to have section numbers even in HTML output, give the toctree a
- ``numbered`` flag option. For example::
+ ``numbered`` option. For example::
.. toctree::
:numbered:
@@ -71,6 +75,11 @@ tables of contents. The ``toctree`` directive is the central element.
Numbering then starts at the heading of ``foo``. Sub-toctrees are
automatically numbered (don't give the ``numbered`` flag to those).
+ Numbering up to a specific depth is also possible, by giving the depth as a
+ numeric argument to ``numbered``.
+
+ **Additional options**
+
If you want only the titles of documents in the tree to show up, not other
headings of the same level, you can use the ``titlesonly`` option::
@@ -133,6 +142,9 @@ tables of contents. The ``toctree`` directive is the central element.
.. versionchanged:: 1.0
Added "titlesonly" option.
+ .. versionchanged:: 1.1
+ Added numeric argument to "numbered".
+
Special names
-------------
diff --git a/doc/web/api.rst b/doc/web/api.rst
new file mode 100644
index 00000000..070cd3a2
--- /dev/null
+++ b/doc/web/api.rst
@@ -0,0 +1,65 @@
+.. _websupportapi:
+
+.. currentmodule:: sphinx.websupport
+
+The WebSupport Class
+====================
+
+.. class:: WebSupport
+
+ The main API class for the web support package. All interactions with the
+ web support package should occur through this class.
+
+ The class takes the following keyword arguments:
+
+ srcdir
+ The directory containing reStructuredText source files.
+
+ builddir
+ The directory that build data and static files should be placed in. This
+ should be used when creating a :class:`WebSupport` object that will be
+ used to build data.
+
+ datadir
+ The directory that the web support data is in. This should be used when
+ creating a :class:`WebSupport` object that will be used to retrieve data.
+
+ search
+ This may contain either a string (e.g. 'xapian') referencing a built-in
+ search adapter to use, or an instance of a subclass of
+ :class:`~.search.BaseSearch`.
+
+ storage
+ This may contain either a string representing a database uri, or an
+ instance of a subclass of :class:`~.storage.StorageBackend`. If this is
+ not provided, a new sqlite database will be created.
+
+ moderation_callback
+ A callable to be called when a new comment is added that is not
+ displayed. It must accept one argument: a dictionary representing the
+ comment that was added.
+
+ staticdir
+ If static files are served from a location besides ``'/static'``, this
+ should be a string with the name of that location
+ (e.g. ``'/static_files'``).
+
+ docroot
+ If the documentation is not served from the base path of a URL, this
+ should be a string specifying that path (e.g. ``'docs'``).
+
+
+Methods
+~~~~~~~
+
+.. automethod:: sphinx.websupport.WebSupport.build
+
+.. automethod:: sphinx.websupport.WebSupport.get_document
+
+.. automethod:: sphinx.websupport.WebSupport.get_data
+
+.. automethod:: sphinx.websupport.WebSupport.add_comment
+
+.. automethod:: sphinx.websupport.WebSupport.process_vote
+
+.. automethod:: sphinx.websupport.WebSupport.get_search_results
diff --git a/doc/web/quickstart.rst b/doc/web/quickstart.rst
new file mode 100644
index 00000000..0627c9c3
--- /dev/null
+++ b/doc/web/quickstart.rst
@@ -0,0 +1,261 @@
+.. _websupportquickstart:
+
+Web Support Quick Start
+=======================
+
+Building Documentation Data
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To make use of the web support package in your application you'll need to build
+the data it uses. This data includes pickle files representing documents,
+search indices, and node data that is used to track where comments and other
+things are in a document. To do this you will need to create an instance of the
+:class:`~.WebSupport` class and call its :meth:`~.WebSupport.build` method::
+
+ from sphinx.websupport import WebSupport
+
+ support = WebSupport(srcdir='/path/to/rst/sources/',
+ builddir='/path/to/build/outdir',
+ search='xapian')
+
+ support.build()
+
+This will read reStructuredText sources from `srcdir` and place the necessary
+data in `builddir`. The `builddir` will contain two sub-directories: one named
+"data" that contains all the data needed to display documents, search through
+documents, and add comments to documents. The other directory will be called
+"static" and contains static files that should be served from "/static".
+
+.. note::
+
+ If you wish to serve static files from a path other than "/static", you can
+ do so by providing the *staticdir* keyword argument when creating the
+ :class:`~.WebSupport` object.
+
+
+Integrating Sphinx Documents Into Your Webapp
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Now that the data is built, it's time to do something useful with it. Start off
+by creating a :class:`~.WebSupport` object for your application::
+
+ from sphinx.websupport import WebSupport
+
+ support = WebSupport(datadir='/path/to/the/data',
+ search='xapian')
+
+You'll only need one of these for each set of documentation you will be working
+with. You can then call its :meth:`~.WebSupport.get_document` method to access
+individual documents::
+
+ contents = support.get_document('contents')
+
+This will return a dictionary containing the following items:
+
+* **body**: The main body of the document as HTML
+* **sidebar**: The sidebar of the document as HTML
+* **relbar**: A div containing links to related documents
+* **title**: The title of the document
+* **css**: Links to css files used by Sphinx
+* **js**: Javascript containing comment options
+
+This dict can then be used as context for templates. The goal is to be easy to
+integrate with your existing templating system. An example using `Jinja2
+<http://jinja.pocoo.org/2/>`_ is:
+
+.. sourcecode:: html+jinja
+
+ {%- extends "layout.html" %}
+
+ {%- block title %}
+ {{ document.title }}
+ {%- endblock %}
+
+ {% block css %}
+ {{ super() }}
+ {{ document.css|safe }}
+ <link rel="stylesheet" href="/static/websupport-custom.css" type="text/css">
+ {% endblock %}
+
+ {%- block js %}
+ {{ super() }}
+ {{ document.js|safe }}
+ {%- endblock %}
+
+ {%- block relbar %}
+ {{ document.relbar|safe }}
+ {%- endblock %}
+
+ {%- block body %}
+ {{ document.body|safe }}
+ {%- endblock %}
+
+ {%- block sidebar %}
+ {{ document.sidebar|safe }}
+ {%- endblock %}
+
+
+Authentication
+--------------
+
+To use certain features such as voting, it must be possible to authenticate
+users. The details of the authentication are left to your application. Once a
+user has been authenticated you can pass the user's details to certain
+:class:`~.WebSupport` methods using the *username* and *moderator* keyword
+arguments. The web support package will store the username with comments and
+votes. The only caveat is that if you allow users to change their username you
+must update the websupport package's data::
+
+ support.update_username(old_username, new_username)
+
+*username* should be a unique string which identifies a user, and *moderator*
+should be a boolean representing whether the user has moderation privileges.
+The default value for *moderator* is *False*.
+
+An example `Flask <http://flask.pocoo.org/>`_ function that checks whether a
+user is logged in and then retrieves a document is::
+
+ from sphinx.websupport.errors import *
+
+ @app.route('/<path:docname>')
+ def doc(docname):
+ username = g.user.name if g.user else ''
+ moderator = g.user.moderator if g.user else False
+ try:
+ document = support.get_document(docname, username, moderator)
+ except DocumentNotFoundError:
+ abort(404)
+ return render_template('doc.html', document=document)
+
+The first thing to notice is that the *docname* is just the request path. This
+makes accessing the correct document easy from a single view. If the user is
+authenticated, then the username and moderation status are passed along with the
+docname to :meth:`~.WebSupport.get_document`. The web support package will then
+add this data to the ``COMMENT_OPTIONS`` that are used in the template.
+
+.. note::
+
+   This only works if your documentation is served from your
+ document root. If it is served from another directory, you will
+ need to prefix the url route with that directory, and give the `docroot`
+ keyword argument when creating the web support object::
+
+ support = WebSupport(..., docroot='docs')
+
+ @app.route('/docs/<path:docname>')
+
+
+Performing Searches
+~~~~~~~~~~~~~~~~~~~
+
+To use the search form built-in to the Sphinx sidebar, create a function to
+handle requests to the url 'search' relative to the documentation root. The
+user's search query will be in the GET parameters, with the key `q`. Then use
+the :meth:`~sphinx.websupport.WebSupport.get_search_results` method to retrieve
+search results. In `Flask <http://flask.pocoo.org/>`_ that would be like this::
+
+ @app.route('/search')
+ def search():
+ q = request.args.get('q')
+ document = support.get_search_results(q)
+ return render_template('doc.html', document=document)
+
+Note that we used the same template to render our search results as we did to
+render our documents. That's because :meth:`~.WebSupport.get_search_results`
+returns a context dict in the same format that :meth:`~.WebSupport.get_document`
+does.
+
+
+Comments & Proposals
+~~~~~~~~~~~~~~~~~~~~
+
+Now that this is done it's time to define the functions that handle the AJAX
+calls from the script. You will need three functions. The first function is
+used to add a new comment, and will call the web support method
+:meth:`~.WebSupport.add_comment`::
+
+ @app.route('/docs/add_comment', methods=['POST'])
+ def add_comment():
+ parent_id = request.form.get('parent', '')
+ node_id = request.form.get('node', '')
+ text = request.form.get('text', '')
+ proposal = request.form.get('proposal', '')
+ username = g.user.name if g.user is not None else 'Anonymous'
+        comment = support.add_comment(text, node_id=node_id,
+                                      parent_id=parent_id,
+ username=username, proposal=proposal)
+ return jsonify(comment=comment)
+
+You'll notice that both a `parent_id` and `node_id` are sent with the
+request. If the comment is being attached directly to a node, `parent_id`
+will be empty. If the comment is a child of another comment, then `node_id`
+will be empty. The next function handles the retrieval of comments for a
+specific node, and is aptly named
+:meth:`~sphinx.websupport.WebSupport.get_data`::
+
+ @app.route('/docs/get_comments')
+ def get_comments():
+ username = g.user.name if g.user else None
+ moderator = g.user.moderator if g.user else False
+ node_id = request.args.get('node', '')
+        data = support.get_data(node_id, username, moderator)
+ return jsonify(**data)
+
+The final function that is needed will call :meth:`~.WebSupport.process_vote`,
+and will handle user votes on comments::
+
+ @app.route('/docs/process_vote', methods=['POST'])
+ def process_vote():
+ if g.user is None:
+ abort(401)
+ comment_id = request.form.get('comment_id')
+ value = request.form.get('value')
+ if value is None or comment_id is None:
+ abort(400)
+ support.process_vote(comment_id, g.user.id, value)
+ return "success"
+
+
+Comment Moderation
+~~~~~~~~~~~~~~~~~~
+
+By default, all comments added through :meth:`~.WebSupport.add_comment` are
+automatically displayed. If you wish to have some form of moderation, you can
+pass the `displayed` keyword argument::
+
+    comment = support.add_comment(text, node_id=node_id,
+                                  parent_id=parent_id,
+ username=username, proposal=proposal,
+ displayed=False)
+
+You can then create two new views to handle the moderation of comments. The
+first will be called when a moderator decides a comment should be accepted and
+displayed::
+
+ @app.route('/docs/accept_comment', methods=['POST'])
+ def accept_comment():
+ moderator = g.user.moderator if g.user else False
+ comment_id = request.form.get('id')
+ support.accept_comment(comment_id, moderator=moderator)
+ return 'OK'
+
+The next is very similar, but used when rejecting a comment::
+
+ @app.route('/docs/reject_comment', methods=['POST'])
+ def reject_comment():
+ moderator = g.user.moderator if g.user else False
+ comment_id = request.form.get('id')
+ support.reject_comment(comment_id, moderator=moderator)
+ return 'OK'
+
+To perform a custom action (such as emailing a moderator) when a new comment is
+added but not displayed, you can pass a callable to the :class:`~.WebSupport`
+class when instantiating your support object::
+
+ def moderation_callback(comment):
+ """Do something..."""
+
+ support = WebSupport(..., moderation_callback=moderation_callback)
+
+The moderation callback must take one argument, which will be the same comment
+dict that is returned by :meth:`add_comment`.
diff --git a/doc/web/searchadapters.rst b/doc/web/searchadapters.rst
new file mode 100644
index 00000000..7d8634f7
--- /dev/null
+++ b/doc/web/searchadapters.rst
@@ -0,0 +1,45 @@
+.. _searchadapters:
+
+.. currentmodule:: sphinx.websupport.search
+
+Search Adapters
+===============
+
+To create a custom search adapter you will need to subclass the
+:class:`BaseSearch` class. Then create an instance of the new class and pass
+that as the `search` keyword argument when you create the :class:`~.WebSupport`
+object::
+
+ support = WebSupport(srcdir=srcdir,
+ builddir=builddir,
+ search=MySearch())
+
+For more information about creating a custom search adapter, please see the
+documentation of the :class:`BaseSearch` class below.
+
+.. class:: BaseSearch
+
+ Defines an interface for search adapters.
+
+
+BaseSearch Methods
+~~~~~~~~~~~~~~~~~~
+
+ The following methods are defined in the BaseSearch class. Some methods do
+ not need to be overridden, but some (:meth:`~BaseSearch.add_document` and
+ :meth:`~BaseSearch.handle_query`) must be overridden in your subclass. For a
+ working example, look at the built-in adapter for whoosh.
+
+.. automethod:: BaseSearch.init_indexing
+
+.. automethod:: BaseSearch.finish_indexing
+
+.. automethod:: BaseSearch.feed
+
+.. automethod:: BaseSearch.add_document
+
+.. automethod:: BaseSearch.query
+
+.. automethod:: BaseSearch.handle_query
+
+.. automethod:: BaseSearch.extract_context
diff --git a/doc/web/storagebackends.rst b/doc/web/storagebackends.rst
new file mode 100644
index 00000000..a46ea9e5
--- /dev/null
+++ b/doc/web/storagebackends.rst
@@ -0,0 +1,46 @@
+.. _storagebackends:
+
+.. currentmodule:: sphinx.websupport.storage
+
+Storage Backends
+================
+
+To create a custom storage backend you will need to subclass the
+:class:`StorageBackend` class. Then create an instance of the new class and
+pass that as the `storage` keyword argument when you create the
+:class:`~.WebSupport` object::
+
+ support = WebSupport(srcdir=srcdir,
+ builddir=builddir,
+ storage=MyStorage())
+
+For more information about creating a custom storage backend, please see the
+documentation of the :class:`StorageBackend` class below.
+
+.. class:: StorageBackend
+
+ Defines an interface for storage backends.
+
+
+StorageBackend Methods
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. automethod:: StorageBackend.pre_build
+
+.. automethod:: StorageBackend.add_node
+
+.. automethod:: StorageBackend.post_build
+
+.. automethod:: StorageBackend.add_comment
+
+.. automethod:: StorageBackend.delete_comment
+
+.. automethod:: StorageBackend.get_data
+
+.. automethod:: StorageBackend.process_vote
+
+.. automethod:: StorageBackend.update_username
+
+.. automethod:: StorageBackend.accept_comment
+
+.. automethod:: StorageBackend.reject_comment
diff --git a/doc/websupport.rst b/doc/websupport.rst
new file mode 100644
index 00000000..3ccae246
--- /dev/null
+++ b/doc/websupport.rst
@@ -0,0 +1,16 @@
+.. _websupport:
+
+Sphinx Web Support
+==================
+
+.. versionadded:: 1.1
+
+Sphinx provides a Python API to easily integrate Sphinx documentation into your
+web application. To learn more read the :ref:`websupportquickstart`.
+
+.. toctree::
+
+ web/quickstart
+ web/api
+ web/searchadapters
+ web/storagebackends
diff --git a/ez_setup.py b/ez_setup.py
deleted file mode 100644
index d24e845e..00000000
--- a/ez_setup.py
+++ /dev/null
@@ -1,276 +0,0 @@
-#!python
-"""Bootstrap setuptools installation
-
-If you want to use setuptools in your package's setup.py, just include this
-file in the same directory with it, and add this to the top of your setup.py::
-
- from ez_setup import use_setuptools
- use_setuptools()
-
-If you want to require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, you can do so by supplying
-the appropriate options to ``use_setuptools()``.
-
-This file can also be run as a script to install or upgrade setuptools.
-"""
-import sys
-DEFAULT_VERSION = "0.6c9"
-DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
-
-md5_data = {
- 'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
- 'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
- 'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
- 'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
- 'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
- 'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
- 'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
- 'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
- 'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
- 'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
- 'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
- 'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
- 'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
- 'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
- 'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
- 'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
- 'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
- 'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
- 'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
- 'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
- 'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
- 'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
- 'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
- 'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
- 'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
- 'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
- 'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
- 'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
- 'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
- 'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
- 'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
- 'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
- 'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
- 'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
-}
-
-import sys, os
-try: from hashlib import md5
-except ImportError: from md5 import md5
-
-def _validate_md5(egg_name, data):
- if egg_name in md5_data:
- digest = md5(data).hexdigest()
- if digest != md5_data[egg_name]:
- print >>sys.stderr, (
- "md5 validation of %s failed! (Possible download problem?)"
- % egg_name
- )
- sys.exit(2)
- return data
-
-def use_setuptools(
- version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
- download_delay=15
-):
- """Automatically find/download setuptools and make it available on sys.path
-
- `version` should be a valid setuptools version number that is available
- as an egg for download under the `download_base` URL (which should end with
- a '/'). `to_dir` is the directory where setuptools will be downloaded, if
- it is not already available. If `download_delay` is specified, it should
- be the number of seconds that will be paused before initiating a download,
- should one be required. If an older version of setuptools is installed,
- this routine will print a message to ``sys.stderr`` and raise SystemExit in
- an attempt to abort the calling script.
- """
- was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
- def do_download():
- egg = download_setuptools(version, download_base, to_dir, download_delay)
- sys.path.insert(0, egg)
- import setuptools; setuptools.bootstrap_install_from = egg
- try:
- import pkg_resources
- except ImportError:
- return do_download()
- try:
- pkg_resources.require("setuptools>="+version); return
- except pkg_resources.VersionConflict, e:
- if was_imported:
- print >>sys.stderr, (
- "The required version of setuptools (>=%s) is not available, and\n"
- "can't be installed while this script is running. Please install\n"
- " a more recent version first, using 'easy_install -U setuptools'."
- "\n\n(Currently using %r)"
- ) % (version, e.args[0])
- sys.exit(2)
- else:
- del pkg_resources, sys.modules['pkg_resources'] # reload ok
- return do_download()
- except pkg_resources.DistributionNotFound:
- return do_download()
-
-def download_setuptools(
- version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
- delay = 15
-):
- """Download setuptools from a specified location and return its filename
-
- `version` should be a valid setuptools version number that is available
- as an egg for download under the `download_base` URL (which should end
- with a '/'). `to_dir` is the directory where the egg will be downloaded.
- `delay` is the number of seconds to pause before an actual download attempt.
- """
- import urllib2, shutil
- egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
- url = download_base + egg_name
- saveto = os.path.join(to_dir, egg_name)
- src = dst = None
- if not os.path.exists(saveto): # Avoid repeated downloads
- try:
- from distutils import log
- if delay:
- log.warn("""
----------------------------------------------------------------------------
-This script requires setuptools version %s to run (even to display
-help). I will attempt to download it for you (from
-%s), but
-you may need to enable firewall access for this script first.
-I will start the download in %d seconds.
-
-(Note: if this machine does not have network access, please obtain the file
-
- %s
-
-and place it in this directory before rerunning this script.)
----------------------------------------------------------------------------""",
- version, download_base, delay, url
- ); from time import sleep; sleep(delay)
- log.warn("Downloading %s", url)
- src = urllib2.urlopen(url)
- # Read/write all in one block, so we don't create a corrupt file
- # if the download is interrupted.
- data = _validate_md5(egg_name, src.read())
- dst = open(saveto,"wb"); dst.write(data)
- finally:
- if src: src.close()
- if dst: dst.close()
- return os.path.realpath(saveto)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-def main(argv, version=DEFAULT_VERSION):
- """Install or upgrade setuptools and EasyInstall"""
- try:
- import setuptools
- except ImportError:
- egg = None
- try:
- egg = download_setuptools(version, delay=0)
- sys.path.insert(0,egg)
- from setuptools.command.easy_install import main
- return main(list(argv)+[egg]) # we're done here
- finally:
- if egg and os.path.exists(egg):
- os.unlink(egg)
- else:
- if setuptools.__version__ == '0.0.1':
- print >>sys.stderr, (
- "You have an obsolete version of setuptools installed. Please\n"
- "remove it from your system entirely before rerunning this script."
- )
- sys.exit(2)
-
- req = "setuptools>="+version
- import pkg_resources
- try:
- pkg_resources.require(req)
- except pkg_resources.VersionConflict:
- try:
- from setuptools.command.easy_install import main
- except ImportError:
- from easy_install import main
- main(list(argv)+[download_setuptools(delay=0)])
- sys.exit(0) # try to force an exit
- else:
- if argv:
- from setuptools.command.easy_install import main
- main(argv)
- else:
- print "Setuptools version",version,"or greater has been installed."
- print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
-
-def update_md5(filenames):
- """Update our built-in md5 registry"""
-
- import re
-
- for name in filenames:
- base = os.path.basename(name)
- f = open(name,'rb')
- md5_data[base] = md5(f.read()).hexdigest()
- f.close()
-
- data = [" %r: %r,\n" % it for it in md5_data.items()]
- data.sort()
- repl = "".join(data)
-
- import inspect
- srcfile = inspect.getsourcefile(sys.modules[__name__])
- f = open(srcfile, 'rb'); src = f.read(); f.close()
-
- match = re.search("\nmd5_data = {\n([^}]+)}", src)
- if not match:
- print >>sys.stderr, "Internal error!"
- sys.exit(2)
-
- src = src[:match.start(1)] + repl + src[match.end(1):]
- f = open(srcfile,'w')
- f.write(src)
- f.close()
-
-
-if __name__=='__main__':
- if len(sys.argv)>2 and sys.argv[1]=='--md5update':
- update_md5(sys.argv[2:])
- else:
- main(sys.argv[1:])
-
-
-
-
-
-
diff --git a/setup.py b/setup.py
index 183fcceb..3b1d2d91 100644
--- a/setup.py
+++ b/setup.py
@@ -2,8 +2,8 @@
try:
from setuptools import setup, find_packages
except ImportError:
- import ez_setup
- ez_setup.use_setuptools()
+ import distribute_setup
+ distribute_setup.use_setuptools()
from setuptools import setup, find_packages
import os
@@ -47,7 +47,7 @@ A development egg can be found `here
requires = ['Pygments>=0.8', 'Jinja2>=2.2', 'docutils>=0.5']
if sys.version_info < (2, 4):
- print 'ERROR: Sphinx requires at least Python 2.4 to run.'
+ print('ERROR: Sphinx requires at least Python 2.4 to run.')
sys.exit(1)
if sys.version_info < (2, 5):
@@ -63,6 +63,9 @@ if sys.version_info < (2, 5):
else:
del requires[-1]
+ # The uuid module is new in the stdlib in 2.5
+ requires.append('uuid>=1.30')
+
# Provide a "compile_catalog" command that also creates the translated
# JavaScript files if Babel is available.
@@ -198,4 +201,6 @@ setup(
},
install_requires=requires,
cmdclass=cmdclass,
+ use_2to3=True,
+ use_2to3_fixers=['custom_fixers'],
)
diff --git a/sphinx/__init__.py b/sphinx/__init__.py
index 5a6c3571..211e2413 100644
--- a/sphinx/__init__.py
+++ b/sphinx/__init__.py
@@ -9,11 +9,14 @@
:license: BSD, see LICENSE for details.
"""
+# Keep this file executable as-is in Python 3!
+# (Otherwise getting the version out of it from setup.py is impossible.)
+
import sys
from os import path
-__version__ = '1.0.3+'
-__released__ = '1.0.3' # used when Sphinx builds its own docs
+__version__ = '1.1pre'
+__released__ = '1.1 (hg)' # used when Sphinx builds its own docs
package_dir = path.abspath(path.dirname(__file__))
@@ -34,14 +37,16 @@ if '+' in __version__ or 'pre' in __version__:
def main(argv=sys.argv):
+ """Sphinx build "main" command-line entry."""
if sys.version_info[:3] < (2, 4, 0):
- print >>sys.stderr, \
- 'Error: Sphinx requires at least Python 2.4 to run.'
+ sys.stderr.write('Error: Sphinx requires at least '
+ 'Python 2.4 to run.\n')
return 1
try:
from sphinx import cmdline
- except ImportError, err:
+ except ImportError:
+ err = sys.exc_info()[1]
errstr = str(err)
if errstr.lower().startswith('no module named'):
whichmod = errstr[16:]
@@ -54,14 +59,14 @@ def main(argv=sys.argv):
whichmod = 'roman module (which is distributed with Docutils)'
hint = ('This can happen if you upgraded docutils using\n'
'easy_install without uninstalling the old version'
- 'first.')
+ 'first.\n')
else:
whichmod += ' module'
- print >>sys.stderr, ('Error: The %s cannot be found. '
- 'Did you install Sphinx and its dependencies '
- 'correctly?' % whichmod)
+ sys.stderr.write('Error: The %s cannot be found. '
+ 'Did you install Sphinx and its dependencies '
+ 'correctly?\n' % whichmod)
if hint:
- print >> sys.stderr, hint
+ sys.stderr.write(hint)
return 1
raise
return cmdline.main(argv)
diff --git a/sphinx/addnodes.py b/sphinx/addnodes.py
index 0a2f0f7f..592bef5d 100644
--- a/sphinx/addnodes.py
+++ b/sphinx/addnodes.py
@@ -11,103 +11,168 @@
from docutils import nodes
-# index markup
-class index(nodes.Invisible, nodes.Inline, nodes.TextElement): pass
+
+class toctree(nodes.General, nodes.Element):
+ """Node for inserting a "TOC tree"."""
+
# domain-specific object descriptions (class, function etc.)
-# parent node for signature and content
-class desc(nodes.Admonition, nodes.Element): pass
+class desc(nodes.Admonition, nodes.Element):
+ """Node for object descriptions.
+
+ This node is similar to a "definition list" with one definition. It
+ contains one or more ``desc_signature`` and a ``desc_content``.
+ """
+
+class desc_signature(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for object signatures.
-# additional name parts (module name, class name)
-class desc_addname(nodes.Part, nodes.Inline, nodes.TextElement): pass
+ The "term" part of the custom Sphinx definition list.
+ """
+
+
+# nodes to use within a desc_signature
+
+class desc_addname(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for additional name parts (module name, class name)."""
# compatibility alias
desc_classname = desc_addname
-# return type (C); object type
-class desc_type(nodes.Part, nodes.Inline, nodes.TextElement): pass
-# -> annotation (Python)
+
+class desc_type(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for return types or object type names."""
+
class desc_returns(desc_type):
+ """Node for a "returns" annotation (a la -> in Python)."""
def astext(self):
return ' -> ' + nodes.TextElement.astext(self)
-# main name of object
-class desc_name(nodes.Part, nodes.Inline, nodes.TextElement): pass
-# argument list
-class desc_signature(nodes.Part, nodes.Inline, nodes.TextElement): pass
+
+class desc_name(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for the main object name."""
+
class desc_parameterlist(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for a general parameter list."""
child_text_separator = ', '
-class desc_parameter(nodes.Part, nodes.Inline, nodes.TextElement): pass
+
+class desc_parameter(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for a single parameter."""
+
class desc_optional(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for marking optional parts of the parameter list."""
child_text_separator = ', '
def astext(self):
return '[' + nodes.TextElement.astext(self) + ']'
-# annotation (not Python 3-style annotations)
-class desc_annotation(nodes.Part, nodes.Inline, nodes.TextElement): pass
-# node for content
-class desc_content(nodes.General, nodes.Element): pass
+class desc_annotation(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for signature annotations (not Python 3-style annotations)."""
+
+class desc_content(nodes.General, nodes.Element):
+ """Node for object description content.
+
+ This is the "definition" part of the custom Sphinx definition list.
+ """
+
+
+# new admonition-like constructs
+
+class versionmodified(nodes.Admonition, nodes.TextElement):
+ """Node for version change entries.
+
+ Currently used for "versionadded", "versionchanged" and "deprecated"
+ directives.
+ """
+
+class seealso(nodes.Admonition, nodes.Element):
+ """Custom "see also" admonition."""
+
+class productionlist(nodes.Admonition, nodes.Element):
+ """Node for grammar production lists.
+
+ Contains ``production`` nodes.
+ """
+
+class production(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for a single grammar production rule."""
+
+
+# other directive-level nodes
+
+class index(nodes.Invisible, nodes.Inline, nodes.TextElement):
+ """Node for index entries.
+
+ This node is created by the ``index`` directive and has one attribute,
+ ``entries``. Its value is a list of 4-tuples of ``(entrytype, entryname,
+ target, ignored)``.
+
+ *entrytype* is one of "single", "pair", "double", "triple".
+ """
+
+class centered(nodes.Part, nodes.Element):
+ """Deprecated."""
+
+class acks(nodes.Element):
+ """Special node for "acks" lists."""
+
+class hlist(nodes.Element):
+ """Node for "horizontal lists", i.e. lists that should be compressed to
+ take up less vertical space.
+ """
-# \versionadded, \versionchanged, \deprecated
-class versionmodified(nodes.Admonition, nodes.TextElement): pass
+class hlistcol(nodes.Element):
+ """Node for one column in a horizontal list."""
-# seealso
-class seealso(nodes.Admonition, nodes.Element): pass
+class compact_paragraph(nodes.paragraph):
+ """Node for a compact paragraph (which never makes a <p> node)."""
-# productionlist
-class productionlist(nodes.Admonition, nodes.Element): pass
-class production(nodes.Part, nodes.Inline, nodes.TextElement): pass
+class glossary(nodes.Element):
+ """Node to insert a glossary."""
-# toc tree
-class toctree(nodes.General, nodes.Element): pass
+class only(nodes.Element):
+ """Node for "only" directives (conditional inclusion based on tags)."""
-# centered
-class centered(nodes.Part, nodes.Element): pass
-# pending xref
-class pending_xref(nodes.Inline, nodes.Element): pass
+# meta-information nodes
-# compact paragraph -- never makes a <p>
-class compact_paragraph(nodes.paragraph): pass
+class start_of_file(nodes.Element):
+ """Node to mark start of a new file, used in the LaTeX builder only."""
-# reference to a file to download
-class download_reference(nodes.reference): pass
+class highlightlang(nodes.Element):
+ """Inserted to set the highlight language and line number options for
+ subsequent code blocks.
+ """
-# for the ACKS list
-class acks(nodes.Element): pass
+class tabular_col_spec(nodes.Element):
+ """Node for specifying tabular columns, used for LaTeX output."""
-# for horizontal lists
-class hlist(nodes.Element): pass
-class hlistcol(nodes.Element): pass
+class meta(nodes.Special, nodes.PreBibliographic, nodes.Element):
+ """Node for meta directive -- same as docutils' standard meta node,
+ but pickleable.
+ """
-# sets the highlighting language for literal blocks
-class highlightlang(nodes.Element): pass
-# like emphasis, but doesn't apply further text processors, e.g. smartypants
-class literal_emphasis(nodes.emphasis): pass
+# inline nodes
-# for abbreviations (with explanations)
-class abbreviation(nodes.Inline, nodes.TextElement): pass
+class pending_xref(nodes.Inline, nodes.Element):
+ """Node for cross-references that cannot be resolved without complete
+ information about all documents.
-# glossary
-class glossary(nodes.Element): pass
+ These nodes are resolved before writing output, in
+ BuildEnvironment.resolve_references.
+ """
-# start of a file, used in the LaTeX builder only
-class start_of_file(nodes.Element): pass
+class download_reference(nodes.reference):
+ """Node for download references, similar to pending_xref."""
-# tabular column specification, used for the LaTeX writer
-class tabular_col_spec(nodes.Element): pass
+class literal_emphasis(nodes.emphasis):
+ """Node that behaves like `emphasis`, but further text processors are not
+ applied (e.g. smartypants for HTML output).
+ """
-# only (in/exclusion based on tags)
-class only(nodes.Element): pass
+class abbreviation(nodes.Inline, nodes.TextElement):
+ """Node for abbreviations with explanations."""
-# meta directive -- same as docutils' standard meta node, but pickleable
-class meta(nodes.Special, nodes.PreBibliographic, nodes.Element): pass
-# make them known to docutils. this is needed, because the HTML writer
-# will choke at some point if these are not added
-nodes._add_node_class_names("""index desc desc_content desc_signature
- desc_type desc_returns desc_addname desc_name desc_parameterlist
- desc_parameter desc_optional download_reference hlist hlistcol
- centered versionmodified seealso productionlist production toctree
- pending_xref compact_paragraph highlightlang literal_emphasis
- abbreviation glossary acks module start_of_file tabular_col_spec
- meta""".split())
+# make the new nodes known to docutils; needed because the HTML writer will
+# choke at some point if these are not added
+nodes._add_node_class_names(k for k in globals().keys()
+ if k != 'nodes' and k[0] != '_')
diff --git a/sphinx/application.py b/sphinx/application.py
index 11f887da..50f4102e 100644
--- a/sphinx/application.py
+++ b/sphinx/application.py
@@ -37,9 +37,6 @@ from sphinx.util.osutil import ENOENT
from sphinx.util.console import bold
-# Directive is either new-style or old-style
-clstypes = (type, types.ClassType)
-
# List of all known core events. Maps name to arguments description.
events = {
'builder-inited': '',
@@ -136,9 +133,8 @@ class Sphinx(object):
self._init_builder(buildername)
def _init_i18n(self):
- """
- Load translated strings from the configured localedirs if
- enabled in the configuration.
+ """Load translated strings from the configured localedirs if enabled in
+ the configuration.
"""
if self.config.language is not None:
self.info(bold('loading translations [%s]... ' %
@@ -490,8 +486,7 @@ class TemplateBridge(object):
"""
def init(self, builder, theme=None, dirs=None):
- """
- Called by the builder to initialize the template system.
+ """Called by the builder to initialize the template system.
*builder* is the builder object; you'll probably want to look at the
value of ``builder.config.templates_path``.
@@ -502,23 +497,20 @@ class TemplateBridge(object):
raise NotImplementedError('must be implemented in subclasses')
def newest_template_mtime(self):
- """
- Called by the builder to determine if output files are outdated
+ """Called by the builder to determine if output files are outdated
because of template changes. Return the mtime of the newest template
file that was changed. The default implementation returns ``0``.
"""
return 0
def render(self, template, context):
- """
- Called by the builder to render a template given as a filename with a
- specified context (a Python dictionary).
+ """Called by the builder to render a template given as a filename with
+ a specified context (a Python dictionary).
"""
raise NotImplementedError('must be implemented in subclasses')
def render_string(self, template, context):
- """
- Called by the builder to render a template given as a string with a
+ """Called by the builder to render a template given as a string with a
specified context (a Python dictionary).
"""
raise NotImplementedError('must be implemented in subclasses')
diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py
index e345d570..ce04f769 100644
--- a/sphinx/builders/__init__.py
+++ b/sphinx/builders/__init__.py
@@ -55,16 +55,13 @@ class Builder(object):
# helper methods
def init(self):
- """
- Load necessary templates and perform initialization. The default
+ """Load necessary templates and perform initialization. The default
implementation does nothing.
"""
pass
def create_template_bridge(self):
- """
- Return the template bridge configured.
- """
+ """Return the template bridge configured."""
if self.config.template_bridge:
self.templates = self.app.import_object(
self.config.template_bridge, 'template_bridge setting')()
@@ -73,23 +70,23 @@ class Builder(object):
self.templates = BuiltinTemplateLoader()
def get_target_uri(self, docname, typ=None):
- """
- Return the target URI for a document name (*typ* can be used to qualify
- the link characteristic for individual builders).
+ """Return the target URI for a document name.
+
+ *typ* can be used to qualify the link characteristic for individual
+ builders.
"""
raise NotImplementedError
def get_relative_uri(self, from_, to, typ=None):
- """
- Return a relative URI between two source filenames. May raise
- environment.NoUri if there's no way to return a sensible URI.
+ """Return a relative URI between two source filenames.
+
+ May raise environment.NoUri if there's no way to return a sensible URI.
"""
return relative_uri(self.get_target_uri(from_),
self.get_target_uri(to, typ))
def get_outdated_docs(self):
- """
- Return an iterable of output files that are outdated, or a string
+ """Return an iterable of output files that are outdated, or a string
describing what an update build will build.
If the builder does not output individual files corresponding to
@@ -129,9 +126,7 @@ class Builder(object):
supported_image_types = []
def post_process_images(self, doctree):
- """
- Pick the best candidate for all image URIs.
- """
+ """Pick the best candidate for all image URIs."""
for node in doctree.traverse(nodes.image):
if '?' in node['candidates']:
# don't rewrite nonlocal image URIs
@@ -198,9 +193,9 @@ class Builder(object):
'out of date' % len(to_build))
def build(self, docnames, summary=None, method='update'):
- """
- Main build method. First updates the environment, and then
- calls :meth:`write`.
+ """Main build method.
+
+ First updates the environment, and then calls :meth:`write`.
"""
if summary:
self.info(bold('building [%s]: ' % self.name), nonl=1)
@@ -302,15 +297,18 @@ class Builder(object):
raise NotImplementedError
def finish(self):
- """
- Finish the building process. The default implementation does nothing.
+ """Finish the building process.
+
+ The default implementation does nothing.
"""
pass
def cleanup(self):
+ """Cleanup any resources.
+
+ The default implementation does nothing.
"""
- Cleanup any resources. The default implementation does nothing.
- """
+ pass
BUILTIN_BUILDERS = {
@@ -329,4 +327,6 @@ BUILTIN_BUILDERS = {
'man': ('manpage', 'ManualPageBuilder'),
'changes': ('changes', 'ChangesBuilder'),
'linkcheck': ('linkcheck', 'CheckExternalLinksBuilder'),
+ 'websupport': ('websupport', 'WebSupportBuilder'),
+ 'gettext': ('intl', 'MessageCatalogBuilder'),
}
diff --git a/sphinx/builders/devhelp.py b/sphinx/builders/devhelp.py
index a5a0f280..d43cd624 100644
--- a/sphinx/builders/devhelp.py
+++ b/sphinx/builders/devhelp.py
@@ -42,7 +42,6 @@ except ImportError:
class DevhelpBuilder(StandaloneHTMLBuilder):
"""
Builder that also outputs GNOME Devhelp file.
-
"""
name = 'devhelp'
diff --git a/sphinx/builders/epub.py b/sphinx/builders/epub.py
index aea07d4d..3e123a0a 100644
--- a/sphinx/builders/epub.py
+++ b/sphinx/builders/epub.py
@@ -130,7 +130,8 @@ _refuri_re = re.compile("([^#:]*#)(.*)")
# The epub publisher
class EpubBuilder(StandaloneHTMLBuilder):
- """Builder that outputs epub files.
+ """
+ Builder that outputs epub files.
It creates the metainfo files container.opf, toc.ncx, mimetype, and
META-INF/container.xml. Afterwards, all necessary files are zipped to an
@@ -222,12 +223,12 @@ class EpubBuilder(StandaloneHTMLBuilder):
})
def fix_fragment(self, match):
- """Return a href attribute with colons replaced by hyphens.
- """
+ """Return a href attribute with colons replaced by hyphens."""
return match.group(1) + match.group(2).replace(':', '-')
def fix_ids(self, tree):
"""Replace colons with hyphens in href and id attributes.
+
Some readers crash because they interpret the part as a
transport protocol specification.
"""
@@ -246,8 +247,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
node.attributes['ids'] = newids
def add_visible_links(self, tree):
- """Append visible link targets after external links.
- """
+ """Append visible link targets after external links."""
for node in tree.traverse(nodes.reference):
uri = node.get('refuri', '')
if (uri.startswith('http:') or uri.startswith('https:') or
@@ -261,6 +261,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
def write_doc(self, docname, doctree):
"""Write one document file.
+
This method is overwritten in order to fix fragment identifiers
and to add visible external links.
"""
@@ -269,8 +270,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return StandaloneHTMLBuilder.write_doc(self, docname, doctree)
def fix_genindex(self, tree):
- """Fix href attributes for genindex pages.
- """
+ """Fix href attributes for genindex pages."""
# XXX: modifies tree inline
# Logic modeled from themes/basic/genindex.html
for key, columns in tree:
@@ -288,8 +288,9 @@ class EpubBuilder(StandaloneHTMLBuilder):
def handle_page(self, pagename, addctx, templatename='page.html',
outfilename=None, event_arg=None):
"""Create a rendered page.
- This method is overwritten for genindex pages in order to fix
- href link attributes.
+
+ This method is overwritten for genindex pages in order to fix href link
+ attributes.
"""
if pagename.startswith('genindex'):
self.fix_genindex(addctx['genindexentries'])
@@ -413,6 +414,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
def insert_subnav(self, node, subnav):
"""Insert nested navpoints for given node.
+
The node and subnav are already rendered to text.
"""
nlist = node.rsplit('\n', 1)
@@ -422,8 +424,8 @@ class EpubBuilder(StandaloneHTMLBuilder):
def build_navpoints(self, nodes):
"""Create the toc navigation structure.
- Subelements of a node are nested inside the navpoint.
- For nested nodes the parent node is reinserted in the subnav.
+ Subelements of a node are nested inside the navpoint. For nested nodes
+ the parent node is reinserted in the subnav.
"""
navstack = []
navlist = []
@@ -461,8 +463,8 @@ class EpubBuilder(StandaloneHTMLBuilder):
return '\n'.join(navlist)
def toc_metadata(self, level, navpoints):
- """Create a dictionary with all metadata for the toc.ncx
- file properly escaped.
+ """Create a dictionary with all metadata for the toc.ncx file
+ properly escaped.
"""
metadata = {}
metadata['uid'] = self.config.epub_uid
@@ -487,8 +489,8 @@ class EpubBuilder(StandaloneHTMLBuilder):
def build_epub(self, outdir, outname):
"""Write the epub file.
- It is a zip file with the mimetype file stored uncompressed
- as the first entry.
+ It is a zip file with the mimetype file stored uncompressed as the first
+ entry.
"""
self.info('writing %s file...' % outname)
projectfiles = ['META-INF/container.xml', 'content.opf', 'toc.ncx'] \
diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py
index 951f516d..78bafda8 100644
--- a/sphinx/builders/html.py
+++ b/sphinx/builders/html.py
@@ -35,7 +35,7 @@ from sphinx.util.osutil import SEP, os_path, relative_uri, ensuredir, \
movefile, ustrftime, copyfile
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.matching import patmatch, compile_matchers
-from sphinx.util.pycompat import any
+from sphinx.util.pycompat import any, b
from sphinx.errors import SphinxError
from sphinx.locale import _
from sphinx.search import js_index
@@ -63,6 +63,7 @@ class StandaloneHTMLBuilder(Builder):
out_suffix = '.html'
link_suffix = '.html' # defaults to matching out_suffix
indexer_format = js_index
+ indexer_dumps_unicode = True
supported_image_types = ['image/svg+xml', 'image/png',
'image/gif', 'image/jpeg']
searchindex_filename = 'searchindex.js'
@@ -146,8 +147,9 @@ class StandaloneHTMLBuilder(Builder):
cfgdict = dict((name, self.config[name])
for (name, desc) in self.config.values.iteritems()
if desc[1] == 'html')
- self.config_hash = md5(str(cfgdict)).hexdigest()
- self.tags_hash = md5(str(sorted(self.tags))).hexdigest()
+ self.config_hash = md5(unicode(cfgdict).encode('utf-8')).hexdigest()
+ self.tags_hash = md5(unicode(sorted(self.tags)).encode('utf-8')) \
+ .hexdigest()
old_config_hash = old_tags_hash = ''
try:
fp = open(path.join(self.outdir, '.buildinfo'))
@@ -199,7 +201,7 @@ class StandaloneHTMLBuilder(Builder):
"""Utility: Render a lone doctree node."""
if node is None:
return {'fragment': ''}
- doc = new_document('<partial node>')
+ doc = new_document(b('<partial node>'))
doc.append(node)
if self._publisher is None:
@@ -585,8 +587,7 @@ class StandaloneHTMLBuilder(Builder):
self.theme.cleanup()
def post_process_images(self, doctree):
- """
- Pick the best candidate for an image and link down-scaled images to
+ """Pick the best candidate for an image and link down-scaled images to
their high res version.
"""
Builder.post_process_images(self, doctree)
@@ -730,10 +731,12 @@ class StandaloneHTMLBuilder(Builder):
self.info(bold('dumping object inventory... '), nonl=True)
f = open(path.join(self.outdir, INVENTORY_FILENAME), 'wb')
try:
- f.write('# Sphinx inventory version 2\n')
- f.write('# Project: %s\n' % self.config.project.encode('utf-8'))
- f.write('# Version: %s\n' % self.config.version.encode('utf-8'))
- f.write('# The remainder of this file is compressed using zlib.\n')
+ f.write((u'# Sphinx inventory version 2\n'
+ u'# Project: %s\n'
+ u'# Version: %s\n'
+ u'# The remainder of this file is compressed using zlib.\n'
+ % (self.config.project, self.config.version)
+ ).encode('utf-8'))
compressor = zlib.compressobj(9)
for domainname, domain in self.env.domains.iteritems():
for name, dispname, type, docname, anchor, prio in \
@@ -745,11 +748,9 @@ class StandaloneHTMLBuilder(Builder):
if dispname == name:
dispname = u'-'
f.write(compressor.compress(
- '%s %s:%s %s %s %s\n' % (name.encode('utf-8'),
- domainname.encode('utf-8'),
- type.encode('utf-8'), prio,
- uri.encode('utf-8'),
- dispname.encode('utf-8'))))
+ (u'%s %s:%s %s %s %s\n' % (name, domainname, type,
+ prio, uri, dispname)
+ ).encode('utf-8')))
f.write(compressor.flush())
finally:
f.close()
@@ -761,7 +762,10 @@ class StandaloneHTMLBuilder(Builder):
searchindexfn = path.join(self.outdir, self.searchindex_filename)
# first write to a temporary file, so that if dumping fails,
# the existing index won't be overwritten
- f = open(searchindexfn + '.tmp', 'wb')
+ if self.indexer_dumps_unicode:
+ f = codecs.open(searchindexfn + '.tmp', 'w', encoding='utf-8')
+ else:
+ f = open(searchindexfn + '.tmp', 'wb')
try:
self.indexer.dump(f, self.indexer_format)
finally:
@@ -918,6 +922,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
#: implements a `dump`, `load`, `dumps` and `loads` functions
#: (pickle, simplejson etc.)
implementation = None
+ implementation_dumps_unicode = False
#: the filename for the global context file
globalcontext_filename = None
@@ -940,6 +945,17 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
return docname[:-5] # up to sep
return docname + SEP
+ def dump_context(self, context, filename):
+ if self.implementation_dumps_unicode:
+ f = codecs.open(filename, 'w', encoding='utf-8')
+ else:
+ f = open(filename, 'wb')
+ try:
+ # XXX: the third argument is pickle-specific!
+ self.implementation.dump(context, f, 2)
+ finally:
+ f.close()
+
def handle_page(self, pagename, ctx, templatename='page.html',
outfilename=None, event_arg=None):
ctx['current_page_name'] = pagename
@@ -953,11 +969,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
ctx, event_arg)
ensuredir(path.dirname(outfilename))
- f = open(outfilename, 'wb')
- try:
- self.implementation.dump(ctx, f, 2)
- finally:
- f.close()
+ self.dump_context(ctx, outfilename)
# if there is a source file, copy the source file for the
# "show source" link
@@ -970,11 +982,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
def handle_finish(self):
# dump the global context
outfilename = path.join(self.outdir, self.globalcontext_filename)
- f = open(outfilename, 'wb')
- try:
- self.implementation.dump(self.globalcontext, f, 2)
- finally:
- f.close()
+ self.dump_context(self.globalcontext, outfilename)
# super here to dump the search index
StandaloneHTMLBuilder.handle_finish(self)
@@ -994,7 +1002,9 @@ class PickleHTMLBuilder(SerializingHTMLBuilder):
A Builder that dumps the generated HTML into pickle files.
"""
implementation = pickle
+ implementation_dumps_unicode = False
indexer_format = pickle
+ indexer_dumps_unicode = False
name = 'pickle'
out_suffix = '.fpickle'
globalcontext_filename = 'globalcontext.pickle'
@@ -1009,7 +1019,9 @@ class JSONHTMLBuilder(SerializingHTMLBuilder):
A builder that dumps the generated HTML into JSON files.
"""
implementation = jsonimpl
+ implementation_dumps_unicode = True
indexer_format = jsonimpl
+ indexer_dumps_unicode = True
name = 'json'
out_suffix = '.fjson'
globalcontext_filename = 'globalcontext.json'
diff --git a/sphinx/builders/htmlhelp.py b/sphinx/builders/htmlhelp.py
index 848068e5..f5d716d9 100644
--- a/sphinx/builders/htmlhelp.py
+++ b/sphinx/builders/htmlhelp.py
@@ -258,7 +258,8 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
def write_index(title, refs, subitems):
def write_param(name, value):
item = ' <param name="%s" value="%s">\n' % (name, value)
- f.write(item.encode('ascii', 'xmlcharrefreplace'))
+ f.write(item.encode('ascii', 'xmlcharrefreplace')
+ .decode('ascii'))
title = cgi.escape(title)
f.write('<LI> <OBJECT type="text/sitemap">\n')
write_param('Keyword', title)
diff --git a/sphinx/builders/intl.py b/sphinx/builders/intl.py
new file mode 100644
index 00000000..1572747e
--- /dev/null
+++ b/sphinx/builders/intl.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.builders.intl
+ ~~~~~~~~~~~~~~~~~~~~
+
+ The MessageCatalogBuilder class.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from os import path
+from codecs import open
+from datetime import datetime
+from collections import defaultdict
+
+from docutils import nodes
+
+from sphinx.builders import Builder
+from sphinx.builders.versioning import VersioningBuilderMixin
+from sphinx.util.nodes import extract_messages
+from sphinx.util.osutil import SEP, copyfile
+from sphinx.util.console import darkgreen
+
+POHEADER = ur"""
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) %(copyright)s
+# This file is distributed under the same license as the %(project)s package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: %(version)s\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: %(ctime)s\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+"""[1:]
+
+
+class I18nBuilder(Builder, VersioningBuilderMixin):
+ """
+ General i18n builder.
+ """
+ name = 'i18n'
+
+ def init(self):
+ Builder.init(self)
+ VersioningBuilderMixin.init(self)
+ self.catalogs = defaultdict(dict)
+
+ def get_target_uri(self, docname, typ=None):
+ return ''
+
+ def get_outdated_docs(self):
+ return self.env.found_docs
+
+ def prepare_writing(self, docnames):
+ return
+
+ def write_doc(self, docname, doctree):
+ catalog = self.catalogs[docname.split(SEP, 1)[0]]
+
+ self.handle_versioning(docname, doctree, nodes.TextElement)
+
+ for node, msg in extract_messages(doctree):
+ catalog.setdefault(node.uid, msg)
+
+ def finish(self):
+ Builder.finish(self)
+ VersioningBuilderMixin.finish(self)
+
+
+class MessageCatalogBuilder(I18nBuilder):
+ """
+ Builds gettext-style message catalogs (.pot files).
+ """
+ name = 'gettext'
+
+ def finish(self):
+ I18nBuilder.finish(self)
+ data = dict(
+ version = self.config.version,
+ copyright = self.config.copyright,
+ project = self.config.project,
+ # XXX should supply tz
+ ctime = datetime.now().strftime('%Y-%m-%d %H:%M%z'),
+ )
+ for section, messages in self.status_iterator(
+ self.catalogs.iteritems(), "writing message catalogs... ",
+ lambda (section, _):darkgreen(section), len(self.catalogs)):
+
+ pofn = path.join(self.outdir, section + '.pot')
+ pofile = open(pofn, 'w', encoding='utf-8')
+ try:
+ pofile.write(POHEADER % data)
+ for uid, message in messages.iteritems():
+ # message contains *one* line of text ready for translation
+ message = message.replace(u'\\', ur'\\'). \
+ replace(u'"', ur'\"')
+ pomsg = u'#%s\nmsgid "%s"\nmsgstr ""\n\n' % (uid, message)
+ pofile.write(pomsg)
+ finally:
+ pofile.close()
diff --git a/sphinx/builders/qthelp.py b/sphinx/builders/qthelp.py
index ffc52334..e86f1921 100644
--- a/sphinx/builders/qthelp.py
+++ b/sphinx/builders/qthelp.py
@@ -130,8 +130,16 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
for indexname, indexcls, content, collapse in self.domain_indices:
item = section_template % {'title': indexcls.localname,
'ref': '%s.html' % indexname}
- sections.append(' '*4*4 + item)
- sections = '\n'.join(sections)
+ sections.append((' ' * 4 * 4 + item).encode('utf-8'))
+        # sections may be unicode strings or byte strings; we have to make
+        # sure they are all byte strings before joining them
+ new_sections = []
+ for section in sections:
+ if isinstance(section, unicode):
+ new_sections.append(section.encode('utf-8'))
+ else:
+ new_sections.append(section)
+ sections = u'\n'.encode('utf-8').join(new_sections)
# keywords
keywords = []
@@ -230,7 +238,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
link = node['refuri']
title = escape(node.astext()).replace('"','&quot;')
item = section_template % {'title': title, 'ref': link}
- item = ' '*4*indentlevel + item.encode('ascii', 'xmlcharrefreplace')
+ item = u' ' * 4 * indentlevel + item
parts.append(item.encode('ascii', 'xmlcharrefreplace'))
elif isinstance(node, nodes.bullet_list):
for subnode in node:
diff --git a/sphinx/builders/versioning.py b/sphinx/builders/versioning.py
new file mode 100644
index 00000000..6c2bccca
--- /dev/null
+++ b/sphinx/builders/versioning.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.builders.versioning
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import os
+import pickle
+
+from docutils.utils import Reporter
+
+from sphinx.util.osutil import copyfile
+from sphinx.environment import WarningStream
+from sphinx.versioning import add_uids, merge_doctrees
+
+
+class VersioningBuilderMixin(object):
+ def walk_doctree_files(self):
+ for root, dirs, files in os.walk(self.doctreedir):
+ for fn in files:
+ yield os.path.join(root, fn)
+
+ def init(self):
+ for fp in self.walk_doctree_files():
+ if fp.endswith('.doctree'):
+ copyfile(fp, fp + '.old')
+
+ def get_old_doctree(self, docname):
+ fp = self.env.doc2path(docname, self.doctreedir, '.doctree.old')
+ try:
+ f = open(fp, 'rb')
+ try:
+ doctree = pickle.load(f)
+ finally:
+ f.close()
+ except IOError:
+ return None
+ doctree.settings.env = self.env
+ doctree.reporter = Reporter(self.env.doc2path(docname), 2, 5,
+ stream=WarningStream(self.env._warnfunc))
+ return doctree
+
+ def resave_doctree(self, docname, doctree):
+ reporter = doctree.reporter
+ doctree.reporter = None
+ doctree.settings.warning_stream = None
+ doctree.settings.env = None
+ doctree.settings.record_dependencies = None
+
+ fp = self.env.doc2path(docname, self.doctreedir, '.doctree')
+ f = open(fp, 'wb')
+ try:
+ pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
+ finally:
+ f.close()
+
+ doctree.reporter = reporter
+
+ def handle_versioning(self, docname, doctree, condition):
+ old_doctree = self.get_old_doctree(docname)
+ if old_doctree:
+ list(merge_doctrees(old_doctree, doctree, condition))
+ else:
+ list(add_uids(doctree, condition))
+ self.resave_doctree(docname, doctree)
+
+ def finish(self):
+ for fp in self.walk_doctree_files():
+ if fp.endswith('.doctree.old'):
+ os.remove(fp)
diff --git a/sphinx/builders/websupport.py b/sphinx/builders/websupport.py
new file mode 100644
index 00000000..303adfe6
--- /dev/null
+++ b/sphinx/builders/websupport.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.builders.websupport
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Builder for the web support package.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import cPickle as pickle
+from os import path
+from cgi import escape
+import posixpath
+import shutil
+
+from docutils.io import StringOutput
+
+from sphinx.util.osutil import os_path, relative_uri, ensuredir, copyfile
+from sphinx.util.jsonimpl import dumps as dump_json
+from sphinx.util.websupport import is_commentable
+from sphinx.builders.html import StandaloneHTMLBuilder
+from sphinx.builders.versioning import VersioningBuilderMixin
+from sphinx.writers.websupport import WebSupportTranslator
+
+
+class WebSupportBuilder(StandaloneHTMLBuilder, VersioningBuilderMixin):
+ """
+ Builds documents for the web support package.
+ """
+ name = 'websupport'
+ out_suffix = '.fpickle'
+
+ def init(self):
+ StandaloneHTMLBuilder.init(self)
+ VersioningBuilderMixin.init(self)
+
+ def init_translator_class(self):
+ self.translator_class = WebSupportTranslator
+
+ def write_doc(self, docname, doctree):
+ destination = StringOutput(encoding='utf-8')
+ doctree.settings = self.docsettings
+
+ self.handle_versioning(docname, doctree, is_commentable)
+
+ self.cur_docname = docname
+ self.secnumbers = self.env.toc_secnumbers.get(docname, {})
+ self.imgpath = '/' + posixpath.join(self.app.staticdir, '_images')
+ self.post_process_images(doctree)
+ self.dlpath = '/' + posixpath.join(self.app.staticdir, '_downloads')
+ self.docwriter.write(doctree, destination)
+ self.docwriter.assemble_parts()
+ body = self.docwriter.parts['fragment']
+ metatags = self.docwriter.clean_meta
+
+ ctx = self.get_doc_context(docname, body, metatags)
+ self.index_page(docname, doctree, ctx.get('title', ''))
+ self.handle_page(docname, ctx, event_arg=doctree)
+
+ def get_target_uri(self, docname, typ=None):
+ return docname
+
+ def load_indexer(self, docnames):
+ self.indexer = self.app.search
+ self.indexer.init_indexing(changed=docnames)
+
+ def handle_page(self, pagename, addctx, templatename='page.html',
+ outfilename=None, event_arg=None):
+ # This is mostly copied from StandaloneHTMLBuilder. However, instead
+ # of rendering the template and saving the html, create a context
+ # dict and pickle it.
+ ctx = self.globalcontext.copy()
+ ctx['pagename'] = pagename
+
+ def pathto(otheruri, resource=False,
+ baseuri=self.get_target_uri(pagename)):
+ if not resource:
+ otheruri = self.get_target_uri(otheruri)
+ return relative_uri(baseuri, otheruri) or '#'
+ else:
+ return '/' + posixpath.join(self.app.staticdir, otheruri)
+ ctx['pathto'] = pathto
+ ctx['hasdoc'] = lambda name: name in self.env.all_docs
+ ctx['encoding'] = encoding = self.config.html_output_encoding
+ ctx['toctree'] = lambda **kw: self._get_local_toctree(pagename, **kw)
+ self.add_sidebars(pagename, ctx)
+ ctx.update(addctx)
+
+ self.app.emit('html-page-context', pagename, templatename,
+ ctx, event_arg)
+
+ # Create a dict that will be pickled and used by webapps.
+ css = '<link rel="stylesheet" href="%s" type=text/css />' % \
+ pathto('_static/pygments.css', 1)
+ doc_ctx = {'body': ctx.get('body', ''),
+ 'title': ctx.get('title', ''),
+ 'css': css,
+ 'js': self._make_js(ctx)}
+    # Partially render the html template to provide a more useful ctx.
+ template = self.templates.environment.get_template(templatename)
+ template_module = template.make_module(ctx)
+ if hasattr(template_module, 'sidebar'):
+ doc_ctx['sidebar'] = template_module.sidebar()
+ if hasattr(template_module, 'relbar'):
+ doc_ctx['relbar'] = template_module.relbar()
+
+ if not outfilename:
+ outfilename = path.join(self.outdir, 'pickles',
+ os_path(pagename) + self.out_suffix)
+
+ ensuredir(path.dirname(outfilename))
+ f = open(outfilename, 'wb')
+ try:
+ pickle.dump(doc_ctx, f, pickle.HIGHEST_PROTOCOL)
+ finally:
+ f.close()
+
+ # if there is a source file, copy the source file for the
+ # "show source" link
+ if ctx.get('sourcename'):
+ source_name = path.join(self.app.builddir, self.app.staticdir,
+ '_sources', os_path(ctx['sourcename']))
+ ensuredir(path.dirname(source_name))
+ copyfile(self.env.doc2path(pagename), source_name)
+
+ def handle_finish(self):
+ StandaloneHTMLBuilder.handle_finish(self)
+ VersioningBuilderMixin.finish(self)
+ directories = ['_images', '_static']
+ for directory in directories:
+ src = path.join(self.outdir, directory)
+ dst = path.join(self.app.builddir, self.app.staticdir, directory)
+ if path.isdir(src):
+ if path.isdir(dst):
+ shutil.rmtree(dst)
+ shutil.move(src, dst)
+
+ def dump_search_index(self):
+ self.indexer.finish_indexing()
+
+ def _make_js(self, ctx):
+ def make_script(file):
+ path = ctx['pathto'](file, 1)
+ return '<script type="text/javascript" src="%s"></script>' % path
+
+ opts = {
+ 'URL_ROOT': ctx.get('url_root', ''),
+ 'VERSION': ctx['release'],
+ 'COLLAPSE_INDEX': False,
+ 'FILE_SUFFIX': '',
+ 'HAS_SOURCE': ctx['has_source']
+ }
+ scripts = [make_script(file) for file in ctx['script_files']]
+ scripts.append(make_script('_static/websupport.js'))
+ return '\n'.join([
+ '<script type="text/javascript">'
+ 'var DOCUMENTATION_OPTIONS = %s;' % dump_json(opts),
+ '</script>'
+ ] + scripts)
diff --git a/sphinx/config.py b/sphinx/config.py
index 12c2a04b..2e0a116c 100644
--- a/sphinx/config.py
+++ b/sphinx/config.py
@@ -11,16 +11,23 @@
import os
import re
+import sys
from os import path
from sphinx.errors import ConfigError
from sphinx.util.osutil import make_filename
+from sphinx.util.pycompat import bytes, b, convert_with_2to3
-nonascii_re = re.compile(r'[\x80-\xff]')
+nonascii_re = re.compile(b(r'[\x80-\xff]'))
+CONFIG_SYNTAX_ERROR = "There is a syntax error in your configuration file: %s"
+if sys.version_info >= (3, 0):
+ CONFIG_SYNTAX_ERROR += "\nDid you change the syntax from 2.x to 3.x?"
class Config(object):
- """Configuration file abstraction."""
+ """
+ Configuration file abstraction.
+ """
# the values are: (default, what needs to be rebuilt if changed)
@@ -163,12 +170,30 @@ class Config(object):
config['tags'] = tags
olddir = os.getcwd()
try:
+ # we promise to have the config dir as current dir while the
+ # config file is executed
+ os.chdir(dirname)
+ # get config source
+ f = open(config_file, 'rb')
try:
- os.chdir(dirname)
- execfile(config['__file__'], config)
+ source = f.read()
+ finally:
+ f.close()
+ try:
+ # compile to a code object, handle syntax errors
+ try:
+ code = compile(source, config_file, 'exec')
+ except SyntaxError:
+ if convert_with_2to3:
+ # maybe the file uses 2.x syntax; try to refactor to
+ # 3.x syntax using 2to3
+ source = convert_with_2to3(config_file)
+ code = compile(source, config_file, 'exec')
+ else:
+ raise
+ exec code in config
except SyntaxError, err:
- raise ConfigError('There is a syntax error in your '
- 'configuration file: ' + str(err))
+ raise ConfigError(CONFIG_SYNTAX_ERROR % err)
finally:
os.chdir(olddir)
@@ -182,10 +207,11 @@ class Config(object):
# check all string values for non-ASCII characters in bytestrings,
# since that can result in UnicodeErrors all over the place
for name, value in self._raw_config.iteritems():
- if isinstance(value, str) and nonascii_re.search(value):
+ if isinstance(value, bytes) and nonascii_re.search(value):
warn('the config value %r is set to a string with non-ASCII '
'characters; this can lead to Unicode errors occurring. '
- 'Please use Unicode strings, e.g. u"Content".' % name)
+ 'Please use Unicode strings, e.g. %r.' % (name, u'Content')
+ )
def init_values(self):
config = self._raw_config
diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py
index 0647daf0..d235c4ca 100644
--- a/sphinx/directives/code.py
+++ b/sphinx/directives/code.py
@@ -7,10 +7,8 @@
:license: BSD, see LICENSE for details.
"""
-import os
import sys
import codecs
-from os import path
from docutils import nodes
from docutils.parsers.rst import Directive, directives
@@ -93,23 +91,11 @@ class LiteralInclude(Directive):
def run(self):
document = self.state.document
- filename = self.arguments[0]
if not document.settings.file_insertion_enabled:
return [document.reporter.warning('File insertion disabled',
line=self.lineno)]
env = document.settings.env
- if filename.startswith('/') or filename.startswith(os.sep):
- rel_fn = filename[1:]
- else:
- docdir = path.dirname(env.doc2path(env.docname, base=None))
- rel_fn = path.join(docdir, filename)
- try:
- fn = path.join(env.srcdir, rel_fn)
- except UnicodeDecodeError:
- # the source directory is a bytestring with non-ASCII characters;
- # let's try to encode the rel_fn in the file system encoding
- rel_fn = rel_fn.encode(sys.getfilesystemencoding())
- fn = path.join(env.srcdir, rel_fn)
+ rel_filename, filename = env.relfn2path(self.arguments[0])
if 'pyobject' in self.options and 'lines' in self.options:
return [document.reporter.warning(
@@ -119,7 +105,7 @@ class LiteralInclude(Directive):
encoding = self.options.get('encoding', env.config.source_encoding)
codec_info = codecs.lookup(encoding)
try:
- f = codecs.StreamReaderWriter(open(fn, 'U'),
+ f = codecs.StreamReaderWriter(open(filename, 'rb'),
codec_info[2], codec_info[3], 'strict')
lines = f.readlines()
f.close()
@@ -136,7 +122,7 @@ class LiteralInclude(Directive):
objectname = self.options.get('pyobject')
if objectname is not None:
from sphinx.pycode import ModuleAnalyzer
- analyzer = ModuleAnalyzer.for_file(fn, '')
+ analyzer = ModuleAnalyzer.for_file(filename, '')
tags = analyzer.find_tags()
if objectname not in tags:
return [document.reporter.warning(
@@ -178,13 +164,13 @@ class LiteralInclude(Directive):
text = ''.join(lines)
if self.options.get('tab-width'):
text = text.expandtabs(self.options['tab-width'])
- retnode = nodes.literal_block(text, text, source=fn)
+ retnode = nodes.literal_block(text, text, source=filename)
retnode.line = 1
if self.options.get('language', ''):
retnode['language'] = self.options['language']
if 'linenos' in self.options:
retnode['linenos'] = True
- document.settings.env.note_dependency(rel_fn)
+ env.note_dependency(rel_filename)
return [retnode]
diff --git a/sphinx/directives/other.py b/sphinx/directives/other.py
index 332c4084..cbf19b55 100644
--- a/sphinx/directives/other.py
+++ b/sphinx/directives/other.py
@@ -13,13 +13,19 @@ from docutils import nodes
from docutils.parsers.rst import Directive, directives
from sphinx import addnodes
-from sphinx.locale import pairindextypes, _
+from sphinx.locale import _
from sphinx.util import url_re, docname_join
-from sphinx.util.nodes import explicit_title_re
+from sphinx.util.nodes import explicit_title_re, process_index_entry
from sphinx.util.compat import make_admonition
from sphinx.util.matching import patfilter
+def int_or_nothing(argument):
+ if not argument:
+ return 999
+ return int(argument)
+
+
class TocTree(Directive):
"""
Directive to notify Sphinx about the hierarchical structure of the docs,
@@ -34,7 +40,7 @@ class TocTree(Directive):
'maxdepth': int,
'glob': directives.flag,
'hidden': directives.flag,
- 'numbered': directives.flag,
+ 'numbered': int_or_nothing,
'titlesonly': directives.flag,
}
@@ -98,7 +104,7 @@ class TocTree(Directive):
subnode['maxdepth'] = self.options.get('maxdepth', -1)
subnode['glob'] = glob
subnode['hidden'] = 'hidden' in self.options
- subnode['numbered'] = 'numbered' in self.options
+ subnode['numbered'] = self.options.get('numbered', 0)
subnode['titlesonly'] = 'titlesonly' in self.options
wrappernode = nodes.compound(classes=['toctree-wrapper'])
wrappernode.append(subnode)
@@ -151,10 +157,6 @@ class Index(Directive):
final_argument_whitespace = True
option_spec = {}
- indextypes = [
- 'single', 'pair', 'double', 'triple',
- ]
-
def run(self):
arguments = self.arguments[0].split('\n')
env = self.state.document.settings.env
@@ -164,28 +166,7 @@ class Index(Directive):
indexnode = addnodes.index()
indexnode['entries'] = ne = []
for entry in arguments:
- entry = entry.strip()
- for type in pairindextypes:
- if entry.startswith(type+':'):
- value = entry[len(type)+1:].strip()
- value = pairindextypes[type] + '; ' + value
- ne.append(('pair', value, targetid, value))
- break
- else:
- for type in self.indextypes:
- if entry.startswith(type+':'):
- value = entry[len(type)+1:].strip()
- if type == 'double':
- type = 'pair'
- ne.append((type, value, targetid, value))
- break
- # shorthand notation for single entries
- else:
- for value in entry.split(','):
- value = value.strip()
- if not value:
- continue
- ne.append(('single', value, targetid, value))
+ ne.extend(process_index_entry(entry, targetid))
return [indexnode, targetnode]
@@ -369,14 +350,13 @@ from docutils.parsers.rst.directives.misc import Include as BaseInclude
class Include(BaseInclude):
"""
Like the standard "Include" directive, but interprets absolute paths
- correctly.
+    "correctly", i.e. relative to the source directory.
"""
def run(self):
- if self.arguments[0].startswith('/') or \
- self.arguments[0].startswith(os.sep):
- env = self.state.document.settings.env
- self.arguments[0] = os.path.join(env.srcdir, self.arguments[0][1:])
+ env = self.state.document.settings.env
+ rel_filename, filename = env.relfn2path(self.arguments[0])
+ self.arguments[0] = filename
return BaseInclude.run(self)
diff --git a/sphinx/domains/__init__.py b/sphinx/domains/__init__.py
index d133a812..484cd968 100644
--- a/sphinx/domains/__init__.py
+++ b/sphinx/domains/__init__.py
@@ -66,9 +66,8 @@ class Index(object):
self.domain = domain
def generate(self, docnames=None):
- """
- Return entries for the index given by *name*. If *docnames* is given,
- restrict to entries referring to these docnames.
+ """Return entries for the index given by *name*. If *docnames* is
+ given, restrict to entries referring to these docnames.
The return value is a tuple of ``(content, collapse)``, where *collapse*
is a boolean that determines if sub-entries should start collapsed (for
@@ -158,8 +157,7 @@ class Domain(object):
self.objtypes_for_role = self._role2type.get
def role(self, name):
- """
- Return a role adapter function that always gives the registered
+ """Return a role adapter function that always gives the registered
role its full name ('domain:name') as the first argument.
"""
if name in self._role_cache:
@@ -175,8 +173,7 @@ class Domain(object):
return role_adapter
def directive(self, name):
- """
- Return a directive adapter class that always gives the registered
+ """Return a directive adapter class that always gives the registered
directive its full name ('domain:name') as ``self.name``.
"""
if name in self._directive_cache:
@@ -195,21 +192,16 @@ class Domain(object):
# methods that should be overwritten
def clear_doc(self, docname):
- """
- Remove traces of a document in the domain-specific inventories.
- """
+ """Remove traces of a document in the domain-specific inventories."""
pass
def process_doc(self, env, docname, document):
- """
- Process a document after it is read by the environment.
- """
+ """Process a document after it is read by the environment."""
pass
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
- """
- Resolve the ``pending_xref`` *node* with the given *typ* and *target*.
+ """Resolve the pending_xref *node* with the given *typ* and *target*.
This method should return a new node, to replace the xref node,
containing the *contnode* which is the markup content of the
@@ -225,8 +217,7 @@ class Domain(object):
pass
def get_objects(self):
- """
- Return an iterable of "object descriptions", which are tuples with
+ """Return an iterable of "object descriptions", which are tuples with
five items:
* `name` -- fully qualified name
@@ -245,9 +236,7 @@ class Domain(object):
return []
def get_type_name(self, type, primary=False):
- """
- Return full name for given ObjType.
- """
+ """Return full name for given ObjType."""
if primary:
return type.lname
return _('%s %s') % (self.label, type.lname)
diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py
index 4dac8925..a59b2b50 100644
--- a/sphinx/domains/cpp.py
+++ b/sphinx/domains/cpp.py
@@ -110,7 +110,7 @@ class DefinitionError(Exception):
return self.description
def __str__(self):
- return unicode(self.encode('utf-8'))
+ return unicode(self).encode('utf-8')
class DefExpr(object):
@@ -132,30 +132,34 @@ class DefExpr(object):
def __ne__(self, other):
return not self.__eq__(other)
+ __hash__ = None
+
def clone(self):
- """Close a definition expression node"""
+ """Clone a definition expression node."""
return deepcopy(self)
def get_id(self):
- """Returns the id for the node"""
+ """Return the id for the node."""
return u''
def get_name(self):
- """Returns the name. Returns either `None` or a node with
- a name you might call :meth:`split_owner` on.
+ """Return the name.
+
+ Returns either `None` or a node with a name you might call
+ :meth:`split_owner` on.
"""
return None
def split_owner(self):
- """Nodes returned by :meth:`get_name` can split off their
- owning parent. This function returns the owner and the
- name as a tuple of two items. If a node does not support
- it, :exc:`NotImplementedError` is raised.
+ """Nodes returned by :meth:`get_name` can split off their owning parent.
+
+ This function returns the owner and the name as a tuple of two items.
+ If a node does not support it, :exc:`NotImplementedError` is raised.
"""
raise NotImplementedError()
def prefix(self, prefix):
- """Prefixes a name node (a node returned by :meth:`get_name`)."""
+ """Prefix a name node (a node returned by :meth:`get_name`)."""
raise NotImplementedError()
def __str__(self):
@@ -982,8 +986,9 @@ class CPPFunctionObject(CPPObject):
class CPPCurrentNamespace(Directive):
- """This directive is just to tell Sphinx that we're documenting
- stuff in namespace foo.
+ """
+ This directive is just to tell Sphinx that we're documenting stuff in
+ namespace foo.
"""
has_content = False
diff --git a/sphinx/domains/javascript.py b/sphinx/domains/javascript.py
index 98c7948c..2b615847 100644
--- a/sphinx/domains/javascript.py
+++ b/sphinx/domains/javascript.py
@@ -130,7 +130,7 @@ class JSCallable(JSObject):
class JSConstructor(JSCallable):
- """Like a callable but with a different prefix"""
+ """Like a callable but with a different prefix."""
display_prefix = 'class '
diff --git a/sphinx/domains/python.py b/sphinx/domains/python.py
index cb34492f..e3090a29 100644
--- a/sphinx/domains/python.py
+++ b/sphinx/domains/python.py
@@ -108,22 +108,21 @@ class PyObject(ObjectDescription):
]
def get_signature_prefix(self, sig):
- """
- May return a prefix to put before the object name in the signature.
+ """May return a prefix to put before the object name in the
+ signature.
"""
return ''
def needs_arglist(self):
- """
- May return true if an empty argument list is to be generated even if
+ """May return true if an empty argument list is to be generated even if
the document contains none.
"""
return False
def handle_signature(self, sig, signode):
- """
- Transform a Python signature into RST nodes.
- Returns (fully qualified name of the thing, classname if any).
+ """Transform a Python signature into RST nodes.
+
+ Return (fully qualified name of the thing, classname if any).
If inside a class, the current class name is handled intelligently:
* it is stripped from the displayed name if present
@@ -193,9 +192,7 @@ class PyObject(ObjectDescription):
return fullname, name_prefix
def get_index_text(self, modname, name):
- """
- Return the text for the index entry of the object.
- """
+ """Return the text for the index entry of the object."""
raise NotImplementedError('must be implemented in subclasses')
def add_target_and_index(self, name_cls, sig, signode):
@@ -574,9 +571,8 @@ class PythonDomain(Domain):
del self.data['modules'][modname]
def find_obj(self, env, modname, classname, name, type, searchmode=0):
- """
- Find a Python object for "name", perhaps using the given module and/or
- classname. Returns a list of (name, object entry) tuples.
+ """Find a Python object for "name", perhaps using the given module
+ and/or classname. Returns a list of (name, object entry) tuples.
"""
# skip parens
if name[-2:] == '()':
diff --git a/sphinx/domains/rst.py b/sphinx/domains/rst.py
index d3ffc6bd..30134d9e 100644
--- a/sphinx/domains/rst.py
+++ b/sphinx/domains/rst.py
@@ -59,9 +59,10 @@ class ReSTMarkup(ObjectDescription):
def parse_directive(d):
- """
- Parses a directive signature. Returns (directive, arguments) string tuple.
- if no arguments are given, returns (directive, '').
+ """Parse a directive signature.
+
+ Returns (directive, arguments) string tuple. If no arguments are given,
+ returns (directive, '').
"""
dir = d.strip()
if not dir.startswith('.'):
diff --git a/sphinx/environment.py b/sphinx/environment.py
index 4919331b..883c91e9 100644
--- a/sphinx/environment.py
+++ b/sphinx/environment.py
@@ -11,11 +11,13 @@
import re
import os
+import sys
import time
import types
import codecs
import imghdr
import string
+import posixpath
import cPickle as pickle
from os import path
from glob import glob
@@ -24,9 +26,9 @@ from itertools import izip, groupby
from docutils import nodes
from docutils.io import FileInput, NullOutput
from docutils.core import Publisher
-from docutils.utils import Reporter, relative_path
+from docutils.utils import Reporter, relative_path, new_document
from docutils.readers import standalone
-from docutils.parsers.rst import roles, directives
+from docutils.parsers.rst import roles, directives, Parser as RSTParser
from docutils.parsers.rst.languages import en as english
from docutils.parsers.rst.directives.html import MetaBody
from docutils.writers import UnfilteredWriter
@@ -36,13 +38,14 @@ from docutils.transforms.parts import ContentsFilter
from sphinx import addnodes
from sphinx.util import url_re, get_matching_docs, docname_join, \
FilenameUniqDict
-from sphinx.util.nodes import clean_astext, make_refnode
+from sphinx.util.nodes import clean_astext, make_refnode, extract_messages
from sphinx.util.osutil import movefile, SEP, ustrftime
from sphinx.util.matching import compile_matchers
-from sphinx.util.pycompat import all
+from sphinx.util.pycompat import all, class_types
from sphinx.errors import SphinxError, ExtensionError
-from sphinx.locale import _
+from sphinx.locale import _, init as init_locale
+fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
orig_role_function = roles.role
orig_directive_function = directives.directive
@@ -80,7 +83,7 @@ class WarningStream(object):
self.warnfunc = warnfunc
def write(self, text):
if text.strip():
- self.warnfunc(text, None, '')
+ self.warnfunc(text.strip(), None, '')
class NoUri(Exception):
@@ -182,12 +185,50 @@ class CitationReferences(Transform):
citnode.parent.replace(citnode, refnode)
+class Locale(Transform):
+ """
+ Replace translatable nodes with their translated doctree.
+ """
+ default_priority = 0
+ def apply(self):
+ env = self.document.settings.env
+ settings, source = self.document.settings, self.document['source']
+ # XXX check if this is reliable
+ assert source.startswith(env.srcdir)
+ docname = posixpath.splitext(source[len(env.srcdir):].lstrip('/'))[0]
+ section = docname.split(SEP, 1)[0]
+
+ # fetch translations
+ dirs = [path.join(env.srcdir, x)
+ for x in env.config.locale_dirs]
+ catalog, empty = init_locale(dirs, env.config.language, section)
+ if not empty:
+ return
+
+ parser = RSTParser()
+
+ for node, msg in extract_messages(self.document):
+ # XXX ctx not used
+ #ctx = node.parent
+ patch = new_document(source, settings)
+ msgstr = catalog.gettext(msg)
+ # XXX add marker to untranslated parts
+ if not msgstr or msgstr == msg: # as-of-yet untranslated
+ continue
+ parser.parse(msgstr, patch)
+ patch = patch[0]
+ assert isinstance(patch, nodes.paragraph)
+ for child in patch.children: # update leaves
+ child.parent = node
+ node.children = patch.children
+
+
class SphinxStandaloneReader(standalone.Reader):
"""
Add our own transforms.
"""
- transforms = [CitationReferences, DefaultSubstitutions, MoveModuleTargets,
- HandleCodeBlocks, SortIds]
+ transforms = [Locale, CitationReferences, DefaultSubstitutions,
+ MoveModuleTargets, HandleCodeBlocks, SortIds]
def get_transforms(self):
return standalone.Reader.get_transforms(self) + self.transforms
@@ -251,7 +292,7 @@ class BuildEnvironment:
if key.startswith('_') or \
isinstance(val, types.ModuleType) or \
isinstance(val, types.FunctionType) or \
- isinstance(val, (type, types.ClassType)):
+ isinstance(val, class_types):
del self.config[key]
try:
pickle.dump(self, picklefile, pickle.HIGHEST_PROTOCOL)
@@ -376,25 +417,46 @@ class BuildEnvironment:
domain.clear_doc(docname)
def doc2path(self, docname, base=True, suffix=None):
+ """Return the filename for the document name.
+
+ If *base* is True, return absolute path under self.srcdir.
+ If *base* is None, return relative path to self.srcdir.
+ If *base* is a path string, return absolute path under that.
+ If *suffix* is not None, add it instead of config.source_suffix.
"""
- Return the filename for the document name.
- If base is True, return absolute path under self.srcdir.
- If base is None, return relative path to self.srcdir.
- If base is a path string, return absolute path under that.
- If suffix is not None, add it instead of config.source_suffix.
- """
+ docname = docname.replace(SEP, path.sep)
suffix = suffix or self.config.source_suffix
if base is True:
- return path.join(self.srcdir,
- docname.replace(SEP, path.sep)) + suffix
+ return path.join(self.srcdir, docname) + suffix
elif base is None:
- return docname.replace(SEP, path.sep) + suffix
+ return docname + suffix
else:
- return path.join(base, docname.replace(SEP, path.sep)) + suffix
+ return path.join(base, docname) + suffix
- def find_files(self, config):
+ def relfn2path(self, filename, docname=None):
+ """Return paths to a file referenced from a document, relative to
+ documentation root and absolute.
+
+ Absolute filenames are relative to the source dir, while relative
+ filenames are relative to the dir of the containing document.
"""
- Find all source files in the source dir and put them in self.found_docs.
+ if filename.startswith('/') or filename.startswith(os.sep):
+ rel_fn = filename[1:]
+ else:
+ docdir = path.dirname(self.doc2path(docname or self.docname,
+ base=None))
+ rel_fn = path.join(docdir, filename)
+ try:
+ return rel_fn, path.join(self.srcdir, rel_fn)
+ except UnicodeDecodeError:
+ # the source directory is a bytestring with non-ASCII characters;
+ # let's try to encode the rel_fn in the file system encoding
+ enc_rel_fn = rel_fn.encode(sys.getfilesystemencoding())
+ return rel_fn, path.join(self.srcdir, enc_rel_fn)
+
+ def find_files(self, config):
+ """Find all source files in the source dir and put them in
+ self.found_docs.
"""
matchers = compile_matchers(
config.exclude_patterns[:] +
@@ -407,9 +469,7 @@ class BuildEnvironment:
self.srcdir, config.source_suffix, exclude_matchers=matchers))
def get_outdated_files(self, config_changed):
- """
- Return (added, changed, removed) sets.
- """
+ """Return (added, changed, removed) sets."""
# clear all files no longer present
removed = set(self.all_docs) - self.found_docs
@@ -455,12 +515,12 @@ class BuildEnvironment:
return added, changed, removed
def update(self, config, srcdir, doctreedir, app=None):
- """
- (Re-)read all files new or changed since last update. Returns a
- summary, the total count of documents to reread and an iterator that
- yields docnames as it processes them. Store all environment docnames in
- the canonical format (ie using SEP as a separator in place of
- os.path.sep).
+ """(Re-)read all files new or changed since last update.
+
+ Returns a summary, the total count of documents to reread and an
+ iterator that yields docnames as it processes them. Store all
+ environment docnames in the canonical format (ie using SEP as a
+ separator in place of os.path.sep).
"""
config_changed = False
if self.config is None:
@@ -591,8 +651,8 @@ class BuildEnvironment:
roles.role = role
def read_doc(self, docname, src_path=None, save_parsed=True, app=None):
- """
- Parse a file and add/update inventory entries for the doctree.
+ """Parse a file and add/update inventory entries for the doctree.
+
If srcpath is given, read from a different source file.
"""
# remove all inventory entries for that file
@@ -628,6 +688,8 @@ class BuildEnvironment:
class SphinxSourceClass(FileInput):
def decode(self_, data):
+ if isinstance(data, unicode):
+ return data
return data.decode(self_.encoding, 'sphinx')
def read(self_):
@@ -649,7 +711,7 @@ class BuildEnvironment:
destination_class=NullOutput)
pub.set_components(None, 'restructuredtext', None)
pub.process_programmatic_settings(None, self.settings, None)
- pub.set_source(None, src_path)
+ pub.set_source(None, src_path.encode(fs_encoding))
pub.set_destination(None, None)
try:
pub.publish()
@@ -742,18 +804,15 @@ class BuildEnvironment:
# post-processing of read doctrees
def filter_messages(self, doctree):
- """
- Filter system messages from a doctree.
- """
+ """Filter system messages from a doctree."""
filterlevel = self.config.keep_warnings and 2 or 5
for node in doctree.traverse(nodes.system_message):
if node['level'] < filterlevel:
node.parent.remove(node)
+
def process_dependencies(self, docname, doctree):
- """
- Process docutils-generated dependency info.
- """
+ """Process docutils-generated dependency info."""
cwd = os.getcwd()
frompath = path.join(path.normpath(self.srcdir), 'dummy')
deps = doctree.settings.record_dependencies
@@ -767,30 +826,20 @@ class BuildEnvironment:
self.dependencies.setdefault(docname, set()).add(relpath)
def process_downloads(self, docname, doctree):
- """
- Process downloadable file paths.
- """
- docdir = path.dirname(self.doc2path(docname, base=None))
+        """Process downloadable file paths."""
for node in doctree.traverse(addnodes.download_reference):
targetname = node['reftarget']
- if targetname.startswith('/') or targetname.startswith(os.sep):
- # absolute
- filepath = targetname[1:]
- else:
- filepath = path.normpath(path.join(docdir, node['reftarget']))
- self.dependencies.setdefault(docname, set()).add(filepath)
- if not os.access(path.join(self.srcdir, filepath), os.R_OK):
- self.warn(docname, 'download file not readable: %s' % filepath,
+ rel_filename, filename = self.relfn2path(targetname, docname)
+ self.dependencies.setdefault(docname, set()).add(rel_filename)
+ if not os.access(filename, os.R_OK):
+ self.warn(docname, 'download file not readable: %s' % filename,
getattr(node, 'line', None))
continue
- uniquename = self.dlfiles.add_file(docname, filepath)
+ uniquename = self.dlfiles.add_file(docname, filename)
node['filename'] = uniquename
def process_images(self, docname, doctree):
- """
- Process and rewrite image URIs.
- """
- docdir = path.dirname(self.doc2path(docname, base=None))
+ """Process and rewrite image URIs."""
for node in doctree.traverse(nodes.image):
# Map the mimetype to the corresponding image. The writer may
# choose the best image from these candidates. The special key * is
@@ -803,16 +852,11 @@ class BuildEnvironment:
node.line)
candidates['?'] = imguri
continue
- # imgpath is the image path *from srcdir*
- if imguri.startswith('/') or imguri.startswith(os.sep):
- # absolute path (= relative to srcdir)
- imgpath = path.normpath(imguri[1:])
- else:
- imgpath = path.normpath(path.join(docdir, imguri))
+ rel_imgpath, full_imgpath = self.relfn2path(imguri, docname)
# set imgpath as default URI
- node['uri'] = imgpath
- if imgpath.endswith(os.extsep + '*'):
- for filename in glob(path.join(self.srcdir, imgpath)):
+ node['uri'] = rel_imgpath
+ if rel_imgpath.endswith(os.extsep + '*'):
+ for filename in glob(full_imgpath):
new_imgpath = relative_path(self.srcdir, filename)
if filename.lower().endswith('.pdf'):
candidates['application/pdf'] = new_imgpath
@@ -832,7 +876,7 @@ class BuildEnvironment:
if imgtype:
candidates['image/' + imgtype] = new_imgpath
else:
- candidates['*'] = imgpath
+ candidates['*'] = rel_imgpath
# map image paths to unique image names (so that they can be put
# into a single directory)
for imgpath in candidates.itervalues():
@@ -844,8 +888,8 @@ class BuildEnvironment:
self.images.add_file(docname, imgpath)
def process_metadata(self, docname, doctree):
- """
- Process the docinfo part of the doctree as metadata.
+ """Process the docinfo part of the doctree as metadata.
+
Keep processing minimal -- just return what docutils says.
"""
self.metadata[docname] = md = {}
@@ -930,8 +974,7 @@ class BuildEnvironment:
item.replace(para, compact_para)
def create_title_from(self, docname, document):
- """
- Add a title node to the document (just copy the first section title),
+ """Add a title node to the document (just copy the first section title),
and store that title in the environment.
"""
titlenode = nodes.title()
@@ -969,7 +1012,8 @@ class BuildEnvironment:
def note_toctree(self, docname, toctreenode):
"""Note a TOC tree directive in a document and gather information about
- file relations from it."""
+ file relations from it.
+ """
if toctreenode['glob']:
self.glob_toctrees.add(docname)
if toctreenode.get('numbered'):
@@ -1075,7 +1119,9 @@ class BuildEnvironment:
def get_domain(self, domainname):
"""Return the domain instance with the specified name.
- Raises an ExtensionError if the domain is not registered."""
+
+ Raises an ExtensionError if the domain is not registered.
+ """
try:
return self.domains[domainname]
except KeyError:
@@ -1100,7 +1146,8 @@ class BuildEnvironment:
def get_and_resolve_doctree(self, docname, builder, doctree=None,
prune_toctrees=True):
"""Read the doctree from the pickle, resolve cross-references and
- toctrees and return it."""
+ toctrees and return it.
+ """
if doctree is None:
doctree = self.get_doctree(docname)
@@ -1120,8 +1167,7 @@ class BuildEnvironment:
def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
- """
- Resolve a *toctree* node into individual bullet lists with titles
+ """Resolve a *toctree* node into individual bullet lists with titles
as items, returning None (if no containing titles are found) or
a new node.
@@ -1380,46 +1426,54 @@ class BuildEnvironment:
old_secnumbers = self.toc_secnumbers
self.toc_secnumbers = {}
- def _walk_toc(node, secnums, titlenode=None):
+ def _walk_toc(node, secnums, depth, titlenode=None):
# titlenode is the title of the document, it will get assigned a
# secnumber too, so that it shows up in next/prev/parent rellinks
for subnode in node.children:
if isinstance(subnode, nodes.bullet_list):
numstack.append(0)
- _walk_toc(subnode, secnums, titlenode)
+ _walk_toc(subnode, secnums, depth-1, titlenode)
numstack.pop()
titlenode = None
elif isinstance(subnode, nodes.list_item):
- _walk_toc(subnode, secnums, titlenode)
+ _walk_toc(subnode, secnums, depth, titlenode)
titlenode = None
elif isinstance(subnode, addnodes.compact_paragraph):
numstack[-1] += 1
+ if depth > 0:
+ number = tuple(numstack)
+ else:
+ number = None
secnums[subnode[0]['anchorname']] = \
- subnode[0]['secnumber'] = tuple(numstack)
+ subnode[0]['secnumber'] = number
if titlenode:
- titlenode['secnumber'] = tuple(numstack)
+ titlenode['secnumber'] = number
titlenode = None
elif isinstance(subnode, addnodes.toctree):
- _walk_toctree(subnode)
+ _walk_toctree(subnode, depth)
- def _walk_toctree(toctreenode):
+ def _walk_toctree(toctreenode, depth):
+ if depth == 0:
+ return
for (title, ref) in toctreenode['entries']:
if url_re.match(ref) or ref == 'self':
# don't mess with those
continue
if ref in self.tocs:
secnums = self.toc_secnumbers[ref] = {}
- _walk_toc(self.tocs[ref], secnums, self.titles.get(ref))
+ _walk_toc(self.tocs[ref], secnums, depth,
+ self.titles.get(ref))
if secnums != old_secnumbers.get(ref):
rewrite_needed.append(ref)
for docname in self.numbered_toctrees:
doctree = self.get_doctree(docname)
for toctreenode in doctree.traverse(addnodes.toctree):
- if toctreenode.get('numbered'):
+ depth = toctreenode.get('numbered', 0)
+ if depth:
# every numbered toctree gets new numbering
numstack = [0]
- _walk_toctree(toctreenode)
+ _walk_toctree(toctreenode, depth)
return rewrite_needed
@@ -1518,8 +1572,9 @@ class BuildEnvironment:
i += 1
# group the entries by letter
- def keyfunc2((k, v), letters=string.ascii_uppercase + '_'):
+ def keyfunc2(item, letters=string.ascii_uppercase + '_'):
# hack: mutating the subitems dicts to a list in the keyfunc
+ k, v = item
v[1] = sorted((si, se) for (si, (se, void)) in v[1].iteritems())
# now calculate the key
letter = k[0].upper()
@@ -1578,7 +1633,6 @@ class BuildEnvironment:
def check_consistency(self):
"""Do consistency checks."""
-
for docname in sorted(self.all_docs):
if docname not in self.files_to_rebuild:
if docname == self.config.master_doc:
diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py
index adf08bcd..3a2476a6 100644
--- a/sphinx/ext/autodoc.py
+++ b/sphinx/ext/autodoc.py
@@ -14,7 +14,7 @@
import re
import sys
import inspect
-from types import FunctionType, BuiltinFunctionType, MethodType, ClassType
+from types import FunctionType, BuiltinFunctionType, MethodType
from docutils import nodes
from docutils.utils import assemble_option_dict
@@ -27,15 +27,10 @@ from sphinx.application import ExtensionError
from sphinx.util.nodes import nested_parse_with_titles
from sphinx.util.compat import Directive
from sphinx.util.inspect import isdescriptor, safe_getmembers, safe_getattr
+from sphinx.util.pycompat import base_exception, class_types
from sphinx.util.docstrings import prepare_docstring
-try:
- base_exception = BaseException
-except NameError:
- base_exception = Exception
-
-
#: extended signature RE: with explicit module name separated by ::
py_ext_sig_re = re.compile(
r'''^ ([\w.]+::)? # explicit module name
@@ -90,7 +85,8 @@ def members_set_option(arg):
def bool_option(arg):
"""Used to convert flag options to auto directives. (Instead of
- directives.flag(), which returns None.)"""
+ directives.flag(), which returns None).
+ """
return True
@@ -138,8 +134,7 @@ class AutodocReporter(object):
# Some useful event listener factories for autodoc-process-docstring.
def cut_lines(pre, post=0, what=None):
- """
- Return a listener that removes the first *pre* and last *post*
+ """Return a listener that removes the first *pre* and last *post*
lines of every docstring. If *what* is a sequence of strings,
only docstrings of a type in *what* will be processed.
@@ -165,9 +160,8 @@ def cut_lines(pre, post=0, what=None):
return process
def between(marker, what=None, keepempty=False, exclude=False):
- """
- Return a listener that either keeps, or if *exclude* is True excludes, lines
- between lines that match the *marker* regular expression. If no line
+ """Return a listener that either keeps, or if *exclude* is True excludes,
+ lines between lines that match the *marker* regular expression. If no line
matches, the resulting docstring would be empty, so no change will be made
unless *keepempty* is true.
@@ -256,6 +250,9 @@ class Documenter(object):
self.retann = None
# the object to document (set after import_object succeeds)
self.object = None
+ self.object_name = None
+ # the parent/owner of the object to document
+ self.parent = None
# the module analyzer to get at attribute docs, or None
self.analyzer = None
@@ -264,8 +261,7 @@ class Documenter(object):
self.directive.result.append(self.indent + line, source, *lineno)
def resolve_name(self, modname, parents, path, base):
- """
- Resolve the module and name of the object to document given by the
+ """Resolve the module and name of the object to document given by the
arguments and the current module/class.
Must return a pair of the module name and a chain of attributes; for
@@ -275,8 +271,7 @@ class Documenter(object):
raise NotImplementedError('must be implemented in subclasses')
def parse_name(self):
- """
- Determine what module to import and what attribute to document.
+ """Determine what module to import and what attribute to document.
Returns True and sets *self.modname*, *self.objpath*, *self.fullname*,
*self.args* and *self.retann* if parsing and resolving was successful.
@@ -313,17 +308,20 @@ class Documenter(object):
return True
def import_object(self):
- """
- Import the object given by *self.modname* and *self.objpath* and sets
+ """Import the object given by *self.modname* and *self.objpath* and set
it as *self.object*.
Returns True if successful, False if an error occurred.
"""
try:
__import__(self.modname)
+ parent = None
obj = self.module = sys.modules[self.modname]
for part in self.objpath:
+ parent = obj
obj = self.get_attr(obj, part)
+ self.object_name = part
+ self.parent = parent
self.object = obj
return True
# this used to only catch SyntaxError, ImportError and AttributeError,
@@ -336,15 +334,15 @@ class Documenter(object):
return False
def get_real_modname(self):
- """
- Get the real module name of an object to document. (It can differ
- from the name of the module through which the object was imported.)
+ """Get the real module name of an object to document.
+
+ It can differ from the name of the module through which the object was
+ imported.
"""
return self.get_attr(self.object, '__module__', None) or self.modname
def check_module(self):
- """
- Check if *self.object* is really defined in the module given by
+ """Check if *self.object* is really defined in the module given by
*self.modname*.
"""
modname = self.get_attr(self.object, '__module__', None)
@@ -353,25 +351,26 @@ class Documenter(object):
return True
def format_args(self):
- """
- Format the argument signature of *self.object*. Should return None if
- the object does not have a signature.
+ """Format the argument signature of *self.object*.
+
+ Should return None if the object does not have a signature.
"""
return None
def format_name(self):
- """
- Format the name of *self.object*. This normally should be something
- that can be parsed by the generated directive, but doesn't need to be
- (Sphinx will display it unparsed then).
+ """Format the name of *self.object*.
+
+ This normally should be something that can be parsed by the generated
+ directive, but doesn't need to be (Sphinx will display it unparsed
+ then).
"""
# normally the name doesn't contain the module (except for module
# directives of course)
return '.'.join(self.objpath) or self.modname
def format_signature(self):
- """
- Format the signature (arguments and return annotation) of the object.
+ """Format the signature (arguments and return annotation) of the object.
+
Let the user process it via the ``autodoc-process-signature`` event.
"""
if self.args is not None:
@@ -416,9 +415,11 @@ class Documenter(object):
def get_doc(self, encoding=None):
"""Decode and return lines of the docstring(s) for the object."""
docstring = self.get_attr(self.object, '__doc__', None)
- if docstring:
- # make sure we have Unicode docstrings, then sanitize and split
- # into lines
+ # make sure we have Unicode docstrings, then sanitize and split
+ # into lines
+ if isinstance(docstring, unicode):
+ return [prepare_docstring(docstring)]
+ elif docstring:
return [prepare_docstring(force_decode(docstring, encoding))]
return []
@@ -438,8 +439,11 @@ class Documenter(object):
# set sourcename and add content from attribute documentation
if self.analyzer:
# prevent encoding errors when the file name is non-ASCII
- filename = unicode(self.analyzer.srcname,
- sys.getfilesystemencoding(), 'replace')
+ if not isinstance(self.analyzer.srcname, unicode):
+ filename = unicode(self.analyzer.srcname,
+ sys.getfilesystemencoding(), 'replace')
+ else:
+ filename = self.analyzer.srcname
sourcename = u'%s:docstring of %s' % (filename, self.fullname)
attr_docs = self.analyzer.find_attr_docs()
@@ -466,8 +470,7 @@ class Documenter(object):
self.add_line(line, src[0], src[1])
def get_object_members(self, want_all):
- """
- Return `(members_check_module, members)` where `members` is a
+ """Return `(members_check_module, members)` where `members` is a
list of `(membername, member)` pairs of the members of *self.object*.
If *want_all* is True, return all members. Else, only return those
@@ -511,8 +514,9 @@ class Documenter(object):
return False, sorted(members)
def filter_members(self, members, want_all):
- """
- Filter the given member list: members are skipped if
+ """Filter the given member list.
+
+ Members are skipped if
- they are private (except if given explicitly)
- they are undocumented (except if undoc-members is given)
@@ -565,9 +569,10 @@ class Documenter(object):
return ret
def document_members(self, all_members=False):
- """
- Generate reST for member documentation. If *all_members* is True,
- do all members, else those given by *self.options.members*.
+ """Generate reST for member documentation.
+
+ If *all_members* is True, do all members, else those given by
+ *self.options.members*.
"""
# set current namespace for finding members
self.env.temp_data['autodoc:module'] = self.modname
@@ -625,8 +630,8 @@ class Documenter(object):
def generate(self, more_content=None, real_modname=None,
check_module=False, all_members=False):
- """
- Generate reST for the object given by *self.name*, and possibly members.
+ """Generate reST for the object given by *self.name*, and possibly for
+ its members.
If *more_content* is given, include that content. If *real_modname* is
given, use that module name to find attribute docs. If *check_module* is
@@ -866,7 +871,7 @@ class ClassDocumenter(ModuleLevelDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- return isinstance(member, (type, ClassType))
+ return isinstance(member, class_types)
def import_object(self):
ret = ModuleLevelDocumenter.import_object(self)
@@ -939,9 +944,12 @@ class ClassDocumenter(ModuleLevelDocumenter):
docstrings = [initdocstring]
else:
docstrings.append(initdocstring)
-
- return [prepare_docstring(force_decode(docstring, encoding))
- for docstring in docstrings]
+ doc = []
+ for docstring in docstrings:
+ if not isinstance(docstring, unicode):
+ docstring = force_decode(docstring, encoding)
+ doc.append(prepare_docstring(docstring))
+ return doc
def add_content(self, more_content, no_docstring=False):
if self.doc_as_attr:
@@ -972,7 +980,7 @@ class ExceptionDocumenter(ClassDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- return isinstance(member, (type, ClassType)) and \
+ return isinstance(member, class_types) and \
issubclass(member, base_exception)
@@ -1004,24 +1012,38 @@ class MethodDocumenter(ClassLevelDocumenter):
return inspect.isroutine(member) and \
not isinstance(parent, ModuleDocumenter)
- def import_object(self):
- ret = ClassLevelDocumenter.import_object(self)
- if isinstance(self.object, classmethod) or \
- (isinstance(self.object, MethodType) and
- self.object.im_self is not None):
- self.directivetype = 'classmethod'
- # document class and static members before ordinary ones
- self.member_order = self.member_order - 1
- elif isinstance(self.object, FunctionType) or \
- (isinstance(self.object, BuiltinFunctionType) and
- hasattr(self.object, '__self__') and
- self.object.__self__ is not None):
- self.directivetype = 'staticmethod'
- # document class and static members before ordinary ones
- self.member_order = self.member_order - 1
- else:
- self.directivetype = 'method'
- return ret
+ if sys.version_info >= (3, 0):
+ def import_object(self):
+ ret = ClassLevelDocumenter.import_object(self)
+ obj_from_parent = self.parent.__dict__.get(self.object_name)
+ if isinstance(obj_from_parent, classmethod):
+ self.directivetype = 'classmethod'
+ self.member_order = self.member_order - 1
+ elif isinstance(obj_from_parent, staticmethod):
+ self.directivetype = 'staticmethod'
+ self.member_order = self.member_order - 1
+ else:
+ self.directivetype = 'method'
+ return ret
+ else:
+ def import_object(self):
+ ret = ClassLevelDocumenter.import_object(self)
+ if isinstance(self.object, classmethod) or \
+ (isinstance(self.object, MethodType) and
+ self.object.im_self is not None):
+ self.directivetype = 'classmethod'
+ # document class and static members before ordinary ones
+ self.member_order = self.member_order - 1
+ elif isinstance(self.object, FunctionType) or \
+ (isinstance(self.object, BuiltinFunctionType) and
+ hasattr(self.object, '__self__') and
+ self.object.__self__ is not None):
+ self.directivetype = 'staticmethod'
+ # document class and static members before ordinary ones
+ self.member_order = self.member_order - 1
+ else:
+ self.directivetype = 'method'
+ return ret
def format_args(self):
if inspect.isbuiltin(self.object) or \
diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py
index cf67c7fb..8186a2e5 100644
--- a/sphinx/ext/autosummary/__init__.py
+++ b/sphinx/ext/autosummary/__init__.py
@@ -73,8 +73,7 @@ class autosummary_toc(nodes.comment):
pass
def process_autosummary_toc(app, doctree):
- """
- Insert items described in autosummary:: to the TOC tree, but do
+ """Insert items described in autosummary:: to the TOC tree, but do
not generate the toctree:: list.
"""
env = app.builder.env
@@ -135,8 +134,8 @@ except AttributeError:
isgetsetdescriptor = ismemberdescriptor
def get_documenter(obj):
- """
- Get an autodoc.Documenter class suitable for documenting the given object
+ """Get an autodoc.Documenter class suitable for documenting the given
+ object.
"""
import sphinx.ext.autodoc as autodoc
@@ -218,8 +217,7 @@ class Autosummary(Directive):
return self.warnings + nodes
def get_items(self, names):
- """
- Try to import the given names, and return a list of
+ """Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
env = self.state.document.settings.env
@@ -287,8 +285,7 @@ class Autosummary(Directive):
return items
def get_table(self, items):
- """
- Generate a proper list of table nodes for autosummary:: directive.
+ """Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
"""
@@ -351,8 +348,7 @@ def mangle_signature(sig, max_chars=30):
return u"(%s)" % sig
def limited_join(sep, items, max_chars=30, overflow_marker="..."):
- """
- Join a number of strings to one, limiting the length to *max_chars*.
+ """Join a number of strings to one, limiting the length to *max_chars*.
If the string overflows this limit, replace the last fitting item by
*overflow_marker*.
@@ -377,8 +373,7 @@ def limited_join(sep, items, max_chars=30, overflow_marker="..."):
# -- Importing items -----------------------------------------------------------
def import_by_name(name, prefixes=[None]):
- """
- Import a Python object that has the given *name*, under one of the
+ """Import a Python object that has the given *name*, under one of the
*prefixes*. The first name that succeeds is used.
"""
tried = []
@@ -435,8 +430,7 @@ def _import_by_name(name):
def autolink_role(typ, rawtext, etext, lineno, inliner,
options={}, content=[]):
- """
- Smart linking role.
+ """Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
otherwise expands to '*text*'.
diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py
index 66a124d2..4b6348b5 100644
--- a/sphinx/ext/autosummary/generate.py
+++ b/sphinx/ext/autosummary/generate.py
@@ -17,6 +17,7 @@
:copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+
import os
import re
import sys
@@ -193,8 +194,8 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
# -- Finding documented entries in files ---------------------------------------
def find_autosummary_in_files(filenames):
- """
- Find out what items are documented in source/*.rst.
+ """Find out what items are documented in source/*.rst.
+
See `find_autosummary_in_lines`.
"""
documented = []
@@ -206,8 +207,8 @@ def find_autosummary_in_files(filenames):
return documented
def find_autosummary_in_docstring(name, module=None, filename=None):
- """
- Find out what items are documented in the given object's docstring.
+ """Find out what items are documented in the given object's docstring.
+
See `find_autosummary_in_lines`.
"""
try:
@@ -221,8 +222,8 @@ def find_autosummary_in_docstring(name, module=None, filename=None):
return []
def find_autosummary_in_lines(lines, module=None, filename=None):
- """
- Find out what items appear in autosummary:: directives in the given lines.
+ """Find out what items appear in autosummary:: directives in the
+ given lines.
Returns a list of (name, toctree, template) where *name* is a name
of an object and *toctree* the :toctree: path of the corresponding
diff --git a/sphinx/ext/coverage.py b/sphinx/ext/coverage.py
index 4924d30b..f41820e2 100644
--- a/sphinx/ext/coverage.py
+++ b/sphinx/ext/coverage.py
@@ -173,8 +173,11 @@ class CoverageBuilder(Builder):
attrs = []
+ for attr_name in dir(obj):
+ attr = getattr(obj, attr_name)
for attr_name, attr in inspect.getmembers(
- obj, inspect.ismethod):
+ obj, lambda x: inspect.ismethod(x) or \
+ inspect.isfunction(x)):
if attr_name[0] == '_':
# starts with an underscore, ignore it
continue
diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py
index 9d681f90..62fbfdff 100644
--- a/sphinx/ext/doctest.py
+++ b/sphinx/ext/doctest.py
@@ -149,14 +149,14 @@ class TestCode(object):
class SphinxDocTestRunner(doctest.DocTestRunner):
def summarize(self, out, verbose=None):
- io = StringIO.StringIO()
+ string_io = StringIO.StringIO()
old_stdout = sys.stdout
- sys.stdout = io
+ sys.stdout = string_io
try:
res = doctest.DocTestRunner.summarize(self, verbose)
finally:
sys.stdout = old_stdout
- out(io.getvalue())
+ out(string_io.getvalue())
return res
def _DocTestRunner__patched_linecache_getlines(self, filename,
diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py
index 106de7a6..19dcd951 100644
--- a/sphinx/ext/graphviz.py
+++ b/sphinx/ext/graphviz.py
@@ -11,6 +11,7 @@
"""
import re
+import codecs
import posixpath
from os import path
from math import ceil
@@ -46,18 +47,38 @@ class Graphviz(Directive):
"""
has_content = True
required_arguments = 0
- optional_arguments = 0
+ optional_arguments = 1
final_argument_whitespace = False
option_spec = {
'alt': directives.unchanged,
}
def run(self):
- dotcode = '\n'.join(self.content)
- if not dotcode.strip():
- return [self.state_machine.reporter.warning(
- 'Ignoring "graphviz" directive without content.',
- line=self.lineno)]
+ if self.arguments:
+ document = self.state.document
+ if self.content:
+ return [document.reporter.warning(
+ 'Graphviz directive cannot have both content and '
+ 'a filename argument', line=self.lineno)]
+ env = self.state.document.settings.env
+ rel_filename, filename = env.relfn2path(self.arguments[0])
+ env.note_dependency(rel_filename)
+ try:
+ fp = codecs.open(filename, 'r', 'utf-8')
+ try:
+ dotcode = fp.read()
+ finally:
+ fp.close()
+ except (IOError, OSError):
+ return [document.reporter.warning(
+ 'External Graphviz file %r not found or reading '
+ 'it failed' % filename, line=self.lineno)]
+ else:
+ dotcode = '\n'.join(self.content)
+ if not dotcode.strip():
+ return [self.state_machine.reporter.warning(
+ 'Ignoring "graphviz" directive without content.',
+ line=self.lineno)]
node = graphviz()
node['code'] = dotcode
node['options'] = []
@@ -89,10 +110,9 @@ class GraphvizSimple(Directive):
def render_dot(self, code, options, format, prefix='graphviz'):
- """
- Render graphviz code into a PNG or PDF output file.
- """
+ """Render graphviz code into a PNG or PDF output file."""
hashkey = code.encode('utf-8') + str(options) + \
+ str(self.builder.config.graphviz_dot) + \
str(self.builder.config.graphviz_dot_args)
fname = '%s-%s.%s' % (prefix, sha(hashkey).hexdigest(), format)
if hasattr(self.builder, 'imgpath'):
diff --git a/sphinx/ext/inheritance_diagram.py b/sphinx/ext/inheritance_diagram.py
index 22c0e20f..3f6f0b4d 100644
--- a/sphinx/ext/inheritance_diagram.py
+++ b/sphinx/ext/inheritance_diagram.py
@@ -67,8 +67,7 @@ class InheritanceGraph(object):
graphviz dot graph from them.
"""
def __init__(self, class_names, currmodule, show_builtins=False, parts=0):
- """
- *class_names* is a list of child classes to show bases from.
+ """*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
@@ -81,9 +80,7 @@ class InheritanceGraph(object):
'inheritance diagram')
def _import_class_or_module(self, name, currmodule):
- """
- Import a class using its fully-qualified *name*.
- """
+ """Import a class using its fully-qualified *name*."""
try:
path, base = class_sig_re.match(name).groups()
except ValueError:
@@ -182,9 +179,7 @@ class InheritanceGraph(object):
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
- """
- Get all of the class names involved in the graph.
- """
+ """Get all of the class names involved in the graph."""
return [fullname for (_, fullname, _) in self.class_info]
# These are the default attrs for graphviz
@@ -213,9 +208,8 @@ class InheritanceGraph(object):
def generate_dot(self, name, urls={}, env=None,
graph_attrs={}, node_attrs={}, edge_attrs={}):
- """
- Generate a graphviz dot graph from the classes that
- were passed in to __init__.
+ """Generate a graphviz dot graph from the classes that were passed in
+ to __init__.
*name* is the name of the graph.
diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py
index 10015fc1..442617e1 100644
--- a/sphinx/ext/intersphinx.py
+++ b/sphinx/ext/intersphinx.py
@@ -26,6 +26,7 @@
import time
import zlib
+import codecs
import urllib2
import posixpath
from os import path
@@ -33,19 +34,26 @@ from os import path
from docutils import nodes
from sphinx.builders.html import INVENTORY_FILENAME
+from sphinx.util.pycompat import b
+
handlers = [urllib2.ProxyHandler(), urllib2.HTTPRedirectHandler(),
urllib2.HTTPHandler()]
-if hasattr(urllib2, 'HTTPSHandler'):
+try:
handlers.append(urllib2.HTTPSHandler)
+except NameError:
+ pass
urllib2.install_opener(urllib2.build_opener(*handlers))
+UTF8StreamReader = codecs.lookup('utf-8')[2]
+
def read_inventory_v1(f, uri, join):
+ f = UTF8StreamReader(f)
invdata = {}
line = f.next()
- projname = line.rstrip()[11:].decode('utf-8')
+ projname = line.rstrip()[11:]
line = f.next()
version = line.rstrip()[11:]
for line in f:
@@ -68,25 +76,25 @@ def read_inventory_v2(f, uri, join, bufsize=16*1024):
projname = line.rstrip()[11:].decode('utf-8')
line = f.readline()
version = line.rstrip()[11:].decode('utf-8')
- line = f.readline()
+ line = f.readline().decode('utf-8')
if 'zlib' not in line:
raise ValueError
def read_chunks():
decompressor = zlib.decompressobj()
- for chunk in iter(lambda: f.read(bufsize), ''):
+ for chunk in iter(lambda: f.read(bufsize), b('')):
yield decompressor.decompress(chunk)
yield decompressor.flush()
def split_lines(iter):
- buf = ''
+ buf = b('')
for chunk in iter:
buf += chunk
- lineend = buf.find('\n')
+ lineend = buf.find(b('\n'))
while lineend != -1:
yield buf[:lineend].decode('utf-8')
buf = buf[lineend+1:]
- lineend = buf.find('\n')
+ lineend = buf.find(b('\n'))
assert not buf
for line in split_lines(read_chunks()):
@@ -109,13 +117,13 @@ def fetch_inventory(app, uri, inv):
if inv.find('://') != -1:
f = urllib2.urlopen(inv)
else:
- f = open(path.join(app.srcdir, inv))
+ f = open(path.join(app.srcdir, inv), 'rb')
except Exception, err:
app.warn('intersphinx inventory %r not fetchable due to '
'%s: %s' % (inv, err.__class__, err))
return
try:
- line = f.readline().rstrip()
+ line = f.readline().rstrip().decode('utf-8')
try:
if line == '# Sphinx inventory version 1':
invdata = read_inventory_v1(f, uri, join)
diff --git a/sphinx/ext/oldcmarkup.py b/sphinx/ext/oldcmarkup.py
index 00ac3749..bc921a23 100644
--- a/sphinx/ext/oldcmarkup.py
+++ b/sphinx/ext/oldcmarkup.py
@@ -18,6 +18,7 @@ WARNING_MSG = 'using old C markup; please migrate to new-style markup ' \
'(e.g. c:function instead of cfunction), see ' \
'http://sphinx.pocoo.org/domains.html'
+
class OldCDirective(Directive):
has_content = True
required_arguments = 1
diff --git a/sphinx/ext/pngmath.py b/sphinx/ext/pngmath.py
index 7f399754..e4e7c2d0 100644
--- a/sphinx/ext/pngmath.py
+++ b/sphinx/ext/pngmath.py
@@ -26,6 +26,7 @@ from docutils import nodes
from sphinx.errors import SphinxError
from sphinx.util.png import read_png_depth, write_png_depth
from sphinx.util.osutil import ensuredir, ENOENT
+from sphinx.util.pycompat import b
from sphinx.ext.mathbase import setup_math as mathbase_setup, wrap_displaymath
class MathExtError(SphinxError):
@@ -58,11 +59,10 @@ DOC_BODY_PREVIEW = r'''
\end{document}
'''
-depth_re = re.compile(r'\[\d+ depth=(-?\d+)\]')
+depth_re = re.compile(b(r'\[\d+ depth=(-?\d+)\]'))
def render_math(self, math):
- """
- Render the LaTeX math expression *math* using latex and dvipng.
+ """Render the LaTeX math expression *math* using latex and dvipng.
Return the filename relative to the built document and the "depth",
that is, the distance of image bottom and baseline in pixels, if the
diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py
index 81881beb..b9bb9d77 100644
--- a/sphinx/ext/viewcode.py
+++ b/sphinx/ext/viewcode.py
@@ -31,7 +31,11 @@ def doctree_read(app, doctree):
env._viewcode_modules[modname] = False
return
analyzer.find_tags()
- entry = analyzer.code.decode(analyzer.encoding), analyzer.tags, {}
+ if not isinstance(analyzer.code, unicode):
+ code = analyzer.code.decode(analyzer.encoding)
+ else:
+ code = analyzer.code
+ entry = code, analyzer.tags, {}
env._viewcode_modules[modname] = entry
elif entry is False:
return
@@ -47,7 +51,7 @@ def doctree_read(app, doctree):
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
- modname = signode['module']
+ modname = signode.get('module')
if not modname:
continue
fullname = signode['fullname']
diff --git a/sphinx/highlighting.py b/sphinx/highlighting.py
index 0dcbc021..6d710919 100644
--- a/sphinx/highlighting.py
+++ b/sphinx/highlighting.py
@@ -156,7 +156,7 @@ class PygmentsBridge(object):
if sys.version_info >= (2, 5):
src = 'from __future__ import with_statement\n' + src
- if isinstance(src, unicode):
+ if sys.version_info < (3, 0) and isinstance(src, unicode):
# Non-ASCII chars will only occur in string literals
# and comments. If we wanted to give them to the parser
# correctly, we'd have to find out the correct source
@@ -175,7 +175,7 @@ class PygmentsBridge(object):
return True
def highlight_block(self, source, lang, linenos=False, warn=None):
- if isinstance(source, str):
+ if not isinstance(source, unicode):
source = source.decode()
if not pygments:
return self.unhighlighted(source)
diff --git a/sphinx/jinja2glue.py b/sphinx/jinja2glue.py
index a6f1a853..29ee334e 100644
--- a/sphinx/jinja2glue.py
+++ b/sphinx/jinja2glue.py
@@ -37,8 +37,10 @@ def accesskey(context, key):
class SphinxFileSystemLoader(FileSystemLoader):
- """FileSystemLoader subclass that is not so strict about '..'
- entries in template names."""
+ """
+ FileSystemLoader subclass that is not so strict about '..' entries in
+ template names.
+ """
def get_source(self, environment, template):
for searchpath in self.searchpath:
diff --git a/sphinx/locale/__init__.py b/sphinx/locale/__init__.py
index b0b89720..126a37b5 100644
--- a/sphinx/locale/__init__.py
+++ b/sphinx/locale/__init__.py
@@ -8,13 +8,16 @@
:copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+
+import sys
import gettext
import UserString
class _TranslationProxy(UserString.UserString, object):
- """Class for proxy strings from gettext translations. This is a helper
- for the lazy_* functions from this module.
+ """
+ Class for proxy strings from gettext translations. This is a helper for the
+ lazy_* functions from this module.
The proxy implementation attempts to be as complete as possible, so that
the lazy objects should mostly work as expected, for example for sorting.
@@ -135,7 +138,8 @@ class _TranslationProxy(UserString.UserString, object):
def mygettext(string):
"""Used instead of _ when creating TranslationProxies, because _ is
- not bound yet at that time."""
+ not bound yet at that time.
+ """
return _(string)
def lazy_gettext(string):
@@ -176,18 +180,32 @@ pairindextypes = {
'builtin': l_('built-in function'),
}
-translator = None
+translators = {}
-def _(message):
- return translator.ugettext(message)
+if sys.version_info >= (3, 0):
+ def _(message):
+ return translators['sphinx'].gettext(message)
+else:
+ def _(message):
+ return translators['sphinx'].ugettext(message)
-def init(locale_dirs, language):
- global translator
+
+def init(locale_dirs, language, catalog='sphinx'):
+ """Look for message catalogs in `locale_dirs` and *ensure* that there is at
+ least a NullTranslations catalog set in `translators`. If called multiple
+ times or if several ``.mo`` files are found, their contents are merged
+    together (thus making ``init`` re-entrant).
+ """
+ global translators
+ translator = translators.get(catalog)
+ # ignore previously failed attempts to find message catalogs
+ if isinstance(translator, gettext.NullTranslations):
+ translator = None
# the None entry is the system's default locale path
has_translation = True
for dir_ in locale_dirs:
try:
- trans = gettext.translation('sphinx', localedir=dir_,
+ trans = gettext.translation(catalog, localedir=dir_,
languages=[language])
if translator is None:
translator = trans
@@ -196,7 +214,11 @@ def init(locale_dirs, language):
except Exception:
# Language couldn't be found in the specified path
pass
+ # guarantee translations[catalog] exists
if translator is None:
translator = gettext.NullTranslations()
has_translation = False
+ translators[catalog] = translator
+ if hasattr(translator, 'ugettext'):
+ translator.gettext = translator.ugettext
return translator, has_translation
diff --git a/sphinx/locale/sv/LC_MESSAGES/sphinx.js b/sphinx/locale/sv/LC_MESSAGES/sphinx.js
new file mode 100644
index 00000000..0cedfb45
--- /dev/null
+++ b/sphinx/locale/sv/LC_MESSAGES/sphinx.js
@@ -0,0 +1 @@
+Documentation.addTranslations({"locale": "sv", "plural_expr": "(n != 1)", "messages": {"Search Results": "S\u00f6kresultat", "Preparing search...": "F\u00f6rbereder s\u00f6kning...", "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories.": "Din s\u00f6kning gav inga resultat. Kolla stavning och att du valt tillr\u00e4ckligt med kategorier.", "Search finished, found %s page(s) matching the search query.": "S\u00f6kning f\u00e4rdig, hittade %s tr\u00e4ffar.", ", in ": ", i ", "Expand sidebar": "Expandera sidolist", "Permalink to this headline": "Permalink till denna rubrik", "Searching": "S\u00f6ker", "Collapse sidebar": "D\u00f6lj sidolist", "Permalink to this definition": "Permalink till denna definition", "Hide Search Matches": "D\u00f6lj S\u00f6kresultat"}}); \ No newline at end of file
diff --git a/sphinx/locale/sv/LC_MESSAGES/sphinx.mo b/sphinx/locale/sv/LC_MESSAGES/sphinx.mo
new file mode 100644
index 00000000..8cf76751
--- /dev/null
+++ b/sphinx/locale/sv/LC_MESSAGES/sphinx.mo
Binary files differ
diff --git a/sphinx/locale/sv/LC_MESSAGES/sphinx.po b/sphinx/locale/sv/LC_MESSAGES/sphinx.po
new file mode 100644
index 00000000..f449e8f6
--- /dev/null
+++ b/sphinx/locale/sv/LC_MESSAGES/sphinx.po
@@ -0,0 +1,797 @@
+msgid ""
+msgstr ""
+"Project-Id-Version: \n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2010-05-24 23:53+0200\n"
+"PO-Revision-Date: 2010-08-25 12:36+0200\n"
+"Last-Translator: Henrik Holmboe <henrik@holmboe.se>\n"
+"Language-Team: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Poedit-Language: Swedish\n"
+"X-Poedit-Country: SWEDEN\n"
+
+# Translators (rev. chron. order):
+# Ludvig Ericson <ludvig@lericson.se>
+# Henrik Holmboe <henrik@holmboe.se>
+
+#: sphinx/environment.py:106
+#: sphinx/writers/latex.py:184
+#: sphinx/writers/manpage.py:67
+#, python-format
+msgid "%B %d, %Y"
+msgstr "%B %d, %Y"
+
+#: sphinx/roles.py:174
+#, python-format
+msgid "Python Enhancement Proposals!PEP %s"
+msgstr "Python Enhancement Proposals!PEP %s"
+
+#: sphinx/builders/changes.py:72
+msgid "Builtins"
+msgstr "Inbyggda"
+
+#: sphinx/builders/changes.py:74
+msgid "Module level"
+msgstr "Modulnivå"
+
+#: sphinx/builders/html.py:266
+#, python-format
+msgid "%b %d, %Y"
+msgstr "%b %d, %Y"
+
+#: sphinx/builders/html.py:285
+#: sphinx/themes/basic/defindex.html:30
+msgid "General Index"
+msgstr "Huvudindex"
+
+#: sphinx/builders/html.py:285
+msgid "index"
+msgstr "index"
+
+#: sphinx/builders/html.py:345
+msgid "next"
+msgstr "nästa"
+
+#: sphinx/builders/html.py:354
+msgid "previous"
+msgstr "föregående"
+
+#: sphinx/builders/latex.py:151
+msgid " (in "
+msgstr " (i "
+
+#: sphinx/directives/other.py:127
+msgid "Section author: "
+msgstr "Sektionsförfattare: "
+
+#: sphinx/directives/other.py:129
+msgid "Module author: "
+msgstr "Modulförfattare: "
+
+#: sphinx/directives/other.py:131
+msgid "Code author: "
+msgstr "Källkodsförfattare: "
+
+#: sphinx/directives/other.py:133
+msgid "Author: "
+msgstr "Upphovsman: "
+
+#: sphinx/directives/other.py:238
+msgid "See also"
+msgstr "Se även"
+
+#: sphinx/domains/__init__.py:253
+#, python-format
+msgid "%s %s"
+msgstr "%s %s"
+
+#: sphinx/domains/c.py:51
+#: sphinx/domains/python.py:49
+msgid "Parameters"
+msgstr "Parametrar"
+
+#: sphinx/domains/c.py:54
+#: sphinx/domains/javascript.py:137
+#: sphinx/domains/python.py:59
+msgid "Returns"
+msgstr "Returnerar"
+
+#: sphinx/domains/c.py:56
+#: sphinx/domains/python.py:61
+msgid "Return type"
+msgstr "Returtyp"
+
+#: sphinx/domains/c.py:133
+#, python-format
+msgid "%s (C function)"
+msgstr "%s (C-funktion)"
+
+#: sphinx/domains/c.py:135
+#, python-format
+msgid "%s (C member)"
+msgstr "%s (C-medlem)"
+
+#: sphinx/domains/c.py:137
+#, python-format
+msgid "%s (C macro)"
+msgstr "%s (C-makro)"
+
+#: sphinx/domains/c.py:139
+#, python-format
+msgid "%s (C type)"
+msgstr "%s (C-typ)"
+
+#: sphinx/domains/c.py:141
+#, python-format
+msgid "%s (C variable)"
+msgstr "%s (C-variabel)"
+
+#: sphinx/domains/c.py:171
+#: sphinx/domains/cpp.py:1031
+#: sphinx/domains/javascript.py:166
+#: sphinx/domains/python.py:497
+msgid "function"
+msgstr "funktion"
+
+#: sphinx/domains/c.py:172
+#: sphinx/domains/cpp.py:1032
+msgid "member"
+msgstr "medlem"
+
+#: sphinx/domains/c.py:173
+msgid "macro"
+msgstr "makro"
+
+#: sphinx/domains/c.py:174
+#: sphinx/domains/cpp.py:1033
+msgid "type"
+msgstr "typ"
+
+#: sphinx/domains/c.py:175
+msgid "variable"
+msgstr "variabel"
+
+#: sphinx/domains/cpp.py:876
+#, python-format
+msgid "%s (C++ class)"
+msgstr "%s (C++-klass)"
+
+#: sphinx/domains/cpp.py:891
+#, python-format
+msgid "%s (C++ type)"
+msgstr "%s (C++-typ)"
+
+#: sphinx/domains/cpp.py:910
+#, python-format
+msgid "%s (C++ member)"
+msgstr "%s (C++-medlem)"
+
+#: sphinx/domains/cpp.py:962
+#, python-format
+msgid "%s (C++ function)"
+msgstr "%s (C++-funktion)"
+
+#: sphinx/domains/cpp.py:1030
+#: sphinx/domains/python.py:499
+msgid "class"
+msgstr "klass"
+
+#: sphinx/domains/javascript.py:117
+#: sphinx/domains/python.py:221
+#, python-format
+msgid "%s() (built-in function)"
+msgstr "%s() (inbyggd funktion)"
+
+#: sphinx/domains/javascript.py:118
+#: sphinx/domains/python.py:285
+#, python-format
+msgid "%s() (%s method)"
+msgstr "%s() (%s metod)"
+
+#: sphinx/domains/javascript.py:120
+#, python-format
+msgid "%s (global variable or constant)"
+msgstr "%s (global variabel eller konstant)"
+
+#: sphinx/domains/javascript.py:122
+#: sphinx/domains/python.py:323
+#, python-format
+msgid "%s (%s attribute)"
+msgstr "%s (%s attribut)"
+
+#: sphinx/domains/javascript.py:131
+msgid "Arguments"
+msgstr "Argument"
+
+#: sphinx/domains/javascript.py:134
+msgid "Throws"
+msgstr "Kastar"
+
+#: sphinx/domains/javascript.py:167
+#: sphinx/domains/python.py:498
+msgid "data"
+msgstr "data"
+
+#: sphinx/domains/javascript.py:168
+#: sphinx/domains/python.py:504
+msgid "attribute"
+msgstr "attribut"
+
+#: sphinx/domains/python.py:53
+msgid "Variables"
+msgstr "Variabler"
+
+#: sphinx/domains/python.py:56
+msgid "Raises"
+msgstr "Väcker"
+
+#: sphinx/domains/python.py:222
+#: sphinx/domains/python.py:279
+#: sphinx/domains/python.py:291
+#: sphinx/domains/python.py:304
+#, python-format
+msgid "%s() (in module %s)"
+msgstr "%s() (i modul %s)"
+
+#: sphinx/domains/python.py:225
+#, python-format
+msgid "%s (built-in variable)"
+msgstr "%s (inbyggd variabel)"
+
+#: sphinx/domains/python.py:226
+#: sphinx/domains/python.py:317
+#, python-format
+msgid "%s (in module %s)"
+msgstr "%s (i modul %s)"
+
+#: sphinx/domains/python.py:242
+#, python-format
+msgid "%s (built-in class)"
+msgstr "%s (inbyggd klass)"
+
+#: sphinx/domains/python.py:243
+#, python-format
+msgid "%s (class in %s)"
+msgstr "%s (klass i %s)"
+
+#: sphinx/domains/python.py:283
+#, python-format
+msgid "%s() (%s.%s method)"
+msgstr "%s() (%s.%s metod)"
+
+#: sphinx/domains/python.py:295
+#, python-format
+msgid "%s() (%s.%s static method)"
+msgstr "%s() (%s.%s statisk metod)"
+
+#: sphinx/domains/python.py:298
+#, python-format
+msgid "%s() (%s static method)"
+msgstr "%s() (%s statisk metod)"
+
+#: sphinx/domains/python.py:308
+#, python-format
+msgid "%s() (%s.%s class method)"
+msgstr "%s() (%s.%s klassmetod)"
+
+#: sphinx/domains/python.py:311
+#, python-format
+msgid "%s() (%s class method)"
+msgstr "%s() (%s klassmetod)"
+
+#: sphinx/domains/python.py:321
+#, python-format
+msgid "%s (%s.%s attribute)"
+msgstr "%s (%s.%s attribut)"
+
+#: sphinx/domains/python.py:366
+msgid "Platforms: "
+msgstr "Plattformar: "
+
+#: sphinx/domains/python.py:372
+#, python-format
+msgid "%s (module)"
+msgstr "%s (modul)"
+
+#: sphinx/domains/python.py:429
+msgid "Python Module Index"
+msgstr "Python Modulindex"
+
+#: sphinx/domains/python.py:430
+msgid "modules"
+msgstr "moduler"
+
+#: sphinx/domains/python.py:475
+msgid "Deprecated"
+msgstr "Ersatt"
+
+#: sphinx/domains/python.py:500
+#: sphinx/locale/__init__.py:162
+msgid "exception"
+msgstr "undantag"
+
+#: sphinx/domains/python.py:501
+msgid "method"
+msgstr "metod"
+
+#: sphinx/domains/python.py:502
+msgid "class method"
+msgstr "klassmetod"
+
+#: sphinx/domains/python.py:503
+msgid "static method"
+msgstr "statisk metod"
+
+#: sphinx/domains/python.py:505
+#: sphinx/locale/__init__.py:158
+msgid "module"
+msgstr "modul"
+
+#: sphinx/domains/rst.py:53
+#, python-format
+msgid "%s (directive)"
+msgstr "%s (direktiv)"
+
+#: sphinx/domains/rst.py:55
+#, python-format
+msgid "%s (role)"
+msgstr "%s (roll)"
+
+#: sphinx/domains/rst.py:103
+msgid "directive"
+msgstr "direktiv"
+
+#: sphinx/domains/rst.py:104
+msgid "role"
+msgstr "roll"
+
+#: sphinx/domains/std.py:68
+#: sphinx/domains/std.py:84
+#, python-format
+msgid "environment variable; %s"
+msgstr "miljövariabel; %s"
+
+#: sphinx/domains/std.py:160
+#, python-format
+msgid "%scommand line option; %s"
+msgstr "%skommandorad växel; %s"
+
+#: sphinx/domains/std.py:328
+msgid "glossary term"
+msgstr "ordlista"
+
+#: sphinx/domains/std.py:329
+msgid "grammar token"
+msgstr "grammatisk token"
+
+#: sphinx/domains/std.py:330
+msgid "reference label"
+msgstr "referensetikett"
+
+#: sphinx/domains/std.py:331
+msgid "environment variable"
+msgstr "miljövariabel"
+
+#: sphinx/domains/std.py:332
+msgid "program option"
+msgstr "programväxel"
+
+#: sphinx/domains/std.py:360
+#: sphinx/themes/basic/genindex-single.html:11
+#: sphinx/themes/basic/genindex-split.html:11
+#: sphinx/themes/basic/genindex-split.html:14
+#: sphinx/themes/basic/genindex.html:11
+#: sphinx/themes/basic/genindex.html:14
+#: sphinx/themes/basic/genindex.html:50
+#: sphinx/themes/basic/layout.html:125
+#: sphinx/writers/latex.py:173
+msgid "Index"
+msgstr "Index"
+
+#: sphinx/domains/std.py:361
+msgid "Module Index"
+msgstr "Modulindex"
+
+#: sphinx/domains/std.py:362
+#: sphinx/themes/basic/defindex.html:25
+msgid "Search Page"
+msgstr "Söksida"
+
+#: sphinx/ext/autodoc.py:917
+#, python-format
+msgid " Bases: %s"
+msgstr " Baserad: %s"
+
+#: sphinx/ext/autodoc.py:950
+#, python-format
+msgid "alias of :class:`%s`"
+msgstr "alias för :class:`%s`"
+
+#: sphinx/ext/todo.py:41
+msgid "Todo"
+msgstr "Att göra"
+
+#: sphinx/ext/todo.py:109
+#, python-format
+msgid "(The <<original entry>> is located in %s, line %d.)"
+msgstr "(<<Ursprunget>> finns i %s, på rad %d.)"
+
+#: sphinx/ext/todo.py:117
+msgid "original entry"
+msgstr "ursprungsvärde"
+
+#: sphinx/ext/viewcode.py:66
+msgid "[source]"
+msgstr "[source]"
+
+#: sphinx/ext/viewcode.py:109
+msgid "[docs]"
+msgstr "[docs]"
+
+#: sphinx/ext/viewcode.py:123
+msgid "Module code"
+msgstr "Modulkällkod"
+
+#: sphinx/ext/viewcode.py:129
+#, python-format
+msgid "<h1>Source code for %s</h1>"
+msgstr "<h1>Källkod för %s</h1>"
+
+#: sphinx/ext/viewcode.py:156
+msgid "Overview: module code"
+msgstr "Översikt: modulkällkod"
+
+#: sphinx/ext/viewcode.py:157
+msgid "<h1>All modules for which code is available</h1>"
+msgstr "<h1>Alla moduler där källkod finns</h1>"
+
+#: sphinx/locale/__init__.py:139
+msgid "Attention"
+msgstr "Uppmärksamma"
+
+#: sphinx/locale/__init__.py:140
+msgid "Caution"
+msgstr "Varning"
+
+#: sphinx/locale/__init__.py:141
+msgid "Danger"
+msgstr "Risk"
+
+#: sphinx/locale/__init__.py:142
+msgid "Error"
+msgstr "Fel"
+
+#: sphinx/locale/__init__.py:143
+msgid "Hint"
+msgstr "Råd"
+
+#: sphinx/locale/__init__.py:144
+msgid "Important"
+msgstr "Viktigt"
+
+#: sphinx/locale/__init__.py:145
+msgid "Note"
+msgstr "Observera"
+
+#: sphinx/locale/__init__.py:146
+msgid "See Also"
+msgstr "Se även"
+
+#: sphinx/locale/__init__.py:147
+msgid "Tip"
+msgstr "Tips"
+
+#: sphinx/locale/__init__.py:148
+msgid "Warning"
+msgstr "Varning"
+
+#: sphinx/locale/__init__.py:152
+#, python-format
+msgid "New in version %s"
+msgstr "Nyheter i version %s"
+
+#: sphinx/locale/__init__.py:153
+#, python-format
+msgid "Changed in version %s"
+msgstr "Förändrat i version %s"
+
+#: sphinx/locale/__init__.py:154
+#, python-format
+msgid "Deprecated since version %s"
+msgstr "Ersatt sedan version %s"
+
+#: sphinx/locale/__init__.py:159
+msgid "keyword"
+msgstr "nyckelord"
+
+#: sphinx/locale/__init__.py:160
+msgid "operator"
+msgstr "operator"
+
+#: sphinx/locale/__init__.py:161
+msgid "object"
+msgstr "objekt"
+
+#: sphinx/locale/__init__.py:163
+msgid "statement"
+msgstr "uttryck"
+
+#: sphinx/locale/__init__.py:164
+msgid "built-in function"
+msgstr "inbyggda funktioner"
+
+#: sphinx/themes/agogo/layout.html:45
+#: sphinx/themes/basic/globaltoc.html:10
+#: sphinx/themes/basic/localtoc.html:11
+msgid "Table Of Contents"
+msgstr "Innehållsförteckning"
+
+#: sphinx/themes/agogo/layout.html:49
+#: sphinx/themes/basic/layout.html:128
+#: sphinx/themes/basic/search.html:11
+#: sphinx/themes/basic/search.html:14
+msgid "Search"
+msgstr "Sök"
+
+#: sphinx/themes/agogo/layout.html:52
+#: sphinx/themes/basic/searchbox.html:15
+msgid "Go"
+msgstr "Gå"
+
+#: sphinx/themes/agogo/layout.html:57
+#: sphinx/themes/basic/searchbox.html:20
+msgid "Enter search terms or a module, class or function name."
+msgstr "Ange sökord eller modul-, klass- eller funktionsnamn."
+
+#: sphinx/themes/agogo/layout.html:78
+#: sphinx/themes/basic/sourcelink.html:14
+msgid "Show Source"
+msgstr "Visa källfil"
+
+#: sphinx/themes/basic/defindex.html:11
+msgid "Overview"
+msgstr "Översikt"
+
+#: sphinx/themes/basic/defindex.html:20
+msgid "Indices and tables:"
+msgstr "Index och tabeller:"
+
+#: sphinx/themes/basic/defindex.html:23
+msgid "Complete Table of Contents"
+msgstr "Komplett Innehållsförteckning"
+
+#: sphinx/themes/basic/defindex.html:24
+msgid "lists all sections and subsections"
+msgstr "lista över alla paragrafer och underparagrafer"
+
+#: sphinx/themes/basic/defindex.html:26
+msgid "search this documentation"
+msgstr "sök i det här dokumentet"
+
+#: sphinx/themes/basic/defindex.html:28
+msgid "Global Module Index"
+msgstr "Globalt modulindex"
+
+#: sphinx/themes/basic/defindex.html:29
+msgid "quick access to all modules"
+msgstr "genväg till alla moduler"
+
+#: sphinx/themes/basic/defindex.html:31
+msgid "all functions, classes, terms"
+msgstr "alla funktioner, klasser, villkor"
+
+#: sphinx/themes/basic/genindex-single.html:14
+#, python-format
+msgid "Index &ndash; %(key)s"
+msgstr "Index &ndash; %(key)s"
+
+#: sphinx/themes/basic/genindex-single.html:46
+#: sphinx/themes/basic/genindex-split.html:24
+#: sphinx/themes/basic/genindex-split.html:38
+#: sphinx/themes/basic/genindex.html:56
+msgid "Full index on one page"
+msgstr "Hela innehållsförteckningen på en sida"
+
+#: sphinx/themes/basic/genindex-split.html:16
+msgid "Index pages by letter"
+msgstr "Innehållsförteckning per inledande bokstav"
+
+#: sphinx/themes/basic/genindex-split.html:25
+msgid "can be huge"
+msgstr "kan bli stort"
+
+#: sphinx/themes/basic/layout.html:23
+msgid "Navigation"
+msgstr "Navigation"
+
+#: sphinx/themes/basic/layout.html:113
+#, python-format
+msgid "Search within %(docstitle)s"
+msgstr "Sök bland %(docstitle)s"
+
+#: sphinx/themes/basic/layout.html:122
+msgid "About these documents"
+msgstr "Om dessa dokument"
+
+#: sphinx/themes/basic/layout.html:131
+msgid "Copyright"
+msgstr "Copyright"
+
+#: sphinx/themes/basic/layout.html:180
+#, python-format
+msgid "&copy; <a href=\"%(path)s\">Copyright</a> %(copyright)s."
+msgstr "&copy; <a href=\"%(path)s\">Copyright</a> %(copyright)s."
+
+#: sphinx/themes/basic/layout.html:182
+#, python-format
+msgid "&copy; Copyright %(copyright)s."
+msgstr "&copy; Copyright %(copyright)s."
+
+#: sphinx/themes/basic/layout.html:186
+#, python-format
+msgid "Last updated on %(last_updated)s."
+msgstr "Senast uppdaterad %(last_updated)s."
+
+#: sphinx/themes/basic/layout.html:189
+#, python-format
+msgid "Created using <a href=\"http://sphinx.pocoo.org/\">Sphinx</a> %(sphinx_version)s."
+msgstr "Skapad med <a href=\"http://sphinx.pocoo.org/\">Sphinx</a> %(sphinx_version)s."
+
+#: sphinx/themes/basic/opensearch.xml:4
+#, python-format
+msgid "Search %(docstitle)s"
+msgstr "Sök %(docstitle)s"
+
+#: sphinx/themes/basic/relations.html:11
+msgid "Previous topic"
+msgstr "Föregående titel"
+
+#: sphinx/themes/basic/relations.html:13
+msgid "previous chapter"
+msgstr "Föregående kapitel"
+
+#: sphinx/themes/basic/relations.html:16
+msgid "Next topic"
+msgstr "Nästa titel"
+
+#: sphinx/themes/basic/relations.html:18
+msgid "next chapter"
+msgstr "Nästa kapitel"
+
+#: sphinx/themes/basic/search.html:18
+msgid ""
+"Please activate JavaScript to enable the search\n"
+" functionality."
+msgstr "Var god aktivera JavaScript för sökfunktionalitet."
+
+#: sphinx/themes/basic/search.html:23
+msgid ""
+"From here you can search these documents. Enter your search\n"
+" words into the box below and click \"search\". Note that the search\n"
+" function will automatically search for all of the words. Pages\n"
+" containing fewer words won't appear in the result list."
+msgstr ""
+"Här kan du söka bland dessa dokument. Ange sökord nedan och klicka \"sök\".\n"
+" Sökningen måste träffa på samtliga angivna sökord."
+
+#: sphinx/themes/basic/search.html:30
+msgid "search"
+msgstr "sök"
+
+#: sphinx/themes/basic/search.html:34
+#: sphinx/themes/basic/static/searchtools.js:489
+msgid "Search Results"
+msgstr "Sökresultat"
+
+#: sphinx/themes/basic/search.html:36
+msgid "Your search did not match any results."
+msgstr "Din sökning gav inga resultat."
+
+#: sphinx/themes/basic/searchbox.html:12
+msgid "Quick search"
+msgstr "Snabbsök"
+
+#: sphinx/themes/basic/sourcelink.html:11
+msgid "This Page"
+msgstr "Denna Sida"
+
+#: sphinx/themes/basic/changes/frameset.html:5
+#: sphinx/themes/basic/changes/versionchanges.html:12
+#, python-format
+msgid "Changes in Version %(version)s &mdash; %(docstitle)s"
+msgstr "Förändringar i Version %(version)s &mdash; %(docstitle)s"
+
+#: sphinx/themes/basic/changes/rstsource.html:5
+#, python-format
+msgid "%(filename)s &mdash; %(docstitle)s"
+msgstr "%(filename)s &mdash; %(docstitle)s"
+
+#: sphinx/themes/basic/changes/versionchanges.html:17
+#, python-format
+msgid "Automatically generated list of changes in version %(version)s"
+msgstr "Automatiskt genererad lista över förändringar i version %(version)s"
+
+#: sphinx/themes/basic/changes/versionchanges.html:18
+msgid "Library changes"
+msgstr "Förändringar i bibliotek"
+
+#: sphinx/themes/basic/changes/versionchanges.html:23
+msgid "C API changes"
+msgstr "Förändringar i C-API"
+
+#: sphinx/themes/basic/changes/versionchanges.html:25
+msgid "Other changes"
+msgstr "Övriga förändringar"
+
+#: sphinx/themes/basic/static/doctools.js:154
+#: sphinx/writers/html.py:482
+#: sphinx/writers/html.py:487
+msgid "Permalink to this headline"
+msgstr "Permalink till denna rubrik"
+
+#: sphinx/themes/basic/static/doctools.js:160
+#: sphinx/writers/html.py:87
+msgid "Permalink to this definition"
+msgstr "Permalink till denna definition"
+
+#: sphinx/themes/basic/static/doctools.js:189
+msgid "Hide Search Matches"
+msgstr "Dölj Sökresultat"
+
+#: sphinx/themes/basic/static/searchtools.js:285
+msgid "Searching"
+msgstr "Söker"
+
+#: sphinx/themes/basic/static/searchtools.js:290
+msgid "Preparing search..."
+msgstr "Förbereder sökning..."
+
+#: sphinx/themes/basic/static/searchtools.js:364
+msgid ", in "
+msgstr ", i "
+
+#: sphinx/themes/basic/static/searchtools.js:491
+msgid "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories."
+msgstr "Din sökning gav inga resultat. Kolla stavning och att du valt tillräckligt med kategorier."
+
+#: sphinx/themes/basic/static/searchtools.js:493
+#, python-format
+msgid "Search finished, found %s page(s) matching the search query."
+msgstr "Sökning färdig, hittade %s träffar."
+
+#: sphinx/themes/default/static/sidebar.js:66
+msgid "Expand sidebar"
+msgstr "Expandera sidolist"
+
+#: sphinx/themes/default/static/sidebar.js:79
+#: sphinx/themes/default/static/sidebar.js:106
+msgid "Collapse sidebar"
+msgstr "Dölj sidolist"
+
+#: sphinx/themes/haiku/layout.html:26
+msgid "Contents"
+msgstr "Innehåll"
+
+#: sphinx/writers/latex.py:171
+msgid "Release"
+msgstr "Utgåva"
+
+#: sphinx/writers/latex.py:572
+#: sphinx/writers/manpage.py:178
+msgid "Footnotes"
+msgstr "Fotnoter"
+
+#: sphinx/writers/latex.py:641
+msgid "continued from previous page"
+msgstr "fortsättning från föregående sida"
+
+#: sphinx/writers/latex.py:646
+msgid "Continued on next page"
+msgstr "Fortsätter på nästa sida"
+
+#: sphinx/writers/text.py:422
+msgid "[image]"
+msgstr "[image]"
+
diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py
index b8e2fded..ef92297c 100644
--- a/sphinx/pycode/__init__.py
+++ b/sphinx/pycode/__init__.py
@@ -18,6 +18,7 @@ from sphinx.errors import PycodeError
from sphinx.pycode import nodes
from sphinx.pycode.pgen2 import driver, token, tokenize, parse, literals
from sphinx.util import get_module_source
+from sphinx.util.pycompat import next
from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc
@@ -98,7 +99,8 @@ class AttrDocVisitor(nodes.NodeVisitor):
if not pnode or pnode.type not in (token.INDENT, token.DEDENT):
break
prefix = pnode.get_prefix()
- prefix = prefix.decode(self.encoding)
+ if not isinstance(prefix, unicode):
+ prefix = prefix.decode(self.encoding)
docstring = prepare_commentdoc(prefix)
self.add_docstring(node, docstring)
@@ -278,7 +280,7 @@ class ModuleAnalyzer(object):
result[fullname] = (dtype, startline, endline)
expect_indent = False
if tok in ('def', 'class'):
- name = tokeniter.next()[1]
+ name = next(tokeniter)[1]
namespace.append(name)
fullname = '.'.join(namespace)
stack.append((tok, fullname, spos[0], indent))
diff --git a/sphinx/pycode/nodes.py b/sphinx/pycode/nodes.py
index e7184677..fc6eb93a 100644
--- a/sphinx/pycode/nodes.py
+++ b/sphinx/pycode/nodes.py
@@ -29,6 +29,8 @@ class BaseNode(object):
return NotImplemented
return not self._eq(other)
+ __hash__ = None
+
def get_prev_sibling(self):
"""Return previous child in parent's children, or None."""
if self.parent is None:
diff --git a/sphinx/pycode/pgen2/literals.py b/sphinx/pycode/pgen2/literals.py
index 31900291..d4893702 100644
--- a/sphinx/pycode/pgen2/literals.py
+++ b/sphinx/pycode/pgen2/literals.py
@@ -66,7 +66,7 @@ uni_escape_re = re.compile(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3}|"
def evalString(s, encoding=None):
regex = escape_re
repl = escape
- if encoding:
+ if encoding and not isinstance(s, unicode):
s = s.decode(encoding)
if s.startswith('u') or s.startswith('U'):
regex = uni_escape_re
diff --git a/sphinx/pycode/pgen2/tokenize.py b/sphinx/pycode/pgen2/tokenize.py
index 4489db89..7ad9f012 100644
--- a/sphinx/pycode/pgen2/tokenize.py
+++ b/sphinx/pycode/pgen2/tokenize.py
@@ -143,7 +143,9 @@ class TokenError(Exception): pass
class StopTokenizing(Exception): pass
-def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
+def printtoken(type, token, scell, ecell, line): # for testing
+ srow, scol = scell
+ erow, ecol = ecell
print "%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token))
diff --git a/sphinx/quickstart.py b/sphinx/quickstart.py
index 884caca7..fdac4cbe 100644
--- a/sphinx/quickstart.py
+++ b/sphinx/quickstart.py
@@ -9,8 +9,9 @@
:license: BSD, see LICENSE for details.
"""
-import sys, os, time
+import sys, os, time, re
from os import path
+from codecs import open
TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
@@ -20,10 +21,23 @@ from sphinx.util.console import purple, bold, red, turquoise, \
nocolor, color_terminal
from sphinx.util import texescape
+# function to get input from terminal -- overridden by the test suite
+try:
+ # this raw_input is not converted by 2to3
+ term_input = raw_input
+except NameError:
+ term_input = input
+
PROMPT_PREFIX = '> '
-QUICKSTART_CONF = '''\
+if sys.version_info >= (3, 0):
+ # prevents that the file is checked for being written in Python 2.x syntax
+ QUICKSTART_CONF = '#!/usr/bin/env python3\n'
+else:
+ QUICKSTART_CONF = ''
+
+QUICKSTART_CONF += '''\
# -*- coding: utf-8 -*-
#
# %(project)s documentation build configuration file, created by
@@ -330,7 +344,7 @@ ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) \
$(SPHINXOPTS) %(rsrcdir)s
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp \
-epub latex latexpdf text man changes linkcheck doctest
+epub latex latexpdf text man changes linkcheck doctest gettext
help:
\t@echo "Please use \\`make <target>' where <target> is one of"
@@ -347,6 +361,7 @@ help:
\t@echo " latexpdf to make LaTeX files and run them through pdflatex"
\t@echo " text to make text files"
\t@echo " man to make manual pages"
+\t@echo " gettext to make PO message catalogs"
\t@echo " changes to make an overview of all changed/added/deprecated items"
\t@echo " linkcheck to check all external links for integrity"
\t@echo " doctest to run all doctests embedded in the documentation \
@@ -433,6 +448,11 @@ man:
\t@echo
\t@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+gettext:
+\t$(SPHINXBUILD) -b gettext $(ALLSPHINXOPTS) $(BUILDDIR)/locale
+\t@echo
+\t@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
changes:
\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
\t@echo
@@ -481,6 +501,7 @@ if "%%1" == "help" (
\techo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
\techo. text to make text files
\techo. man to make manual pages
+\techo. gettext to make PO message catalogs
\techo. changes to make an overview over all changed/added/deprecated items
\techo. linkcheck to check all external links for integrity
\techo. doctest to run all doctests embedded in the documentation if enabled
@@ -582,6 +603,13 @@ if "%%1" == "man" (
\tgoto end
)
+if "%%1" == "gettext" (
+\t%%SPHINXBUILD%% -b gettext %%ALLSPHINXOPTS%% %%BUILDDIR%%/locale
+\techo.
+\techo.Build finished. The message catalogs are in %%BUILDDIR%%/locale.
+\tgoto end
+)
+
if "%%1" == "changes" (
\t%%SPHINXBUILD%% -b changes %%ALLSPHINXOPTS%% %%BUILDDIR%%/changes
\techo.
@@ -656,20 +684,22 @@ def do_prompt(d, key, text, default=None, validator=nonempty):
prompt = purple(PROMPT_PREFIX + '%s [%s]: ' % (text, default))
else:
prompt = purple(PROMPT_PREFIX + text + ': ')
- x = raw_input(prompt)
+ x = term_input(prompt)
if default and not x:
x = default
- if x.decode('ascii', 'replace').encode('ascii', 'replace') != x:
- if TERM_ENCODING:
- x = x.decode(TERM_ENCODING)
- else:
- print turquoise('* Note: non-ASCII characters entered '
- 'and terminal encoding unknown -- assuming '
- 'UTF-8 or Latin-1.')
- try:
- x = x.decode('utf-8')
- except UnicodeDecodeError:
- x = x.decode('latin1')
+ if not isinstance(x, unicode):
+ # for Python 2.x, try to get a Unicode string out of it
+ if x.decode('ascii', 'replace').encode('ascii', 'replace') != x:
+ if TERM_ENCODING:
+ x = x.decode(TERM_ENCODING)
+ else:
+ print turquoise('* Note: non-ASCII characters entered '
+ 'and terminal encoding unknown -- assuming '
+ 'UTF-8 or Latin-1.')
+ try:
+ x = x.decode('utf-8')
+ except UnicodeDecodeError:
+ x = x.decode('latin1')
try:
x = validator(x)
except ValidationError, err:
@@ -679,6 +709,18 @@ def do_prompt(d, key, text, default=None, validator=nonempty):
d[key] = x
+if sys.version_info >= (3, 0):
+ # remove Unicode literal prefixes
+ _unicode_string_re = re.compile(r"[uU]('.*?')")
+ def _convert_python_source(source):
+ return _unicode_string_re.sub('\\1', source)
+
+ for f in ['QUICKSTART_CONF', 'EPUB_CONFIG', 'INTERSPHINX_CONFIG']:
+ globals()[f] = _convert_python_source(globals()[f])
+
+ del _unicode_string_re, _convert_python_source
+
+
def inner_main(args):
d = {}
texescape.init()
@@ -834,28 +876,28 @@ directly.'''
if d['ext_intersphinx']:
conf_text += INTERSPHINX_CONFIG
- f = open(path.join(srcdir, 'conf.py'), 'w')
- f.write(conf_text.encode('utf-8'))
+ f = open(path.join(srcdir, 'conf.py'), 'w', encoding='utf-8')
+ f.write(conf_text)
f.close()
masterfile = path.join(srcdir, d['master'] + d['suffix'])
- f = open(masterfile, 'w')
- f.write((MASTER_FILE % d).encode('utf-8'))
+ f = open(masterfile, 'w', encoding='utf-8')
+ f.write(MASTER_FILE % d)
f.close()
if d['makefile']:
d['rsrcdir'] = d['sep'] and 'source' or '.'
d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
# use binary mode, to avoid writing \r\n on Windows
- f = open(path.join(d['path'], 'Makefile'), 'wb')
- f.write((MAKEFILE % d).encode('utf-8'))
+ f = open(path.join(d['path'], 'Makefile'), 'wb', encoding='utf-8')
+ f.write(MAKEFILE % d)
f.close()
if d['batchfile']:
d['rsrcdir'] = d['sep'] and 'source' or '.'
d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
- f = open(path.join(d['path'], 'make.bat'), 'w')
- f.write((BATCHFILE % d).encode('utf-8'))
+ f = open(path.join(d['path'], 'make.bat'), 'w', encoding='utf-8')
+ f.write(BATCHFILE % d)
f.close()
print
diff --git a/sphinx/roles.py b/sphinx/roles.py
index 0ea0ec48..b44868e6 100644
--- a/sphinx/roles.py
+++ b/sphinx/roles.py
@@ -18,7 +18,7 @@ from docutils.parsers.rst import roles
from sphinx import addnodes
from sphinx.locale import _
from sphinx.util import ws_re
-from sphinx.util.nodes import split_explicit_title
+from sphinx.util.nodes import split_explicit_title, process_index_entry
generic_docroles = {
@@ -139,16 +139,15 @@ class XRefRole(object):
# methods that can be overwritten
def process_link(self, env, refnode, has_explicit_title, title, target):
- """
- Called after parsing title and target text, and creating the reference
- node (given in *refnode*). This method can alter the reference node and
- must return a new (or the same) ``(title, target)`` tuple.
+ """Called after parsing title and target text, and creating the
+ reference node (given in *refnode*). This method can alter the
+ reference node and must return a new (or the same) ``(title, target)``
+ tuple.
"""
return title, ws_re.sub(' ', target)
def result_nodes(self, document, env, node, is_ref):
- """
- Called before returning the finished nodes. *node* is the reference
+ """Called before returning the finished nodes. *node* is the reference
node if one was created (*is_ref* is then true), else the content node.
This method can add other nodes and must return a ``(nodes, messages)``
tuple (the usual return value of a role function).
@@ -269,6 +268,27 @@ def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
return [addnodes.abbreviation(abbr, abbr, explanation=expl)], []
+def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
+ # create new reference target
+ env = inliner.document.settings.env
+ targetid = 'index-%s' % env.new_serialno('index')
+ targetnode = nodes.target('', '', ids=[targetid])
+ # split text and target in role content
+ has_explicit_title, title, target = split_explicit_title(text)
+ title = utils.unescape(title)
+ target = utils.unescape(target)
+ # if an explicit target is given, we can process it as a full entry
+ if has_explicit_title:
+ entries = process_index_entry(target, targetid)
+ # otherwise we just create a "single" entry
+ else:
+ entries = [('single', target, targetid, target)]
+ indexnode = addnodes.index()
+ indexnode['entries'] = entries
+ textnode = nodes.Text(title, title)
+ return [indexnode, targetnode, textnode], []
+
+
specific_docroles = {
# links to download references
'download': XRefRole(nodeclass=addnodes.download_reference),
@@ -282,6 +302,7 @@ specific_docroles = {
'file': emph_literal_role,
'samp': emph_literal_role,
'abbr': abbr_role,
+ 'index': index_role,
}
for rolename, func in specific_docroles.iteritems():
diff --git a/sphinx/setup_command.py b/sphinx/setup_command.py
index 939fbb21..5fe34f18 100644
--- a/sphinx/setup_command.py
+++ b/sphinx/setup_command.py
@@ -22,7 +22,8 @@ from sphinx.util.console import darkred, nocolor, color_terminal
class BuildDoc(Command):
- """Distutils command to build Sphinx documentation.
+ """
+ Distutils command to build Sphinx documentation.
The Sphinx build can then be triggered from distutils, and some Sphinx
options can be set in ``setup.py`` or ``setup.cfg`` instead of Sphinx own
diff --git a/sphinx/themes/basic/searchresults.html b/sphinx/themes/basic/searchresults.html
new file mode 100644
index 00000000..4b5da1a3
--- /dev/null
+++ b/sphinx/themes/basic/searchresults.html
@@ -0,0 +1,36 @@
+{#
+ basic/searchresults.html
+ ~~~~~~~~~~~~~~~~~
+
+ Template for the body of the search results page.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+#}
+<h1 id="search-documentation">Search</h1>
+<p>
+ From here you can search these documents. Enter your search
+ words into the box below and click "search".
+</p>
+<form action="" method="get">
+ <input type="text" name="q" value="" />
+ <input type="submit" value="search" />
+ <span id="search-progress" style="padding-left: 10px"></span>
+</form>
+{% if search_performed %}
+<h2>Search Results</h2>
+{% if not search_results %}
+<p>Your search did not match any results.</p>
+{% endif %}
+{% endif %}
+<div id="search-results">
+ {% if search_results %}
+ <ul class="search">
+ {% for href, caption, context in search_results %}
+ <li><a href="{{ href }}?highlight={{ q }}">{{ caption }}</a>
+ <div class="context">{{ context|e }}</div>
+ </li>
+ {% endfor %}
+ </ul>
+ {% endif %}
+</div>
diff --git a/sphinx/themes/basic/static/ajax-loader.gif b/sphinx/themes/basic/static/ajax-loader.gif
new file mode 100644
index 00000000..61faf8ca
--- /dev/null
+++ b/sphinx/themes/basic/static/ajax-loader.gif
Binary files differ
diff --git a/sphinx/themes/basic/static/comment-bright.png b/sphinx/themes/basic/static/comment-bright.png
new file mode 100644
index 00000000..551517b8
--- /dev/null
+++ b/sphinx/themes/basic/static/comment-bright.png
Binary files differ
diff --git a/sphinx/themes/basic/static/comment-close.png b/sphinx/themes/basic/static/comment-close.png
new file mode 100644
index 00000000..09b54be4
--- /dev/null
+++ b/sphinx/themes/basic/static/comment-close.png
Binary files differ
diff --git a/sphinx/themes/basic/static/comment.png b/sphinx/themes/basic/static/comment.png
new file mode 100644
index 00000000..92feb52b
--- /dev/null
+++ b/sphinx/themes/basic/static/comment.png
Binary files differ
diff --git a/sphinx/themes/basic/static/down-pressed.png b/sphinx/themes/basic/static/down-pressed.png
new file mode 100644
index 00000000..6f7ad782
--- /dev/null
+++ b/sphinx/themes/basic/static/down-pressed.png
Binary files differ
diff --git a/sphinx/themes/basic/static/down.png b/sphinx/themes/basic/static/down.png
new file mode 100644
index 00000000..3003a887
--- /dev/null
+++ b/sphinx/themes/basic/static/down.png
Binary files differ
diff --git a/sphinx/themes/basic/static/up-pressed.png b/sphinx/themes/basic/static/up-pressed.png
new file mode 100644
index 00000000..8bd587af
--- /dev/null
+++ b/sphinx/themes/basic/static/up-pressed.png
Binary files differ
diff --git a/sphinx/themes/basic/static/up.png b/sphinx/themes/basic/static/up.png
new file mode 100644
index 00000000..b9462568
--- /dev/null
+++ b/sphinx/themes/basic/static/up.png
Binary files differ
diff --git a/sphinx/themes/basic/static/websupport.js b/sphinx/themes/basic/static/websupport.js
new file mode 100644
index 00000000..870b0cdc
--- /dev/null
+++ b/sphinx/themes/basic/static/websupport.js
@@ -0,0 +1,762 @@
+(function($) {
+ $.fn.autogrow = function(){
+ return this.each(function(){
+ var textarea = this;
+
+ $.fn.autogrow.resize(textarea);
+
+ $(textarea)
+ .focus(function() {
+ textarea.interval = setInterval(function() {
+ $.fn.autogrow.resize(textarea);
+ }, 500);
+ })
+ .blur(function() {
+ clearInterval(textarea.interval);
+ });
+ });
+ };
+
+ $.fn.autogrow.resize = function(textarea) {
+ var lineHeight = parseInt($(textarea).css('line-height'), 10);
+ var lines = textarea.value.split('\n');
+ var columns = textarea.cols;
+ var lineCount = 0;
+ $.each(lines, function() {
+ lineCount += Math.ceil(this.length / columns) || 1;
+ });
+ var height = lineHeight * (lineCount + 1);
+ $(textarea).css('height', height);
+ };
+})(jQuery);
+
+(function($) {
+ var comp, by;
+
+ function init() {
+ initEvents();
+ initComparator();
+ }
+
+ function initEvents() {
+ $('a.comment_close').live("click", function(event) {
+ hide($(this).attr('id').substring(2));
+ return false;
+ });
+ $('.vote').live("click", function() {
+ handleVote($(this));
+ return false;
+ });
+ $('a.reply').live("click", function() {
+ openReply($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.close_reply').live("click", function() {
+ closeReply($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.sort_option').live("click", function(event) {
+ handleReSort($(this));
+ return false;
+ });
+ $('a.show_proposal').live("click", function() {
+ showProposal($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.hide_proposal').live("click", function() {
+ hideProposal($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.show_propose_change').live("click", function() {
+ showProposeChange($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.hide_propose_change').live("click", function() {
+ hideProposeChange($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.accept_comment').live("click", function() {
+ acceptComment($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.reject_comment').live("click", function() {
+ rejectComment($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.delete_comment').live("click", function() {
+ deleteComment($(this).attr('id').substring(2));
+ return false;
+ });
+ }
+
+ /*
+ Set comp, which is a comparator function used for sorting and
+ inserting comments into the list.
+ */
+ function setComparator() {
+ // If the first three letters are "asc", sort in ascending order
+ // and remove the prefix.
+ if (by.substring(0,3) == 'asc') {
+ var i = by.substring(3);
+ comp = function(a, b) { return a[i] - b[i]; };
+ } else {
+ // Otherwise sort in descending order.
+ comp = function(a, b) { return b[by] - a[by]; };
+ }
+
+ // Reset link styles and format the selected sort option.
+ $('a.sel').attr('href', '#').removeClass('sel');
+ $('a.' + by).removeAttr('href').addClass('sel');
+ }
+
+ /*
+ Create a comp function. If the user has preferences stored in
+ the sortBy cookie, use those, otherwise use the default.
+ */
+ function initComparator() {
+ by = 'rating'; // Default to sort by rating.
+ // If the sortBy cookie is set, use that instead.
+ if (document.cookie.length > 0) {
+ var start = document.cookie.indexOf('sortBy=');
+ if (start != -1) {
+ start = start + 7;
+ var end = document.cookie.indexOf(";", start);
+ if (end == -1) {
+ end = document.cookie.length;
+ by = unescape(document.cookie.substring(start, end));
+ }
+ }
+ }
+ setComparator();
+ }
+
+ /*
+ Show a comment div.
+ */
+ function show(id) {
+ $('#ao' + id).hide();
+ $('#ah' + id).show();
+ var context = $.extend({id: id}, opts);
+ var popup = $(renderTemplate(popupTemplate, context)).hide();
+ popup.find('textarea[name="proposal"]').hide();
+ popup.find('a.' + by).addClass('sel');
+ var form = popup.find('#cf' + id);
+ form.submit(function(event) {
+ event.preventDefault();
+ addComment(form);
+ });
+ $('#s' + id).after(popup);
+ popup.slideDown('fast', function() {
+ getComments(id);
+ });
+ }
+
+ /*
+ Hide a comment div.
+ */
+ function hide(id) {
+ $('#ah' + id).hide();
+ $('#ao' + id).show();
+ var div = $('#sc' + id);
+ div.slideUp('fast', function() {
+ div.remove();
+ });
+ }
+
+ /*
+ Perform an ajax request to get comments for a node
+ and insert the comments into the comments tree.
+ */
+ function getComments(id) {
+ $.ajax({
+ type: 'GET',
+ url: opts.getCommentsURL,
+ data: {node: id},
+ success: function(data, textStatus, request) {
+ var ul = $('#cl' + id);
+ var speed = 100;
+ $('#cf' + id)
+ .find('textarea[name="proposal"]')
+ .data('source', data.source);
+
+ if (data.comments.length === 0) {
+ ul.html('<li>No comments yet.</li>');
+ ul.data('empty', true);
+ } else {
+ // If there are comments, sort them and put them in the list.
+ var comments = sortComments(data.comments);
+ speed = data.comments.length * 100;
+ appendComments(comments, ul);
+ ul.data('empty', false);
+ }
+ $('#cn' + id).slideUp(speed + 200);
+ ul.slideDown(speed);
+ },
+ error: function(request, textStatus, error) {
+ showError('Oops, there was a problem retrieving the comments.');
+ },
+ dataType: 'json'
+ });
+ }
+
+ /*
+ Add a comment via ajax and insert the comment into the comment tree.
+ */
+ function addComment(form) {
+ // Disable the form that is being submitted.
+ form.find('textarea,input').attr('disabled', 'disabled');
+ var node_id = form.find('input[name="node"]').val();
+ var parent_id = form.find('input[name="parent"]').val();
+
+ // Send the comment to the server.
+ $.ajax({
+ type: "POST",
+ url: opts.addCommentURL,
+ dataType: 'json',
+ data: {
+ node: node_id,
+ parent: parent_id,
+ text: form.find('textarea[name="comment"]').val(),
+ proposal: form.find('textarea[name="proposal"]').val()
+ },
+ success: function(data, textStatus, error) {
+ // Reset the form.
+ if (node_id) {
+ hideProposeChange(node_id);
+ }
+ form.find('textarea')
+ .val('')
+ .add(form.find('input'))
+ .removeAttr('disabled');
+ var ul = $('#cl' + (node_id || parent_id));
+ if (ul.data('empty')) {
+ $(ul).empty();
+ ul.data('empty', false);
+ }
+ insertComment(data.comment);
+ },
+ error: function(request, textStatus, error) {
+ form.find('textarea,input').removeAttr('disabled');
+ showError('Oops, there was a problem adding the comment.');
+ }
+ });
+ }
+
+ /*
+ Recursively append comments to the main comment list and children
+ lists, creating the comment tree.
+ */
+ function appendComments(comments, ul) {
+ $.each(comments, function() {
+ var div = createCommentDiv(this);
+ ul.append($(document.createElement('li')).html(div));
+ appendComments(this.children, div.find('ul.children'));
+ // To avoid stagnating data, don't store the comments children in data.
+ this.children = null;
+ div.data('comment', this);
+ });
+ }
+
+ /*
+ After adding a new comment, it must be inserted in the correct
+ location in the comment tree.
+ */
+ function insertComment(comment) {
+ var div = createCommentDiv(comment);
+
+ // To avoid stagnating data, don't store the comments children in data.
+ comment.children = null;
+ div.data('comment', comment);
+
+ var ul = $('#cl' + (comment.node || comment.parent));
+ var siblings = getChildren(ul);
+
+ var li = $(document.createElement('li'));
+ li.hide();
+
+ // Determine where in the parents children list to insert this comment.
+ for(i=0; i < siblings.length; i++) {
+ if (comp(comment, siblings[i]) <= 0) {
+ $('#cd' + siblings[i].id)
+ .parent()
+ .before(li.html(div));
+ li.slideDown('fast');
+ return;
+ }
+ }
+
+ // If we get here, this comment rates lower than all the others,
+ // or it is the only comment in the list.
+ ul.append(li.html(div));
+ li.slideDown('fast');
+ }
+
+ function acceptComment(id) {
+ $.ajax({
+ type: 'POST',
+ url: opts.acceptCommentURL,
+ data: {id: id},
+ success: function(data, textStatus, request) {
+ $('#cm' + id).fadeOut('fast');
+ },
+ error: function(request, textStatus, error) {
+ showError("Oops, there was a problem accepting the comment.");
+ }
+ });
+ }
+
+ function rejectComment(id) {
+ $.ajax({
+ type: 'POST',
+ url: opts.rejectCommentURL,
+ data: {id: id},
+ success: function(data, textStatus, request) {
+ var div = $('#cd' + id);
+ div.slideUp('fast', function() {
+ div.remove();
+ });
+ },
+ error: function(request, textStatus, error) {
+ showError("Oops, there was a problem rejecting the comment.");
+ }
+ });
+ }
+
+ function deleteComment(id) {
+ $.ajax({
+ type: 'POST',
+ url: opts.deleteCommentURL,
+ data: {id: id},
+ success: function(data, textStatus, request) {
+ var div = $('#cd' + id);
+ div
+ .find('span.user_id:first')
+ .text('[deleted]').end()
+ .find('p.comment_text:first')
+ .text('[deleted]').end()
+ .find('#cm' + id + ', #dc' + id + ', #ac' + id + ', #rc' + id +
+ ', #sp' + id + ', #hp' + id + ', #cr' + id + ', #rl' + id)
+ .remove();
+ var comment = div.data('comment');
+ comment.username = '[deleted]';
+ comment.text = '[deleted]';
+ div.data('comment', comment);
+ },
+ error: function(request, textStatus, error) {
+ showError("Oops, there was a problem deleting the comment.");
+ }
+ });
+ }
+
+ function showProposal(id) {
+ $('#sp' + id).hide();
+ $('#hp' + id).show();
+ $('#pr' + id).slideDown('fast');
+ }
+
+ function hideProposal(id) {
+ $('#hp' + id).hide();
+ $('#sp' + id).show();
+ $('#pr' + id).slideUp('fast');
+ }
+
+ function showProposeChange(id) {
+ $('#pc' + id).hide();
+ $('#hc' + id).show();
+ var textarea = $('#pt' + id);
+ textarea.val(textarea.data('source'));
+ $.fn.autogrow.resize(textarea[0]);
+ textarea.slideDown('fast');
+ }
+
+ function hideProposeChange(id) {
+ $('#hc' + id).hide();
+ $('#pc' + id).show();
+ var textarea = $('#pt' + id);
+ textarea.val('').removeAttr('disabled');
+ textarea.slideUp('fast');
+ }
+
+ /*
+ Handle when the user clicks on a sort by link.
+ */
+ function handleReSort(link) {
+ var classes = link.attr('class').split(/\s+/);
+ for (var i=0; i<classes.length; i++) {
+ if (classes[i] != 'sort_option') {
+ by = classes[i];
+ }
+ }
+ setComparator();
+ // Save/update the sortBy cookie.
+ var expiration = new Date();
+ expiration.setDate(expiration.getDate() + 365);
+ document.cookie= 'sortBy=' + escape(by) +
+ ';expires=' + expiration.toUTCString();
+ $('ul.comment_ul').each(function(index, ul) {
+ var comments = getChildren($(ul), true);
+ comments = sortComments(comments);
+ appendComments(comments, $(ul).empty());
+ });
+ }
+
+ /*
+ Function to process a vote when a user clicks an arrow.
+ */
+ function handleVote(link) {
+ if (!opts.voting) {
+ showError("You'll need to login to vote.");
+ return;
+ }
+
+ var id = link.attr('id');
+ // If it is an unvote, the new vote value is 0,
+ // Otherwise it's 1 for an upvote, or -1 for a downvote.
+ var value = 0;
+ if (id.charAt(1) != 'u') {
+ value = id.charAt(0) == 'u' ? 1 : -1;
+ }
+ // The data to be sent to the server.
+ var d = {
+ comment_id: id.substring(2),
+ value: value
+ };
+
+ // Swap the vote and unvote links.
+ link.hide();
+ $('#' + id.charAt(0) + (id.charAt(1) == 'u' ? 'v' : 'u') + d.comment_id)
+ .show();
+
+ // The div the comment is displayed in.
+ var div = $('div#cd' + d.comment_id);
+ var data = div.data('comment');
+
+ // If this is not an unvote, and the other vote arrow has
+ // already been pressed, unpress it.
+ if ((d.value !== 0) && (data.vote === d.value * -1)) {
+ $('#' + (d.value == 1 ? 'd' : 'u') + 'u' + d.comment_id).hide();
+ $('#' + (d.value == 1 ? 'd' : 'u') + 'v' + d.comment_id).show();
+ }
+
+ // Update the comments rating in the local data.
+ data.rating += (data.vote === 0) ? d.value : (d.value - data.vote);
+ data.vote = d.value;
+ div.data('comment', data);
+
+ // Change the rating text.
+ div.find('.rating:first')
+ .text(data.rating + ' point' + (data.rating == 1 ? '' : 's'));
+
+ // Send the vote information to the server.
+ $.ajax({
+ type: "POST",
+ url: opts.processVoteURL,
+ data: d,
+ error: function(request, textStatus, error) {
+ showError("Oops, there was a problem casting that vote.");
+ }
+ });
+ }
+
+ /*
+ Open a reply form used to reply to an existing comment.
+ */
+ function openReply(id) {
+ // Swap out the reply link for the hide link
+ $('#rl' + id).hide();
+ $('#cr' + id).show();
+
+ // Add the reply li to the children ul.
+ var div = $(renderTemplate(replyTemplate, {id: id})).hide();
+ $('#cl' + id)
+ .prepend(div)
+ // Setup the submit handler for the reply form.
+ .find('#rf' + id)
+ .submit(function(event) {
+ event.preventDefault();
+ addComment($('#rf' + id));
+ closeReply(id);
+ });
+ div.slideDown('fast');
+ }
+
+ /*
+ Close the reply form opened with openReply.
+ */
+ function closeReply(id) {
+ // Remove the reply div from the DOM.
+ $('#rd' + id).slideUp('fast', function() {
+ $(this).remove();
+ });
+
+ // Swap out the hide link for the reply link
+ $('#cr' + id).hide();
+ $('#rl' + id).show();
+ }
+
+ /*
+ Recursively sort a tree of comments using the comp comparator.
+ */
+ function sortComments(comments) {
+ comments.sort(comp);
+ $.each(comments, function() {
+ this.children = sortComments(this.children);
+ });
+ return comments;
+ }
+
+ /*
+ Get the children comments from a ul. If recursive is true,
+ recursively include childrens' children.
+ */
+ function getChildren(ul, recursive) {
+ var children = [];
+ ul.children().children("[id^='cd']")
+ .each(function() {
+ var comment = $(this).data('comment');
+ if (recursive) {
+ comment.children = getChildren($(this).find('#cl' + comment.id), true);
+ }
+ children.push(comment);
+ });
+ return children;
+ }
+
+ /*
+ Create a div to display a comment in.
+ */
+ function createCommentDiv(comment) {
+ // Prettify the comment rating.
+ comment.pretty_rating = comment.rating + ' point' +
+ (comment.rating == 1 ? '' : 's');
+ // Create a div for this comment.
+ var context = $.extend({}, opts, comment);
+ var div = $(renderTemplate(commentTemplate, context));
+
+ // If the user has voted on this comment, highlight the correct arrow.
+ if (comment.vote) {
+ var direction = (comment.vote == 1) ? 'u' : 'd';
+ div.find('#' + direction + 'v' + comment.id).hide();
+ div.find('#' + direction + 'u' + comment.id).show();
+ }
+
+ if (comment.text != '[deleted]') {
+ div.find('a.reply').show();
+ if (comment.proposal_diff) {
+ div.find('#sp' + comment.id).show();
+ }
+ if (opts.moderator && !comment.displayed) {
+ div.find('#cm' + comment.id).show();
+ }
+ if (opts.moderator || (opts.username == comment.username)) {
+ div.find('#dc' + comment.id).show();
+ }
+ }
+ return div;
+ }
+
+ /*
+ A simple template renderer. Placeholders such as <%id%> are replaced
+ by context['id'] with items being escaped. Placeholders such as <#id#>
+ are not escaped.
+ */
+ function renderTemplate(template, context) {
+ var esc = $(document.createElement('div'));
+
+ function handle(ph, escape) {
+ var cur = context;
+ $.each(ph.split('.'), function() {
+ cur = cur[this];
+ });
+ return escape ? esc.text(cur || "").html() : cur;
+ }
+
+ return template.replace(/<([%#])([\w\.]*)\1>/g, function(){
+ return handle(arguments[2], arguments[1] == '%' ? true : false);
+ });
+ }
+
+ function showError(message) {
+ $(document.createElement('div')).attr({'class': 'popup_error'})
+ .append($(document.createElement('h1')).text(message))
+ .appendTo('body')
+ .fadeIn("slow")
+ .delay(2000)
+ .fadeOut("slow");
+ }
+
+ /*
+ Add a link the user uses to open the comments popup.
+ */
+ $.fn.comment = function() {
+ return this.each(function() {
+ var id = $(this).attr('id').substring(1);
+ var count = COMMENT_METADATA[id];
+ var title = count + ' comment' + (count == 1 ? '' : 's');
+ var image = count > 0 ? opts.commentBrightImage : opts.commentImage;
+ $(this)
+ .append(
+ $(document.createElement('a')).attr({
+ href: '#',
+ 'class': 'sphinx_comment',
+ id: 'ao' + id
+ })
+ .append($(document.createElement('img')).attr({
+ src: image,
+ alt: 'comment',
+ title: title
+ }))
+ .click(function(event) {
+ event.preventDefault();
+ show($(this).attr('id').substring(2));
+ })
+ )
+ .append(
+ $(document.createElement('a')).attr({
+ href: '#',
+ 'class': 'sphinx_comment_close hidden',
+ id: 'ah' + id
+ })
+ .append($(document.createElement('img')).attr({
+ src: opts.closeCommentImage,
+ alt: 'close',
+ title: 'close'
+ }))
+ .click(function(event) {
+ event.preventDefault();
+ hide($(this).attr('id').substring(2));
+ })
+ );
+ });
+ };
+
+ var opts = jQuery.extend({
+ processVoteURL: '/process_vote',
+ addCommentURL: '/add_comment',
+ getCommentsURL: '/get_comments',
+ acceptCommentURL: '/accept_comment',
+ rejectCommentURL: '/reject_comment',
+ deleteCommentURL: '/delete_comment',
+ commentImage: '/static/_static/comment.png',
+ closeCommentImage: '/static/_static/comment-close.png',
+ loadingImage: '/static/_static/ajax-loader.gif',
+ commentBrightImage: '/static/_static/comment-bright.png',
+ upArrow: '/static/_static/up.png',
+ downArrow: '/static/_static/down.png',
+ upArrowPressed: '/static/_static/up-pressed.png',
+ downArrowPressed: '/static/_static/down-pressed.png',
+ voting: false,
+ moderator: false
+ }, COMMENT_OPTIONS);
+
+ var replyTemplate = '\
+ <li>\
+ <div class="reply_div" id="rd<%id%>">\
+ <form id="rf<%id%>">\
+ <textarea name="comment" cols="80"></textarea>\
+ <input type="submit" value="add reply" />\
+ <input type="hidden" name="parent" value="<%id%>" />\
+ <input type="hidden" name="node" value="" />\
+ </form>\
+ </div>\
+ </li>';
+
+ var commentTemplate = '\
+ <div id="cd<%id%>" class="spxcdiv">\
+ <div class="vote">\
+ <div class="arrow">\
+ <a href="#" id="uv<%id%>" class="vote">\
+ <img src="<%upArrow%>" />\
+ </a>\
+ <a href="#" id="uu<%id%>" class="un vote">\
+ <img src="<%upArrowPressed%>" />\
+ </a>\
+ </div>\
+ <div class="arrow">\
+ <a href="#" id="dv<%id%>" class="vote">\
+ <img src="<%downArrow%>" id="da<%id%>" />\
+ </a>\
+ <a href="#" id="du<%id%>" class="un vote">\
+ <img src="<%downArrowPressed%>" />\
+ </a>\
+ </div>\
+ </div>\
+ <div class="comment_content">\
+ <p class="tagline comment">\
+ <span class="user_id"><%username%></span>\
+ <span class="rating"><%pretty_rating%></span>\
+ <span class="delta"><%time.delta%></span>\
+ </p>\
+ <p class="comment_text comment"><%text%></p>\
+ <p class="comment_opts comment">\
+ <a href="#" class="reply hidden" id="rl<%id%>">reply &#9657;</a>\
+ <a href="#" class="close_reply" id="cr<%id%>">reply &#9663;</a>\
+ <a href="#" id="sp<%id%>" class="show_proposal">\
+ proposal &#9657;\
+ </a>\
+ <a href="#" id="hp<%id%>" class="hide_proposal">\
+ proposal &#9663;\
+ </a>\
+ <a href="#" id="dc<%id%>" class="delete_comment hidden">\
+ delete\
+ </a>\
+ <span id="cm<%id%>" class="moderation hidden">\
+ <a href="#" id="ac<%id%>" class="accept_comment">accept</a>\
+ <a href="#" id="rc<%id%>" class="reject_comment">reject</a>\
+ </span>\
+ </p>\
+ <pre class="proposal" id="pr<%id%>">\
+<#proposal_diff#>\
+ </pre>\
+ <ul class="children" id="cl<%id%>"></ul>\
+ </div>\
+ <div class="clearleft"></div>\
+ </div>\
+ </div>';
+
+ var popupTemplate = '\
+ <div class="sphinx_comments" id="sc<%id%>">\
+ <h1>Comments</h1>\
+ <form method="post" id="cf<%id%>" class="comment_form" action="/docs/add_comment">\
+ <textarea name="comment" cols="80"></textarea>\
+ <p class="propose_button">\
+ <a href="#" id="pc<%id%>" class="show_propose_change">\
+ Propose a change &#9657;\
+ </a>\
+ <a href="#" id="hc<%id%>" class="hide_propose_change">\
+ Propose a change &#9663;\
+ </a>\
+ </p>\
+ <textarea name="proposal" id="pt<%id%>" cols="80" spellcheck="false"></textarea>\
+ <input type="submit" value="add comment" />\
+ <input type="hidden" name="node" value="<%id%>" />\
+ <input type="hidden" name="parent" value="" />\
+ <p class="sort_options">\
+ Sort by:\
+ <a href="#" class="sort_option rating">top</a>\
+ <a href="#" class="sort_option ascage">newest</a>\
+ <a href="#" class="sort_option age">oldest</a>\
+ </p>\
+ </form>\
+ <h3 id="cn<%id%>">loading comments... <img src="<%loadingImage%>" alt="" /></h3>\
+ <ul id="cl<%id%>" class="comment_ul"></ul>\
+ </div>';
+
+ $(document).ready(function() {
+ init();
+ });
+})(jQuery);
+
+$(document).ready(function() {
+ $('.spxcmt').comment();
+
+ /** Highlight search words in search results. */
+ $("div.context").each(function() {
+ var params = $.getQueryParameters();
+ var terms = (params.q) ? params.q[0].split(/\s+/) : [];
+ var result = $(this);
+ $.each(terms, function() {
+ result.highlightText(this.toLowerCase(), 'highlighted');
+ });
+ });
+});
diff --git a/sphinx/theming.py b/sphinx/theming.py
index 0d0f2863..92e63f31 100644
--- a/sphinx/theming.py
+++ b/sphinx/theming.py
@@ -98,8 +98,7 @@ class Theme(object):
self.base = Theme(inherit)
def get_confstr(self, section, name, default=NODEFAULT):
- """
- Return the value for a theme configuration setting, searching the
+ """Return the value for a theme configuration setting, searching the
base theme chain.
"""
try:
@@ -114,9 +113,7 @@ class Theme(object):
return default
def get_options(self, overrides):
- """
- Return a dictionary of theme options and their values.
- """
+ """Return a dictionary of theme options and their values."""
chain = [self.themeconf]
base = self.base
while base is not None:
@@ -135,8 +132,7 @@ class Theme(object):
return options
def get_dirchain(self):
- """
- Return a list of theme directories, beginning with this theme's,
+ """Return a list of theme directories, beginning with this theme's,
then the base theme's, then that one's base theme's, etc.
"""
chain = [self.themedir]
@@ -147,9 +143,7 @@ class Theme(object):
return chain
def cleanup(self):
- """
- Remove temporary directories.
- """
+ """Remove temporary directories."""
if self.themedir_created:
try:
shutil.rmtree(self.themedir)
diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py
index 8d1298cd..a3d30d9d 100644
--- a/sphinx/util/__init__.py
+++ b/sphinx/util/__init__.py
@@ -18,6 +18,8 @@ import tempfile
import posixpath
import traceback
from os import path
+from codecs import open
+from collections import deque
import docutils
from docutils.utils import relative_path
@@ -48,8 +50,7 @@ def docname_join(basedocname, docname):
def get_matching_files(dirname, exclude_matchers=()):
- """
- Get all file names in a directory, recursively.
+ """Get all file names in a directory, recursively.
Exclude files and dirs matching some matcher in *exclude_matchers*.
"""
@@ -75,9 +76,8 @@ def get_matching_files(dirname, exclude_matchers=()):
def get_matching_docs(dirname, suffix, exclude_matchers=()):
- """
- Get all file names (without suffix) matching a suffix in a
- directory, recursively.
+ """Get all file names (without suffix) matching a suffix in a directory,
+ recursively.
Exclude files and dirs matching a pattern in *exclude_patterns*.
"""
@@ -140,8 +140,8 @@ def copy_static_entry(source, targetdir, builder, context={},
target = path.join(targetdir, path.basename(source))
if source.lower().endswith('_t') and builder.templates:
# templated!
- fsrc = open(source, 'rb')
- fdst = open(target[:-2], 'wb')
+ fsrc = open(source, 'r', encoding='utf-8')
+ fdst = open(target[:-2], 'w', encoding='utf-8')
fdst.write(builder.templates.render_string(fsrc.read(), context))
fsrc.close()
fdst.close()
@@ -162,17 +162,21 @@ def copy_static_entry(source, targetdir, builder, context={},
shutil.copytree(source, target)
+_DEBUG_HEADER = '''\
+# Sphinx version: %s
+# Docutils version: %s %s
+# Jinja2 version: %s
+'''
+
def save_traceback():
- """
- Save the current exception's traceback in a temporary file.
- """
+ """Save the current exception's traceback in a temporary file."""
exc = traceback.format_exc()
fd, path = tempfile.mkstemp('.log', 'sphinx-err-')
- os.write(fd, '# Sphinx version: %s\n' % sphinx.__version__)
- os.write(fd, '# Docutils version: %s %s\n' % (docutils.__version__,
- docutils.__version_details__))
- os.write(fd, '# Jinja2 version: %s\n' % jinja2.__version__)
- os.write(fd, exc)
+ os.write(fd, (_DEBUG_HEADER %
+ (sphinx.__version__,
+ docutils.__version__, docutils.__version_details__,
+ jinja2.__version__)).encode('utf-8'))
+ os.write(fd, exc.encode('utf-8'))
os.close(fd)
return path
@@ -225,8 +229,7 @@ class Tee(object):
def parselinenos(spec, total):
- """
- Parse a line number spec (such as "1,2,4-6") and return a list of
+ """Parse a line number spec (such as "1,2,4-6") and return a list of
wanted line numbers.
"""
items = list()
@@ -280,9 +283,7 @@ def rpartition(s, t):
def format_exception_cut_frames(x=1):
- """
- Format an exception with traceback, but only the last x frames.
- """
+ """Format an exception with traceback, but only the last x frames."""
typ, val, tb = sys.exc_info()
#res = ['Traceback (most recent call last):\n']
res = []
@@ -290,3 +291,34 @@ def format_exception_cut_frames(x=1):
res += tbres[-x:]
res += traceback.format_exception_only(typ, val)
return ''.join(res)
+
+
+class PeekableIterator(object):
+ """
+ An iterator which wraps any iterable and makes it possible to peek to see
+ what's the next item.
+ """
+ def __init__(self, iterable):
+ self.remaining = deque()
+ self._iterator = iter(iterable)
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ """Return the next item from the iterator."""
+ if self.remaining:
+ return self.remaining.popleft()
+ return self._iterator.next()
+
+ def push(self, item):
+ """Push the `item` on the internal stack, it will be returned on the
+ next :meth:`next` call.
+ """
+ self.remaining.append(item)
+
+ def peek(self):
+ """Return the next item without changing the state of the iterator."""
+ item = self.next()
+ self.push(item)
+ return item
diff --git a/sphinx/util/docstrings.py b/sphinx/util/docstrings.py
index 538af653..d1a2ff8d 100644
--- a/sphinx/util/docstrings.py
+++ b/sphinx/util/docstrings.py
@@ -13,11 +13,11 @@ import sys
def prepare_docstring(s):
- """
- Convert a docstring into lines of parseable reST. Return it as a list of
- lines usable for inserting into a docutils ViewList (used as argument
- of nested_parse().) An empty line is added to act as a separator between
- this docstring and following content.
+ """Convert a docstring into lines of parseable reST.
+
+ Return it as a list of lines usable for inserting into a docutils ViewList
+ (used as argument of nested_parse().) An empty line is added to act as a
+ separator between this docstring and following content.
"""
lines = s.expandtabs().splitlines()
# Find minimum indentation of any non-blank lines after first line.
@@ -42,9 +42,8 @@ def prepare_docstring(s):
def prepare_commentdoc(s):
- """
- Extract documentation comment lines (starting with #:) and return them as a
- list of lines. Returns an empty list if there is no documentation.
+ """Extract documentation comment lines (starting with #:) and return them
+ as a list of lines. Returns an empty list if there is no documentation.
"""
result = []
lines = [line.strip() for line in s.expandtabs().splitlines()]
diff --git a/sphinx/util/jsonimpl.py b/sphinx/util/jsonimpl.py
index fda85b5e..f654ef22 100644
--- a/sphinx/util/jsonimpl.py
+++ b/sphinx/util/jsonimpl.py
@@ -13,7 +13,7 @@ import UserString
try:
import json
- # json-py's json module has not JSONEncoder; this will raise AttributeError
+ # json-py's json module has no JSONEncoder; this will raise AttributeError
# if json-py is imported instead of the built-in json module
JSONEncoder = json.JSONEncoder
except (ImportError, AttributeError):
diff --git a/sphinx/util/matching.py b/sphinx/util/matching.py
index c459aca2..3746c87c 100644
--- a/sphinx/util/matching.py
+++ b/sphinx/util/matching.py
@@ -13,8 +13,7 @@ import re
def _translate_pattern(pat):
- """
- Translate a shell-style glob pattern to a regular expression.
+ """Translate a shell-style glob pattern to a regular expression.
Adapted from the fnmatch module, but enhanced so that single stars don't
match slashes.
@@ -65,16 +64,14 @@ def compile_matchers(patterns):
_pat_cache = {}
def patmatch(name, pat):
- """
- Return if name matches pat. Adapted from fnmatch module.
- """
+ """Return if name matches pat. Adapted from fnmatch module."""
if pat not in _pat_cache:
_pat_cache[pat] = re.compile(_translate_pattern(pat))
return _pat_cache[pat].match(name)
def patfilter(names, pat):
- """
- Return the subset of the list NAMES that match PAT.
+ """Return the subset of the list NAMES that match PAT.
+
Adapted from fnmatch module.
"""
if pat not in _pat_cache:
diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py
index 97b58569..adce565c 100644
--- a/sphinx/util/nodes.py
+++ b/sphinx/util/nodes.py
@@ -10,11 +10,12 @@
"""
import re
-import types
from docutils import nodes
from sphinx import addnodes
+from sphinx.locale import pairindextypes
+from sphinx.util.pycompat import class_types
# \x00 means the "<" was backslash-escaped
@@ -22,7 +23,28 @@ explicit_title_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)
caption_ref_re = explicit_title_re # b/w compat alias
+def extract_messages(doctree):
+ """Extract translatable messages from a document tree."""
+ for node in doctree.traverse(nodes.TextElement):
+ if isinstance(node, (nodes.Invisible, nodes.Inline)):
+ continue
+ # <field_name>orphan</field_name>
+ # XXX ignore all metadata (== docinfo)
+ if isinstance(node, nodes.field_name) and node.children[0] == 'orphan':
+ continue
+ msg = node.rawsource.replace('\n', ' ').strip()
+ # XXX nodes rendering empty are likely a bug in sphinx.addnodes
+ if msg:
+ yield node, msg
+
+
def nested_parse_with_titles(state, content, node):
+ """Version of state.nested_parse() that allows titles and does not require
+ titles to have the same decoration as the calling document.
+
+ This is useful when the parsed content comes from a completely different
+ context, such as docstrings.
+ """
# hack around title style bookkeeping
surrounding_title_styles = state.memo.title_styles
surrounding_section_level = state.memo.section_level
@@ -51,6 +73,37 @@ def split_explicit_title(text):
return False, text, text
+indextypes = [
+ 'single', 'pair', 'double', 'triple',
+]
+
+def process_index_entry(entry, targetid):
+ indexentries = []
+ entry = entry.strip()
+ for type in pairindextypes:
+ if entry.startswith(type+':'):
+ value = entry[len(type)+1:].strip()
+ value = pairindextypes[type] + '; ' + value
+ indexentries.append(('pair', value, targetid, value))
+ break
+ else:
+ for type in indextypes:
+ if entry.startswith(type+':'):
+ value = entry[len(type)+1:].strip()
+ if type == 'double':
+ type = 'pair'
+ indexentries.append((type, value, targetid, value))
+ break
+ # shorthand notation for single entries
+ else:
+ for value in entry.split(','):
+ value = value.strip()
+ if not value:
+ continue
+ indexentries.append(('single', value, targetid, value))
+ return indexentries
+
+
def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc):
"""Inline all toctrees in the *tree*.
@@ -115,7 +168,7 @@ def _new_traverse(self, condition=None,
if include_self and descend and not siblings and not ascend:
if condition is None:
return self._all_traverse([])
- elif isinstance(condition, (types.ClassType, type)):
+ elif isinstance(condition, class_types):
return self._fast_traverse(condition, [])
return self._old_traverse(condition, include_self,
descend, siblings, ascend)
diff --git a/sphinx/util/osutil.py b/sphinx/util/osutil.py
index beab38cb..464e56ed 100644
--- a/sphinx/util/osutil.py
+++ b/sphinx/util/osutil.py
@@ -11,6 +11,7 @@
import os
import re
+import sys
import time
import errno
import shutil
@@ -58,8 +59,8 @@ def ensuredir(path):
def walk(top, topdown=True, followlinks=False):
- """
- Backport of os.walk from 2.6, where the followlinks argument was added.
+ """Backport of os.walk from 2.6, where the *followlinks* argument was
+ added.
"""
names = os.listdir(top)
@@ -124,7 +125,10 @@ no_fn_re = re.compile(r'[^a-zA-Z0-9_-]')
def make_filename(string):
return no_fn_re.sub('', string)
-
-def ustrftime(format, *args):
- # strftime for unicode strings
- return time.strftime(unicode(format).encode('utf-8'), *args).decode('utf-8')
+if sys.version_info < (3, 0):
+ def ustrftime(format, *args):
+ # strftime for unicode strings
+ return time.strftime(unicode(format).encode('utf-8'), *args) \
+ .decode('utf-8')
+else:
+ ustrftime = time.strftime
diff --git a/sphinx/util/png.py b/sphinx/util/png.py
index 2cb2aa9d..59c32715 100644
--- a/sphinx/util/png.py
+++ b/sphinx/util/png.py
@@ -12,18 +12,18 @@
import struct
import binascii
+from sphinx.util.pycompat import b
+
LEN_IEND = 12
LEN_DEPTH = 22
DEPTH_CHUNK_LEN = struct.pack('!i', 10)
-DEPTH_CHUNK_START = 'tEXtDepth\x00'
-IEND_CHUNK = '\x00\x00\x00\x00IEND\xAE\x42\x60\x82'
+DEPTH_CHUNK_START = b('tEXtDepth\x00')
+IEND_CHUNK = b('\x00\x00\x00\x00IEND\xAE\x42\x60\x82')
def read_png_depth(filename):
- """
- Read the special tEXt chunk indicating the depth from a PNG file.
- """
+ """Read the special tEXt chunk indicating the depth from a PNG file."""
result = None
f = open(filename, 'rb')
try:
@@ -39,8 +39,8 @@ def read_png_depth(filename):
def write_png_depth(filename, depth):
- """
- Write the special tEXt chunk indicating the depth to a PNG file.
+ """Write the special tEXt chunk indicating the depth to a PNG file.
+
The chunk is placed immediately before the special IEND chunk.
"""
data = struct.pack('!i', depth)
diff --git a/sphinx/util/pycompat.py b/sphinx/util/pycompat.py
index bdd9507d..319312a7 100644
--- a/sphinx/util/pycompat.py
+++ b/sphinx/util/pycompat.py
@@ -13,11 +13,101 @@ import sys
import codecs
import encodings
-
-try:
+# ------------------------------------------------------------------------------
+# Python 2/3 compatibility
+
+if sys.version_info >= (3, 0):
+ # Python 3
+ class_types = (type,)
+ # the ubiquitous "bytes" helper functions
+ def b(s):
+ return s.encode('utf-8')
+ bytes = bytes
+ # support for running 2to3 over config files
+ def convert_with_2to3(filepath):
+ from lib2to3.refactor import RefactoringTool, get_fixers_from_package
+ from lib2to3.pgen2.parse import ParseError
+ fixers = get_fixers_from_package('lib2to3.fixes')
+ refactoring_tool = RefactoringTool(fixers)
+ source = refactoring_tool._read_python_source(filepath)[0]
+ try:
+ tree = refactoring_tool.refactor_string(source, 'conf.py')
+ except ParseError, err:
+ # do not propagate lib2to3 exceptions
+ lineno, offset = err.context[1]
+ # try to match ParseError details with SyntaxError details
+ raise SyntaxError(err.msg, (filepath, lineno, offset, err.value))
+ return unicode(tree)
+
+else:
+ # Python 2
+ from types import ClassType
+ class_types = (type, ClassType)
+ b = str
+ bytes = str
+ # no need to refactor on 2.x versions
+ convert_with_2to3 = None
+
+
+# ------------------------------------------------------------------------------
+# Missing builtins and itertools in Python < 2.6
+
+if sys.version_info >= (2, 6):
+ # Python >= 2.6
+ next = next
+
+ from itertools import product
+ try:
+ from itertools import zip_longest # Python 3 name
+ except ImportError:
+ from itertools import izip_longest as zip_longest
+
+else:
+ # Python < 2.6
+ from itertools import izip, repeat, chain
+
+ # this is on Python 2, where the method is called "next" (it is refactored
+ # to __next__ by 2to3, but in that case never executed)
+ def next(iterator):
+ return iterator.next()
+
+ # These replacement functions have been taken from the Python 2.6
+ # itertools documentation.
+ def product(*args, **kwargs):
+ pools = map(tuple, args) * kwargs.get('repeat', 1)
+ result = [[]]
+ for pool in pools:
+ result = [x + [y] for x in result for y in pool]
+ for prod in result:
+ yield tuple(prod)
+
+ def zip_longest(*args, **kwds):
+ # zip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-
+ fillvalue = kwds.get('fillvalue')
+ def sentinel(counter = ([fillvalue]*(len(args)-1)).pop):
+ yield counter() # yields the fillvalue, or raises IndexError
+ fillers = repeat(fillvalue)
+ iters = [chain(it, sentinel(), fillers) for it in args]
+ try:
+ for tup in izip(*iters):
+ yield tup
+ except IndexError:
+ pass
+
+
+# ------------------------------------------------------------------------------
+# Missing builtins and codecs in Python < 2.5
+
+if sys.version_info >= (2, 5):
+ # Python >= 2.5
+ base_exception = BaseException
any = any
all = all
-except NameError:
+
+else:
+ # Python 2.4
+ base_exception = Exception
+
def all(gen):
for i in gen:
if not i:
@@ -30,8 +120,6 @@ except NameError:
return True
return False
-
-if sys.version_info < (2, 5):
# Python 2.4 doesn't know the utf-8-sig encoding, so deliver it here
def my_search_function(encoding):
diff --git a/sphinx/util/websupport.py b/sphinx/util/websupport.py
new file mode 100644
index 00000000..510ecbe0
--- /dev/null
+++ b/sphinx/util/websupport.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.util.websupport
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+def is_commentable(node):
+ return node.__class__.__name__ in ('paragraph', 'literal_block')
diff --git a/sphinx/versioning.py b/sphinx/versioning.py
new file mode 100644
index 00000000..5b0b2127
--- /dev/null
+++ b/sphinx/versioning.py
@@ -0,0 +1,128 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.versioning
+ ~~~~~~~~~~~~~~~~~
+
+ Implements the low-level algorithms Sphinx uses for the versioning of
+ doctrees.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+from uuid import uuid4
+from operator import itemgetter
+from collections import defaultdict
+
+from sphinx.util.pycompat import product, zip_longest
+
+
+# anything below that ratio is considered equal/changed
+VERSIONING_RATIO = 65
+
+
+def add_uids(doctree, condition):
+ """Add a unique id to every node in the `doctree` which matches the
+ condition and yield the nodes.
+
+ :param doctree:
+ A :class:`docutils.nodes.document` instance.
+
+ :param condition:
+ A callable which returns either ``True`` or ``False`` for a given node.
+ """
+ for node in doctree.traverse(condition):
+ node.uid = uuid4().hex
+ yield node
+
+
+def merge_doctrees(old, new, condition):
+ """Merge the `old` doctree with the `new` one while looking at nodes
+ matching the `condition`.
+
+ Each node which replaces another one or has been added to the `new` doctree
+ will be yielded.
+
+ :param condition:
+ A callable which returns either ``True`` or ``False`` for a given node.
+ """
+ old_iter = old.traverse(condition)
+ new_iter = new.traverse(condition)
+ old_nodes = []
+ new_nodes = []
+ ratios = defaultdict(list)
+ seen = set()
+    # compare the nodes of each doctree in order
+ for old_node, new_node in zip_longest(old_iter, new_iter):
+ if old_node is None:
+ new_nodes.append(new_node)
+ continue
+ if new_node is None:
+ old_nodes.append(old_node)
+ continue
+ ratio = get_ratio(old_node.rawsource, new_node.rawsource)
+ if ratio == 0:
+ new_node.uid = old_node.uid
+ seen.add(new_node)
+ else:
+ ratios[old_node, new_node] = ratio
+ old_nodes.append(old_node)
+ new_nodes.append(new_node)
+ # calculate the ratios for each unequal pair of nodes, should we stumble
+ # on a pair which is equal we set the uid and add it to the seen ones
+ for old_node, new_node in product(old_nodes, new_nodes):
+ if new_node in seen or (old_node, new_node) in ratios:
+ continue
+ ratio = get_ratio(old_node.rawsource, new_node.rawsource)
+ if ratio == 0:
+ new_node.uid = old_node.uid
+ seen.add(new_node)
+ else:
+ ratios[old_node, new_node] = ratio
+ # choose the old node with the best ratio for each new node and set the uid
+ # as long as the ratio is under a certain value, in which case we consider
+ # them not changed but different
+ ratios = sorted(ratios.iteritems(), key=itemgetter(1))
+ for (old_node, new_node), ratio in ratios:
+ if new_node in seen:
+ continue
+ else:
+ seen.add(new_node)
+ if ratio < VERSIONING_RATIO:
+ new_node.uid = old_node.uid
+ else:
+ new_node.uid = uuid4().hex
+ yield new_node
+ # create new uuids for any new node we left out earlier, this happens
+ # if one or more nodes are simply added.
+ for new_node in set(new_nodes) - seen:
+ new_node.uid = uuid4().hex
+ yield new_node
+
+
+def get_ratio(old, new):
+    """Return a "similarity ratio" (in percent) representing the similarity
+    between the two strings; 0 means equal, larger values mean more different.
+    """
+ if not all([old, new]):
+ return VERSIONING_RATIO
+ return levenshtein_distance(old, new) / (len(old) / 100.0)
+
+
+def levenshtein_distance(a, b):
+ """Return the Levenshtein edit distance between two strings *a* and *b*."""
+ if a == b:
+ return 0
+ if len(a) < len(b):
+ a, b = b, a
+ if not a:
+ return len(b)
+ previous_row = xrange(len(b) + 1)
+ for i, column1 in enumerate(a):
+ current_row = [i + 1]
+ for j, column2 in enumerate(b):
+ insertions = previous_row[j + 1] + 1
+ deletions = current_row[j] + 1
+ substitutions = previous_row[j] + (column1 != column2)
+ current_row.append(min(insertions, deletions, substitutions))
+ previous_row = current_row
+ return previous_row[-1]
diff --git a/sphinx/websupport/__init__.py b/sphinx/websupport/__init__.py
new file mode 100644
index 00000000..30303132
--- /dev/null
+++ b/sphinx/websupport/__init__.py
@@ -0,0 +1,414 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport
+ ~~~~~~~~~~~~~~~~~
+
+ Base Module for web support functions.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import sys
+import cPickle as pickle
+import posixpath
+from os import path
+
+from jinja2 import Environment, FileSystemLoader
+
+from sphinx.application import Sphinx
+from sphinx.util.osutil import ensuredir
+from sphinx.util.jsonimpl import dumps as dump_json
+from sphinx.websupport.search import BaseSearch, SEARCH_ADAPTERS
+from sphinx.websupport.storage import StorageBackend
+from sphinx.websupport.errors import *
+
+
+class WebSupportApp(Sphinx):
+ def __init__(self, *args, **kwargs):
+ self.staticdir = kwargs.pop('staticdir', None)
+ self.builddir = kwargs.pop('builddir', None)
+ self.search = kwargs.pop('search', None)
+ self.storage = kwargs.pop('storage', None)
+ Sphinx.__init__(self, *args, **kwargs)
+
+
+class WebSupport(object):
+ """The main API class for the web support package. All interactions
+ with the web support package should occur through this class.
+ """
+ def __init__(self, srcdir='', builddir='', datadir='', search=None,
+ storage=None, status=sys.stdout, warning=sys.stderr,
+ moderation_callback=None, staticdir='static',
+ docroot=''):
+ self.srcdir = srcdir
+ self.builddir = builddir
+ self.outdir = path.join(builddir, 'data')
+ self.datadir = datadir or self.outdir
+ self.staticdir = staticdir.strip('/')
+ self.docroot = docroot.strip('/')
+ self.status = status
+ self.warning = warning
+ self.moderation_callback = moderation_callback
+
+ self._init_templating()
+ self._init_search(search)
+ self._init_storage(storage)
+
+ self._make_base_comment_options()
+
+ def _init_storage(self, storage):
+ if isinstance(storage, StorageBackend):
+ self.storage = storage
+ else:
+ # If a StorageBackend isn't provided, use the default
+ # SQLAlchemy backend.
+ from sphinx.websupport.storage.sqlalchemystorage \
+ import SQLAlchemyStorage
+ if not storage:
+ # no explicit DB path given; create default sqlite database
+ db_path = path.join(self.datadir, 'db', 'websupport.db')
+ ensuredir(path.dirname(db_path))
+ storage = 'sqlite:///' + db_path
+ self.storage = SQLAlchemyStorage(storage)
+
+ def _init_templating(self):
+ import sphinx
+ template_path = path.join(path.dirname(sphinx.__file__),
+ 'themes', 'basic')
+ loader = FileSystemLoader(template_path)
+ self.template_env = Environment(loader=loader)
+
+ def _init_search(self, search):
+ if isinstance(search, BaseSearch):
+ self.search = search
+ else:
+ mod, cls = SEARCH_ADAPTERS[search or 'null']
+ mod = 'sphinx.websupport.search.' + mod
+ SearchClass = getattr(__import__(mod, None, None, [cls]), cls)
+ search_path = path.join(self.datadir, 'search')
+ self.search = SearchClass(search_path)
+ self.results_template = \
+ self.template_env.get_template('searchresults.html')
+
+ def build(self):
+ """Build the documentation. Places the data into the `outdir`
+ directory. Use it like this::
+
+ support = WebSupport(srcdir, builddir, search='xapian')
+ support.build()
+
+ This will read reStructured text files from `srcdir`. Then it will
+ build the pickles and search index, placing them into `builddir`.
+ It will also save node data to the database.
+ """
+ if not self.srcdir:
+ raise SrcdirNotSpecifiedError( \
+ 'No srcdir associated with WebSupport object')
+ doctreedir = path.join(self.outdir, 'doctrees')
+ app = WebSupportApp(self.srcdir, self.srcdir,
+ self.outdir, doctreedir, 'websupport',
+ search=self.search, status=self.status,
+ warning=self.warning, storage=self.storage,
+ staticdir=self.staticdir, builddir=self.builddir)
+
+ self.storage.pre_build()
+ app.build()
+ self.storage.post_build()
+
+ def get_document(self, docname, username='', moderator=False):
+ """Load and return a document from a pickle. The document will
+ be a dict object which can be used to render a template::
+
+ support = WebSupport(datadir=datadir)
+ support.get_document('index', username, moderator)
+
+ In most cases `docname` will be taken from the request path and
+ passed directly to this function. In Flask, that would be something
+ like this::
+
+ @app.route('/<path:docname>')
+ def index(docname):
+ username = g.user.name if g.user else ''
+ moderator = g.user.moderator if g.user else False
+ try:
+ document = support.get_document(docname, username,
+ moderator)
+ except DocumentNotFoundError:
+ abort(404)
+ render_template('doc.html', document=document)
+
+ The document dict that is returned contains the following items
+ to be used during template rendering.
+
+ * **body**: The main body of the document as HTML
+ * **sidebar**: The sidebar of the document as HTML
+ * **relbar**: A div containing links to related documents
+ * **title**: The title of the document
+ * **css**: Links to css files used by Sphinx
+ * **js**: Javascript containing comment options
+
+ This raises :class:`~sphinx.websupport.errors.DocumentNotFoundError`
+ if a document matching `docname` is not found.
+
+ :param docname: the name of the document to load.
+ """
+ infilename = path.join(self.datadir, 'pickles', docname + '.fpickle')
+
+ try:
+ f = open(infilename, 'rb')
+ except IOError:
+ raise DocumentNotFoundError(
+ 'The document "%s" could not be found' % docname)
+
+ document = pickle.load(f)
+ comment_opts = self._make_comment_options(username, moderator)
+ comment_metadata = self.storage.get_metadata(docname, moderator)
+
+ document['js'] = '\n'.join([comment_opts,
+ self._make_metadata(comment_metadata),
+ document['js']])
+ return document
+
+ def get_search_results(self, q):
+ """Perform a search for the query `q`, and create a set
+ of search results. Then render the search results as html and
+ return a context dict like the one created by
+ :meth:`get_document`::
+
+ document = support.get_search_results(q)
+
+ :param q: the search query
+ """
+ results = self.search.query(q)
+ ctx = {'search_performed': True,
+ 'search_results': results,
+ 'q': q}
+ document = self.get_document('search')
+ document['body'] = self.results_template.render(ctx)
+ document['title'] = 'Search Results'
+ return document
+
+ def get_data(self, node_id, username=None, moderator=False):
+ """Get the comments and source associated with `node_id`. If
+ `username` is given vote information will be included with the
+ returned comments. The default CommentBackend returns a dict with
+ two keys, *source*, and *comments*. *source* is raw source of the
+ node and is used as the starting point for proposals a user can
+ add. *comments* is a list of dicts that represent a comment, each
+ having the following items:
+
+ ============= ======================================================
+ Key Contents
+ ============= ======================================================
+ text The comment text.
+ username The username that was stored with the comment.
+ id The comment's unique identifier.
+ rating The comment's current rating.
+ age The time in seconds since the comment was added.
+ time A dict containing time information. It contains the
+ following keys: year, month, day, hour, minute, second,
+ iso, and delta. `iso` is the time formatted in ISO
+ 8601 format. `delta` is a printable form of how old
+ the comment is (e.g. "3 hours ago").
+        vote          If `username` was given, this will be an integer
+ representing the vote. 1 for an upvote, -1 for a
+ downvote, or 0 if unvoted.
+ node The id of the node that the comment is attached to.
+ If the comment's parent is another comment rather than
+ a node, this will be null.
+ parent The id of the comment that this comment is attached
+ to if it is not attached to a node.
+ children A list of all children, in this format.
+ proposal_diff An HTML representation of the differences between the
+ the current source and the user's proposed source.
+ ============= ======================================================
+
+ :param node_id: the id of the node to get comments for.
+ :param username: the username of the user viewing the comments.
+ :param moderator: whether the user is a moderator.
+ """
+ return self.storage.get_data(node_id, username, moderator)
+
+ def delete_comment(self, comment_id, username='', moderator=False):
+ """Delete a comment. Doesn't actually delete the comment, but
+ instead replaces the username and text files with "[deleted]" so
+ as not to leave any comments orphaned.
+
+ If `moderator` is True, the comment will always be deleted. If
+ `moderator` is False, the comment will only be deleted if the
+ `username` matches the `username` on the comment.
+
+ This raises :class:`~sphinx.websupport.errors.UserNotAuthorizedError`
+ if moderator is False and `username` doesn't match username on the
+ comment.
+
+ :param comment_id: the id of the comment to delete.
+ :param username: the username requesting the deletion.
+ :param moderator: whether the requestor is a moderator.
+ """
+ self.storage.delete_comment(comment_id, username, moderator)
+
+ def add_comment(self, text, node_id='', parent_id='', displayed=True,
+ username=None, time=None, proposal=None,
+ moderator=False):
+ """Add a comment to a node or another comment. Returns the comment
+ in the same format as :meth:`get_comments`. If the comment is being
+ attached to a node, pass in the node's id (as a string) with the
+ node keyword argument::
+
+ comment = support.add_comment(text, node_id=node_id)
+
+ If the comment is the child of another comment, provide the parent's
+ id (as a string) with the parent keyword argument::
+
+ comment = support.add_comment(text, parent_id=parent_id)
+
+ If you would like to store a username with the comment, pass
+ in the optional `username` keyword argument::
+
+ comment = support.add_comment(text, node=node_id,
+ username=username)
+
+ :param parent_id: the prefixed id of the comment's parent.
+ :param text: the text of the comment.
+ :param displayed: for moderation purposes
+ :param username: the username of the user making the comment.
+ :param time: the time the comment was created, defaults to now.
+ """
+ comment = self.storage.add_comment(text, displayed, username,
+ time, proposal, node_id,
+ parent_id, moderator)
+ if not displayed and self.moderation_callback:
+ self.moderation_callback(comment)
+ return comment
+
+ def process_vote(self, comment_id, username, value):
+ """Process a user's vote. The web support package relies
+ on the API user to perform authentication. The API user will
+ typically receive a comment_id and value from a form, and then
+ make sure the user is authenticated. A unique username must be
+ passed in, which will also be used to retrieve the user's past
+ voting data. An example, once again in Flask::
+
+ @app.route('/docs/process_vote', methods=['POST'])
+ def process_vote():
+ if g.user is None:
+ abort(401)
+ comment_id = request.form.get('comment_id')
+ value = request.form.get('value')
+ if value is None or comment_id is None:
+ abort(400)
+ support.process_vote(comment_id, g.user.name, value)
+ return "success"
+
+ :param comment_id: the comment being voted on
+ :param username: the unique username of the user voting
+ :param value: 1 for an upvote, -1 for a downvote, 0 for an unvote.
+ """
+ value = int(value)
+ if not -1 <= value <= 1:
+ raise ValueError('vote value %s out of range (-1, 1)' % value)
+ self.storage.process_vote(comment_id, username, value)
+
+ def update_username(self, old_username, new_username):
+ """To remain decoupled from a webapp's authentication system, the
+ web support package stores a user's username with each of their
+ comments and votes. If the authentication system allows a user to
+        change their username, this can lead to stale data in the web
+ support system. To avoid this, each time a username is changed, this
+ method should be called.
+
+ :param old_username: The original username.
+ :param new_username: The new username.
+ """
+ self.storage.update_username(old_username, new_username)
+
+ def accept_comment(self, comment_id, moderator=False):
+ """Accept a comment that is pending moderation.
+
+ This raises :class:`~sphinx.websupport.errors.UserNotAuthorizedError`
+ if moderator is False.
+
+ :param comment_id: The id of the comment that was accepted.
+ :param moderator: Whether the user making the request is a moderator.
+ """
+ if not moderator:
+ raise UserNotAuthorizedError()
+ self.storage.accept_comment(comment_id)
+
+ def reject_comment(self, comment_id, moderator=False):
+ """Reject a comment that is pending moderation.
+
+ This raises :class:`~sphinx.websupport.errors.UserNotAuthorizedError`
+ if moderator is False.
+
+        :param comment_id: The id of the comment to reject.
+ :param moderator: Whether the user making the request is a moderator.
+ """
+ if not moderator:
+ raise UserNotAuthorizedError()
+ self.storage.reject_comment(comment_id)
+
+ def _make_base_comment_options(self):
+ """Helper method to create the part of the COMMENT_OPTIONS javascript
+ that remains the same throughout the lifetime of the
+ :class:`~sphinx.websupport.WebSupport` object.
+ """
+ self.base_comment_opts = {}
+
+        if self.docroot != '':
+ comment_urls = [
+ ('addCommentURL', 'add_comment'),
+ ('getCommentsURL', 'get_comments'),
+ ('processVoteURL', 'process_vote'),
+ ('acceptCommentURL', 'accept_comment'),
+ ('rejectCommentURL', 'reject_comment'),
+ ('deleteCommentURL', 'delete_comment')
+ ]
+ for key, value in comment_urls:
+ self.base_comment_opts[key] = \
+ '/' + posixpath.join(self.docroot, value)
+ if self.staticdir != 'static':
+ static_urls = [
+ ('commentImage', 'comment.png'),
+ ('closeCommentImage', 'comment-close.png'),
+ ('loadingImage', 'ajax-loader.gif'),
+ ('commentBrightImage', 'comment-bright.png'),
+ ('upArrow', 'up.png'),
+ ('upArrowPressed', 'up-pressed.png'),
+ ('downArrow', 'down.png'),
+ ('downArrowPressed', 'down-pressed.png')
+ ]
+ for key, value in static_urls:
+ self.base_comment_opts[key] = \
+ '/' + posixpath.join(self.staticdir, '_static', value)
+
+ def _make_comment_options(self, username, moderator):
+ """Helper method to create the parts of the COMMENT_OPTIONS
+ javascript that are unique to each request.
+
+ :param username: The username of the user making the request.
+ :param moderator: Whether the user making the request is a moderator.
+ """
+ # XXX parts is not used?
+ #parts = [self.base_comment_opts]
+ rv = self.base_comment_opts.copy()
+ if username:
+ rv.update({
+ 'voting': True,
+ 'username': username,
+ 'moderator': moderator,
+ })
+ return '\n'.join([
+ '<script type="text/javascript">',
+ 'var COMMENT_OPTIONS = %s;' % dump_json(rv),
+ '</script>'
+ ])
+
+ def _make_metadata(self, data):
+ return '\n'.join([
+ '<script type="text/javascript">',
+ 'var COMMENT_METADATA = %s;' % dump_json(data),
+ '</script>'
+ ])
diff --git a/sphinx/websupport/errors.py b/sphinx/websupport/errors.py
new file mode 100644
index 00000000..53106dfb
--- /dev/null
+++ b/sphinx/websupport/errors.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.errors
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Contains Error classes for the web support package.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+__all__ = ['DocumentNotFoundError', 'SrcdirNotSpecifiedError',
+ 'UserNotAuthorizedError', 'CommentNotAllowedError',
+ 'NullSearchException']
+
+class DocumentNotFoundError(Exception):
+ pass
+
+
+class SrcdirNotSpecifiedError(Exception):
+ pass
+
+
+class UserNotAuthorizedError(Exception):
+ pass
+
+
+class CommentNotAllowedError(Exception):
+ pass
+
+
+class NullSearchException(Exception):
+ pass
diff --git a/sphinx/websupport/search/__init__.py b/sphinx/websupport/search/__init__.py
new file mode 100644
index 00000000..0cba0f77
--- /dev/null
+++ b/sphinx/websupport/search/__init__.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.search
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Server side search support for the web support package.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+
class BaseSearch(object):
    """Base class for the server side search adapters.  Concrete
    adapters implement :meth:`add_document` and :meth:`handle_query`;
    the remaining methods are shared plumbing.
    """

    def __init__(self, path):
        # `path` points at the adapter's index location; the base class
        # keeps no state of its own.
        pass

    def init_indexing(self, changed=()):
        """Called by the builder to initialize the search indexer. `changed`
        is a list of pagenames that will be reindexed. You may want to remove
        these from the search index before indexing begins.

        :param changed: an iterable of pagenames that will be re-indexed
        """
        # An immutable default avoids the shared-mutable-default pitfall.
        pass

    def finish_indexing(self):
        """Called by the builder when writing has been completed. Use this
        to perform any finalization or cleanup actions after indexing is
        complete.
        """
        pass

    def feed(self, pagename, title, doctree):
        """Called by the builder to add a doctree to the index. Converts the
        `doctree` to text and passes it to :meth:`add_document`. You probably
        won't want to override this unless you need access to the `doctree`.
        Override :meth:`add_document` instead.

        :param pagename: the name of the page to be indexed
        :param title: the title of the page to be indexed
        :param doctree: is the docutils doctree representation of the page
        """
        self.add_document(pagename, title, doctree.astext())

    def add_document(self, pagename, title, text):
        """Called by :meth:`feed` to add a document to the search index.
        This method should do everything necessary to add a single
        document to the search index.

        `pagename` is name of the page being indexed. It is the combination
        of the source files relative path and filename,
        minus the extension. For example, if the source file is
        "ext/builders.rst", the `pagename` would be "ext/builders". This
        will need to be returned with search results when processing a
        query.

        :param pagename: the name of the page being indexed
        :param title: the page's title
        :param text: the full text of the page
        """
        raise NotImplementedError()

    def query(self, q):
        """Called by the web support api to get search results. This method
        compiles the regular expression to be used when :meth:`extracting
        context <extract_context>`, then calls :meth:`handle_query`. You
        won't want to override this unless you don't want to use the included
        :meth:`extract_context` method. Override :meth:`handle_query` instead.

        :param q: the search query string.
        """
        # Escape each term so that regex metacharacters in the query
        # (e.g. "1+1") cannot break compilation or change matching.
        self.context_re = re.compile(
            '|'.join(re.escape(term) for term in q.split()), re.I)
        return self.handle_query(q)

    def handle_query(self, q):
        """Called by :meth:`query` to retrieve search results for a search
        query `q`. This should return an iterable containing tuples of the
        following format::

            (<path>, <title>, <context>)

        `path` and `title` are the same values that were passed to
        :meth:`add_document`, and `context` should be a short text snippet
        of the text surrounding the search query in the document.

        The :meth:`extract_context` method is provided as a simple way
        to create the `context`.

        :param q: the search query
        """
        raise NotImplementedError()

    def extract_context(self, text, length=240):
        """Extract the context for the search query from the document's
        full `text`.

        :param text: the full text of the document to create the context for
        :param length: the length of the context snippet to return.
        """
        res = self.context_re.search(text)
        if res is None:
            return ''
        # Center a `length`-character window on the first match; ``//``
        # keeps the arithmetic integral on both Python 2 and 3.
        context_start = max(res.start() - length // 2, 0)
        context_end = context_start + length
        context = ''.join(['...' if context_start > 0 else '',
                           text[context_start:context_end],
                           '...' if context_end < len(text) else ''])

        try:
            # Python 2: decode byte strings, dropping undecodable bytes.
            return unicode(context, errors='ignore')
        except (TypeError, NameError):
            # TypeError: `context` is already unicode.
            # NameError: Python 3, where str is already text.
            return context
+
# The built-in search adapters, keyed by adapter name; each value is the
# (module name, class name) pair locating the adapter class within this
# package.
SEARCH_ADAPTERS = {
    'xapian': ('xapiansearch', 'XapianSearch'),
    'whoosh': ('whooshsearch', 'WhooshSearch'),
    'null': ('nullsearch', 'NullSearch'),
}
diff --git a/sphinx/websupport/search/nullsearch.py b/sphinx/websupport/search/nullsearch.py
new file mode 100644
index 00000000..fd6d4dcf
--- /dev/null
+++ b/sphinx/websupport/search/nullsearch.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.search.nullsearch
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ The default search adapter, does nothing.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from sphinx.websupport.search import BaseSearch
+from sphinx.websupport.errors import NullSearchException
+
+
class NullSearch(BaseSearch):
    """Fallback search adapter used when no real adapter is configured.

    Indexing requests are silently ignored; any attempt to query raises
    :class:`~sphinx.websupport.errors.NullSearchException`.
    """

    def feed(self, pagename, title, doctree):
        # There is no index, so there is nothing to feed.
        pass

    def query(self, q):
        raise NullSearchException('No search adapter specified.')
diff --git a/sphinx/websupport/search/whooshsearch.py b/sphinx/websupport/search/whooshsearch.py
new file mode 100644
index 00000000..e58c7342
--- /dev/null
+++ b/sphinx/websupport/search/whooshsearch.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.search.whooshsearch
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Whoosh search adapter.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from whoosh import index
+from whoosh.fields import Schema, ID, TEXT
+from whoosh.qparser import QueryParser
+from whoosh.analysis import StemmingAnalyzer
+
+from sphinx.util.osutil import ensuredir
+from sphinx.websupport.search import BaseSearch
+
+
class WhooshSearch(BaseSearch):
    """The whoosh search adapter for sphinx web support."""

    # Define the Whoosh Schema for the search index.  `path` uniquely
    # identifies a page; `title` is boosted so title hits rank higher;
    # all three fields are stored so results can be rendered directly.
    schema = Schema(path=ID(stored=True, unique=True),
                    title=TEXT(field_boost=2.0, stored=True),
                    text=TEXT(analyzer=StemmingAnalyzer(), stored=True))

    def __init__(self, db_path):
        # Open the existing index at `db_path` if there is one,
        # otherwise create a fresh one with our schema.
        ensuredir(db_path)
        if index.exists_in(db_path):
            self.index = index.open_dir(db_path)
        else:
            self.index = index.create_in(db_path, schema=self.schema)
        self.qparser = QueryParser('text', self.schema)

    def init_indexing(self, changed=[]):
        # Purge pages that are about to be re-indexed, then open a
        # writer for the build.
        # NOTE(review): the mutable default [] is shared between calls;
        # harmless here since it is only iterated, but worth cleaning up.
        for changed_path in changed:
            self.index.delete_by_term('path', changed_path)
        self.index_writer = self.index.writer()

    def finish_indexing(self):
        # Commit everything written since init_indexing().
        self.index_writer.commit()

    def add_document(self, pagename, title, text):
        # NOTE(review): only `pagename` is coerced to unicode; whoosh
        # requires unicode field values -- confirm callers always pass
        # unicode `title` and `text`.
        self.index_writer.add_document(path=unicode(pagename),
                                       title=title,
                                       text=text)

    def handle_query(self, q):
        # NOTE(review): the searcher is never explicitly closed --
        # confirm it is safe to rely on garbage collection here.
        searcher = self.index.searcher()
        whoosh_results = searcher.search(self.qparser.parse(q))
        results = []
        for result in whoosh_results:
            # context_re was compiled by BaseSearch.query() before this
            # method was called.
            context = self.extract_context(result['text'])
            results.append((result['path'],
                            result.get('title', ''),
                            context))
        return results
diff --git a/sphinx/websupport/search/xapiansearch.py b/sphinx/websupport/search/xapiansearch.py
new file mode 100644
index 00000000..b0475435
--- /dev/null
+++ b/sphinx/websupport/search/xapiansearch.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.search.xapiansearch
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Xapian search adapter.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import xapian
+
+from sphinx.util.osutil import ensuredir
+from sphinx.websupport.search import BaseSearch
+
+
class XapianSearch(BaseSearch):
    """Xapian search adapter, wrapping an on-disk xapian database."""
    # Adapted from the GSOC 2009 webapp project.

    # Xapian metadata constants: value slots used on each document.
    DOC_PATH = 0
    DOC_TITLE = 1

    def __init__(self, db_path):
        # The database is opened lazily: writable in init_indexing(),
        # read-only in handle_query().
        self.db_path = db_path

    def init_indexing(self, changed=[]):
        # NOTE(review): unlike the whoosh adapter, `changed` is ignored
        # here; stale documents are instead replaced one by one in
        # add_document() -- confirm this is intended.
        ensuredir(self.db_path)
        self.database = xapian.WritableDatabase(self.db_path,
                                                xapian.DB_CREATE_OR_OPEN)
        self.indexer = xapian.TermGenerator()
        stemmer = xapian.Stem("english")
        self.indexer.set_stemmer(stemmer)

    def finish_indexing(self):
        # Ensure the db lock is removed.
        del self.database

    def add_document(self, path, title, text):
        self.database.begin_transaction()
        # sphinx_page_path is used to easily retrieve documents by path.
        sphinx_page_path = '"sphinxpagepath%s"' % path.replace('/', '_')
        # Delete the old document if it exists.
        self.database.delete_document(sphinx_page_path)

        doc = xapian.Document()
        doc.set_data(text)
        doc.add_value(self.DOC_PATH, path)
        doc.add_value(self.DOC_TITLE, title)
        self.indexer.set_document(doc)
        self.indexer.index_text(text)
        doc.add_term(sphinx_page_path)
        # NOTE(review): every word is posted at position 1, so phrase
        # queries cannot use real positional data -- confirm whether
        # that matters for the supported query syntax.
        for word in text.split():
            doc.add_posting(word, 1)
        self.database.add_document(doc)
        self.database.commit_transaction()

    def handle_query(self, q):
        # Open a fresh read-only connection so the query sees the data
        # committed by the indexing pass.
        database = xapian.Database(self.db_path)
        enquire = xapian.Enquire(database)
        qp = xapian.QueryParser()
        stemmer = xapian.Stem("english")
        qp.set_stemmer(stemmer)
        qp.set_database(database)
        qp.set_stemming_strategy(xapian.QueryParser.STEM_SOME)
        query = qp.parse_query(q)

        # Find the top 100 results for the query.
        enquire.set_query(query)
        matches = enquire.get_mset(0, 100)

        results = []

        for m in matches:
            # context_re was compiled by BaseSearch.query() beforehand.
            context = self.extract_context(m.document.get_data())
            results.append((m.document.get_value(self.DOC_PATH),
                            m.document.get_value(self.DOC_TITLE),
                            ''.join(context) ))

        return results
diff --git a/sphinx/websupport/storage/__init__.py b/sphinx/websupport/storage/__init__.py
new file mode 100644
index 00000000..3d8a9ab5
--- /dev/null
+++ b/sphinx/websupport/storage/__init__.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.storage
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Storage for the websupport package.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
class StorageBackend(object):
    """Abstract base class defining the interface a storage backend for
    the web support package must implement.  Methods without a sensible
    no-op default raise :exc:`NotImplementedError`.
    """
    def pre_build(self):
        """Called immediately before the build process begins. Use this
        to prepare the StorageBackend for the addition of nodes.
        """
        pass

    def has_node(self, id):
        """Check to see if a node exists.

        :param id: the id to check for.
        """
        raise NotImplementedError()

    def add_node(self, id, document, source):
        """Add a node to the StorageBackend.

        :param id: a unique id for the node.
        :param document: the name of the document the node belongs to.
        :param source: the source files name.
        """
        raise NotImplementedError()

    def post_build(self):
        """Called after a build has completed. Use this to finalize the
        addition of nodes if needed.
        """
        pass

    def add_comment(self, text, displayed, username, time,
                    proposal, node_id, parent_id, moderator):
        """Called when a comment is being added.

        :param text: the text of the comment
        :param displayed: whether the comment should be displayed
        :param username: the name of the user adding the comment
        :param time: a date object with the time the comment was added
        :param proposal: the text of the proposal the user made
        :param node_id: the id of the node that the comment is being added to
        :param parent_id: the id of the comment's parent comment.
        :param moderator: whether the user adding the comment is a moderator
        """
        raise NotImplementedError()

    def delete_comment(self, comment_id, username, moderator):
        """Delete a comment.

        Raises :class:`~sphinx.websupport.errors.UserNotAuthorizedError`
        if moderator is False and `username` doesn't match the username
        on the comment.

        :param comment_id: The id of the comment being deleted.
        :param username: The username of the user requesting the deletion.
        :param moderator: Whether the user is a moderator.
        """
        raise NotImplementedError()

    def get_metadata(self, docname, moderator):
        """Get metadata for a document. This is currently just a dict
        of node_id's with associated comment counts.

        :param docname: the name of the document to get metadata for.
        :param moderator: whether the requester is a moderator.
        """
        raise NotImplementedError()

    def get_data(self, node_id, username, moderator):
        """Called to retrieve all data for a node. This should return a
        dict with two keys, *source* and *comments* as described by
        :class:`~sphinx.websupport.WebSupport`'s
        :meth:`~sphinx.websupport.WebSupport.get_data` method.

        :param node_id: The id of the node to get data for.
        :param username: The name of the user requesting the data.
        :param moderator: Whether the requestor is a moderator.
        """
        raise NotImplementedError()

    def process_vote(self, comment_id, username, value):
        """Process a vote that is being cast. `value` will be either -1, 0,
        or 1.

        :param comment_id: The id of the comment being voted on.
        :param username: The username of the user casting the vote.
        :param value: The value of the vote being cast.
        """
        raise NotImplementedError()

    def update_username(self, old_username, new_username):
        """If a user is allowed to change their username this method should
        be called so that stale data is not left in the storage system.

        :param old_username: The username being changed.
        :param new_username: What the username is being changed to.
        """
        raise NotImplementedError()

    def accept_comment(self, comment_id):
        """Called when a moderator accepts a comment. After the method is
        called the comment should be displayed to all users.

        :param comment_id: The id of the comment being accepted.
        """
        raise NotImplementedError()

    def reject_comment(self, comment_id):
        """Called when a moderator rejects a comment. The comment should
        then be deleted.

        :param comment_id: The id of the comment being rejected.
        """
        raise NotImplementedError()
diff --git a/sphinx/websupport/storage/differ.py b/sphinx/websupport/storage/differ.py
new file mode 100644
index 00000000..d5225071
--- /dev/null
+++ b/sphinx/websupport/storage/differ.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.storage.differ
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    A differ for creating an HTML representation of proposal diffs
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+from cgi import escape
+from difflib import Differ
+
+
class CombinedHtmlDiff(object):
    """Create an HTML representation of the differences between two pieces
    of text.
    """
    # Matches the runs of +/-/^ markers on a Differ '?' hint line.
    highlight_regex = re.compile(r'([\+\-\^]+)')

    def make_html(self, source, proposal):
        """Return the HTML representation of the differences between
        `source` and `proposal`.

        :param source: the original text
        :param proposal: the proposed text
        """
        # Escape *both* inputs: escaping only the proposal would make
        # identical lines containing '&', '<' or '>' show up as spurious
        # changes, and would let `source` inject raw markup into the
        # generated HTML.
        source = escape(source)
        proposal = escape(proposal)

        differ = Differ()
        diff = list(differ.compare(source.splitlines(1),
                                   proposal.splitlines(1)))
        if not diff:
            # Both inputs empty.
            return ''
        html = []
        line = diff.pop(0)
        # Each line is handed over together with its successor, because
        # a '?' hint line describes intra-line changes of the line that
        # precedes it.  (The lookahead also makes single-line diffs safe,
        # which previously raised IndexError.)
        while diff:
            next = diff.pop(0)
            html.append(self._handle_line(line, next))
            line = next
        html.append(self._handle_line(line))
        return ''.join(html).rstrip()

    def _handle_line(self, line, next=None):
        """Handle an individual line in a diff, rendering added/removed
        lines as spans and applying intra-line highlighting when the
        following line is a '?' hint line.
        """
        prefix = line[0]
        text = line[2:]

        if prefix == ' ':
            # Unchanged line: emit as-is.
            return text
        elif prefix == '?':
            # Hint lines are consumed by _highlight_text, never emitted.
            return ''

        if next is not None and next[0] == '?':
            tag = 'ins' if prefix == '+' else 'del'
            text = self._highlight_text(text, next, tag)
        css_class = 'prop_added' if prefix == '+' else 'prop_removed'

        return '<span class="%s">%s</span>\n' % (css_class, text.rstrip())

    def _highlight_text(self, text, next, tag):
        """Highlight the specific changes made to a line by adding
        <ins> and <del> tags around the character ranges flagged in the
        '?' hint line `next`.
        """
        next = next[2:]
        new_text = []
        start = 0
        for match in self.highlight_regex.finditer(next):
            new_text.append(text[start:match.start()])
            new_text.append('<%s>' % tag)
            new_text.append(text[match.start():match.end()])
            new_text.append('</%s>' % tag)
            start = match.end()
        new_text.append(text[start:])
        return ''.join(new_text)
diff --git a/sphinx/websupport/storage/sqlalchemy_db.py b/sphinx/websupport/storage/sqlalchemy_db.py
new file mode 100644
index 00000000..4e2757a9
--- /dev/null
+++ b/sphinx/websupport/storage/sqlalchemy_db.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.storage.sqlalchemy_db
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ SQLAlchemy table and mapper definitions used by the
+ :class:`sphinx.websupport.storage.sqlalchemystorage.SQLAlchemyStorage`.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from datetime import datetime
+
+from sqlalchemy import Column, Integer, Text, String, Boolean, \
+ ForeignKey, DateTime
+from sqlalchemy.orm import relation, sessionmaker, aliased
+from sqlalchemy.ext.declarative import declarative_base
+
# Declarative base class and session factory shared by the model classes
# below; the storage backend binds them to an engine at runtime.
Base = declarative_base()
Session = sessionmaker()

# Prefix applied to all websupport table names to avoid clashing with the
# host application's own tables.
db_prefix = 'sphinx_'
+
+
class Node(Base):
    """Data about a Node in a doctree."""
    __tablename__ = db_prefix + 'nodes'

    # Opaque unique node id (assigned during the build).
    id = Column(String(32), primary_key=True)
    # Name of the document this node belongs to.
    document = Column(String(256), nullable=False)
    # The node's source text, used for building proposal diffs.
    source = Column(Text, nullable=False)

    def nested_comments(self, username, moderator):
        """Create a tree of comments. First get all comments that are
        descendents of this node, then convert them to a tree form.

        :param username: the name of the user to get comments for.
        :param moderator: whether the user is moderator.
        """
        session = Session()

        if username:
            # If a username is provided, create a subquery to retrieve all
            # votes by this user. We will outerjoin with the comment query
            # with this subquery so we have a user's voting information.
            sq = session.query(CommentVote).\
                filter(CommentVote.username == username).subquery()
            cvalias = aliased(CommentVote, sq)
            q = session.query(Comment, cvalias.value).outerjoin(cvalias)
        else:
            # If a username is not provided, we don't need to join with
            # CommentVote.
            q = session.query(Comment)

        # Filter out all comments not descending from this node.
        # NOTE(review): assumes node ids never contain '.' or SQL LIKE
        # wildcards -- confirm against the id generator.
        q = q.filter(Comment.path.like(str(self.id) + '.%'))

        # Non-moderators only see comments that passed moderation.
        if not moderator:
            q = q.filter(Comment.displayed == True)

        # Retrieve all results. Results must be ordered by Comment.path
        # so that we can easily transform them from a flat list to a tree.
        results = q.order_by(Comment.path).all()
        session.close()

        return self._nest_comments(results, username)

    def _nest_comments(self, results, username):
        """Given the flat list of results, convert the list into a
        tree.

        :param results: the flat list of comments
        :param username: the name of the user requesting the comments.
        """
        # `results` is ordered by materialized path, so every comment
        # appears after its ancestors.  `list_stack` holds the "children"
        # list for each tree level currently being filled.
        comments = []
        list_stack = [comments]
        for r in results:
            # Without a username the query returned bare Comment objects
            # (no vote value).
            comment, vote = r if username else (r, 0)

            # path is "<node id>.<comment ids>..."; dropping the node id
            # leaves one element per tree level.
            inheritance_chain = comment.path.split('.')[1:]

            if len(inheritance_chain) == len(list_stack) + 1:
                # One level deeper: descend into the last added comment.
                parent = list_stack[-1][-1]
                list_stack.append(parent['children'])
            elif len(inheritance_chain) < len(list_stack):
                # This comment is shallower: back up to its level.
                while len(inheritance_chain) < len(list_stack):
                    list_stack.pop()

            list_stack[-1].append(comment.serializable(vote=vote))

        return comments

    def __init__(self, id, document, source):
        self.id = id
        self.document = document
        self.source = source
+
+
class Comment(Base):
    """An individual Comment being stored."""
    __tablename__ = db_prefix + 'comments'

    id = Column(Integer, primary_key=True)
    # Sum of all votes cast on this comment.
    rating = Column(Integer, nullable=False)
    time = Column(DateTime, nullable=False)
    text = Column(Text, nullable=False)
    # Whether the comment passed moderation and is shown to everyone.
    displayed = Column(Boolean, index=True, default=False)
    username = Column(String(64))
    proposal = Column(Text)
    # HTML rendering of the source/proposal diff, created on insertion.
    proposal_diff = Column(Text)
    # Materialized path: "<node id>.<ancestor comment ids>...<id>"; lets
    # a node's whole comment tree be fetched with one LIKE query.
    path = Column(String(256), index=True)

    node_id = Column(String, ForeignKey(db_prefix + 'nodes.id'))
    node = relation(Node, backref="comments")

    def __init__(self, text, displayed, username, rating, time,
                 proposal, proposal_diff):
        self.text = text
        self.displayed = displayed
        self.username = username
        self.rating = rating
        self.time = time
        self.proposal = proposal
        self.proposal_diff = proposal_diff

    def set_path(self, node_id, parent_id):
        """Set the materialized path for this comment."""
        # This exists because the path can't be set until the session has
        # been flushed and this Comment has an id.
        if node_id:
            # Top-level comment: path is "<node id>.<id>".
            self.node_id = node_id
            self.path = '%s.%s' % (node_id, self.id)
        else:
            # Reply: extend the parent's path and inherit its node.
            session = Session()
            parent_path = session.query(Comment.path).\
                filter(Comment.id == parent_id).one().path
            session.close()
            self.node_id = parent_path.split('.')[0]
            self.path = '%s.%s' % (parent_path, self.id)

    def serializable(self, vote=0):
        """Creates a serializable representation of the comment. This is
        converted to JSON, and used on the client side.

        :param vote: the requesting user's vote on this comment, if any.
        """
        # Age of the comment, also rendered as a human readable delta.
        delta = datetime.now() - self.time

        time = {'year': self.time.year,
                'month': self.time.month,
                'day': self.time.day,
                'hour': self.time.hour,
                'minute': self.time.minute,
                'second': self.time.second,
                'iso': self.time.isoformat(),
                'delta': self.pretty_delta(delta)}

        # A 2-element path means a top-level comment (attached directly
        # to a node); anything longer is a reply to another comment.
        path = self.path.split('.')
        node = path[0] if len(path) == 2 else None
        parent = path[-2] if len(path) > 2 else None

        return {'text': self.text,
                'username': self.username or 'Anonymous',
                'id': self.id,
                'node': node,
                'parent': parent,
                'rating': self.rating,
                'displayed': self.displayed,
                'age': delta.seconds,
                'time': time,
                'vote': vote or 0,
                'proposal_diff': self.proposal_diff,
                'children': []}

    def pretty_delta(self, delta):
        """Create a pretty representation of the Comment's age.
        (e.g. 2 minutes).
        """
        # NOTE(review): relies on Python 2 integer division -- under
        # Python 3 `seconds / 3600` yields a float; use // if ported.
        days = delta.days
        seconds = delta.seconds
        hours = seconds / 3600
        minutes = seconds / 60

        if days == 0:
            dt = (minutes, 'minute') if hours == 0 else (hours, 'hour')
        else:
            dt = (days, 'day')

        return '%s %s ago' % dt if dt[0] == 1 else '%s %ss ago' % dt
+
+
class CommentVote(Base):
    """A vote a user has made on a Comment."""
    __tablename__ = db_prefix + 'commentvote'

    # Composite primary key (username, comment_id): at most one vote per
    # user and comment.
    username = Column(String(64), primary_key=True)
    comment_id = Column(Integer, ForeignKey(db_prefix + 'comments.id'),
                        primary_key=True)
    comment = relation(Comment, backref="votes")
    # -1 if downvoted, +1 if upvoted, 0 if voted then unvoted.
    value = Column(Integer, nullable=False)

    def __init__(self, comment_id, username, value):
        self.comment_id = comment_id
        self.username = username
        self.value = value
diff --git a/sphinx/websupport/storage/sqlalchemystorage.py b/sphinx/websupport/storage/sqlalchemystorage.py
new file mode 100644
index 00000000..6f13c91b
--- /dev/null
+++ b/sphinx/websupport/storage/sqlalchemystorage.py
@@ -0,0 +1,174 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.storage.sqlalchemystorage
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ An SQLAlchemy storage backend.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from datetime import datetime
+
+import sqlalchemy
+from sqlalchemy.orm import aliased
+from sqlalchemy.sql import func
+
+if sqlalchemy.__version__[:3] < '0.5':
+ raise ImportError('SQLAlchemy version 0.5 or greater is required for this '
+ 'storage backend; you have version %s' % sqlalchemy.__version__)
+
+from sphinx.websupport.errors import CommentNotAllowedError, \
+ UserNotAuthorizedError
+from sphinx.websupport.storage import StorageBackend
+from sphinx.websupport.storage.sqlalchemy_db import Base, Node, \
+ Comment, CommentVote, Session
+from sphinx.websupport.storage.differ import CombinedHtmlDiff
+
+
class SQLAlchemyStorage(StorageBackend):
    """
    A :class:`.StorageBackend` using SQLAlchemy.
    """

    def __init__(self, uri):
        """Create the engine for `uri`, make sure all tables exist, and
        bind the session factory to the engine.
        """
        self.engine = sqlalchemy.create_engine(uri)
        Base.metadata.bind = self.engine
        Base.metadata.create_all()
        Session.configure(bind=self.engine)

    def pre_build(self):
        # A single session spans the whole build; it is committed and
        # closed in post_build().
        self.build_session = Session()

    def has_node(self, id):
        session = Session()
        node = session.query(Node).filter(Node.id == id).first()
        session.close()
        return True if node else False

    def add_node(self, id, document, source):
        node = Node(id, document, source)
        self.build_session.add(node)
        self.build_session.flush()

    def post_build(self):
        self.build_session.commit()
        self.build_session.close()

    def add_comment(self, text, displayed, username, time,
                    proposal, node_id, parent_id, moderator):
        """Add a comment attached either to a node (`node_id`) or to a
        parent comment (`parent_id`) and return its serializable form.
        """
        session = Session()
        proposal_diff = None

        if node_id and proposal:
            # Render an HTML diff of the node's source vs. the proposal.
            node = session.query(Node).filter(Node.id == node_id).one()
            differ = CombinedHtmlDiff()
            proposal_diff = differ.make_html(node.source, proposal)
        elif parent_id:
            # Replies may only be attached to displayed comments.
            parent = session.query(Comment.displayed).\
                filter(Comment.id == parent_id).one()
            if not parent.displayed:
                raise CommentNotAllowedError(
                    "Can't add child to a parent that is not displayed")

        comment = Comment(text, displayed, username, 0,
                          time or datetime.now(), proposal, proposal_diff)
        session.add(comment)
        session.flush()
        # We have to flush the session before setting the path so the
        # Comment has an id.
        comment.set_path(node_id, parent_id)
        session.commit()
        d = comment.serializable()
        session.close()
        return d

    def delete_comment(self, comment_id, username, moderator):
        session = Session()
        comment = session.query(Comment).\
            filter(Comment.id == comment_id).one()
        if moderator or comment.username == username:
            # Blank the comment instead of deleting the row, so replies
            # keep a valid parent in the tree.
            comment.username = '[deleted]'
            comment.text = '[deleted]'
            comment.proposal = ''
            session.commit()
            session.close()
        else:
            session.close()
            raise UserNotAuthorizedError()

    def get_metadata(self, docname, moderator):
        """Return a dict mapping node id -> comment count for every node
        in `docname`.
        """
        session = Session()
        # Count the comments attached to each node.  (Selecting only
        # node_id keeps the GROUP BY valid under strict SQL modes.)
        subquery = session.query(
            Comment.node_id,
            func.count('*').label('comment_count')).group_by(
            Comment.node_id).subquery()
        # Outerjoin so that nodes without comments are included too.
        nodes = session.query(Node.id, subquery.c.comment_count).outerjoin(
            (subquery, Node.id == subquery.c.node_id)).filter(
            Node.document == docname)
        # Materialize the results *before* closing the session; this is
        # a read-only query, so no commit is needed.
        data = dict((k, v or 0) for k, v in nodes.all())
        session.close()
        return data

    def get_data(self, node_id, username, moderator):
        session = Session()
        node = session.query(Node).filter(Node.id == node_id).one()
        session.close()
        # `source` was loaded above; nested_comments() opens its own
        # session for the comment tree.
        comments = node.nested_comments(username, moderator)
        return {'source': node.source,
                'comments': comments}

    def process_vote(self, comment_id, username, value):
        session = Session()

        # Fetch the comment together with this user's existing vote on
        # it (if any) in a single query.
        subquery = session.query(CommentVote).filter(
            CommentVote.username == username).subquery()
        vote_alias = aliased(CommentVote, subquery)
        q = session.query(Comment, vote_alias).outerjoin(vote_alias).filter(
            Comment.id == comment_id)
        comment, vote = q.one()

        if vote is None:
            vote = CommentVote(comment_id, username, value)
            comment.rating += value
        else:
            # Adjust the rating by the difference from the prior vote.
            comment.rating += value - vote.value
            vote.value = value

        session.add(vote)
        session.commit()
        session.close()

    def update_username(self, old_username, new_username):
        """Rename a user in both the comment and the vote tables."""
        session = Session()

        session.query(Comment).filter(Comment.username == old_username).\
            update({Comment.username: new_username})
        session.query(CommentVote).\
            filter(CommentVote.username == old_username).\
            update({CommentVote.username: new_username})

        session.commit()
        session.close()

    def accept_comment(self, comment_id):
        session = Session()
        # Bulk update; the returned row count is not needed (this
        # resolves the old "assignment to comment needed?" XXX).
        session.query(Comment).filter(
            Comment.id == comment_id).update(
            {Comment.displayed: True})
        session.commit()
        session.close()

    def reject_comment(self, comment_id):
        session = Session()
        comment = session.query(Comment).\
            filter(Comment.id == comment_id).one()
        session.delete(comment)
        session.commit()
        session.close()
diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py
index f206e479..33d90c91 100644
--- a/sphinx/writers/html.py
+++ b/sphinx/writers/html.py
@@ -180,7 +180,7 @@ class HTMLTranslator(BaseTranslator):
atts['title'] = node['reftitle']
self.body.append(self.starttag(node, 'a', '', **atts))
- if node.hasattr('secnumber'):
+ if node.get('secnumber'):
self.body.append(('%s' + self.secnumber_suffix) %
'.'.join(map(str, node['secnumber'])))
@@ -202,14 +202,14 @@ class HTMLTranslator(BaseTranslator):
self.depart_admonition(node)
def add_secnumber(self, node):
- if node.hasattr('secnumber'):
+ if node.get('secnumber'):
self.body.append('.'.join(map(str, node['secnumber'])) +
self.secnumber_suffix)
elif isinstance(node.parent, nodes.section):
anchorname = '#' + node.parent['ids'][0]
if anchorname not in self.builder.secnumbers:
anchorname = '' # try first heading which has no anchor
- if anchorname in self.builder.secnumbers:
+ if self.builder.secnumbers.get(anchorname):
numbers = self.builder.secnumbers[anchorname]
self.body.append('.'.join(map(str, numbers)) +
self.secnumber_suffix)
diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py
index 75676f03..de57e35a 100644
--- a/sphinx/writers/latex.py
+++ b/sphinx/writers/latex.py
@@ -317,7 +317,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
# ... and all others are the appendices
self.body.append(u'\n\\appendix\n')
self.first_document = -1
- if node.has_key('docname'):
+ if 'docname' in node:
self.body.append(self.hypertarget(':doc'))
# "- 1" because the level is increased before the title is visited
self.sectionlevel = self.top_sectionlevel - 1
@@ -701,7 +701,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.table.rowcount += 1
def visit_entry(self, node):
- if node.has_key('morerows') or node.has_key('morecols'):
+ if 'morerows' in node or 'morecols' in node:
raise UnsupportedError('%s:%s: column or row spanning cells are '
'not yet implemented.' %
(self.curfilestack[-1], node.line or ''))
@@ -758,7 +758,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
def visit_term(self, node):
ctx = '}] \\leavevmode'
- if node.has_key('ids') and node['ids']:
+ if node.get('ids'):
ctx += self.hypertarget(node['ids'][0])
self.body.append('\\item[{')
self.context.append(ctx)
@@ -840,20 +840,20 @@ class LaTeXTranslator(nodes.NodeVisitor):
post = []
include_graphics_options = []
is_inline = self.is_inline(node)
- if attrs.has_key('scale'):
+ if 'scale' in attrs:
# Could also be done with ``scale`` option to
# ``\includegraphics``; doing it this way for consistency.
pre.append('\\scalebox{%f}{' % (attrs['scale'] / 100.0,))
post.append('}')
- if attrs.has_key('width'):
+ if 'width' in attrs:
w = self.latex_image_length(attrs['width'])
if w:
include_graphics_options.append('width=%s' % w)
- if attrs.has_key('height'):
+ if 'height' in attrs:
h = self.latex_image_length(attrs['height'])
if h:
include_graphics_options.append('height=%s' % h)
- if attrs.has_key('align'):
+ if 'align' in attrs:
align_prepost = {
# By default latex aligns the top of an image.
(1, 'top'): ('', ''),
@@ -898,13 +898,13 @@ class LaTeXTranslator(nodes.NodeVisitor):
for id in self.next_figure_ids:
ids += self.hypertarget(id, anchor=False)
self.next_figure_ids.clear()
- if node.has_key('width') and node.get('align', '') in ('left', 'right'):
+ if 'width' in node and node.get('align', '') in ('left', 'right'):
self.body.append('\\begin{wrapfigure}{%s}{%s}\n\\centering' %
(node['align'] == 'right' and 'r' or 'l',
node['width']))
self.context.append(ids + '\\end{wrapfigure}\n')
else:
- if (not node.attributes.has_key('align') or
+ if (not 'align' in node.attributes or
node.attributes['align'] == 'center'):
# centering does not add vertical space like center.
align = '\n\\centering'
@@ -1181,7 +1181,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.no_contractions -= 1
if self.in_title:
self.body.append(r'\texttt{%s}' % content)
- elif node.has_key('role') and node['role'] == 'samp':
+ elif node.get('role') == 'samp':
self.body.append(r'\samp{%s}' % content)
else:
self.body.append(r'\code{%s}' % content)
@@ -1210,10 +1210,10 @@ class LaTeXTranslator(nodes.NodeVisitor):
code = self.verbatim.rstrip('\n')
lang = self.hlsettingstack[-1][0]
linenos = code.count('\n') >= self.hlsettingstack[-1][1] - 1
- if node.has_key('language'):
+ if 'language' in node:
# code-block directives
lang = node['language']
- if node.has_key('linenos'):
+ if 'linenos' in node:
linenos = node['linenos']
hlcode = self.highlighter.highlight_block(code, lang, linenos)
# workaround for Unicode issue
diff --git a/sphinx/writers/text.py b/sphinx/writers/text.py
index 98528d5b..b28b2379 100644
--- a/sphinx/writers/text.py
+++ b/sphinx/writers/text.py
@@ -390,7 +390,7 @@ class TextTranslator(nodes.NodeVisitor):
self.add_text(''.join(out) + '\n')
def writerow(row):
- lines = map(None, *row)
+ lines = zip(*row)
for line in lines:
out = ['|']
for i, cell in enumerate(line):
diff --git a/sphinx/writers/websupport.py b/sphinx/writers/websupport.py
new file mode 100644
index 00000000..bb80fb7e
--- /dev/null
+++ b/sphinx/writers/websupport.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.writers.websupport
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ sphinx.websupport writer that adds comment-related annotations.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from sphinx.writers.html import HTMLTranslator
+from sphinx.util.websupport import is_commentable
+
+
+class WebSupportTranslator(HTMLTranslator):
+ """
+ Our custom HTML translator.
+ """
+
+ def __init__(self, builder, *args, **kwargs):
+ HTMLTranslator.__init__(self, builder, *args, **kwargs)
+ self.comment_class = 'spxcmt'
+
+ def dispatch_visit(self, node):
+ if is_commentable(node):
+ self.handle_visit_commentable(node)
+ HTMLTranslator.dispatch_visit(self, node)
+
+ def handle_visit_commentable(self, node):
+ # We will place the node in the HTML id attribute. If the node
+ # already has an id (for indexing purposes) put an empty
+ # span with the existing id directly before this node's HTML.
+ self.add_db_node(node)
+ if node.attributes['ids']:
+ self.body.append('<span id="%s"></span>'
+ % node.attributes['ids'][0])
+ node.attributes['ids'] = ['s%s' % node.uid]
+ node.attributes['classes'].append(self.comment_class)
+
+ def add_db_node(self, node):
+ storage = self.builder.app.storage
+ if not storage.has_node(node.uid):
+ storage.add_node(id=node.uid,
+ document=self.builder.cur_docname,
+ source=node.rawsource or node.astext())
diff --git a/tests/etree13/ElementTree.py b/tests/etree13/ElementTree.py
index d3732504..f459c7f8 100644
--- a/tests/etree13/ElementTree.py
+++ b/tests/etree13/ElementTree.py
@@ -1425,12 +1425,16 @@ class XMLParser(object):
err.position = value.lineno, value.offset
raise err
- def _fixtext(self, text):
- # convert text string to ascii, if possible
- try:
- return text.encode("ascii")
- except UnicodeError:
+ if sys.version_info >= (3, 0):
+ def _fixtext(self, text):
return text
+ else:
+ def _fixtext(self, text):
+ # convert text string to ascii, if possible
+ try:
+ return text.encode("ascii")
+ except UnicodeError:
+ return text
def _fixname(self, key):
# expand qname, and convert name string to ascii, if possible
diff --git a/tests/path.py b/tests/path.py
index ceb895f5..8e9afeaa 100644
--- a/tests/path.py
+++ b/tests/path.py
@@ -1,953 +1,196 @@
-""" path.py - An object representing a path to a file or directory.
-
-Example:
-
-from path import path
-d = path('/home/guido/bin')
-for f in d.files('*.py'):
- f.chmod(0755)
-
-This module requires Python 2.2 or later.
-
-
-URL: http://www.jorendorff.com/articles/python/path
-Author: Jason Orendorff <jason.orendorff\x40gmail\x2ecom> (and others - see the url!)
-Date: 9 Mar 2007
+#!/usr/bin/env python
+# coding: utf-8
"""
+ path
+ ~~~~
+ :copyright: Copyright 2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import os
+import sys
+import shutil
+from codecs import open
-# TODO
-# - Tree-walking functions don't avoid symlink loops. Matt Harrison
-# sent me a patch for this.
-# - Bug in write_text(). It doesn't support Universal newline mode.
-# - Better error message in listdir() when self isn't a
-# directory. (On Windows, the error message really sucks.)
-# - Make sure everything has a good docstring.
-# - Add methods for regex find and replace.
-# - guess_content_type() method?
-# - Perhaps support arguments to touch().
-
-from __future__ import generators
-
-import sys, warnings, os, fnmatch, glob, shutil, codecs
-
-__version__ = '2.2'
-__all__ = ['path']
-
-# Platform-specific support for path.owner
-if os.name == 'nt':
- try:
- import win32security
- except ImportError:
- win32security = None
-else:
- try:
- import pwd
- except ImportError:
- pwd = None
-
-# Pre-2.3 support. Are unicode filenames supported?
-_base = str
-_getcwd = os.getcwd
-try:
- if os.path.supports_unicode_filenames:
- _base = unicode
- _getcwd = os.getcwdu
-except AttributeError:
- pass
-
-# Pre-2.3 workaround for booleans
-try:
- True, False
-except NameError:
- True, False = 1, 0
-
-# Pre-2.3 workaround for basestring.
-try:
- basestring
-except NameError:
- basestring = (str, unicode)
-
-# Universal newline support
-_textmode = 'r'
-if hasattr(file, 'newlines'):
- _textmode = 'U'
-
-
-class TreeWalkWarning(Warning):
- pass
-
-class path(_base):
- """ Represents a filesystem path.
-
- For documentation on individual methods, consult their
- counterparts in os.path.
- """
-
- # --- Special Python methods.
- def __repr__(self):
- return 'path(%s)' % _base.__repr__(self)
+FILESYSTEMENCODING = sys.getfilesystemencoding() or sys.getdefaultencoding()
- # Adding a path and a string yields a path.
- def __add__(self, more):
- try:
- resultStr = _base.__add__(self, more)
- except TypeError: #Python bug
- resultStr = NotImplemented
- if resultStr is NotImplemented:
- return resultStr
- return self.__class__(resultStr)
-
- def __radd__(self, other):
- if isinstance(other, basestring):
- return self.__class__(other.__add__(self))
- else:
- return NotImplemented
- # The / operator joins paths.
- def __div__(self, rel):
- """ fp.__div__(rel) == fp / rel == fp.joinpath(rel)
+class path(str):
+ """
+ Represents a path which behaves like a string.
+ """
+ if sys.version_info < (3, 0):
+ def __new__(cls, s, encoding=FILESYSTEMENCODING, errors='strict'):
+ if isinstance(s, unicode):
+ s = s.encode(encoding, errors=errors)
+ return str.__new__(cls, s)
+ return str.__new__(cls, s)
- Join two path components, adding a separator character if
- needed.
+ @property
+ def parent(self):
"""
- return self.__class__(os.path.join(self, rel))
-
- # Make the / operator work even when true division is enabled.
- __truediv__ = __div__
-
- def getcwd(cls):
- """ Return the current working directory as a path object. """
- return cls(_getcwd())
- getcwd = classmethod(getcwd)
-
-
- # --- Operations on path strings.
-
- isabs = os.path.isabs
- def abspath(self): return self.__class__(os.path.abspath(self))
- def normcase(self): return self.__class__(os.path.normcase(self))
- def normpath(self): return self.__class__(os.path.normpath(self))
- def realpath(self): return self.__class__(os.path.realpath(self))
- def expanduser(self): return self.__class__(os.path.expanduser(self))
- def expandvars(self): return self.__class__(os.path.expandvars(self))
- def dirname(self): return self.__class__(os.path.dirname(self))
- basename = os.path.basename
-
- def expand(self):
- """ Clean up a filename by calling expandvars(),
- expanduser(), and normpath() on it.
-
- This is commonly everything needed to clean up a filename
- read from a configuration file, for example.
+ The name of the directory the file or directory is in.
"""
- return self.expandvars().expanduser().normpath()
-
- def _get_namebase(self):
- base, ext = os.path.splitext(self.name)
- return base
-
- def _get_ext(self):
- f, ext = os.path.splitext(_base(self))
- return ext
-
- def _get_drive(self):
- drive, r = os.path.splitdrive(self)
- return self.__class__(drive)
-
- parent = property(
- dirname, None, None,
- """ This path's parent directory, as a new path object.
-
- For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
- """)
-
- name = property(
- basename, None, None,
- """ The name of this file or directory without the full path.
-
- For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
- """)
+ return self.__class__(os.path.dirname(self))
- namebase = property(
- _get_namebase, None, None,
- """ The same as path.name, but with one file extension stripped off.
-
- For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
- but path('/home/guido/python.tar.gz').namebase == 'python.tar'
- """)
-
- ext = property(
- _get_ext, None, None,
- """ The file extension, for example '.py'. """)
-
- drive = property(
- _get_drive, None, None,
- """ The drive specifier, for example 'C:'.
- This is always empty on systems that don't use drive specifiers.
- """)
-
- def splitpath(self):
- """ p.splitpath() -> Return (p.parent, p.name). """
- parent, child = os.path.split(self)
- return self.__class__(parent), child
-
- def splitdrive(self):
- """ p.splitdrive() -> Return (p.drive, <the rest of p>).
-
- Split the drive specifier from this path. If there is
- no drive specifier, p.drive is empty, so the return value
- is simply (path(''), p). This is always the case on Unix.
+ def abspath(self):
"""
- drive, rel = os.path.splitdrive(self)
- return self.__class__(drive), rel
-
- def splitext(self):
- """ p.splitext() -> Return (p.stripext(), p.ext).
-
- Split the filename extension from this path and return
- the two parts. Either part may be empty.
-
- The extension is everything from '.' to the end of the
- last path segment. This has the property that if
- (a, b) == p.splitext(), then a + b == p.
+ Returns the absolute path.
"""
- filename, ext = os.path.splitext(self)
- return self.__class__(filename), ext
-
- def stripext(self):
- """ p.stripext() -> Remove one file extension from the path.
+ return self.__class__(os.path.abspath(self))
- For example, path('/home/guido/python.tar.gz').stripext()
- returns path('/home/guido/python.tar').
+ def isabs(self):
"""
- return self.splitext()[0]
-
- if hasattr(os.path, 'splitunc'):
- def splitunc(self):
- unc, rest = os.path.splitunc(self)
- return self.__class__(unc), rest
-
- def _get_uncshare(self):
- unc, r = os.path.splitunc(self)
- return self.__class__(unc)
-
- uncshare = property(
- _get_uncshare, None, None,
- """ The UNC mount point for this path.
- This is empty for paths on local drives. """)
-
- def joinpath(self, *args):
- """ Join two or more path components, adding a separator
- character (os.sep) if needed. Returns a new path
- object.
- """
- return self.__class__(os.path.join(self, *args))
-
- def splitall(self):
- r""" Return a list of the path components in this path.
-
- The first item in the list will be a path. Its value will be
- either os.curdir, os.pardir, empty, or the root directory of
- this path (for example, '/' or 'C:\\'). The other items in
- the list will be strings.
-
- path.path.joinpath(*result) will yield the original path.
- """
- parts = []
- loc = self
- while loc != os.curdir and loc != os.pardir:
- prev = loc
- loc, child = prev.splitpath()
- if loc == prev:
- break
- parts.append(child)
- parts.append(loc)
- parts.reverse()
- return parts
-
- def relpath(self):
- """ Return this path as a relative path,
- based from the current working directory.
- """
- cwd = self.__class__(os.getcwd())
- return cwd.relpathto(self)
-
- def relpathto(self, dest):
- """ Return a relative path from self to dest.
-
- If there is no relative path from self to dest, for example if
- they reside on different drives in Windows, then this returns
- dest.abspath().
- """
- origin = self.abspath()
- dest = self.__class__(dest).abspath()
-
- orig_list = origin.normcase().splitall()
- # Don't normcase dest! We want to preserve the case.
- dest_list = dest.splitall()
-
- if orig_list[0] != os.path.normcase(dest_list[0]):
- # Can't get here from there.
- return dest
-
- # Find the location where the two paths start to differ.
- i = 0
- for start_seg, dest_seg in zip(orig_list, dest_list):
- if start_seg != os.path.normcase(dest_seg):
- break
- i += 1
-
- # Now i is the point where the two paths diverge.
- # Need a certain number of "os.pardir"s to work up
- # from the origin to the point of divergence.
- segments = [os.pardir] * (len(orig_list) - i)
- # Need to add the diverging part of dest_list.
- segments += dest_list[i:]
- if len(segments) == 0:
- # If they happen to be identical, use os.curdir.
- relpath = os.curdir
- else:
- relpath = os.path.join(*segments)
- return self.__class__(relpath)
-
- # --- Listing, searching, walking, and matching
-
- def listdir(self, pattern=None):
- """ D.listdir() -> List of items in this directory.
-
- Use D.files() or D.dirs() instead if you want a listing
- of just files or just subdirectories.
+ Returns ``True`` if the path is absolute.
+ """
+ return os.path.isabs(self)
- The elements of the list are path objects.
+ def isdir(self):
+ """
+ Returns ``True`` if the path is a directory.
+ """
+ return os.path.isdir(self)
- With the optional 'pattern' argument, this only lists
- items whose names match the given pattern.
+ def isfile(self):
"""
- names = os.listdir(self)
- if pattern is not None:
- names = fnmatch.filter(names, pattern)
- return [self / child for child in names]
+ Returns ``True`` if the path is a file.
+ """
+ return os.path.isfile(self)
- def dirs(self, pattern=None):
- """ D.dirs() -> List of this directory's subdirectories.
+ def islink(self):
+ """
+ Returns ``True`` if the path is a symbolic link.
+ """
+ return os.path.islink(self)
- The elements of the list are path objects.
- This does not walk recursively into subdirectories
- (but see path.walkdirs).
+ def ismount(self):
+ """
+ Returns ``True`` if the path is a mount point.
+ """
+ return os.path.ismount(self)
- With the optional 'pattern' argument, this only lists
- directories whose names match the given pattern. For
- example, d.dirs('build-*').
+ def rmtree(self, ignore_errors=False, onerror=None):
"""
- return [p for p in self.listdir(pattern) if p.isdir()]
+        Removes the directory and any files or directories it may
+        contain.
- def files(self, pattern=None):
- """ D.files() -> List of the files in this directory.
+ :param ignore_errors:
+ If ``True`` errors are silently ignored, otherwise an exception
+ is raised in case an error occurs.
- The elements of the list are path objects.
- This does not walk into subdirectories (see path.walkfiles).
+ :param onerror:
+ A callback which gets called with the arguments `func`, `path` and
+ `exc_info`. `func` is one of :func:`os.listdir`, :func:`os.remove`
+ or :func:`os.rmdir`. `path` is the argument to the function which
+ caused it to fail and `exc_info` is a tuple as returned by
+ :func:`sys.exc_info`.
+ """
+ shutil.rmtree(self, ignore_errors=ignore_errors, onerror=onerror)
- With the optional 'pattern' argument, this only lists files
- whose names match the given pattern. For example,
- d.files('*.pyc').
+ def copytree(self, destination, symlinks=False):
"""
+ Recursively copy a directory to the given `destination`. If the given
+ `destination` does not exist it will be created.
- return [p for p in self.listdir(pattern) if p.isfile()]
+ :param symlinks:
+ If ``True`` symbolic links in the source tree result in symbolic
+ links in the destination tree otherwise the contents of the files
+ pointed to by the symbolic links are copied.
+ """
+ shutil.copytree(self, destination, symlinks=symlinks)
- def walk(self, pattern=None, errors='strict'):
- """ D.walk() -> iterator over files and subdirs, recursively.
+ def movetree(self, destination):
+ """
+ Recursively move the file or directory to the given `destination`
+ similar to the Unix "mv" command.
- The iterator yields path objects naming each child item of
- this directory and its descendants. This requires that
- D.isdir().
+ If the `destination` is a file it may be overwritten depending on the
+ :func:`os.rename` semantics.
+ """
+ shutil.move(self, destination)
- This performs a depth-first traversal of the directory tree.
- Each directory is returned just before all its children.
+ move = movetree
- The errors= keyword argument controls behavior when an
- error occurs. The default is 'strict', which causes an
- exception. The other allowed values are 'warn', which
- reports the error via warnings.warn(), and 'ignore'.
+ def unlink(self):
"""
- if errors not in ('strict', 'warn', 'ignore'):
- raise ValueError("invalid errors parameter")
-
- try:
- childList = self.listdir()
- except Exception:
- if errors == 'ignore':
- return
- elif errors == 'warn':
- warnings.warn(
- "Unable to list directory '%s': %s"
- % (self, sys.exc_info()[1]),
- TreeWalkWarning)
- return
- else:
- raise
-
- for child in childList:
- if pattern is None or child.fnmatch(pattern):
- yield child
- try:
- isdir = child.isdir()
- except Exception:
- if errors == 'ignore':
- isdir = False
- elif errors == 'warn':
- warnings.warn(
- "Unable to access '%s': %s"
- % (child, sys.exc_info()[1]),
- TreeWalkWarning)
- isdir = False
- else:
- raise
-
- if isdir:
- for item in child.walk(pattern, errors):
- yield item
-
- def walkdirs(self, pattern=None, errors='strict'):
- """ D.walkdirs() -> iterator over subdirs, recursively.
-
- With the optional 'pattern' argument, this yields only
- directories whose names match the given pattern. For
- example, mydir.walkdirs('*test') yields only directories
- with names ending in 'test'.
-
- The errors= keyword argument controls behavior when an
- error occurs. The default is 'strict', which causes an
- exception. The other allowed values are 'warn', which
- reports the error via warnings.warn(), and 'ignore'.
- """
- if errors not in ('strict', 'warn', 'ignore'):
- raise ValueError("invalid errors parameter")
+ Removes a file.
+ """
+ os.unlink(self)
+ def write_text(self, text, **kwargs):
+ """
+ Writes the given `text` to the file.
+ """
+ f = open(self, 'w', **kwargs)
try:
- dirs = self.dirs()
- except Exception:
- if errors == 'ignore':
- return
- elif errors == 'warn':
- warnings.warn(
- "Unable to list directory '%s': %s"
- % (self, sys.exc_info()[1]),
- TreeWalkWarning)
- return
- else:
- raise
-
- for child in dirs:
- if pattern is None or child.fnmatch(pattern):
- yield child
- for subsubdir in child.walkdirs(pattern, errors):
- yield subsubdir
-
- def walkfiles(self, pattern=None, errors='strict'):
- """ D.walkfiles() -> iterator over files in D, recursively.
-
- The optional argument, pattern, limits the results to files
- with names that match the pattern. For example,
- mydir.walkfiles('*.tmp') yields only files with the .tmp
- extension.
- """
- if errors not in ('strict', 'warn', 'ignore'):
- raise ValueError("invalid errors parameter")
+ f.write(text)
+ finally:
+ f.close()
+ def text(self, **kwargs):
+ """
+ Returns the text in the file.
+ """
+ f = open(self, mode='U', **kwargs)
try:
- childList = self.listdir()
- except Exception:
- if errors == 'ignore':
- return
- elif errors == 'warn':
- warnings.warn(
- "Unable to list directory '%s': %s"
- % (self, sys.exc_info()[1]),
- TreeWalkWarning)
- return
- else:
- raise
-
- for child in childList:
- try:
- isfile = child.isfile()
- isdir = not isfile and child.isdir()
- except:
- if errors == 'ignore':
- continue
- elif errors == 'warn':
- warnings.warn(
- "Unable to access '%s': %s"
- % (self, sys.exc_info()[1]),
- TreeWalkWarning)
- continue
- else:
- raise
-
- if isfile:
- if pattern is None or child.fnmatch(pattern):
- yield child
- elif isdir:
- for f in child.walkfiles(pattern, errors):
- yield f
-
- def fnmatch(self, pattern):
- """ Return True if self.name matches the given pattern.
-
- pattern - A filename pattern with wildcards,
- for example '*.py'.
- """
- return fnmatch.fnmatch(self.name, pattern)
-
- def glob(self, pattern):
- """ Return a list of path objects that match the pattern.
-
- pattern - a path relative to this directory, with wildcards.
-
- For example, path('/users').glob('*/bin/*') returns a list
- of all the files users have in their bin directories.
- """
- cls = self.__class__
- return [cls(s) for s in glob.glob(_base(self / pattern))]
-
-
- # --- Reading or writing an entire file at once.
-
- def open(self, mode='r'):
- """ Open this file. Return a file object. """
- return file(self, mode)
+ return f.read()
+ finally:
+ f.close()
def bytes(self):
- """ Open this file, read all bytes, return them as a string. """
- f = self.open('rb')
+ """
+ Returns the bytes in the file.
+ """
+ f = open(self, mode='rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
- """ Open this file and write the given bytes to it.
+ """
+ Writes the given `bytes` to the file.
- Default behavior is to overwrite any existing file.
- Call p.write_bytes(bytes, append=True) to append instead.
+ :param append:
+ If ``True`` given `bytes` are added at the end of the file.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
- f = self.open(mode)
+ f = open(self, mode=mode)
try:
f.write(bytes)
finally:
f.close()
- def text(self, encoding=None, errors='strict'):
- r""" Open this file, read it in, return the content as a string.
-
- This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
- are automatically translated to '\n'.
-
- Optional arguments:
-
- encoding - The Unicode encoding (or character set) of
- the file. If present, the content of the file is
- decoded and returned as a unicode object; otherwise
- it is returned as an 8-bit str.
- errors - How to handle Unicode errors; see help(str.decode)
- for the options. Default is 'strict'.
+ def exists(self):
"""
- if encoding is None:
- # 8-bit
- f = self.open(_textmode)
- try:
- return f.read()
- finally:
- f.close()
- else:
- # Unicode
- f = codecs.open(self, 'r', encoding, errors)
- # (Note - Can't use 'U' mode here, since codecs.open
- # doesn't support 'U' mode, even in Python 2.3.)
- try:
- t = f.read()
- finally:
- f.close()
- return (t.replace(u'\r\n', u'\n')
- .replace(u'\r\x85', u'\n')
- .replace(u'\r', u'\n')
- .replace(u'\x85', u'\n')
- .replace(u'\u2028', u'\n'))
-
- def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
- r""" Write the given text to this file.
-
- The default behavior is to overwrite any existing file;
- to append instead, use the 'append=True' keyword argument.
-
- There are two differences between path.write_text() and
- path.write_bytes(): newline handling and Unicode handling.
- See below.
-
- Parameters:
-
- - text - str/unicode - The text to be written.
-
- - encoding - str - The Unicode encoding that will be used.
- This is ignored if 'text' isn't a Unicode string.
-
- - errors - str - How to handle Unicode encoding errors.
- Default is 'strict'. See help(unicode.encode) for the
- options. This is ignored if 'text' isn't a Unicode
- string.
-
- - linesep - keyword argument - str/unicode - The sequence of
- characters to be used to mark end-of-line. The default is
- os.linesep. You can also specify None; this means to
- leave all newlines as they are in 'text'.
-
- - append - keyword argument - bool - Specifies what to do if
- the file already exists (True: append to the end of it;
- False: overwrite it.) The default is False.
-
-
- --- Newline handling.
-
- write_text() converts all standard end-of-line sequences
- ('\n', '\r', and '\r\n') to your platform's default end-of-line
- sequence (see os.linesep; on Windows, for example, the
- end-of-line marker is '\r\n').
-
- If you don't like your platform's default, you can override it
- using the 'linesep=' keyword argument. If you specifically want
- write_text() to preserve the newlines as-is, use 'linesep=None'.
-
- This applies to Unicode text the same as to 8-bit text, except
- there are three additional standard Unicode end-of-line sequences:
- u'\x85', u'\r\x85', and u'\u2028'.
-
- (This is slightly different from when you open a file for
- writing with fopen(filename, "w") in C or file(filename, 'w')
- in Python.)
-
-
- --- Unicode
-
- If 'text' isn't Unicode, then apart from newline handling, the
- bytes are written verbatim to the file. The 'encoding' and
- 'errors' arguments are not used and must be omitted.
-
- If 'text' is Unicode, it is first converted to bytes using the
- specified 'encoding' (or the default encoding if 'encoding'
- isn't specified). The 'errors' argument applies only to this
- conversion.
-
- """
- if isinstance(text, unicode):
- if linesep is not None:
- # Convert all standard end-of-line sequences to
- # ordinary newline characters.
- text = (text.replace(u'\r\n', u'\n')
- .replace(u'\r\x85', u'\n')
- .replace(u'\r', u'\n')
- .replace(u'\x85', u'\n')
- .replace(u'\u2028', u'\n'))
- text = text.replace(u'\n', linesep)
- if encoding is None:
- encoding = sys.getdefaultencoding()
- bytes = text.encode(encoding, errors)
- else:
- # It is an error to specify an encoding if 'text' is
- # an 8-bit string.
- assert encoding is None
-
- if linesep is not None:
- text = (text.replace('\r\n', '\n')
- .replace('\r', '\n'))
- bytes = text.replace('\n', linesep)
-
- self.write_bytes(bytes, append)
-
- def lines(self, encoding=None, errors='strict', retain=True):
- r""" Open this file, read all lines, return them in a list.
-
- Optional arguments:
- encoding - The Unicode encoding (or character set) of
- the file. The default is None, meaning the content
- of the file is read as 8-bit characters and returned
- as a list of (non-Unicode) str objects.
- errors - How to handle Unicode errors; see help(str.decode)
- for the options. Default is 'strict'
- retain - If true, retain newline characters; but all newline
- character combinations ('\r', '\n', '\r\n') are
- translated to '\n'. If false, newline characters are
- stripped off. Default is True.
-
- This uses 'U' mode in Python 2.3 and later.
- """
- if encoding is None and retain:
- f = self.open(_textmode)
- try:
- return f.readlines()
- finally:
- f.close()
- else:
- return self.text(encoding, errors).splitlines(retain)
-
- def write_lines(self, lines, encoding=None, errors='strict',
- linesep=os.linesep, append=False):
- r""" Write the given lines of text to this file.
-
- By default this overwrites any existing file at this path.
-
- This puts a platform-specific newline sequence on every line.
- See 'linesep' below.
-
- lines - A list of strings.
-
- encoding - A Unicode encoding to use. This applies only if
- 'lines' contains any Unicode strings.
-
- errors - How to handle errors in Unicode encoding. This
- also applies only to Unicode strings.
-
- linesep - The desired line-ending. This line-ending is
- applied to every line. If a line already has any
- standard line ending ('\r', '\n', '\r\n', u'\x85',
- u'\r\x85', u'\u2028'), that will be stripped off and
- this will be used instead. The default is os.linesep,
- which is platform-dependent ('\r\n' on Windows, '\n' on
- Unix, etc.) Specify None to write the lines as-is,
- like file.writelines().
-
- Use the keyword argument append=True to append lines to the
- file. The default is to overwrite the file. Warning:
- When you use this with Unicode data, if the encoding of the
- existing data in the file is different from the encoding
- you specify with the encoding= parameter, the result is
- mixed-encoding data, which can really confuse someone trying
- to read the file later.
+        Returns ``True`` if the path exists.
"""
- if append:
- mode = 'ab'
- else:
- mode = 'wb'
- f = self.open(mode)
- try:
- for line in lines:
- isUnicode = isinstance(line, unicode)
- if linesep is not None:
- # Strip off any existing line-end and add the
- # specified linesep string.
- if isUnicode:
- if line[-2:] in (u'\r\n', u'\x0d\x85'):
- line = line[:-2]
- elif line[-1:] in (u'\r', u'\n',
- u'\x85', u'\u2028'):
- line = line[:-1]
- else:
- if line[-2:] == '\r\n':
- line = line[:-2]
- elif line[-1:] in ('\r', '\n'):
- line = line[:-1]
- line += linesep
- if isUnicode:
- if encoding is None:
- encoding = sys.getdefaultencoding()
- line = line.encode(encoding, errors)
- f.write(line)
- finally:
- f.close()
-
- # --- Methods for querying the filesystem.
-
- exists = os.path.exists
- isdir = os.path.isdir
- isfile = os.path.isfile
- islink = os.path.islink
- ismount = os.path.ismount
-
- if hasattr(os.path, 'samefile'):
- samefile = os.path.samefile
-
- getatime = os.path.getatime
- atime = property(
- getatime, None, None,
- """ Last access time of the file. """)
-
- getmtime = os.path.getmtime
- mtime = property(
- getmtime, None, None,
- """ Last-modified time of the file. """)
-
- if hasattr(os.path, 'getctime'):
- getctime = os.path.getctime
- ctime = property(
- getctime, None, None,
- """ Creation time of the file. """)
-
- getsize = os.path.getsize
- size = property(
- getsize, None, None,
- """ Size of the file, in bytes. """)
-
- if hasattr(os, 'access'):
- def access(self, mode):
- """ Return true if current user has access to this path.
-
- mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
- """
- return os.access(self, mode)
-
- def stat(self):
- """ Perform a stat() system call on this path. """
- return os.stat(self)
-
- def lstat(self):
- """ Like path.stat(), but do not follow symbolic links. """
- return os.lstat(self)
-
- def get_owner(self):
- r""" Return the name of the owner of this file or directory.
-
- This follows symbolic links.
-
- On Windows, this returns a name of the form ur'DOMAIN\User Name'.
- On Windows, a group can own a file or directory.
- """
- if os.name == 'nt':
- if win32security is None:
- raise Exception("path.owner requires win32all to be installed")
- desc = win32security.GetFileSecurity(
- self, win32security.OWNER_SECURITY_INFORMATION)
- sid = desc.GetSecurityDescriptorOwner()
- account, domain, typecode = win32security.LookupAccountSid(None, sid)
- return domain + u'\\' + account
- else:
- if pwd is None:
- raise NotImplementedError("path.owner is not implemented on this platform.")
- st = self.stat()
- return pwd.getpwuid(st.st_uid).pw_name
-
- owner = property(
- get_owner, None, None,
- """ Name of the owner of this file or directory. """)
-
- if hasattr(os, 'statvfs'):
- def statvfs(self):
- """ Perform a statvfs() system call on this path. """
- return os.statvfs(self)
-
- if hasattr(os, 'pathconf'):
- def pathconf(self, name):
- return os.pathconf(self, name)
-
-
- # --- Modifying operations on files and directories
-
- def utime(self, times):
- """ Set the access and modified times of this file. """
- os.utime(self, times)
-
- def chmod(self, mode):
- os.chmod(self, mode)
+ return os.path.exists(self)
- if hasattr(os, 'chown'):
- def chown(self, uid, gid):
- os.chown(self, uid, gid)
-
- def rename(self, new):
- os.rename(self, new)
-
- def renames(self, new):
- os.renames(self, new)
-
-
- # --- Create/delete operations on directories
-
- def mkdir(self, mode=0777):
- os.mkdir(self, mode)
+ def lexists(self):
+ """
+        Returns ``True`` if the path exists, even if it refers to a broken
+        symbolic link.
+ """
+ return os.path.lexists(self)
def makedirs(self, mode=0777):
+ """
+ Recursively create directories.
+ """
os.makedirs(self, mode)
- def rmdir(self):
- os.rmdir(self)
-
- def removedirs(self):
- os.removedirs(self)
-
-
- # --- Modifying operations on files
-
- def touch(self):
- """ Set the access/modified times of this file to the current time.
- Create the file if it does not exist.
+ def joinpath(self, *args):
"""
- fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
- os.close(fd)
- os.utime(self, None)
-
- def remove(self):
- os.remove(self)
-
- def unlink(self):
- os.unlink(self)
-
-
- # --- Links
-
- if hasattr(os, 'link'):
- def link(self, newpath):
- """ Create a hard link at 'newpath', pointing to this file. """
- os.link(self, newpath)
-
- if hasattr(os, 'symlink'):
- def symlink(self, newlink):
- """ Create a symbolic link at 'newlink', pointing here. """
- os.symlink(self, newlink)
-
- if hasattr(os, 'readlink'):
- def readlink(self):
- """ Return the path to which this symbolic link points.
-
- The result may be an absolute or a relative path.
- """
- return self.__class__(os.readlink(self))
-
- def readlinkabs(self):
- """ Return the path to which this symbolic link points.
-
- The result is always an absolute path.
- """
- p = self.readlink()
- if p.isabs():
- return p
- else:
- return (self.parent / p).abspath()
-
-
- # --- High-level functions from shutil
-
- copyfile = shutil.copyfile
- copymode = shutil.copymode
- copystat = shutil.copystat
- copy = shutil.copy
- copy2 = shutil.copy2
- copytree = shutil.copytree
- if hasattr(shutil, 'move'):
- move = shutil.move
- rmtree = shutil.rmtree
-
-
- # --- Special stuff from os
-
- if hasattr(os, 'chroot'):
- def chroot(self):
- os.chroot(self)
+ Joins the path with the argument given and returns the result.
+ """
+ return self.__class__(os.path.join(self, *map(self.__class__, args)))
- if hasattr(os, 'startfile'):
- def startfile(self):
- os.startfile(self)
+ __div__ = __truediv__ = joinpath
+ def __repr__(self):
+ return '%s(%s)' % (self.__class__.__name__, str.__repr__(self))
diff --git a/tests/root/bom.po b/tests/root/bom.po
new file mode 100644
index 00000000..c6025eb1
--- /dev/null
+++ b/tests/root/bom.po
@@ -0,0 +1,12 @@
+#, fuzzy
+msgid ""
+msgstr ""
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+msgid "File with UTF-8 BOM"
+msgstr "Datei mit UTF-8"
+
+msgid "This file has a UTF-8 \"BOM\"."
+msgstr "This file has umlauts: äöü."
diff --git a/tests/root/conf.py b/tests/root/conf.py
index a50029e5..89804a30 100644
--- a/tests/root/conf.py
+++ b/tests/root/conf.py
@@ -22,7 +22,7 @@ copyright = '2010, Georg Brandl & Team'
version = '0.6'
release = '0.6alpha1'
today_fmt = '%B %d, %Y'
-#unused_docs = []
+# unused_docs = []
exclude_patterns = ['_build', '**/excluded.*']
keep_warnings = True
pygments_style = 'sphinx'
diff --git a/tests/root/contents.txt b/tests/root/contents.txt
index e052e04b..280953b4 100644
--- a/tests/root/contents.txt
+++ b/tests/root/contents.txt
@@ -26,6 +26,7 @@ Contents:
extensions
doctest
extensions
+ versioning/index
Python <http://python.org/>
diff --git a/tests/root/doctest.txt b/tests/root/doctest.txt
index 35cdd589..ba9a72c5 100644
--- a/tests/root/doctest.txt
+++ b/tests/root/doctest.txt
@@ -30,7 +30,7 @@ Special directives
.. testcode::
- print 1+1
+ print(1+1)
.. testoutput::
@@ -50,30 +50,31 @@ Special directives
.. testsetup:: *
- from math import floor
+ def squared(x):
+ return x * x
.. doctest::
- >>> floor(1.2)
- 1.0
+ >>> squared(2)
+ 4
.. testcode::
- print floor(1.2)
+ print(squared(2))
.. testoutput::
- 1.0
+ 4
- >>> floor(1.2)
- 1.0
+ >>> squared(2)
+ 4
* options for testcode/testoutput blocks
.. testcode::
:hide:
- print 'Output text.'
+ print('Output text.')
.. testoutput::
:hide:
@@ -85,36 +86,38 @@ Special directives
.. testsetup:: group1
- from math import ceil
+ def add(x, y):
+ return x + y
- ``ceil`` is now known in "group1", but not in others.
+
+ ``add`` is now known in "group1", but not in others.
.. doctest:: group1
- >>> ceil(0.8)
- 1.0
+ >>> add(1, 1)
+ 2
.. doctest:: group2
- >>> ceil(0.8)
+ >>> add(1, 1)
Traceback (most recent call last):
...
- NameError: name 'ceil' is not defined
+ NameError: name 'add' is not defined
Interleaving testcode/testoutput:
.. testcode:: group1
- print ceil(0.8)
+ print(squared(3))
.. testcode:: group2
- print floor(0.8)
+ print(squared(4))
.. testoutput:: group1
- 1.0
+ 9
.. testoutput:: group2
- 0.0
+ 16
diff --git a/tests/root/literal.inc b/tests/root/literal.inc
index d5b9890c..694f15ed 100644
--- a/tests/root/literal.inc
+++ b/tests/root/literal.inc
@@ -1,7 +1,7 @@
# Literally included file using Python highlighting
# -*- coding: utf-8 -*-
-foo = u"Including Unicode characters: üöä"
+foo = "Including Unicode characters: üöä"
class Foo:
pass
diff --git a/tests/root/markup.txt b/tests/root/markup.txt
index a72285ed..84d9581a 100644
--- a/tests/root/markup.txt
+++ b/tests/root/markup.txt
@@ -132,6 +132,8 @@ Adding \n to test unescaping.
Test :abbr:`abbr (abbreviation)` and another :abbr:`abbr (abbreviation)`.
+Testing the :index:`index` role, also available with
+:index:`explicit <pair: title; explicit>` title.
.. _with:
diff --git a/tests/root/versioning/added.txt b/tests/root/versioning/added.txt
new file mode 100644
index 00000000..22a70739
--- /dev/null
+++ b/tests/root/versioning/added.txt
@@ -0,0 +1,20 @@
+Versioning test text
+====================
+
+So the thing is I need some kind of text - not the lorem ipsum stuff, that
+doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find
+a good text for that under public domain so I thought the easiest solution is
+to write one by myself. It's not really interesting, in fact it is *really*
+boring.
+
+Anyway I need more than one paragraph, at least three for the original
+document, I think, and another one for two different ones.
+
+So the previous paragraph was a bit short because I don't want to test this
+only on long paragraphs, I hope it was short enough to cover most stuff.
+Anyway I see this lacks ``some markup`` so I have to add a **little** bit.
+
+Woho another paragraph, if this test fails we really have a problem because
+this means the algorithm itself fails and not the diffing algorithm which is
+pretty much doomed anyway as it probably fails for some kind of language
+respecting certain nodes anyway but we can't work around that anyway.
diff --git a/tests/root/versioning/deleted.txt b/tests/root/versioning/deleted.txt
new file mode 100644
index 00000000..a1a9c4c9
--- /dev/null
+++ b/tests/root/versioning/deleted.txt
@@ -0,0 +1,12 @@
+Versioning test text
+====================
+
+So the thing is I need some kind of text - not the lorem ipsum stuff, that
+doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find
+a good text for that under public domain so I thought the easiest solution is
+to write one by myself. It's not really interesting, in fact it is *really*
+boring.
+
+So the previous paragraph was a bit short because I don't want to test this
+only on long paragraphs, I hope it was short enough to cover most stuff.
+Anyway I see this lacks ``some markup`` so I have to add a **little** bit.
diff --git a/tests/root/versioning/deleted_end.txt b/tests/root/versioning/deleted_end.txt
new file mode 100644
index 00000000..f30e6300
--- /dev/null
+++ b/tests/root/versioning/deleted_end.txt
@@ -0,0 +1,11 @@
+Versioning test text
+====================
+
+So the thing is I need some kind of text - not the lorem ipsum stuff, that
+doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find
+a good text for that under public domain so I thought the easiest solution is
+to write one by myself. It's not really interesting, in fact it is *really*
+boring.
+
+Anyway I need more than one paragraph, at least three for the original
+document, I think, and another one for two different ones.
diff --git a/tests/root/versioning/index.txt b/tests/root/versioning/index.txt
new file mode 100644
index 00000000..9d098f75
--- /dev/null
+++ b/tests/root/versioning/index.txt
@@ -0,0 +1,13 @@
+Versioning Stuff
+================
+
+.. toctree::
+
+ original
+ added
+ insert
+ deleted
+ deleted_end
+ modified
+ insert_beginning
+ insert_similar
diff --git a/tests/root/versioning/insert.txt b/tests/root/versioning/insert.txt
new file mode 100644
index 00000000..1c157cc9
--- /dev/null
+++ b/tests/root/versioning/insert.txt
@@ -0,0 +1,18 @@
+Versioning test text
+====================
+
+So the thing is I need some kind of text - not the lorem ipsum stuff, that
+doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find
+a good text for that under public domain so I thought the easiest solution is
+to write one by myself. It's not really interesting, in fact it is *really*
+boring.
+
+So this paragraph is just something I inserted in this document to test if our
+algorithm notices that this paragraph is not just a changed version.
+
+Anyway I need more than one paragraph, at least three for the original
+document, I think, and another one for two different ones.
+
+So the previous paragraph was a bit short because I don't want to test this
+only on long paragraphs, I hope it was short enough to cover most stuff.
+Anyway I see this lacks ``some markup`` so I have to add a **little** bit.
diff --git a/tests/root/versioning/insert_beginning.txt b/tests/root/versioning/insert_beginning.txt
new file mode 100644
index 00000000..57102a76
--- /dev/null
+++ b/tests/root/versioning/insert_beginning.txt
@@ -0,0 +1,18 @@
+Versioning test text
+====================
+
+Apparently inserting a paragraph at the beginning of a document caused
+problems earlier so this document should be used to test that.
+
+So the thing is I need some kind of text - not the lorem ipsum stuff, that
+doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find
+a good text for that under public domain so I thought the easiest solution is
+to write one by myself. It's not really interesting, in fact it is *really*
+boring.
+
+Anyway I need more than one paragraph, at least three for the original
+document, I think, and another one for two different ones.
+
+So the previous paragraph was a bit short because I don't want to test this
+only on long paragraphs, I hope it was short enough to cover most stuff.
+Anyway I see this lacks ``some markup`` so I have to add a **little** bit.
diff --git a/tests/root/versioning/insert_similar.txt b/tests/root/versioning/insert_similar.txt
new file mode 100644
index 00000000..ee9b5305
--- /dev/null
+++ b/tests/root/versioning/insert_similar.txt
@@ -0,0 +1,17 @@
+Versioning test text
+====================
+
+So the thing is I need some kind of text - not the lorem ipsum stuff, that
+doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find
+a good text for that under public domain so I thought the easiest solution is
+to write one by myself. It's not really interesting, in fact it is *really*
+boring.
+
+Anyway I need more
+
+Anyway I need more than one paragraph, at least three for the original
+document, I think, and another one for two different ones.
+
+So the previous paragraph was a bit short because I don't want to test this
+only on long paragraphs, I hope it was short enough to cover most stuff.
+Anyway I see this lacks ``some markup`` so I have to add a **little** bit.
diff --git a/tests/root/versioning/modified.txt b/tests/root/versioning/modified.txt
new file mode 100644
index 00000000..49cdad93
--- /dev/null
+++ b/tests/root/versioning/modified.txt
@@ -0,0 +1,17 @@
+Versioning test text
+====================
+
+So the thing is I need some kind of text - not the lorem ipsum stuff, that
+doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find
+a good text for that under public domain so I thought the easiest solution is
+to write one by myself. Inserting something silly as a modification, btw. have
+you seen the typo below?. It's not really interesting, in fact it is *really*
+boring.
+
+Anyway I need more than one paragraph, at least three for the original
+document, I think, and another one for two different ones. So this is a small
+modification by adding something to this paragraph.
+
+So the previous paragraph was a bit short because I don't want to test this
+only on long paragraphs, I hoep it was short enough to cover most stuff.
+Anyway I see this lacks ``some markup`` so I have to add a **little** bit.
diff --git a/tests/root/versioning/original.txt b/tests/root/versioning/original.txt
new file mode 100644
index 00000000..b3fe0609
--- /dev/null
+++ b/tests/root/versioning/original.txt
@@ -0,0 +1,15 @@
+Versioning test text
+====================
+
+So the thing is I need some kind of text - not the lorem ipsum stuff, that
+doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find
+a good text for that under public domain so I thought the easiest solution is
+to write one by myself. It's not really interesting, in fact it is *really*
+boring.
+
+Anyway I need more than one paragraph, at least three for the original
+document, I think, and another one for two different ones.
+
+So the previous paragraph was a bit short because I don't want to test this
+only on long paragraphs, I hope it was short enough to cover most stuff.
+Anyway I see this lacks ``some markup`` so I have to add a **little** bit.
diff --git a/tests/run.py b/tests/run.py
index 0cb41442..50567fbc 100755
--- a/tests/run.py
+++ b/tests/run.py
@@ -11,7 +11,17 @@
"""
import sys
-from os import path
+from os import path, chdir, listdir
+
+if sys.version_info >= (3, 0):
+ print('Copying and converting sources to build/lib/tests...')
+ from distutils.util import copydir_run_2to3
+ testroot = path.dirname(__file__) or '.'
+ newroot = path.join(testroot, path.pardir, 'build')
+ newroot = path.join(newroot, listdir(newroot)[0], 'tests')
+ copydir_run_2to3(testroot, newroot)
+ # switch to the converted dir so nose tests the right tests
+ chdir(newroot)
# always test the sphinx package from this directory
sys.path.insert(0, path.join(path.dirname(__file__), path.pardir))
@@ -19,8 +29,8 @@ sys.path.insert(0, path.join(path.dirname(__file__), path.pardir))
try:
import nose
except ImportError:
- print "The nose package is needed to run the Sphinx test suite."
+ print("The nose package is needed to run the Sphinx test suite.")
sys.exit(1)
-print "Running Sphinx test suite..."
+print("Running Sphinx test suite...")
nose.main()
diff --git a/tests/test_application.py b/tests/test_application.py
index 3d287a57..d1154863 100644
--- a/tests/test_application.py
+++ b/tests/test_application.py
@@ -45,9 +45,11 @@ def test_output():
app = TestApp(status=status, warning=warnings)
try:
status.truncate(0) # __init__ writes to status
+ status.seek(0)
app.info("Nothing here...")
assert status.getvalue() == "Nothing here...\n"
status.truncate(0)
+ status.seek(0)
app.info("Nothing here...", True)
assert status.getvalue() == "Nothing here..."
diff --git a/tests/test_autosummary.py b/tests/test_autosummary.py
index 7e309367..20fb06e0 100644
--- a/tests/test_autosummary.py
+++ b/tests/test_autosummary.py
@@ -9,8 +9,6 @@
:license: BSD, see LICENSE for details.
"""
-import string
-
from util import *
from sphinx.ext.autosummary import mangle_signature
@@ -27,7 +25,7 @@ def test_mangle_signature():
(a, b, c='foobar()', d=123) :: (a, b[, c, d])
"""
- TEST = [map(string.strip, x.split("::")) for x in TEST.split("\n")
+ TEST = [map(lambda x: x.strip(), x.split("::")) for x in TEST.split("\n")
if '::' in x]
for inp, outp in TEST:
res = mangle_signature(inp).strip().replace(u"\u00a0", " ")
diff --git a/tests/test_build_gettext.py b/tests/test_build_gettext.py
new file mode 100644
index 00000000..ba2440fd
--- /dev/null
+++ b/tests/test_build_gettext.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+"""
+ test_build_gettext
+ ~~~~~~~~~~~~~~~~~~
+
+ Test the build process with gettext builder with the test root.
+
+ :copyright: Copyright 2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import gettext
+import os
+from subprocess import Popen, PIPE
+
+from util import *
+
+
+def teardown_module():
+ (test_root / '_build').rmtree(True)
+
+
+@with_app(buildername='gettext')
+def test_build(app):
+ app.builder.build(['extapi', 'subdir/includes'])
+ # documents end up in a message catalog
+ assert (app.outdir / 'extapi.pot').isfile()
+ # ..and are grouped into sections
+ assert (app.outdir / 'subdir.pot').isfile()
+
+@with_app(buildername='gettext')
+def test_gettext(app):
+ app.builder.build(['markup'])
+
+ (app.outdir / 'en' / 'LC_MESSAGES').makedirs()
+ cwd = os.getcwd()
+ os.chdir(app.outdir)
+ try:
+ try:
+ p = Popen(['msginit', '--no-translator', '-i', 'markup.pot',
+ '--locale', 'en_US'],
+ stdout=PIPE, stderr=PIPE)
+ except OSError:
+ return # most likely msginit was not found
+ else:
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ print stdout
+ print stderr
+ assert False, 'msginit exited with return code %s' % \
+ p.returncode
+ assert (app.outdir / 'en_US.po').isfile(), 'msginit failed'
+ try:
+ p = Popen(['msgfmt', 'en_US.po', '-o',
+ os.path.join('en', 'LC_MESSAGES', 'test_root.mo')],
+ stdout=PIPE, stderr=PIPE)
+ except OSError:
+ return # most likely msgfmt was not found
+ else:
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ print stdout
+ print stderr
+ assert False, 'msgfmt exited with return code %s' % \
+ p.returncode
+ assert (app.outdir / 'en' / 'LC_MESSAGES' / 'test_root.mo').isfile(), \
+ 'msgfmt failed'
+ finally:
+ os.chdir(cwd)
+
+ _ = gettext.translation('test_root', app.outdir, languages=['en']).gettext
+ assert _("Testing various markup") == u"Testing various markup"
+
+@with_app(buildername='gettext')
+def test_all(app):
+ app.builder.build_all()
+
+
+def setup_patch():
+ (test_root / 'xx' / 'LC_MESSAGES').makedirs()
+ try:
+ p = Popen(['msgfmt', test_root / 'bom.po', '-o',
+ test_root / 'xx' / 'LC_MESSAGES' / 'bom.mo'],
+ stdout=PIPE, stderr=PIPE)
+ except OSError:
+ return # most likely msgfmt was not found
+ else:
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ print stdout
+ print stderr
+ assert False, 'msgfmt exited with return code %s' % p.returncode
+ assert (test_root / 'xx' / 'LC_MESSAGES' / 'bom.mo').isfile(), \
+ 'msgfmt failed'
+
+def teardown_patch():
+ (test_root / 'xx').rmtree()
+
+@with_app(buildername='text',
+ confoverrides={'language': 'xx', 'locale_dirs': ['.']})
+def test_patch(app):
+ app.builder.build(['bom'])
+ result = (app.outdir / 'bom.txt').text(encoding='utf-8')
+ expect = (u"\nDatei mit UTF-8"
+ u"\n***************\n" # underline matches new translation
+ u"\nThis file has umlauts: äöü.\n")
+ assert result == expect
+
+test_patch.setup = setup_patch
+test_patch.teardown = teardown_patch
diff --git a/tests/test_build_html.py b/tests/test_build_html.py
index 991c9a30..fee4b389 100644
--- a/tests/test_build_html.py
+++ b/tests/test_build_html.py
@@ -12,6 +12,7 @@
import os
import re
import htmlentitydefs
+import sys
from StringIO import StringIO
try:
@@ -37,7 +38,7 @@ http://www.python.org/logo.png
%(root)s/includes.txt:\\d*: \\(WARNING/2\\) Encoding 'utf-8-sig' used for \
reading included file u'.*?wrongenc.inc' seems to be wrong, try giving an \
:encoding: option\\n?
-%(root)s/includes.txt:4: WARNING: download file not readable: nonexisting.png
+%(root)s/includes.txt:4: WARNING: download file not readable: .*?nonexisting.png
%(root)s/objects.txt:\\d*: WARNING: using old C markup; please migrate to \
new-style markup \(e.g. c:function instead of cfunction\), see \
http://sphinx.pocoo.org/domains.html
@@ -50,6 +51,11 @@ HTML_WARNINGS = ENV_WARNINGS + """\
%(root)s/markup.txt:: WARNING: invalid pair index entry u'keyword; '
"""
+if sys.version_info >= (3, 0):
+ ENV_WARNINGS = remove_unicode_literals(ENV_WARNINGS)
+ HTML_WARNINGS = remove_unicode_literals(HTML_WARNINGS)
+
+
def tail_check(check):
rex = re.compile(check)
def checker(nodes):
@@ -231,7 +237,7 @@ if pygments:
(".//div[@class='inc-lines highlight-text']//pre",
r'^class Foo:\n pass\nclass Bar:\n$'),
(".//div[@class='inc-startend highlight-text']//pre",
- ur'^foo = u"Including Unicode characters: üöä"\n$'),
+ ur'^foo = "Including Unicode characters: üöä"\n$'),
(".//div[@class='inc-preappend highlight-text']//pre",
r'(?m)^START CODE$'),
(".//div[@class='inc-pyobj-dedent highlight-python']//span",
diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py
index 4405395a..6c1ccad9 100644
--- a/tests/test_build_latex.py
+++ b/tests/test_build_latex.py
@@ -32,6 +32,9 @@ None:None: WARNING: no matching candidate for image URI u'foo.\\*'
WARNING: invalid pair index entry u''
"""
+if sys.version_info >= (3, 0):
+ LATEX_WARNINGS = remove_unicode_literals(LATEX_WARNINGS)
+
@with_app(buildername='latex', warning=latex_warnfile, cleanenv=True)
def test_latex(app):
diff --git a/tests/test_config.py b/tests/test_config.py
index cb4e1105..b5f88a6f 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -9,6 +9,7 @@
:copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+import sys
from util import *
@@ -84,11 +85,23 @@ def test_extension_values(app):
@with_tempdir
def test_errors_warnings(dir):
# test the error for syntax errors in the config file
- write_file(dir / 'conf.py', 'project = \n')
+ write_file(dir / 'conf.py', u'project = \n', 'ascii')
raises_msg(ConfigError, 'conf.py', Config, dir, 'conf.py', {}, None)
+ # test the automatic conversion of 2.x only code in configs
+ write_file(dir / 'conf.py', u'# -*- coding: utf-8\n\n'
+ u'project = u"Jägermeister"\n', 'utf-8')
+ cfg = Config(dir, 'conf.py', {}, None)
+ cfg.init_values()
+ assert cfg.project == u'Jägermeister'
+
# test the warning for bytestrings with non-ascii content
- write_file(dir / 'conf.py', '# -*- coding: latin-1\nproject = "foo\xe4"\n')
+ # bytestrings with non-ascii content are a syntax error in python3 so we
+ # skip the test there
+ if sys.version_info >= (3, 0):
+ return
+ write_file(dir / 'conf.py', u'# -*- coding: latin-1\nproject = "fooä"\n',
+ 'latin-1')
cfg = Config(dir, 'conf.py', {}, None)
warned = [False]
def warn(msg):
diff --git a/tests/test_coverage.py b/tests/test_coverage.py
index 1262ebf5..cb831635 100644
--- a/tests/test_coverage.py
+++ b/tests/test_coverage.py
@@ -33,7 +33,7 @@ def test_build(app):
assert 'api.h' in c_undoc
assert ' * Py_SphinxTest' in c_undoc
- undoc_py, undoc_c = pickle.loads((app.outdir / 'undoc.pickle').text())
+ undoc_py, undoc_c = pickle.loads((app.outdir / 'undoc.pickle').bytes())
assert len(undoc_c) == 1
# the key is the full path to the header file, which isn't testable
assert undoc_c.values()[0] == [('function', 'Py_SphinxTest')]
diff --git a/tests/test_env.py b/tests/test_env.py
index 4ecbaac4..124ed08c 100644
--- a/tests/test_env.py
+++ b/tests/test_env.py
@@ -8,6 +8,7 @@
:copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+import sys
from util import *
@@ -54,8 +55,10 @@ def test_images():
app._warning.reset()
htmlbuilder = StandaloneHTMLBuilder(app)
htmlbuilder.post_process_images(tree)
- assert "no matching candidate for image URI u'foo.*'" in \
- app._warning.content[-1]
+ image_uri_message = "no matching candidate for image URI u'foo.*'"
+ if sys.version_info >= (3, 0):
+ image_uri_message = remove_unicode_literals(image_uri_message)
+ assert image_uri_message in app._warning.content[-1]
assert set(htmlbuilder.images.keys()) == \
set(['subdir/img.png', 'img.png', 'subdir/simg.png', 'svgimg.svg'])
assert set(htmlbuilder.images.values()) == \
@@ -64,8 +67,7 @@ def test_images():
app._warning.reset()
latexbuilder = LaTeXBuilder(app)
latexbuilder.post_process_images(tree)
- assert "no matching candidate for image URI u'foo.*'" in \
- app._warning.content[-1]
+ assert image_uri_message in app._warning.content[-1]
assert set(latexbuilder.images.keys()) == \
set(['subdir/img.png', 'subdir/simg.png', 'img.png', 'img.pdf',
'svgimg.pdf'])
diff --git a/tests/test_intersphinx.py b/tests/test_intersphinx.py
index 3b50cc78..990e35bd 100644
--- a/tests/test_intersphinx.py
+++ b/tests/test_intersphinx.py
@@ -11,7 +11,10 @@
import zlib
import posixpath
-from cStringIO import StringIO
+try:
+ from io import BytesIO
+except ImportError:
+ from cStringIO import StringIO as BytesIO
from docutils import nodes
@@ -28,23 +31,23 @@ inventory_v1 = '''\
# Version: 1.0
module mod foo.html
module.cls class foo.html
-'''
+'''.encode('utf-8')
inventory_v2 = '''\
# Sphinx inventory version 2
# Project: foo
# Version: 2.0
# The remainder of this file is compressed with zlib.
-''' + zlib.compress('''\
+'''.encode('utf-8') + zlib.compress('''\
module1 py:module 0 foo.html#module-module1 Long Module desc
module2 py:module 0 foo.html#module-$ -
module1.func py:function 1 sub/foo.html#$ -
CFunc c:function 2 cfunc.html#CFunc -
-''')
+'''.encode('utf-8'))
def test_read_inventory_v1():
- f = StringIO(inventory_v1)
+ f = BytesIO(inventory_v1)
f.readline()
invdata = read_inventory_v1(f, '/util', posixpath.join)
assert invdata['py:module']['module'] == \
@@ -54,12 +57,12 @@ def test_read_inventory_v1():
def test_read_inventory_v2():
- f = StringIO(inventory_v2)
+ f = BytesIO(inventory_v2)
f.readline()
invdata1 = read_inventory_v2(f, '/util', posixpath.join)
# try again with a small buffer size to test the chunking algorithm
- f = StringIO(inventory_v2)
+ f = BytesIO(inventory_v2)
f.readline()
invdata2 = read_inventory_v2(f, '/util', posixpath.join, bufsize=5)
diff --git a/tests/test_markup.py b/tests/test_markup.py
index 31817df6..092113bb 100644
--- a/tests/test_markup.py
+++ b/tests/test_markup.py
@@ -17,6 +17,7 @@ from docutils import frontend, utils, nodes
from docutils.parsers import rst
from sphinx.util import texescape
+from sphinx.util.pycompat import b
from sphinx.writers.html import HTMLWriter, SmartyPantsHTMLTranslator
from sphinx.writers.latex import LaTeXWriter, LaTeXTranslator
@@ -50,7 +51,7 @@ class ForgivingLaTeXTranslator(LaTeXTranslator, ForgivingTranslator):
def verify_re(rst, html_expected, latex_expected):
- document = utils.new_document('test data', settings)
+ document = utils.new_document(b('test data'), settings)
document['file'] = 'dummy'
parser.parse(rst, document)
for msg in document.traverse(nodes.system_message):
diff --git a/tests/test_quickstart.py b/tests/test_quickstart.py
index cb40d27c..541959bd 100644
--- a/tests/test_quickstart.py
+++ b/tests/test_quickstart.py
@@ -36,8 +36,13 @@ def mock_raw_input(answers, needanswer=False):
return ''
return raw_input
+try:
+ real_raw_input = raw_input
+except NameError:
+ real_raw_input = input
+
def teardown_module():
- qs.raw_input = __builtin__.raw_input
+ qs.term_input = real_raw_input
qs.TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
coloron()
@@ -51,7 +56,7 @@ def test_do_prompt():
'Q5': 'no',
'Q6': 'foo',
}
- qs.raw_input = mock_raw_input(answers)
+ qs.term_input = mock_raw_input(answers)
try:
qs.do_prompt(d, 'k1', 'Q1')
except AssertionError:
@@ -79,13 +84,18 @@ def test_quickstart_defaults(tempdir):
'Author name': 'Georg Brandl',
'Project version': '0.1',
}
- qs.raw_input = mock_raw_input(answers)
+ qs.term_input = mock_raw_input(answers)
qs.inner_main([])
conffile = tempdir / 'conf.py'
assert conffile.isfile()
ns = {}
- execfile(conffile, ns)
+ f = open(conffile, 'U')
+ try:
+ code = compile(f.read(), conffile, 'exec')
+ finally:
+ f.close()
+ exec code in ns
assert ns['extensions'] == []
assert ns['templates_path'] == ['_templates']
assert ns['source_suffix'] == '.rst'
@@ -112,8 +122,8 @@ def test_quickstart_all_answers(tempdir):
'Root path': tempdir,
'Separate source and build': 'y',
'Name prefix for templates': '.',
- 'Project name': 'STASI\xe2\x84\xa2',
- 'Author name': 'Wolfgang Sch\xc3\xa4uble & G\'Beckstein',
+ 'Project name': u'STASI™'.encode('utf-8'),
+ 'Author name': u'Wolfgang Schäuble & G\'Beckstein'.encode('utf-8'),
'Project version': '2.0',
'Project release': '2.0.1',
'Source file suffix': '.txt',
@@ -131,14 +141,19 @@ def test_quickstart_all_answers(tempdir):
'Create Windows command file': 'no',
'Do you want to use the epub builder': 'yes',
}
- qs.raw_input = mock_raw_input(answers, needanswer=True)
+ qs.term_input = mock_raw_input(answers, needanswer=True)
qs.TERM_ENCODING = 'utf-8'
qs.inner_main([])
conffile = tempdir / 'source' / 'conf.py'
assert conffile.isfile()
ns = {}
- execfile(conffile, ns)
+ f = open(conffile, 'U')
+ try:
+ code = compile(f.read(), conffile, 'exec')
+ finally:
+ f.close()
+ exec code in ns
assert ns['extensions'] == ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
assert ns['templates_path'] == ['.templates']
assert ns['source_suffix'] == '.txt'
diff --git a/tests/test_search.py b/tests/test_search.py
index 0b5b158b..c0750366 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -13,6 +13,7 @@ from docutils import frontend, utils
from docutils.parsers import rst
from sphinx.search import IndexBuilder
+from sphinx.util.pycompat import b
settings = parser = None
@@ -31,7 +32,7 @@ test that non-comments are indexed: fermion
'''
def test_wordcollector():
- doc = utils.new_document('test data', settings)
+ doc = utils.new_document(b('test data'), settings)
doc['file'] = 'dummy'
parser.parse(FILE_CONTENTS, doc)
diff --git a/tests/test_searchadapters.py b/tests/test_searchadapters.py
new file mode 100644
index 00000000..cf5accb9
--- /dev/null
+++ b/tests/test_searchadapters.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+"""
+ test_searchadapters
+ ~~~~~~~~~~~~~~~~~~~
+
+ Test the Web Support Package search adapters.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import os, sys
+from StringIO import StringIO
+
+from nose import SkipTest
+
+from sphinx.websupport import WebSupport
+
+from test_websupport import sqlalchemy_missing
+from util import *
+
+
+def clear_builddir():
+ (test_root / 'websupport').rmtree(True)
+
+
+def teardown_module():
+ (test_root / 'generated').rmtree(True)
+ clear_builddir()
+
+
+def search_adapter_helper(adapter):
+ clear_builddir()
+
+ settings = {'builddir': os.path.join(test_root, 'websupport'),
+ 'status': StringIO(),
+ 'warning': StringIO()}
+ settings.update({'srcdir': test_root,
+ 'search': adapter})
+ support = WebSupport(**settings)
+ support.build()
+
+ s = support.search
+
+ # Test the adapter's query method. A search for "Epigraph" should return
+ # one result.
+ results = s.query(u'Epigraph')
+ assert len(results) == 1, \
+ '%s search adapter returned %s search result(s), should have been 1'\
+ % (adapter, len(results))
+
+ # Make sure documents are properly updated by the search adapter.
+ s.init_indexing(changed=['markup'])
+ s.add_document(u'markup', u'title', u'SomeLongRandomWord')
+ s.finish_indexing()
+ # Now a search for "Epigraph" should return zero results.
+ results = s.query(u'Epigraph')
+ assert len(results) == 0, \
+ '%s search adapter returned %s search result(s), should have been 0'\
+ % (adapter, len(results))
+ # A search for "SomeLongRandomWord" should return one result.
+ results = s.query(u'SomeLongRandomWord')
+ assert len(results) == 1, \
+ '%s search adapter returned %s search result(s), should have been 1'\
+ % (adapter, len(results))
+ # Make sure it works through the WebSupport API
+ html = support.get_search_results(u'SomeLongRandomWord')
+
+
+@skip_unless_importable('xapian', 'needs xapian bindings installed')
+@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
+def test_xapian():
+ search_adapter_helper('xapian')
+
+
+@skip_unless_importable('whoosh', 'needs whoosh package installed')
+@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
+def test_whoosh():
+ search_adapter_helper('whoosh')
diff --git a/tests/test_versioning.py b/tests/test_versioning.py
new file mode 100644
index 00000000..923da203
--- /dev/null
+++ b/tests/test_versioning.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+"""
+ test_versioning
+ ~~~~~~~~~~~~~~~
+
+ Test the versioning implementation.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import pickle
+
+from util import *
+
+from docutils.statemachine import ViewList
+from docutils.parsers.rst.directives.html import MetaBody
+
+from sphinx import addnodes
+from sphinx.versioning import add_uids, merge_doctrees, get_ratio
+
+def setup_module():
+ global app, original, original_uids
+ app = TestApp()
+ app.builder.env.app = app
+ app.connect('doctree-resolved', on_doctree_resolved)
+ app.build()
+ original = doctrees['versioning/original']
+ original_uids = [n.uid for n in add_uids(original, is_paragraph)]
+
+def teardown_module():
+ app.cleanup()
+ (test_root / '_build').rmtree(True)
+
+doctrees = {}
+
+def on_doctree_resolved(app, doctree, docname):
+ doctrees[docname] = doctree
+
+def is_paragraph(node):
+ return node.__class__.__name__ == 'paragraph'
+
+def test_get_ratio():
+ assert get_ratio('', 'a')
+ assert get_ratio('a', '')
+
+def test_add_uids():
+ assert len(original_uids) == 3
+
+def test_picklablility():
+ # we have to modify the doctree so we can pickle it
+ copy = original.copy()
+ copy.reporter = None
+ copy.transformer = None
+ copy.settings.warning_stream = None
+ copy.settings.env = None
+ copy.settings.record_dependencies = None
+ for metanode in copy.traverse(MetaBody.meta):
+ metanode.__class__ = addnodes.meta
+ loaded = pickle.loads(pickle.dumps(copy, pickle.HIGHEST_PROTOCOL))
+ assert all(getattr(n, 'uid', False) for n in loaded.traverse(is_paragraph))
+
+def test_modified():
+ modified = doctrees['versioning/modified']
+ new_nodes = list(merge_doctrees(original, modified, is_paragraph))
+ uids = [n.uid for n in modified.traverse(is_paragraph)]
+ assert not new_nodes
+ assert original_uids == uids
+
+def test_added():
+ added = doctrees['versioning/added']
+ new_nodes = list(merge_doctrees(original, added, is_paragraph))
+ uids = [n.uid for n in added.traverse(is_paragraph)]
+ assert len(new_nodes) == 1
+ assert original_uids == uids[:-1]
+
+def test_deleted():
+ deleted = doctrees['versioning/deleted']
+ new_nodes = list(merge_doctrees(original, deleted, is_paragraph))
+ uids = [n.uid for n in deleted.traverse(is_paragraph)]
+ assert not new_nodes
+ assert original_uids[::2] == uids
+
+def test_deleted_end():
+ deleted_end = doctrees['versioning/deleted_end']
+ new_nodes = list(merge_doctrees(original, deleted_end, is_paragraph))
+ uids = [n.uid for n in deleted_end.traverse(is_paragraph)]
+ assert not new_nodes
+ assert original_uids[:-1] == uids
+
+def test_insert():
+ insert = doctrees['versioning/insert']
+ new_nodes = list(merge_doctrees(original, insert, is_paragraph))
+ uids = [n.uid for n in insert.traverse(is_paragraph)]
+ assert len(new_nodes) == 1
+ assert original_uids[0] == uids[0]
+ assert original_uids[1:] == uids[2:]
+
+def test_insert_beginning():
+ insert_beginning = doctrees['versioning/insert_beginning']
+ new_nodes = list(merge_doctrees(original, insert_beginning, is_paragraph))
+ uids = [n.uid for n in insert_beginning.traverse(is_paragraph)]
+ assert len(new_nodes) == 1
+ assert len(uids) == 4
+ assert original_uids == uids[1:]
+ assert original_uids[0] != uids[0]
+
+def test_insert_similar():
+ insert_similar = doctrees['versioning/insert_similar']
+ new_nodes = list(merge_doctrees(original, insert_similar, is_paragraph))
+ uids = [n.uid for n in insert_similar.traverse(is_paragraph)]
+ assert len(new_nodes) == 1
+ assert new_nodes[0].rawsource == u'Anyway I need more'
+ assert original_uids[0] == uids[0]
+ assert original_uids[1:] == uids[2:]
diff --git a/tests/test_websupport.py b/tests/test_websupport.py
new file mode 100644
index 00000000..65957378
--- /dev/null
+++ b/tests/test_websupport.py
@@ -0,0 +1,279 @@
+# -*- coding: utf-8 -*-
+"""
+ test_websupport
+ ~~~~~~~~~~~~~~~
+
+ Test the Web Support Package
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import os
+from StringIO import StringIO
+
+try:
+ from functools import wraps
+except ImportError:
+ # functools is new in 2.5
+ wraps = lambda f: (lambda w: w)
+
+from nose import SkipTest
+
+from sphinx.websupport import WebSupport
+from sphinx.websupport.errors import *
+from sphinx.websupport.storage import StorageBackend
+from sphinx.websupport.storage.differ import CombinedHtmlDiff
+try:
+ from sphinx.websupport.storage.sqlalchemystorage import Session, \
+ SQLAlchemyStorage, Comment, CommentVote
+ from sphinx.websupport.storage.sqlalchemy_db import Node
+ sqlalchemy_missing = False
+except ImportError:
+ sqlalchemy_missing = True
+
+from util import *
+
+
+default_settings = {'builddir': os.path.join(test_root, 'websupport'),
+ 'status': StringIO(),
+ 'warning': StringIO()}
+
+def teardown_module():
+ (test_root / 'generated').rmtree(True)
+ (test_root / 'websupport').rmtree(True)
+
+
+def with_support(*args, **kwargs):
+ """Make a WebSupport object and pass it the test."""
+ settings = default_settings.copy()
+ settings.update(kwargs)
+
+ def generator(func):
+ @wraps(func)
+ def new_func(*args2, **kwargs2):
+ support = WebSupport(**settings)
+ func(support, *args2, **kwargs2)
+ return new_func
+ return generator
+
+
+class NullStorage(StorageBackend):
+ pass
+
+
+@with_support(storage=NullStorage())
+def test_no_srcdir(support):
+ """Make sure the correct exception is raised if srcdir is not given."""
+ raises(SrcdirNotSpecifiedError, support.build)
+
+
+@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
+@with_support(srcdir=test_root)
+def test_build(support):
+ support.build()
+
+
+@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
+@with_support()
+def test_get_document(support):
+ raises(DocumentNotFoundError, support.get_document, 'nonexisting')
+
+ contents = support.get_document('contents')
+ assert contents['title'] and contents['body'] \
+ and contents['sidebar'] and contents['relbar']
+
+
+@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
+@with_support()
+def test_comments(support):
+ session = Session()
+ nodes = session.query(Node).all()
+ first_node = nodes[0]
+ second_node = nodes[1]
+
+ # Create a displayed comment and a non displayed comment.
+ comment = support.add_comment('First test comment',
+ node_id=first_node.id,
+ username='user_one')
+ hidden_comment = support.add_comment('Hidden comment',
+ node_id=first_node.id,
+ displayed=False)
+ # Make sure that comments can't be added to a comment where
+ # displayed == False, since it could break the algorithm that
+ # converts a node's comments to a tree.
+ raises(CommentNotAllowedError, support.add_comment, 'Not allowed',
+ parent_id=str(hidden_comment['id']))
+ # Add a displayed and not displayed child to the displayed comment.
+ support.add_comment('Child test comment', parent_id=str(comment['id']),
+ username='user_one')
+ support.add_comment('Hidden child test comment',
+ parent_id=str(comment['id']), displayed=False)
+ # Add a comment to another node to make sure it isn't returned later.
+ support.add_comment('Second test comment',
+ node_id=second_node.id,
+ username='user_two')
+
+ # Access the comments as a moderator.
+ data = support.get_data(first_node.id, moderator=True)
+ comments = data['comments']
+ children = comments[0]['children']
+ assert len(comments) == 2
+ assert comments[1]['text'] == 'Hidden comment'
+ assert len(children) == 2
+ assert children[1]['text'] == 'Hidden child test comment'
+
+ # Access the comments without being a moderator.
+ data = support.get_data(first_node.id)
+ comments = data['comments']
+ children = comments[0]['children']
+ assert len(comments) == 1
+ assert comments[0]['text'] == 'First test comment'
+ assert len(children) == 1
+ assert children[0]['text'] == 'Child test comment'
+
+
+@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
+@with_support()
+def test_voting(support):
+ session = Session()
+ nodes = session.query(Node).all()
+ node = nodes[0]
+
+ comment = support.get_data(node.id)['comments'][0]
+
+ def check_rating(val):
+ data = support.get_data(node.id)
+ comment = data['comments'][0]
+ assert comment['rating'] == val, '%s != %s' % (comment['rating'], val)
+
+ support.process_vote(comment['id'], 'user_one', '1')
+ support.process_vote(comment['id'], 'user_two', '1')
+ support.process_vote(comment['id'], 'user_three', '1')
+ check_rating(3)
+ support.process_vote(comment['id'], 'user_one', '-1')
+ check_rating(1)
+ support.process_vote(comment['id'], 'user_one', '0')
+ check_rating(2)
+
+ # Make sure a vote with value > 1 or < -1 can't be cast.
+ raises(ValueError, support.process_vote, comment['id'], 'user_one', '2')
+ raises(ValueError, support.process_vote, comment['id'], 'user_one', '-2')
+
+ # Make sure past voting data is associated with comments when they are
+ # fetched.
+ data = support.get_data(str(node.id), username='user_two')
+ comment = data['comments'][0]
+ assert comment['vote'] == 1, '%s != 1' % comment['vote']
+
+
+@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
+@with_support()
+def test_proposals(support):
+ session = Session()
+ node = session.query(Node).first()
+
+ data = support.get_data(node.id)
+
+ source = data['source']
+ proposal = source[:5] + source[10:15] + 'asdf' + source[15:]
+
+ comment = support.add_comment('Proposal comment',
+ node_id=node.id,
+ proposal=proposal)
+
+
+@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
+@with_support()
+def test_user_delete_comments(support):
+ def get_comment():
+ session = Session()
+ node = session.query(Node).first()
+ session.close()
+ return support.get_data(node.id)['comments'][0]
+
+ comment = get_comment()
+ assert comment['username'] == 'user_one'
+ # Make sure other normal users can't delete someone else's comments.
+ raises(UserNotAuthorizedError, support.delete_comment,
+ comment['id'], username='user_two')
+ # Now delete the comment using the correct username.
+ support.delete_comment(comment['id'], username='user_one')
+ comment = get_comment()
+ assert comment['username'] == '[deleted]'
+ assert comment['text'] == '[deleted]'
+
+
+@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
+@with_support()
+def test_moderator_delete_comments(support):
+ def get_comment():
+ session = Session()
+ node = session.query(Node).first()
+ session.close()
+ return support.get_data(node.id, moderator=True)['comments'][1]
+
+ comment = get_comment()
+ support.delete_comment(comment['id'], username='user_two',
+ moderator=True)
+ comment = get_comment()
+ assert comment['username'] == '[deleted]'
+ assert comment['text'] == '[deleted]'
+
+
+@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
+@with_support()
+def test_update_username(support):
+ support.update_username('user_two', 'new_user_two')
+ session = Session()
+ comments = session.query(Comment).\
+ filter(Comment.username == 'user_two').all()
+ assert len(comments) == 0
+ votes = session.query(CommentVote).\
+ filter(CommentVote.username == 'user_two').all()
+ assert len(votes) == 0
+ comments = session.query(Comment).\
+ filter(Comment.username == 'new_user_two').all()
+ assert len(comments) == 1
+ votes = session.query(CommentVote).\
+ filter(CommentVote.username == 'new_user_two').all()
+ assert len(votes) == 1
+
+
+called = False
+def moderation_callback(comment):
+ global called
+ called = True
+
+
+@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
+@with_support(moderation_callback=moderation_callback)
+def test_moderation(support):
+ session = Session()
+ nodes = session.query(Node).all()
+ node = nodes[7]
+ session.close()
+ accepted = support.add_comment('Accepted Comment', node_id=node.id,
+ displayed=False)
+ rejected = support.add_comment('Rejected comment', node_id=node.id,
+ displayed=False)
+ # Make sure the moderation_callback is called.
+ assert called == True
+ # Make sure the user must be a moderator.
+ raises(UserNotAuthorizedError, support.accept_comment, accepted['id'])
+ raises(UserNotAuthorizedError, support.reject_comment, accepted['id'])
+ support.accept_comment(accepted['id'], moderator=True)
+ support.reject_comment(rejected['id'], moderator=True)
+ comments = support.get_data(node.id)['comments']
+ assert len(comments) == 1
+ comments = support.get_data(node.id, moderator=True)['comments']
+ assert len(comments) == 1
+
+
+def test_differ():
+ differ = CombinedHtmlDiff()
+ source = 'Lorem ipsum dolor sit amet,\nconsectetur adipisicing elit,\n' \
+ 'sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
+ prop = 'Lorem dolor sit amet,\nconsectetur nihil adipisicing elit,\n' \
+ 'sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
+ differ.make_html(source, prop)
diff --git a/tests/util.py b/tests/util.py
index 1b24af0e..d56f3464 100644
--- a/tests/util.py
+++ b/tests/util.py
@@ -11,6 +11,8 @@ import sys
import StringIO
import tempfile
import shutil
+import re
+from codecs import open
try:
from functools import wraps
@@ -23,15 +25,15 @@ from sphinx.ext.autodoc import AutoDirective
from path import path
-from nose import tools
+from nose import tools, SkipTest
__all__ = [
- 'test_root',
- 'raises', 'raises_msg', 'Struct',
+ 'test_root', 'raises', 'raises_msg',
+ 'skip_if', 'skip_unless', 'skip_unless_importable', 'Struct',
'ListOutput', 'TestApp', 'with_app', 'gen_with_app',
'path', 'with_tempdir', 'write_file',
- 'sprint',
+ 'sprint', 'remove_unicode_literals',
]
@@ -69,6 +71,30 @@ def raises_msg(exc, msg, func, *args, **kwds):
raise AssertionError('%s did not raise %s' %
(func.__name__, _excstr(exc)))
+def skip_if(condition, msg=None):
+ """Decorator to skip test if condition is true."""
+ def deco(test):
+ @tools.make_decorator(test)
+ def skipper(*args, **kwds):
+ if condition:
+ raise SkipTest(msg or 'conditional skip')
+ return test(*args, **kwds)
+ return skipper
+ return deco
+
+def skip_unless(condition, msg=None):
+ """Decorator to skip test if condition is false."""
+ return skip_if(not condition, msg)
+
+def skip_unless_importable(module, msg=None):
+ """Decorator to skip test if module is not importable."""
+ try:
+ __import__(module)
+ except ImportError:
+ return skip_if(True, msg)
+ else:
+ return skip_if(False, msg)
+
class Struct(object):
def __init__(self, **kwds):
@@ -191,11 +217,21 @@ def with_tempdir(func):
return new_func
-def write_file(name, contents):
- f = open(str(name), 'wb')
+def write_file(name, contents, encoding=None):
+ if encoding is None:
+ mode = 'wb'
+ if isinstance(contents, unicode):
+ contents = contents.encode('ascii')
+ else:
+ mode = 'w'
+ f = open(str(name), mode, encoding=encoding)
f.write(contents)
f.close()
def sprint(*args):
sys.stderr.write(' '.join(map(str, args)) + '\n')
+
+_unicode_literals_re = re.compile(r'u(".*?")|u(\'.*?\')')
+def remove_unicode_literals(s):
+ return _unicode_literals_re.sub(lambda x: x.group(1) or x.group(2), s)
diff --git a/utils/check_sources.py b/utils/check_sources.py
index 0571ab1e..c412742b 100755
--- a/utils/check_sources.py
+++ b/utils/check_sources.py
@@ -12,10 +12,16 @@
"""
import sys, os, re
-import getopt
import cStringIO
+from optparse import OptionParser
from os.path import join, splitext, abspath
+if sys.version_info >= (3, 0):
+ def b(s):
+ return s.encode('utf-8')
+else:
+ b = str
+
checkers = {}
@@ -30,26 +36,26 @@ def checker(*suffixes, **kwds):
name_mail_re = r'[\w ]+(<.*?>)?'
-copyright_re = re.compile(r'^ :copyright: Copyright 200\d(-20\d\d)? '
- r'by %s(, %s)*[,.]$' %
- (name_mail_re, name_mail_re))
-license_re = re.compile(r" :license: (.*?).\n")
-copyright_2_re = re.compile(r'^ %s(, %s)*[,.]$' %
- (name_mail_re, name_mail_re))
-coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
-not_ix_re = re.compile(r'\bnot\s+\S+?\s+i[sn]\s\S+')
-is_const_re = re.compile(r'if.*?==\s+(None|False|True)\b')
-
-misspellings = ["developement", "adress", "verificate", # ALLOW-MISSPELLING
- "informations"] # ALLOW-MISSPELLING
-
-
-@checker('.py')
-def check_syntax(fn, lines):
- try:
- compile(''.join(lines), fn, "exec")
- except SyntaxError, err:
- yield 0, "not compilable: %s" % err
+copyright_re = re.compile(b(r'^ :copyright: Copyright 200\d(-20\d\d)? '
+ r'by %s(, %s)*[,.]$' %
+ (name_mail_re, name_mail_re)))
+license_re = re.compile(b(r" :license: (.*?).\n"))
+copyright_2_re = re.compile(b(r'^ %s(, %s)*[,.]$' %
+ (name_mail_re, name_mail_re)))
+coding_re = re.compile(b(r'coding[:=]\s*([-\w.]+)'))
+not_ix_re = re.compile(b(r'\bnot\s+\S+?\s+i[sn]\s\S+'))
+is_const_re = re.compile(b(r'if.*?==\s+(None|False|True)\b'))
+
+misspellings = [b("developement"), b("adress"), # ALLOW-MISSPELLING
+ b("verificate"), b("informations")] # ALLOW-MISSPELLING
+
+if sys.version_info < (3, 0):
+ @checker('.py')
+ def check_syntax(fn, lines):
+ try:
+ compile(b('').join(lines), fn, "exec")
+ except SyntaxError, err:
+ yield 0, "not compilable: %s" % err
@checker('.py')
@@ -61,8 +67,8 @@ def check_style_and_encoding(fn, lines):
if lno < 2:
co = coding_re.search(line)
if co:
- encoding = co.group(1)
- if line.strip().startswith('#'):
+ encoding = co.group(1).decode('ascii')
+ if line.strip().startswith(b('#')):
continue
#m = not_ix_re.search(line)
#if m:
@@ -82,7 +88,7 @@ def check_style_and_encoding(fn, lines):
def check_fileheader(fn, lines):
# line number correction
c = 1
- if lines[0:1] == ['#!/usr/bin/env python\n']:
+ if lines[0:1] == [b('#!/usr/bin/env python\n')]:
lines = lines[1:]
c = 2
@@ -91,38 +97,38 @@ def check_fileheader(fn, lines):
for lno, l in enumerate(lines):
llist.append(l)
if lno == 0:
- if l == '# -*- coding: rot13 -*-\n':
+ if l == b('# -*- coding: rot13 -*-\n'):
# special-case pony package
return
- elif l != '# -*- coding: utf-8 -*-\n':
+ elif l != b('# -*- coding: utf-8 -*-\n'):
yield 1, "missing coding declaration"
elif lno == 1:
- if l != '"""\n' and l != 'r"""\n':
+ if l != b('"""\n') and l != b('r"""\n'):
yield 2, 'missing docstring begin (""")'
else:
docopen = True
elif docopen:
- if l == '"""\n':
+ if l == b('"""\n'):
# end of docstring
if lno <= 4:
yield lno+c, "missing module name in docstring"
break
- if l != "\n" and l[:4] != ' ' and docopen:
+ if l != b("\n") and l[:4] != b(' ') and docopen:
yield lno+c, "missing correct docstring indentation"
if lno == 2:
# if not in package, don't check the module name
modname = fn[:-3].replace('/', '.').replace('.__init__', '')
while modname:
- if l.lower()[4:-1] == modname:
+ if l.lower()[4:-1] == b(modname):
break
modname = '.'.join(modname.split('.')[1:])
else:
yield 3, "wrong module name in docstring heading"
modnamelen = len(l.strip())
elif lno == 3:
- if l.strip() != modnamelen * "~":
+ if l.strip() != modnamelen * b("~"):
yield 4, "wrong module name underline, should be ~~~...~"
else:
@@ -145,16 +151,16 @@ def check_fileheader(fn, lines):
@checker('.py', '.html', '.rst')
def check_whitespace_and_spelling(fn, lines):
for lno, line in enumerate(lines):
- if "\t" in line:
+ if b("\t") in line:
yield lno+1, "OMG TABS!!!1 "
- if line[:-1].rstrip(' \t') != line[:-1]:
+ if line[:-1].rstrip(b(' \t')) != line[:-1]:
yield lno+1, "trailing whitespace"
for word in misspellings:
- if word in line and 'ALLOW-MISSPELLING' not in line:
+ if word in line and b('ALLOW-MISSPELLING') not in line:
yield lno+1, '"%s" used' % word
-bad_tags = ('<u>', '<s>', '<strike>', '<center>', '<font')
+bad_tags = map(b, ['<u>', '<s>', '<strike>', '<center>', '<font'])
@checker('.html')
def check_xhtml(fn, lines):
@@ -165,34 +171,32 @@ def check_xhtml(fn, lines):
def main(argv):
- try:
- gopts, args = getopt.getopt(argv[1:], "vi:")
- except getopt.GetoptError:
- print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
- return 2
- opts = {}
- for opt, val in gopts:
- if opt == '-i':
- val = abspath(val)
- opts.setdefault(opt, []).append(val)
+ parser = OptionParser(usage='Usage: %prog [-v] [-i ignorepath]* [path]')
+ parser.add_option('-v', '--verbose', dest='verbose', default=False,
+ action='store_true')
+ parser.add_option('-i', '--ignore-path', dest='ignored_paths',
+ default=[], action='append')
+ options, args = parser.parse_args(argv[1:])
if len(args) == 0:
path = '.'
elif len(args) == 1:
path = args[0]
else:
- print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
- return 2
+ # more than one positional argument supplied
+ parser.error('No more than one path supported')
- verbose = '-v' in opts
+ verbose = options.verbose
+ ignored_paths = set(abspath(p) for p in options.ignored_paths)
num = 0
out = cStringIO.StringIO()
for root, dirs, files in os.walk(path):
- if '.svn' in dirs:
- dirs.remove('.svn')
- if '-i' in opts and abspath(root) in opts['-i']:
+ for vcs_dir in ['.svn', '.hg', '.git']:
+ if vcs_dir in dirs:
+ dirs.remove(vcs_dir)
+ if abspath(root) in ignored_paths:
del dirs[:]
continue
in_check_pkg = root.startswith('./sphinx')
@@ -201,7 +205,7 @@ def main(argv):
fn = join(root, fn)
if fn[:2] == './': fn = fn[2:]
- if '-i' in opts and abspath(fn) in opts['-i']:
+ if abspath(fn) in ignored_paths:
continue
ext = splitext(fn)[1]
@@ -213,8 +217,11 @@ def main(argv):
print "Checking %s..." % fn
try:
- f = open(fn, 'r')
- lines = list(f)
+ f = open(fn, 'rb')
+ try:
+ lines = list(f)
+ finally:
+ f.close()
except (IOError, OSError), err:
print "%s: cannot open: %s" % (fn, err)
num += 1
diff --git a/utils/convert.py b/utils/convert.py
new file mode 100644
index 00000000..f025c49a
--- /dev/null
+++ b/utils/convert.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+# coding: utf-8
+"""
+ Converts files with 2to3
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Creates a Python 3 version of each file.
+
+ The Python3 version of a file foo.py will be called foo3.py.
+
+ :copyright: Copyright 2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import os
+import sys
+from glob import iglob
+from optparse import OptionParser
+from shutil import copy
+from distutils.util import run_2to3
+
+def main(argv):
+ parser = OptionParser(usage='%prog [path]')
+ parser.add_option('-i', '--ignorepath', dest='ignored_paths',
+ action='append', default=[])
+ options, args = parser.parse_args(argv)
+
+ ignored_paths = {os.path.abspath(p) for p in options.ignored_paths}
+
+ path = os.path.abspath(args[0]) if args else os.getcwd()
+ convertables = []
+ for filename in iglob(os.path.join(path, '*.py')):
+ if filename in ignored_paths:
+ continue
+ basename, ext = os.path.splitext(filename)
+ if basename.endswith('3'):
+ continue
+ filename3 = basename + '3' + ext
+ copy(filename, filename3)
+ convertables.append(filename3)
+ run_2to3(convertables)
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/utils/reindent.py b/utils/reindent.py
index c499f671..59828fd8 100755
--- a/utils/reindent.py
+++ b/utils/reindent.py
@@ -1,16 +1,14 @@
#! /usr/bin/env python
# Released to the public domain, by Tim Peters, 03 October 2000.
-# -B option added by Georg Brandl, 2006.
"""reindent [-d][-r][-v] [ path ... ]
--d (--dryrun) Dry run. Analyze, but don't make any changes to files.
--r (--recurse) Recurse. Search for all .py files in subdirectories too.
--B (--no-backup) Don't write .bak backup files.
--v (--verbose) Verbose. Print informative msgs; else only names of \
-changed files.
--h (--help) Help. Print this usage information and exit.
+-d (--dryrun) Dry run. Analyze, but don't make any changes to, files.
+-r (--recurse) Recurse. Search for all .py files in subdirectories too.
+-n (--nobackup) No backup. Does not make a ".bak" file before reindenting.
+-v (--verbose) Verbose. Print informative msgs; else no output.
+-h (--help) Help. Print this usage information and exit.
Change Python (.py) files to use 4-space indents and no hard tab characters.
Also trim excess spaces and tabs from ends of lines, and remove empty lines
@@ -34,18 +32,30 @@ resulting .py file won't change it again).
The hard part of reindenting is figuring out what to do with comment
lines. So long as the input files get a clean bill of health from
tabnanny.py, reindent should do a good job.
+
+The backup file is a copy of the one that is being reindented. The ".bak"
+file is generated with shutil.copy(), but some corner cases regarding
+user/group and permissions could leave the backup file more readable than
+you'd prefer. You can always use the --nobackup option to prevent this.
"""
__version__ = "1"
import tokenize
-import os
+import os, shutil
import sys
-verbose = 0
-recurse = 0
-dryrun = 0
-no_backup = 0
+if sys.version_info >= (3, 0):
+ def tokens(readline, tokeneater):
+ for token in tokenize.tokenize(readline):
+ yield tokeneater(*token)
+else:
+ tokens = tokenize.tokenize
+
+verbose = 0
+recurse = 0
+dryrun = 0
+makebackup = True
def usage(msg=None):
if msg is not None:
@@ -61,12 +71,10 @@ def errprint(*args):
def main():
import getopt
- global verbose, recurse, dryrun, no_backup
-
+ global verbose, recurse, dryrun, makebackup
try:
- opts, args = getopt.getopt(sys.argv[1:], "drvhB",
- ["dryrun", "recurse", "verbose", "help",
- "no-backup"])
+ opts, args = getopt.getopt(sys.argv[1:], "drnvh",
+ ["dryrun", "recurse", "nobackup", "verbose", "help"])
except getopt.error, msg:
usage(msg)
return
@@ -75,10 +83,10 @@ def main():
dryrun += 1
elif o in ('-r', '--recurse'):
recurse += 1
+ elif o in ('-n', '--nobackup'):
+ makebackup = False
elif o in ('-v', '--verbose'):
verbose += 1
- elif o in ('-B', '--no-backup'):
- no_backup += 1
elif o in ('-h', '--help'):
usage()
return
@@ -98,7 +106,8 @@ def check(file):
for name in names:
fullname = os.path.join(file, name)
if ((recurse and os.path.isdir(fullname) and
- not os.path.islink(fullname))
+ not os.path.islink(fullname) and
+ not os.path.split(fullname)[1].startswith("."))
or name.lower().endswith(".py")):
check(fullname)
return
@@ -118,26 +127,35 @@ def check(file):
print "changed."
if dryrun:
print "But this is a dry run, so leaving it alone."
- else:
- print "reindented", file, \
- (dryrun and "(dry run => not really)" or "")
if not dryrun:
- if not no_backup:
- bak = file + ".bak"
- if os.path.exists(bak):
- os.remove(bak)
- os.rename(file, bak)
+ bak = file + ".bak"
+ if makebackup:
+ shutil.copyfile(file, bak)
if verbose:
- print "renamed", file, "to", bak
+ print "backed up", file, "to", bak
f = open(file, "w")
r.write(f)
f.close()
if verbose:
print "wrote new", file
+ return True
else:
if verbose:
print "unchanged."
+ return False
+
+def _rstrip(line, JUNK='\n \t'):
+ """Return line stripped of trailing spaces, tabs, newlines.
+
+ Note that line.rstrip() instead also strips sundry control characters,
+ but at least one known Emacs user expects to keep junk like that, not
+ mentioning Barry by name or anything <wink>.
+ """
+ i = len(line)
+ while i > 0 and line[i-1] in JUNK:
+ i -= 1
+ return line[:i]
class Reindenter:
@@ -151,7 +169,7 @@ class Reindenter:
# File lines, rstripped & tab-expanded. Dummy at start is so
# that we can use tokenize's 1-based line numbering easily.
# Note that a line is all-blank iff it's "\n".
- self.lines = [line.rstrip('\n \t').expandtabs() + "\n"
+ self.lines = [_rstrip(line).expandtabs() + "\n"
for line in self.raw]
self.lines.insert(0, None)
self.index = 1 # index into self.lines of next line
@@ -163,7 +181,7 @@ class Reindenter:
self.stats = []
def run(self):
- tokenize.tokenize(self.getline, self.tokeneater)
+ tokens(self.getline, self.tokeneater)
# Remove trailing empty lines.
lines = self.lines
while lines and lines[-1] == "\n":