summaryrefslogtreecommitdiff
path: root/sphinx
diff options
context:
space:
mode:
Diffstat (limited to 'sphinx')
-rw-r--r--sphinx/__init__.py25
-rw-r--r--sphinx/addnodes.py195
-rw-r--r--sphinx/application.py22
-rw-r--r--sphinx/builders/__init__.py46
-rw-r--r--sphinx/builders/devhelp.py1
-rw-r--r--sphinx/builders/epub.py32
-rw-r--r--sphinx/builders/html.py64
-rw-r--r--sphinx/builders/htmlhelp.py3
-rw-r--r--sphinx/builders/intl.py110
-rw-r--r--sphinx/builders/qthelp.py14
-rw-r--r--sphinx/builders/versioning.py72
-rw-r--r--sphinx/builders/websupport.py161
-rw-r--r--sphinx/config.py42
-rw-r--r--sphinx/directives/code.py24
-rw-r--r--sphinx/directives/other.py50
-rw-r--r--sphinx/domains/__init__.py29
-rw-r--r--sphinx/domains/cpp.py29
-rw-r--r--sphinx/domains/javascript.py2
-rw-r--r--sphinx/domains/python.py22
-rw-r--r--sphinx/domains/rst.py7
-rw-r--r--sphinx/environment.py228
-rw-r--r--sphinx/ext/autodoc.py162
-rw-r--r--sphinx/ext/autosummary/__init__.py22
-rw-r--r--sphinx/ext/autosummary/generate.py13
-rw-r--r--sphinx/ext/coverage.py5
-rw-r--r--sphinx/ext/doctest.py6
-rw-r--r--sphinx/ext/graphviz.py38
-rw-r--r--sphinx/ext/inheritance_diagram.py16
-rw-r--r--sphinx/ext/intersphinx.py26
-rw-r--r--sphinx/ext/oldcmarkup.py1
-rw-r--r--sphinx/ext/pngmath.py6
-rw-r--r--sphinx/ext/viewcode.py8
-rw-r--r--sphinx/highlighting.py4
-rw-r--r--sphinx/jinja2glue.py6
-rw-r--r--sphinx/locale/__init__.py40
-rw-r--r--sphinx/locale/sv/LC_MESSAGES/sphinx.js1
-rw-r--r--sphinx/locale/sv/LC_MESSAGES/sphinx.mobin0 -> 9509 bytes
-rw-r--r--sphinx/locale/sv/LC_MESSAGES/sphinx.po797
-rw-r--r--sphinx/pycode/__init__.py6
-rw-r--r--sphinx/pycode/nodes.py2
-rw-r--r--sphinx/pycode/pgen2/literals.py2
-rw-r--r--sphinx/pycode/pgen2/tokenize.py4
-rw-r--r--sphinx/quickstart.py88
-rw-r--r--sphinx/roles.py35
-rw-r--r--sphinx/setup_command.py3
-rw-r--r--sphinx/themes/basic/searchresults.html36
-rw-r--r--sphinx/themes/basic/static/ajax-loader.gifbin0 -> 673 bytes
-rw-r--r--sphinx/themes/basic/static/comment-bright.pngbin0 -> 3500 bytes
-rw-r--r--sphinx/themes/basic/static/comment-close.pngbin0 -> 3578 bytes
-rw-r--r--sphinx/themes/basic/static/comment.pngbin0 -> 3445 bytes
-rw-r--r--sphinx/themes/basic/static/down-pressed.pngbin0 -> 368 bytes
-rw-r--r--sphinx/themes/basic/static/down.pngbin0 -> 363 bytes
-rw-r--r--sphinx/themes/basic/static/up-pressed.pngbin0 -> 372 bytes
-rw-r--r--sphinx/themes/basic/static/up.pngbin0 -> 363 bytes
-rw-r--r--sphinx/themes/basic/static/websupport.js762
-rw-r--r--sphinx/theming.py14
-rw-r--r--sphinx/util/__init__.py72
-rw-r--r--sphinx/util/docstrings.py15
-rw-r--r--sphinx/util/jsonimpl.py2
-rw-r--r--sphinx/util/matching.py11
-rw-r--r--sphinx/util/nodes.py57
-rw-r--r--sphinx/util/osutil.py16
-rw-r--r--sphinx/util/png.py14
-rw-r--r--sphinx/util/pycompat.py98
-rw-r--r--sphinx/util/websupport.py11
-rw-r--r--sphinx/versioning.py128
-rw-r--r--sphinx/websupport/__init__.py414
-rw-r--r--sphinx/websupport/errors.py33
-rw-r--r--sphinx/websupport/search/__init__.py121
-rw-r--r--sphinx/websupport/search/nullsearch.py24
-rw-r--r--sphinx/websupport/search/whooshsearch.py59
-rw-r--r--sphinx/websupport/search/xapiansearch.py81
-rw-r--r--sphinx/websupport/storage/__init__.py123
-rw-r--r--sphinx/websupport/storage/differ.py79
-rw-r--r--sphinx/websupport/storage/sqlalchemy_db.py205
-rw-r--r--sphinx/websupport/storage/sqlalchemystorage.py174
-rw-r--r--sphinx/writers/html.py6
-rw-r--r--sphinx/writers/latex.py24
-rw-r--r--sphinx/writers/text.py2
-rw-r--r--sphinx/writers/websupport.py46
80 files changed, 4481 insertions, 615 deletions
diff --git a/sphinx/__init__.py b/sphinx/__init__.py
index 5a6c3571..211e2413 100644
--- a/sphinx/__init__.py
+++ b/sphinx/__init__.py
@@ -9,11 +9,14 @@
:license: BSD, see LICENSE for details.
"""
+# Keep this file executable as-is in Python 3!
+# (Otherwise getting the version out of it from setup.py is impossible.)
+
import sys
from os import path
-__version__ = '1.0.3+'
-__released__ = '1.0.3' # used when Sphinx builds its own docs
+__version__ = '1.1pre'
+__released__ = '1.1 (hg)' # used when Sphinx builds its own docs
package_dir = path.abspath(path.dirname(__file__))
@@ -34,14 +37,16 @@ if '+' in __version__ or 'pre' in __version__:
def main(argv=sys.argv):
+ """Sphinx build "main" command-line entry."""
if sys.version_info[:3] < (2, 4, 0):
- print >>sys.stderr, \
- 'Error: Sphinx requires at least Python 2.4 to run.'
+ sys.stderr.write('Error: Sphinx requires at least '
+ 'Python 2.4 to run.\n')
return 1
try:
from sphinx import cmdline
- except ImportError, err:
+ except ImportError:
+ err = sys.exc_info()[1]
errstr = str(err)
if errstr.lower().startswith('no module named'):
whichmod = errstr[16:]
@@ -54,14 +59,14 @@ def main(argv=sys.argv):
whichmod = 'roman module (which is distributed with Docutils)'
hint = ('This can happen if you upgraded docutils using\n'
'easy_install without uninstalling the old version'
- 'first.')
+ 'first.\n')
else:
whichmod += ' module'
- print >>sys.stderr, ('Error: The %s cannot be found. '
- 'Did you install Sphinx and its dependencies '
- 'correctly?' % whichmod)
+ sys.stderr.write('Error: The %s cannot be found. '
+ 'Did you install Sphinx and its dependencies '
+ 'correctly?\n' % whichmod)
if hint:
- print >> sys.stderr, hint
+ sys.stderr.write(hint)
return 1
raise
return cmdline.main(argv)
diff --git a/sphinx/addnodes.py b/sphinx/addnodes.py
index 0a2f0f7f..592bef5d 100644
--- a/sphinx/addnodes.py
+++ b/sphinx/addnodes.py
@@ -11,103 +11,168 @@
from docutils import nodes
-# index markup
-class index(nodes.Invisible, nodes.Inline, nodes.TextElement): pass
+
+class toctree(nodes.General, nodes.Element):
+ """Node for inserting a "TOC tree"."""
+
# domain-specific object descriptions (class, function etc.)
-# parent node for signature and content
-class desc(nodes.Admonition, nodes.Element): pass
+class desc(nodes.Admonition, nodes.Element):
+ """Node for object descriptions.
+
+ This node is similar to a "definition list" with one definition. It
+ contains one or more ``desc_signature`` and a ``desc_content``.
+ """
+
+class desc_signature(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for object signatures.
-# additional name parts (module name, class name)
-class desc_addname(nodes.Part, nodes.Inline, nodes.TextElement): pass
+ The "term" part of the custom Sphinx definition list.
+ """
+
+
+# nodes to use within a desc_signature
+
+class desc_addname(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for additional name parts (module name, class name)."""
# compatibility alias
desc_classname = desc_addname
-# return type (C); object type
-class desc_type(nodes.Part, nodes.Inline, nodes.TextElement): pass
-# -> annotation (Python)
+
+class desc_type(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for return types or object type names."""
+
class desc_returns(desc_type):
+ """Node for a "returns" annotation (a la -> in Python)."""
def astext(self):
return ' -> ' + nodes.TextElement.astext(self)
-# main name of object
-class desc_name(nodes.Part, nodes.Inline, nodes.TextElement): pass
-# argument list
-class desc_signature(nodes.Part, nodes.Inline, nodes.TextElement): pass
+
+class desc_name(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for the main object name."""
+
class desc_parameterlist(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for a general parameter list."""
child_text_separator = ', '
-class desc_parameter(nodes.Part, nodes.Inline, nodes.TextElement): pass
+
+class desc_parameter(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for a single parameter."""
+
class desc_optional(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for marking optional parts of the parameter list."""
child_text_separator = ', '
def astext(self):
return '[' + nodes.TextElement.astext(self) + ']'
-# annotation (not Python 3-style annotations)
-class desc_annotation(nodes.Part, nodes.Inline, nodes.TextElement): pass
-# node for content
-class desc_content(nodes.General, nodes.Element): pass
+class desc_annotation(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for signature annotations (not Python 3-style annotations)."""
+
+class desc_content(nodes.General, nodes.Element):
+ """Node for object description content.
+
+ This is the "definition" part of the custom Sphinx definition list.
+ """
+
+
+# new admonition-like constructs
+
+class versionmodified(nodes.Admonition, nodes.TextElement):
+ """Node for version change entries.
+
+ Currently used for "versionadded", "versionchanged" and "deprecated"
+ directives.
+ """
+
+class seealso(nodes.Admonition, nodes.Element):
+ """Custom "see also" admonition."""
+
+class productionlist(nodes.Admonition, nodes.Element):
+ """Node for grammar production lists.
+
+ Contains ``production`` nodes.
+ """
+
+class production(nodes.Part, nodes.Inline, nodes.TextElement):
+ """Node for a single grammar production rule."""
+
+
+# other directive-level nodes
+
+class index(nodes.Invisible, nodes.Inline, nodes.TextElement):
+ """Node for index entries.
+
+ This node is created by the ``index`` directive and has one attribute,
+ ``entries``. Its value is a list of 4-tuples of ``(entrytype, entryname,
+ target, ignored)``.
+
+ *entrytype* is one of "single", "pair", "double", "triple".
+ """
+
+class centered(nodes.Part, nodes.Element):
+ """Deprecated."""
+
+class acks(nodes.Element):
+ """Special node for "acks" lists."""
+
+class hlist(nodes.Element):
+ """Node for "horizontal lists", i.e. lists that should be compressed to
+ take up less vertical space.
+ """
-# \versionadded, \versionchanged, \deprecated
-class versionmodified(nodes.Admonition, nodes.TextElement): pass
+class hlistcol(nodes.Element):
+ """Node for one column in a horizontal list."""
-# seealso
-class seealso(nodes.Admonition, nodes.Element): pass
+class compact_paragraph(nodes.paragraph):
+ """Node for a compact paragraph (which never makes a <p> node)."""
-# productionlist
-class productionlist(nodes.Admonition, nodes.Element): pass
-class production(nodes.Part, nodes.Inline, nodes.TextElement): pass
+class glossary(nodes.Element):
+ """Node to insert a glossary."""
-# toc tree
-class toctree(nodes.General, nodes.Element): pass
+class only(nodes.Element):
+ """Node for "only" directives (conditional inclusion based on tags)."""
-# centered
-class centered(nodes.Part, nodes.Element): pass
-# pending xref
-class pending_xref(nodes.Inline, nodes.Element): pass
+# meta-information nodes
-# compact paragraph -- never makes a <p>
-class compact_paragraph(nodes.paragraph): pass
+class start_of_file(nodes.Element):
+ """Node to mark start of a new file, used in the LaTeX builder only."""
-# reference to a file to download
-class download_reference(nodes.reference): pass
+class highlightlang(nodes.Element):
+ """Inserted to set the highlight language and line number options for
+ subsequent code blocks.
+ """
-# for the ACKS list
-class acks(nodes.Element): pass
+class tabular_col_spec(nodes.Element):
+ """Node for specifying tabular columns, used for LaTeX output."""
-# for horizontal lists
-class hlist(nodes.Element): pass
-class hlistcol(nodes.Element): pass
+class meta(nodes.Special, nodes.PreBibliographic, nodes.Element):
+ """Node for meta directive -- same as docutils' standard meta node,
+ but pickleable.
+ """
-# sets the highlighting language for literal blocks
-class highlightlang(nodes.Element): pass
-# like emphasis, but doesn't apply further text processors, e.g. smartypants
-class literal_emphasis(nodes.emphasis): pass
+# inline nodes
-# for abbreviations (with explanations)
-class abbreviation(nodes.Inline, nodes.TextElement): pass
+class pending_xref(nodes.Inline, nodes.Element):
+ """Node for cross-references that cannot be resolved without complete
+ information about all documents.
-# glossary
-class glossary(nodes.Element): pass
+ These nodes are resolved before writing output, in
+ BuildEnvironment.resolve_references.
+ """
-# start of a file, used in the LaTeX builder only
-class start_of_file(nodes.Element): pass
+class download_reference(nodes.reference):
+ """Node for download references, similar to pending_xref."""
-# tabular column specification, used for the LaTeX writer
-class tabular_col_spec(nodes.Element): pass
+class literal_emphasis(nodes.emphasis):
+ """Node that behaves like `emphasis`, but further text processors are not
+ applied (e.g. smartypants for HTML output).
+ """
-# only (in/exclusion based on tags)
-class only(nodes.Element): pass
+class abbreviation(nodes.Inline, nodes.TextElement):
+ """Node for abbreviations with explanations."""
-# meta directive -- same as docutils' standard meta node, but pickleable
-class meta(nodes.Special, nodes.PreBibliographic, nodes.Element): pass
-# make them known to docutils. this is needed, because the HTML writer
-# will choke at some point if these are not added
-nodes._add_node_class_names("""index desc desc_content desc_signature
- desc_type desc_returns desc_addname desc_name desc_parameterlist
- desc_parameter desc_optional download_reference hlist hlistcol
- centered versionmodified seealso productionlist production toctree
- pending_xref compact_paragraph highlightlang literal_emphasis
- abbreviation glossary acks module start_of_file tabular_col_spec
- meta""".split())
+# make the new nodes known to docutils; needed because the HTML writer will
+# choke at some point if these are not added
+nodes._add_node_class_names(k for k in globals().keys()
+ if k != 'nodes' and k[0] != '_')
diff --git a/sphinx/application.py b/sphinx/application.py
index 11f887da..50f4102e 100644
--- a/sphinx/application.py
+++ b/sphinx/application.py
@@ -37,9 +37,6 @@ from sphinx.util.osutil import ENOENT
from sphinx.util.console import bold
-# Directive is either new-style or old-style
-clstypes = (type, types.ClassType)
-
# List of all known core events. Maps name to arguments description.
events = {
'builder-inited': '',
@@ -136,9 +133,8 @@ class Sphinx(object):
self._init_builder(buildername)
def _init_i18n(self):
- """
- Load translated strings from the configured localedirs if
- enabled in the configuration.
+ """Load translated strings from the configured localedirs if enabled in
+ the configuration.
"""
if self.config.language is not None:
self.info(bold('loading translations [%s]... ' %
@@ -490,8 +486,7 @@ class TemplateBridge(object):
"""
def init(self, builder, theme=None, dirs=None):
- """
- Called by the builder to initialize the template system.
+ """Called by the builder to initialize the template system.
*builder* is the builder object; you'll probably want to look at the
value of ``builder.config.templates_path``.
@@ -502,23 +497,20 @@ class TemplateBridge(object):
raise NotImplementedError('must be implemented in subclasses')
def newest_template_mtime(self):
- """
- Called by the builder to determine if output files are outdated
+ """Called by the builder to determine if output files are outdated
because of template changes. Return the mtime of the newest template
file that was changed. The default implementation returns ``0``.
"""
return 0
def render(self, template, context):
- """
- Called by the builder to render a template given as a filename with a
- specified context (a Python dictionary).
+ """Called by the builder to render a template given as a filename with
+ a specified context (a Python dictionary).
"""
raise NotImplementedError('must be implemented in subclasses')
def render_string(self, template, context):
- """
- Called by the builder to render a template given as a string with a
+ """Called by the builder to render a template given as a string with a
specified context (a Python dictionary).
"""
raise NotImplementedError('must be implemented in subclasses')
diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py
index e345d570..ce04f769 100644
--- a/sphinx/builders/__init__.py
+++ b/sphinx/builders/__init__.py
@@ -55,16 +55,13 @@ class Builder(object):
# helper methods
def init(self):
- """
- Load necessary templates and perform initialization. The default
+ """Load necessary templates and perform initialization. The default
implementation does nothing.
"""
pass
def create_template_bridge(self):
- """
- Return the template bridge configured.
- """
+ """Return the template bridge configured."""
if self.config.template_bridge:
self.templates = self.app.import_object(
self.config.template_bridge, 'template_bridge setting')()
@@ -73,23 +70,23 @@ class Builder(object):
self.templates = BuiltinTemplateLoader()
def get_target_uri(self, docname, typ=None):
- """
- Return the target URI for a document name (*typ* can be used to qualify
- the link characteristic for individual builders).
+ """Return the target URI for a document name.
+
+ *typ* can be used to qualify the link characteristic for individual
+ builders.
"""
raise NotImplementedError
def get_relative_uri(self, from_, to, typ=None):
- """
- Return a relative URI between two source filenames. May raise
- environment.NoUri if there's no way to return a sensible URI.
+ """Return a relative URI between two source filenames.
+
+ May raise environment.NoUri if there's no way to return a sensible URI.
"""
return relative_uri(self.get_target_uri(from_),
self.get_target_uri(to, typ))
def get_outdated_docs(self):
- """
- Return an iterable of output files that are outdated, or a string
+ """Return an iterable of output files that are outdated, or a string
describing what an update build will build.
If the builder does not output individual files corresponding to
@@ -129,9 +126,7 @@ class Builder(object):
supported_image_types = []
def post_process_images(self, doctree):
- """
- Pick the best candidate for all image URIs.
- """
+ """Pick the best candidate for all image URIs."""
for node in doctree.traverse(nodes.image):
if '?' in node['candidates']:
# don't rewrite nonlocal image URIs
@@ -198,9 +193,9 @@ class Builder(object):
'out of date' % len(to_build))
def build(self, docnames, summary=None, method='update'):
- """
- Main build method. First updates the environment, and then
- calls :meth:`write`.
+ """Main build method.
+
+ First updates the environment, and then calls :meth:`write`.
"""
if summary:
self.info(bold('building [%s]: ' % self.name), nonl=1)
@@ -302,15 +297,18 @@ class Builder(object):
raise NotImplementedError
def finish(self):
- """
- Finish the building process. The default implementation does nothing.
+ """Finish the building process.
+
+ The default implementation does nothing.
"""
pass
def cleanup(self):
+ """Cleanup any resources.
+
+ The default implementation does nothing.
"""
- Cleanup any resources. The default implementation does nothing.
- """
+ pass
BUILTIN_BUILDERS = {
@@ -329,4 +327,6 @@ BUILTIN_BUILDERS = {
'man': ('manpage', 'ManualPageBuilder'),
'changes': ('changes', 'ChangesBuilder'),
'linkcheck': ('linkcheck', 'CheckExternalLinksBuilder'),
+ 'websupport': ('websupport', 'WebSupportBuilder'),
+ 'gettext': ('intl', 'MessageCatalogBuilder'),
}
diff --git a/sphinx/builders/devhelp.py b/sphinx/builders/devhelp.py
index a5a0f280..d43cd624 100644
--- a/sphinx/builders/devhelp.py
+++ b/sphinx/builders/devhelp.py
@@ -42,7 +42,6 @@ except ImportError:
class DevhelpBuilder(StandaloneHTMLBuilder):
"""
Builder that also outputs GNOME Devhelp file.
-
"""
name = 'devhelp'
diff --git a/sphinx/builders/epub.py b/sphinx/builders/epub.py
index aea07d4d..3e123a0a 100644
--- a/sphinx/builders/epub.py
+++ b/sphinx/builders/epub.py
@@ -130,7 +130,8 @@ _refuri_re = re.compile("([^#:]*#)(.*)")
# The epub publisher
class EpubBuilder(StandaloneHTMLBuilder):
- """Builder that outputs epub files.
+ """
+ Builder that outputs epub files.
It creates the metainfo files container.opf, toc.ncx, mimetype, and
META-INF/container.xml. Afterwards, all necessary files are zipped to an
@@ -222,12 +223,12 @@ class EpubBuilder(StandaloneHTMLBuilder):
})
def fix_fragment(self, match):
- """Return a href attribute with colons replaced by hyphens.
- """
+ """Return a href attribute with colons replaced by hyphens."""
return match.group(1) + match.group(2).replace(':', '-')
def fix_ids(self, tree):
"""Replace colons with hyphens in href and id attributes.
+
Some readers crash because they interpret the part as a
transport protocol specification.
"""
@@ -246,8 +247,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
node.attributes['ids'] = newids
def add_visible_links(self, tree):
- """Append visible link targets after external links.
- """
+ """Append visible link targets after external links."""
for node in tree.traverse(nodes.reference):
uri = node.get('refuri', '')
if (uri.startswith('http:') or uri.startswith('https:') or
@@ -261,6 +261,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
def write_doc(self, docname, doctree):
"""Write one document file.
+
This method is overwritten in order to fix fragment identifiers
and to add visible external links.
"""
@@ -269,8 +270,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return StandaloneHTMLBuilder.write_doc(self, docname, doctree)
def fix_genindex(self, tree):
- """Fix href attributes for genindex pages.
- """
+ """Fix href attributes for genindex pages."""
# XXX: modifies tree inline
# Logic modeled from themes/basic/genindex.html
for key, columns in tree:
@@ -288,8 +288,9 @@ class EpubBuilder(StandaloneHTMLBuilder):
def handle_page(self, pagename, addctx, templatename='page.html',
outfilename=None, event_arg=None):
"""Create a rendered page.
- This method is overwritten for genindex pages in order to fix
- href link attributes.
+
+ This method is overwritten for genindex pages in order to fix href link
+ attributes.
"""
if pagename.startswith('genindex'):
self.fix_genindex(addctx['genindexentries'])
@@ -413,6 +414,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
def insert_subnav(self, node, subnav):
"""Insert nested navpoints for given node.
+
The node and subnav are already rendered to text.
"""
nlist = node.rsplit('\n', 1)
@@ -422,8 +424,8 @@ class EpubBuilder(StandaloneHTMLBuilder):
def build_navpoints(self, nodes):
"""Create the toc navigation structure.
- Subelements of a node are nested inside the navpoint.
- For nested nodes the parent node is reinserted in the subnav.
+ Subelements of a node are nested inside the navpoint. For nested nodes
+ the parent node is reinserted in the subnav.
"""
navstack = []
navlist = []
@@ -461,8 +463,8 @@ class EpubBuilder(StandaloneHTMLBuilder):
return '\n'.join(navlist)
def toc_metadata(self, level, navpoints):
- """Create a dictionary with all metadata for the toc.ncx
- file properly escaped.
+ """Create a dictionary with all metadata for the toc.ncx file
+ properly escaped.
"""
metadata = {}
metadata['uid'] = self.config.epub_uid
@@ -487,8 +489,8 @@ class EpubBuilder(StandaloneHTMLBuilder):
def build_epub(self, outdir, outname):
"""Write the epub file.
- It is a zip file with the mimetype file stored uncompressed
- as the first entry.
+ It is a zip file with the mimetype file stored uncompressed as the first
+ entry.
"""
self.info('writing %s file...' % outname)
projectfiles = ['META-INF/container.xml', 'content.opf', 'toc.ncx'] \
diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py
index 951f516d..78bafda8 100644
--- a/sphinx/builders/html.py
+++ b/sphinx/builders/html.py
@@ -35,7 +35,7 @@ from sphinx.util.osutil import SEP, os_path, relative_uri, ensuredir, \
movefile, ustrftime, copyfile
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.matching import patmatch, compile_matchers
-from sphinx.util.pycompat import any
+from sphinx.util.pycompat import any, b
from sphinx.errors import SphinxError
from sphinx.locale import _
from sphinx.search import js_index
@@ -63,6 +63,7 @@ class StandaloneHTMLBuilder(Builder):
out_suffix = '.html'
link_suffix = '.html' # defaults to matching out_suffix
indexer_format = js_index
+ indexer_dumps_unicode = True
supported_image_types = ['image/svg+xml', 'image/png',
'image/gif', 'image/jpeg']
searchindex_filename = 'searchindex.js'
@@ -146,8 +147,9 @@ class StandaloneHTMLBuilder(Builder):
cfgdict = dict((name, self.config[name])
for (name, desc) in self.config.values.iteritems()
if desc[1] == 'html')
- self.config_hash = md5(str(cfgdict)).hexdigest()
- self.tags_hash = md5(str(sorted(self.tags))).hexdigest()
+ self.config_hash = md5(unicode(cfgdict).encode('utf-8')).hexdigest()
+ self.tags_hash = md5(unicode(sorted(self.tags)).encode('utf-8')) \
+ .hexdigest()
old_config_hash = old_tags_hash = ''
try:
fp = open(path.join(self.outdir, '.buildinfo'))
@@ -199,7 +201,7 @@ class StandaloneHTMLBuilder(Builder):
"""Utility: Render a lone doctree node."""
if node is None:
return {'fragment': ''}
- doc = new_document('<partial node>')
+ doc = new_document(b('<partial node>'))
doc.append(node)
if self._publisher is None:
@@ -585,8 +587,7 @@ class StandaloneHTMLBuilder(Builder):
self.theme.cleanup()
def post_process_images(self, doctree):
- """
- Pick the best candidate for an image and link down-scaled images to
+ """Pick the best candidate for an image and link down-scaled images to
their high res version.
"""
Builder.post_process_images(self, doctree)
@@ -730,10 +731,12 @@ class StandaloneHTMLBuilder(Builder):
self.info(bold('dumping object inventory... '), nonl=True)
f = open(path.join(self.outdir, INVENTORY_FILENAME), 'wb')
try:
- f.write('# Sphinx inventory version 2\n')
- f.write('# Project: %s\n' % self.config.project.encode('utf-8'))
- f.write('# Version: %s\n' % self.config.version.encode('utf-8'))
- f.write('# The remainder of this file is compressed using zlib.\n')
+ f.write((u'# Sphinx inventory version 2\n'
+ u'# Project: %s\n'
+ u'# Version: %s\n'
+ u'# The remainder of this file is compressed using zlib.\n'
+ % (self.config.project, self.config.version)
+ ).encode('utf-8'))
compressor = zlib.compressobj(9)
for domainname, domain in self.env.domains.iteritems():
for name, dispname, type, docname, anchor, prio in \
@@ -745,11 +748,9 @@ class StandaloneHTMLBuilder(Builder):
if dispname == name:
dispname = u'-'
f.write(compressor.compress(
- '%s %s:%s %s %s %s\n' % (name.encode('utf-8'),
- domainname.encode('utf-8'),
- type.encode('utf-8'), prio,
- uri.encode('utf-8'),
- dispname.encode('utf-8'))))
+ (u'%s %s:%s %s %s %s\n' % (name, domainname, type,
+ prio, uri, dispname)
+ ).encode('utf-8')))
f.write(compressor.flush())
finally:
f.close()
@@ -761,7 +762,10 @@ class StandaloneHTMLBuilder(Builder):
searchindexfn = path.join(self.outdir, self.searchindex_filename)
# first write to a temporary file, so that if dumping fails,
# the existing index won't be overwritten
- f = open(searchindexfn + '.tmp', 'wb')
+ if self.indexer_dumps_unicode:
+ f = codecs.open(searchindexfn + '.tmp', 'w', encoding='utf-8')
+ else:
+ f = open(searchindexfn + '.tmp', 'wb')
try:
self.indexer.dump(f, self.indexer_format)
finally:
@@ -918,6 +922,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
#: implements a `dump`, `load`, `dumps` and `loads` functions
#: (pickle, simplejson etc.)
implementation = None
+ implementation_dumps_unicode = False
#: the filename for the global context file
globalcontext_filename = None
@@ -940,6 +945,17 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
return docname[:-5] # up to sep
return docname + SEP
+ def dump_context(self, context, filename):
+ if self.implementation_dumps_unicode:
+ f = codecs.open(filename, 'w', encoding='utf-8')
+ else:
+ f = open(filename, 'wb')
+ try:
+ # XXX: the third argument is pickle-specific!
+ self.implementation.dump(context, f, 2)
+ finally:
+ f.close()
+
def handle_page(self, pagename, ctx, templatename='page.html',
outfilename=None, event_arg=None):
ctx['current_page_name'] = pagename
@@ -953,11 +969,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
ctx, event_arg)
ensuredir(path.dirname(outfilename))
- f = open(outfilename, 'wb')
- try:
- self.implementation.dump(ctx, f, 2)
- finally:
- f.close()
+ self.dump_context(ctx, outfilename)
# if there is a source file, copy the source file for the
# "show source" link
@@ -970,11 +982,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
def handle_finish(self):
# dump the global context
outfilename = path.join(self.outdir, self.globalcontext_filename)
- f = open(outfilename, 'wb')
- try:
- self.implementation.dump(self.globalcontext, f, 2)
- finally:
- f.close()
+ self.dump_context(self.globalcontext, outfilename)
# super here to dump the search index
StandaloneHTMLBuilder.handle_finish(self)
@@ -994,7 +1002,9 @@ class PickleHTMLBuilder(SerializingHTMLBuilder):
A Builder that dumps the generated HTML into pickle files.
"""
implementation = pickle
+ implementation_dumps_unicode = False
indexer_format = pickle
+ indexer_dumps_unicode = False
name = 'pickle'
out_suffix = '.fpickle'
globalcontext_filename = 'globalcontext.pickle'
@@ -1009,7 +1019,9 @@ class JSONHTMLBuilder(SerializingHTMLBuilder):
A builder that dumps the generated HTML into JSON files.
"""
implementation = jsonimpl
+ implementation_dumps_unicode = True
indexer_format = jsonimpl
+ indexer_dumps_unicode = True
name = 'json'
out_suffix = '.fjson'
globalcontext_filename = 'globalcontext.json'
diff --git a/sphinx/builders/htmlhelp.py b/sphinx/builders/htmlhelp.py
index 848068e5..f5d716d9 100644
--- a/sphinx/builders/htmlhelp.py
+++ b/sphinx/builders/htmlhelp.py
@@ -258,7 +258,8 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
def write_index(title, refs, subitems):
def write_param(name, value):
item = ' <param name="%s" value="%s">\n' % (name, value)
- f.write(item.encode('ascii', 'xmlcharrefreplace'))
+ f.write(item.encode('ascii', 'xmlcharrefreplace')
+ .decode('ascii'))
title = cgi.escape(title)
f.write('<LI> <OBJECT type="text/sitemap">\n')
write_param('Keyword', title)
diff --git a/sphinx/builders/intl.py b/sphinx/builders/intl.py
new file mode 100644
index 00000000..1572747e
--- /dev/null
+++ b/sphinx/builders/intl.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.builders.intl
+ ~~~~~~~~~~~~~~~~~~~~
+
+ The MessageCatalogBuilder class.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from os import path
+from codecs import open
+from datetime import datetime
+from collections import defaultdict
+
+from docutils import nodes
+
+from sphinx.builders import Builder
+from sphinx.builders.versioning import VersioningBuilderMixin
+from sphinx.util.nodes import extract_messages
+from sphinx.util.osutil import SEP, copyfile
+from sphinx.util.console import darkgreen
+
+POHEADER = ur"""
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) %(copyright)s
+# This file is distributed under the same license as the %(project)s package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: %(version)s\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: %(ctime)s\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+"""[1:]
+
+
+class I18nBuilder(Builder, VersioningBuilderMixin):
+ """
+ General i18n builder.
+ """
+ name = 'i18n'
+
+ def init(self):
+ Builder.init(self)
+ VersioningBuilderMixin.init(self)
+ self.catalogs = defaultdict(dict)
+
+ def get_target_uri(self, docname, typ=None):
+ return ''
+
+ def get_outdated_docs(self):
+ return self.env.found_docs
+
+ def prepare_writing(self, docnames):
+ return
+
+ def write_doc(self, docname, doctree):
+ catalog = self.catalogs[docname.split(SEP, 1)[0]]
+
+ self.handle_versioning(docname, doctree, nodes.TextElement)
+
+ for node, msg in extract_messages(doctree):
+ catalog.setdefault(node.uid, msg)
+
+ def finish(self):
+ Builder.finish(self)
+ VersioningBuilderMixin.finish(self)
+
+
+class MessageCatalogBuilder(I18nBuilder):
+ """
+ Builds gettext-style message catalogs (.pot files).
+ """
+ name = 'gettext'
+
+ def finish(self):
+ I18nBuilder.finish(self)
+ data = dict(
+ version = self.config.version,
+ copyright = self.config.copyright,
+ project = self.config.project,
+ # XXX should supply tz
+ ctime = datetime.now().strftime('%Y-%m-%d %H:%M%z'),
+ )
+ for section, messages in self.status_iterator(
+ self.catalogs.iteritems(), "writing message catalogs... ",
+ lambda (section, _):darkgreen(section), len(self.catalogs)):
+
+ pofn = path.join(self.outdir, section + '.pot')
+ pofile = open(pofn, 'w', encoding='utf-8')
+ try:
+ pofile.write(POHEADER % data)
+ for uid, message in messages.iteritems():
+ # message contains *one* line of text ready for translation
+ message = message.replace(u'\\', ur'\\'). \
+ replace(u'"', ur'\"')
+ pomsg = u'#%s\nmsgid "%s"\nmsgstr ""\n\n' % (uid, message)
+ pofile.write(pomsg)
+ finally:
+ pofile.close()
diff --git a/sphinx/builders/qthelp.py b/sphinx/builders/qthelp.py
index ffc52334..e86f1921 100644
--- a/sphinx/builders/qthelp.py
+++ b/sphinx/builders/qthelp.py
@@ -130,8 +130,16 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
for indexname, indexcls, content, collapse in self.domain_indices:
item = section_template % {'title': indexcls.localname,
'ref': '%s.html' % indexname}
- sections.append(' '*4*4 + item)
- sections = '\n'.join(sections)
+ sections.append((' ' * 4 * 4 + item).encode('utf-8'))
+ # sections may be unicode strings or byte strings; we have to make sure
+ # they are all byte strings before joining them
+ new_sections = []
+ for section in sections:
+ if isinstance(section, unicode):
+ new_sections.append(section.encode('utf-8'))
+ else:
+ new_sections.append(section)
+ sections = u'\n'.encode('utf-8').join(new_sections)
# keywords
keywords = []
@@ -230,7 +238,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
link = node['refuri']
title = escape(node.astext()).replace('"','&quot;')
item = section_template % {'title': title, 'ref': link}
- item = ' '*4*indentlevel + item.encode('ascii', 'xmlcharrefreplace')
+ item = u' ' * 4 * indentlevel + item
parts.append(item.encode('ascii', 'xmlcharrefreplace'))
elif isinstance(node, nodes.bullet_list):
for subnode in node:
diff --git a/sphinx/builders/versioning.py b/sphinx/builders/versioning.py
new file mode 100644
index 00000000..6c2bccca
--- /dev/null
+++ b/sphinx/builders/versioning.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.builders.versioning
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import os
+import pickle
+
+from docutils.utils import Reporter
+
+from sphinx.util.osutil import copyfile
+from sphinx.environment import WarningStream
+from sphinx.versioning import add_uids, merge_doctrees
+
+
+class VersioningBuilderMixin(object):
+ def walk_doctree_files(self):
+ for root, dirs, files in os.walk(self.doctreedir):
+ for fn in files:
+ yield os.path.join(root, fn)
+
+ def init(self):
+ for fp in self.walk_doctree_files():
+ if fp.endswith('.doctree'):
+ copyfile(fp, fp + '.old')
+
+ def get_old_doctree(self, docname):
+ fp = self.env.doc2path(docname, self.doctreedir, '.doctree.old')
+ try:
+ f = open(fp, 'rb')
+ try:
+ doctree = pickle.load(f)
+ finally:
+ f.close()
+ except IOError:
+ return None
+ doctree.settings.env = self.env
+ doctree.reporter = Reporter(self.env.doc2path(docname), 2, 5,
+ stream=WarningStream(self.env._warnfunc))
+ return doctree
+
+ def resave_doctree(self, docname, doctree):
+ reporter = doctree.reporter
+ doctree.reporter = None
+ doctree.settings.warning_stream = None
+ doctree.settings.env = None
+ doctree.settings.record_dependencies = None
+
+ fp = self.env.doc2path(docname, self.doctreedir, '.doctree')
+ f = open(fp, 'wb')
+ try:
+ pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
+ finally:
+ f.close()
+
+ doctree.reporter = reporter
+
+ def handle_versioning(self, docname, doctree, condition):
+ old_doctree = self.get_old_doctree(docname)
+ if old_doctree:
+ list(merge_doctrees(old_doctree, doctree, condition))
+ else:
+ list(add_uids(doctree, condition))
+ self.resave_doctree(docname, doctree)
+
+ def finish(self):
+ for fp in self.walk_doctree_files():
+ if fp.endswith('.doctree.old'):
+ os.remove(fp)
diff --git a/sphinx/builders/websupport.py b/sphinx/builders/websupport.py
new file mode 100644
index 00000000..303adfe6
--- /dev/null
+++ b/sphinx/builders/websupport.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.builders.websupport
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Builder for the web support package.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import cPickle as pickle
+from os import path
+from cgi import escape
+import posixpath
+import shutil
+
+from docutils.io import StringOutput
+
+from sphinx.util.osutil import os_path, relative_uri, ensuredir, copyfile
+from sphinx.util.jsonimpl import dumps as dump_json
+from sphinx.util.websupport import is_commentable
+from sphinx.builders.html import StandaloneHTMLBuilder
+from sphinx.builders.versioning import VersioningBuilderMixin
+from sphinx.writers.websupport import WebSupportTranslator
+
+
+class WebSupportBuilder(StandaloneHTMLBuilder, VersioningBuilderMixin):
+ """
+ Builds documents for the web support package.
+ """
+ name = 'websupport'
+ out_suffix = '.fpickle'
+
+ def init(self):
+ StandaloneHTMLBuilder.init(self)
+ VersioningBuilderMixin.init(self)
+
+ def init_translator_class(self):
+ self.translator_class = WebSupportTranslator
+
+ def write_doc(self, docname, doctree):
+ destination = StringOutput(encoding='utf-8')
+ doctree.settings = self.docsettings
+
+ self.handle_versioning(docname, doctree, is_commentable)
+
+ self.cur_docname = docname
+ self.secnumbers = self.env.toc_secnumbers.get(docname, {})
+ self.imgpath = '/' + posixpath.join(self.app.staticdir, '_images')
+ self.post_process_images(doctree)
+ self.dlpath = '/' + posixpath.join(self.app.staticdir, '_downloads')
+ self.docwriter.write(doctree, destination)
+ self.docwriter.assemble_parts()
+ body = self.docwriter.parts['fragment']
+ metatags = self.docwriter.clean_meta
+
+ ctx = self.get_doc_context(docname, body, metatags)
+ self.index_page(docname, doctree, ctx.get('title', ''))
+ self.handle_page(docname, ctx, event_arg=doctree)
+
+ def get_target_uri(self, docname, typ=None):
+ return docname
+
+ def load_indexer(self, docnames):
+ self.indexer = self.app.search
+ self.indexer.init_indexing(changed=docnames)
+
+ def handle_page(self, pagename, addctx, templatename='page.html',
+ outfilename=None, event_arg=None):
+ # This is mostly copied from StandaloneHTMLBuilder. However, instead
+ # of rendering the template and saving the html, create a context
+ # dict and pickle it.
+ ctx = self.globalcontext.copy()
+ ctx['pagename'] = pagename
+
+ def pathto(otheruri, resource=False,
+ baseuri=self.get_target_uri(pagename)):
+ if not resource:
+ otheruri = self.get_target_uri(otheruri)
+ return relative_uri(baseuri, otheruri) or '#'
+ else:
+ return '/' + posixpath.join(self.app.staticdir, otheruri)
+ ctx['pathto'] = pathto
+ ctx['hasdoc'] = lambda name: name in self.env.all_docs
+ ctx['encoding'] = encoding = self.config.html_output_encoding
+ ctx['toctree'] = lambda **kw: self._get_local_toctree(pagename, **kw)
+ self.add_sidebars(pagename, ctx)
+ ctx.update(addctx)
+
+ self.app.emit('html-page-context', pagename, templatename,
+ ctx, event_arg)
+
+ # Create a dict that will be pickled and used by webapps.
+ css = '<link rel="stylesheet" href="%s" type=text/css />' % \
+ pathto('_static/pygments.css', 1)
+ doc_ctx = {'body': ctx.get('body', ''),
+ 'title': ctx.get('title', ''),
+ 'css': css,
+ 'js': self._make_js(ctx)}
+ # Partially render the html template to provide a more useful ctx.
+ template = self.templates.environment.get_template(templatename)
+ template_module = template.make_module(ctx)
+ if hasattr(template_module, 'sidebar'):
+ doc_ctx['sidebar'] = template_module.sidebar()
+ if hasattr(template_module, 'relbar'):
+ doc_ctx['relbar'] = template_module.relbar()
+
+ if not outfilename:
+ outfilename = path.join(self.outdir, 'pickles',
+ os_path(pagename) + self.out_suffix)
+
+ ensuredir(path.dirname(outfilename))
+ f = open(outfilename, 'wb')
+ try:
+ pickle.dump(doc_ctx, f, pickle.HIGHEST_PROTOCOL)
+ finally:
+ f.close()
+
+ # if there is a source file, copy the source file for the
+ # "show source" link
+ if ctx.get('sourcename'):
+ source_name = path.join(self.app.builddir, self.app.staticdir,
+ '_sources', os_path(ctx['sourcename']))
+ ensuredir(path.dirname(source_name))
+ copyfile(self.env.doc2path(pagename), source_name)
+
+ def handle_finish(self):
+ StandaloneHTMLBuilder.handle_finish(self)
+ VersioningBuilderMixin.finish(self)
+ directories = ['_images', '_static']
+ for directory in directories:
+ src = path.join(self.outdir, directory)
+ dst = path.join(self.app.builddir, self.app.staticdir, directory)
+ if path.isdir(src):
+ if path.isdir(dst):
+ shutil.rmtree(dst)
+ shutil.move(src, dst)
+
+ def dump_search_index(self):
+ self.indexer.finish_indexing()
+
+ def _make_js(self, ctx):
+ def make_script(file):
+ path = ctx['pathto'](file, 1)
+ return '<script type="text/javascript" src="%s"></script>' % path
+
+ opts = {
+ 'URL_ROOT': ctx.get('url_root', ''),
+ 'VERSION': ctx['release'],
+ 'COLLAPSE_INDEX': False,
+ 'FILE_SUFFIX': '',
+ 'HAS_SOURCE': ctx['has_source']
+ }
+ scripts = [make_script(file) for file in ctx['script_files']]
+ scripts.append(make_script('_static/websupport.js'))
+ return '\n'.join([
+ '<script type="text/javascript">'
+ 'var DOCUMENTATION_OPTIONS = %s;' % dump_json(opts),
+ '</script>'
+ ] + scripts)
diff --git a/sphinx/config.py b/sphinx/config.py
index 12c2a04b..2e0a116c 100644
--- a/sphinx/config.py
+++ b/sphinx/config.py
@@ -11,16 +11,23 @@
import os
import re
+import sys
from os import path
from sphinx.errors import ConfigError
from sphinx.util.osutil import make_filename
+from sphinx.util.pycompat import bytes, b, convert_with_2to3
-nonascii_re = re.compile(r'[\x80-\xff]')
+nonascii_re = re.compile(b(r'[\x80-\xff]'))
+CONFIG_SYNTAX_ERROR = "There is a syntax error in your configuration file: %s"
+if sys.version_info >= (3, 0):
+ CONFIG_SYNTAX_ERROR += "\nDid you change the syntax from 2.x to 3.x?"
class Config(object):
- """Configuration file abstraction."""
+ """
+ Configuration file abstraction.
+ """
# the values are: (default, what needs to be rebuilt if changed)
@@ -163,12 +170,30 @@ class Config(object):
config['tags'] = tags
olddir = os.getcwd()
try:
+ # we promise to have the config dir as current dir while the
+ # config file is executed
+ os.chdir(dirname)
+ # get config source
+ f = open(config_file, 'rb')
try:
- os.chdir(dirname)
- execfile(config['__file__'], config)
+ source = f.read()
+ finally:
+ f.close()
+ try:
+ # compile to a code object, handle syntax errors
+ try:
+ code = compile(source, config_file, 'exec')
+ except SyntaxError:
+ if convert_with_2to3:
+ # maybe the file uses 2.x syntax; try to refactor to
+ # 3.x syntax using 2to3
+ source = convert_with_2to3(config_file)
+ code = compile(source, config_file, 'exec')
+ else:
+ raise
+ exec code in config
except SyntaxError, err:
- raise ConfigError('There is a syntax error in your '
- 'configuration file: ' + str(err))
+ raise ConfigError(CONFIG_SYNTAX_ERROR % err)
finally:
os.chdir(olddir)
@@ -182,10 +207,11 @@ class Config(object):
# check all string values for non-ASCII characters in bytestrings,
# since that can result in UnicodeErrors all over the place
for name, value in self._raw_config.iteritems():
- if isinstance(value, str) and nonascii_re.search(value):
+ if isinstance(value, bytes) and nonascii_re.search(value):
warn('the config value %r is set to a string with non-ASCII '
'characters; this can lead to Unicode errors occurring. '
- 'Please use Unicode strings, e.g. u"Content".' % name)
+ 'Please use Unicode strings, e.g. %r.' % (name, u'Content')
+ )
def init_values(self):
config = self._raw_config
diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py
index 0647daf0..d235c4ca 100644
--- a/sphinx/directives/code.py
+++ b/sphinx/directives/code.py
@@ -7,10 +7,8 @@
:license: BSD, see LICENSE for details.
"""
-import os
import sys
import codecs
-from os import path
from docutils import nodes
from docutils.parsers.rst import Directive, directives
@@ -93,23 +91,11 @@ class LiteralInclude(Directive):
def run(self):
document = self.state.document
- filename = self.arguments[0]
if not document.settings.file_insertion_enabled:
return [document.reporter.warning('File insertion disabled',
line=self.lineno)]
env = document.settings.env
- if filename.startswith('/') or filename.startswith(os.sep):
- rel_fn = filename[1:]
- else:
- docdir = path.dirname(env.doc2path(env.docname, base=None))
- rel_fn = path.join(docdir, filename)
- try:
- fn = path.join(env.srcdir, rel_fn)
- except UnicodeDecodeError:
- # the source directory is a bytestring with non-ASCII characters;
- # let's try to encode the rel_fn in the file system encoding
- rel_fn = rel_fn.encode(sys.getfilesystemencoding())
- fn = path.join(env.srcdir, rel_fn)
+ rel_filename, filename = env.relfn2path(self.arguments[0])
if 'pyobject' in self.options and 'lines' in self.options:
return [document.reporter.warning(
@@ -119,7 +105,7 @@ class LiteralInclude(Directive):
encoding = self.options.get('encoding', env.config.source_encoding)
codec_info = codecs.lookup(encoding)
try:
- f = codecs.StreamReaderWriter(open(fn, 'U'),
+ f = codecs.StreamReaderWriter(open(filename, 'rb'),
codec_info[2], codec_info[3], 'strict')
lines = f.readlines()
f.close()
@@ -136,7 +122,7 @@ class LiteralInclude(Directive):
objectname = self.options.get('pyobject')
if objectname is not None:
from sphinx.pycode import ModuleAnalyzer
- analyzer = ModuleAnalyzer.for_file(fn, '')
+ analyzer = ModuleAnalyzer.for_file(filename, '')
tags = analyzer.find_tags()
if objectname not in tags:
return [document.reporter.warning(
@@ -178,13 +164,13 @@ class LiteralInclude(Directive):
text = ''.join(lines)
if self.options.get('tab-width'):
text = text.expandtabs(self.options['tab-width'])
- retnode = nodes.literal_block(text, text, source=fn)
+ retnode = nodes.literal_block(text, text, source=filename)
retnode.line = 1
if self.options.get('language', ''):
retnode['language'] = self.options['language']
if 'linenos' in self.options:
retnode['linenos'] = True
- document.settings.env.note_dependency(rel_fn)
+ env.note_dependency(rel_filename)
return [retnode]
diff --git a/sphinx/directives/other.py b/sphinx/directives/other.py
index 332c4084..cbf19b55 100644
--- a/sphinx/directives/other.py
+++ b/sphinx/directives/other.py
@@ -13,13 +13,19 @@ from docutils import nodes
from docutils.parsers.rst import Directive, directives
from sphinx import addnodes
-from sphinx.locale import pairindextypes, _
+from sphinx.locale import _
from sphinx.util import url_re, docname_join
-from sphinx.util.nodes import explicit_title_re
+from sphinx.util.nodes import explicit_title_re, process_index_entry
from sphinx.util.compat import make_admonition
from sphinx.util.matching import patfilter
+def int_or_nothing(argument):
+ if not argument:
+ return 999
+ return int(argument)
+
+
class TocTree(Directive):
"""
Directive to notify Sphinx about the hierarchical structure of the docs,
@@ -34,7 +40,7 @@ class TocTree(Directive):
'maxdepth': int,
'glob': directives.flag,
'hidden': directives.flag,
- 'numbered': directives.flag,
+ 'numbered': int_or_nothing,
'titlesonly': directives.flag,
}
@@ -98,7 +104,7 @@ class TocTree(Directive):
subnode['maxdepth'] = self.options.get('maxdepth', -1)
subnode['glob'] = glob
subnode['hidden'] = 'hidden' in self.options
- subnode['numbered'] = 'numbered' in self.options
+ subnode['numbered'] = self.options.get('numbered', 0)
subnode['titlesonly'] = 'titlesonly' in self.options
wrappernode = nodes.compound(classes=['toctree-wrapper'])
wrappernode.append(subnode)
@@ -151,10 +157,6 @@ class Index(Directive):
final_argument_whitespace = True
option_spec = {}
- indextypes = [
- 'single', 'pair', 'double', 'triple',
- ]
-
def run(self):
arguments = self.arguments[0].split('\n')
env = self.state.document.settings.env
@@ -164,28 +166,7 @@ class Index(Directive):
indexnode = addnodes.index()
indexnode['entries'] = ne = []
for entry in arguments:
- entry = entry.strip()
- for type in pairindextypes:
- if entry.startswith(type+':'):
- value = entry[len(type)+1:].strip()
- value = pairindextypes[type] + '; ' + value
- ne.append(('pair', value, targetid, value))
- break
- else:
- for type in self.indextypes:
- if entry.startswith(type+':'):
- value = entry[len(type)+1:].strip()
- if type == 'double':
- type = 'pair'
- ne.append((type, value, targetid, value))
- break
- # shorthand notation for single entries
- else:
- for value in entry.split(','):
- value = value.strip()
- if not value:
- continue
- ne.append(('single', value, targetid, value))
+ ne.extend(process_index_entry(entry, targetid))
return [indexnode, targetnode]
@@ -369,14 +350,13 @@ from docutils.parsers.rst.directives.misc import Include as BaseInclude
class Include(BaseInclude):
"""
Like the standard "Include" directive, but interprets absolute paths
- correctly.
+ "correctly", i.e. relative to source directory.
"""
def run(self):
- if self.arguments[0].startswith('/') or \
- self.arguments[0].startswith(os.sep):
- env = self.state.document.settings.env
- self.arguments[0] = os.path.join(env.srcdir, self.arguments[0][1:])
+ env = self.state.document.settings.env
+ rel_filename, filename = env.relfn2path(self.arguments[0])
+ self.arguments[0] = filename
return BaseInclude.run(self)
diff --git a/sphinx/domains/__init__.py b/sphinx/domains/__init__.py
index d133a812..484cd968 100644
--- a/sphinx/domains/__init__.py
+++ b/sphinx/domains/__init__.py
@@ -66,9 +66,8 @@ class Index(object):
self.domain = domain
def generate(self, docnames=None):
- """
- Return entries for the index given by *name*. If *docnames* is given,
- restrict to entries referring to these docnames.
+ """Return entries for the index given by *name*. If *docnames* is
+ given, restrict to entries referring to these docnames.
The return value is a tuple of ``(content, collapse)``, where *collapse*
is a boolean that determines if sub-entries should start collapsed (for
@@ -158,8 +157,7 @@ class Domain(object):
self.objtypes_for_role = self._role2type.get
def role(self, name):
- """
- Return a role adapter function that always gives the registered
+ """Return a role adapter function that always gives the registered
role its full name ('domain:name') as the first argument.
"""
if name in self._role_cache:
@@ -175,8 +173,7 @@ class Domain(object):
return role_adapter
def directive(self, name):
- """
- Return a directive adapter class that always gives the registered
+ """Return a directive adapter class that always gives the registered
directive its full name ('domain:name') as ``self.name``.
"""
if name in self._directive_cache:
@@ -195,21 +192,16 @@ class Domain(object):
# methods that should be overwritten
def clear_doc(self, docname):
- """
- Remove traces of a document in the domain-specific inventories.
- """
+ """Remove traces of a document in the domain-specific inventories."""
pass
def process_doc(self, env, docname, document):
- """
- Process a document after it is read by the environment.
- """
+ """Process a document after it is read by the environment."""
pass
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
- """
- Resolve the ``pending_xref`` *node* with the given *typ* and *target*.
+ """Resolve the pending_xref *node* with the given *typ* and *target*.
This method should return a new node, to replace the xref node,
containing the *contnode* which is the markup content of the
@@ -225,8 +217,7 @@ class Domain(object):
pass
def get_objects(self):
- """
- Return an iterable of "object descriptions", which are tuples with
+ """Return an iterable of "object descriptions", which are tuples with
five items:
* `name` -- fully qualified name
@@ -245,9 +236,7 @@ class Domain(object):
return []
def get_type_name(self, type, primary=False):
- """
- Return full name for given ObjType.
- """
+ """Return full name for given ObjType."""
if primary:
return type.lname
return _('%s %s') % (self.label, type.lname)
diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py
index 4dac8925..a59b2b50 100644
--- a/sphinx/domains/cpp.py
+++ b/sphinx/domains/cpp.py
@@ -110,7 +110,7 @@ class DefinitionError(Exception):
return self.description
def __str__(self):
- return unicode(self.encode('utf-8'))
+ return unicode(self).encode('utf-8')
class DefExpr(object):
@@ -132,30 +132,34 @@ class DefExpr(object):
def __ne__(self, other):
return not self.__eq__(other)
+ __hash__ = None
+
def clone(self):
- """Close a definition expression node"""
+ """Clone a definition expression node."""
return deepcopy(self)
def get_id(self):
- """Returns the id for the node"""
+ """Return the id for the node."""
return u''
def get_name(self):
- """Returns the name. Returns either `None` or a node with
- a name you might call :meth:`split_owner` on.
+ """Return the name.
+
+ Returns either `None` or a node with a name you might call
+ :meth:`split_owner` on.
"""
return None
def split_owner(self):
- """Nodes returned by :meth:`get_name` can split off their
- owning parent. This function returns the owner and the
- name as a tuple of two items. If a node does not support
- it, :exc:`NotImplementedError` is raised.
+ """Nodes returned by :meth:`get_name` can split off their owning parent.
+
+ This function returns the owner and the name as a tuple of two items.
+ If a node does not support it, :exc:`NotImplementedError` is raised.
"""
raise NotImplementedError()
def prefix(self, prefix):
- """Prefixes a name node (a node returned by :meth:`get_name`)."""
+ """Prefix a name node (a node returned by :meth:`get_name`)."""
raise NotImplementedError()
def __str__(self):
@@ -982,8 +986,9 @@ class CPPFunctionObject(CPPObject):
class CPPCurrentNamespace(Directive):
- """This directive is just to tell Sphinx that we're documenting
- stuff in namespace foo.
+ """
+ This directive is just to tell Sphinx that we're documenting stuff in
+ namespace foo.
"""
has_content = False
diff --git a/sphinx/domains/javascript.py b/sphinx/domains/javascript.py
index 98c7948c..2b615847 100644
--- a/sphinx/domains/javascript.py
+++ b/sphinx/domains/javascript.py
@@ -130,7 +130,7 @@ class JSCallable(JSObject):
class JSConstructor(JSCallable):
- """Like a callable but with a different prefix"""
+ """Like a callable but with a different prefix."""
display_prefix = 'class '
diff --git a/sphinx/domains/python.py b/sphinx/domains/python.py
index cb34492f..e3090a29 100644
--- a/sphinx/domains/python.py
+++ b/sphinx/domains/python.py
@@ -108,22 +108,21 @@ class PyObject(ObjectDescription):
]
def get_signature_prefix(self, sig):
- """
- May return a prefix to put before the object name in the signature.
+ """May return a prefix to put before the object name in the
+ signature.
"""
return ''
def needs_arglist(self):
- """
- May return true if an empty argument list is to be generated even if
+ """May return true if an empty argument list is to be generated even if
the document contains none.
"""
return False
def handle_signature(self, sig, signode):
- """
- Transform a Python signature into RST nodes.
- Returns (fully qualified name of the thing, classname if any).
+ """Transform a Python signature into RST nodes.
+
+ Return (fully qualified name of the thing, classname if any).
If inside a class, the current class name is handled intelligently:
* it is stripped from the displayed name if present
@@ -193,9 +192,7 @@ class PyObject(ObjectDescription):
return fullname, name_prefix
def get_index_text(self, modname, name):
- """
- Return the text for the index entry of the object.
- """
+ """Return the text for the index entry of the object."""
raise NotImplementedError('must be implemented in subclasses')
def add_target_and_index(self, name_cls, sig, signode):
@@ -574,9 +571,8 @@ class PythonDomain(Domain):
del self.data['modules'][modname]
def find_obj(self, env, modname, classname, name, type, searchmode=0):
- """
- Find a Python object for "name", perhaps using the given module and/or
- classname. Returns a list of (name, object entry) tuples.
+ """Find a Python object for "name", perhaps using the given module
+ and/or classname. Returns a list of (name, object entry) tuples.
"""
# skip parens
if name[-2:] == '()':
diff --git a/sphinx/domains/rst.py b/sphinx/domains/rst.py
index d3ffc6bd..30134d9e 100644
--- a/sphinx/domains/rst.py
+++ b/sphinx/domains/rst.py
@@ -59,9 +59,10 @@ class ReSTMarkup(ObjectDescription):
def parse_directive(d):
- """
- Parses a directive signature. Returns (directive, arguments) string tuple.
- if no arguments are given, returns (directive, '').
+ """Parse a directive signature.
+
+ Returns (directive, arguments) string tuple. If no arguments are given,
+ returns (directive, '').
"""
dir = d.strip()
if not dir.startswith('.'):
diff --git a/sphinx/environment.py b/sphinx/environment.py
index 4919331b..883c91e9 100644
--- a/sphinx/environment.py
+++ b/sphinx/environment.py
@@ -11,11 +11,13 @@
import re
import os
+import sys
import time
import types
import codecs
import imghdr
import string
+import posixpath
import cPickle as pickle
from os import path
from glob import glob
@@ -24,9 +26,9 @@ from itertools import izip, groupby
from docutils import nodes
from docutils.io import FileInput, NullOutput
from docutils.core import Publisher
-from docutils.utils import Reporter, relative_path
+from docutils.utils import Reporter, relative_path, new_document
from docutils.readers import standalone
-from docutils.parsers.rst import roles, directives
+from docutils.parsers.rst import roles, directives, Parser as RSTParser
from docutils.parsers.rst.languages import en as english
from docutils.parsers.rst.directives.html import MetaBody
from docutils.writers import UnfilteredWriter
@@ -36,13 +38,14 @@ from docutils.transforms.parts import ContentsFilter
from sphinx import addnodes
from sphinx.util import url_re, get_matching_docs, docname_join, \
FilenameUniqDict
-from sphinx.util.nodes import clean_astext, make_refnode
+from sphinx.util.nodes import clean_astext, make_refnode, extract_messages
from sphinx.util.osutil import movefile, SEP, ustrftime
from sphinx.util.matching import compile_matchers
-from sphinx.util.pycompat import all
+from sphinx.util.pycompat import all, class_types
from sphinx.errors import SphinxError, ExtensionError
-from sphinx.locale import _
+from sphinx.locale import _, init as init_locale
+fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
orig_role_function = roles.role
orig_directive_function = directives.directive
@@ -80,7 +83,7 @@ class WarningStream(object):
self.warnfunc = warnfunc
def write(self, text):
if text.strip():
- self.warnfunc(text, None, '')
+ self.warnfunc(text.strip(), None, '')
class NoUri(Exception):
@@ -182,12 +185,50 @@ class CitationReferences(Transform):
citnode.parent.replace(citnode, refnode)
+class Locale(Transform):
+ """
+ Replace translatable nodes with their translated doctree.
+ """
+ default_priority = 0
+ def apply(self):
+ env = self.document.settings.env
+ settings, source = self.document.settings, self.document['source']
+ # XXX check if this is reliable
+ assert source.startswith(env.srcdir)
+ docname = posixpath.splitext(source[len(env.srcdir):].lstrip('/'))[0]
+ section = docname.split(SEP, 1)[0]
+
+ # fetch translations
+ dirs = [path.join(env.srcdir, x)
+ for x in env.config.locale_dirs]
+ catalog, empty = init_locale(dirs, env.config.language, section)
+ if not empty:
+ return
+
+ parser = RSTParser()
+
+ for node, msg in extract_messages(self.document):
+ # XXX ctx not used
+ #ctx = node.parent
+ patch = new_document(source, settings)
+ msgstr = catalog.gettext(msg)
+ # XXX add marker to untranslated parts
+ if not msgstr or msgstr == msg: # as-of-yet untranslated
+ continue
+ parser.parse(msgstr, patch)
+ patch = patch[0]
+ assert isinstance(patch, nodes.paragraph)
+ for child in patch.children: # update leaves
+ child.parent = node
+ node.children = patch.children
+
+
class SphinxStandaloneReader(standalone.Reader):
"""
Add our own transforms.
"""
- transforms = [CitationReferences, DefaultSubstitutions, MoveModuleTargets,
- HandleCodeBlocks, SortIds]
+ transforms = [Locale, CitationReferences, DefaultSubstitutions,
+ MoveModuleTargets, HandleCodeBlocks, SortIds]
def get_transforms(self):
return standalone.Reader.get_transforms(self) + self.transforms
@@ -251,7 +292,7 @@ class BuildEnvironment:
if key.startswith('_') or \
isinstance(val, types.ModuleType) or \
isinstance(val, types.FunctionType) or \
- isinstance(val, (type, types.ClassType)):
+ isinstance(val, class_types):
del self.config[key]
try:
pickle.dump(self, picklefile, pickle.HIGHEST_PROTOCOL)
@@ -376,25 +417,46 @@ class BuildEnvironment:
domain.clear_doc(docname)
def doc2path(self, docname, base=True, suffix=None):
+ """Return the filename for the document name.
+
+ If *base* is True, return absolute path under self.srcdir.
+ If *base* is None, return relative path to self.srcdir.
+ If *base* is a path string, return absolute path under that.
+ If *suffix* is not None, add it instead of config.source_suffix.
"""
- Return the filename for the document name.
- If base is True, return absolute path under self.srcdir.
- If base is None, return relative path to self.srcdir.
- If base is a path string, return absolute path under that.
- If suffix is not None, add it instead of config.source_suffix.
- """
+ docname = docname.replace(SEP, path.sep)
suffix = suffix or self.config.source_suffix
if base is True:
- return path.join(self.srcdir,
- docname.replace(SEP, path.sep)) + suffix
+ return path.join(self.srcdir, docname) + suffix
elif base is None:
- return docname.replace(SEP, path.sep) + suffix
+ return docname + suffix
else:
- return path.join(base, docname.replace(SEP, path.sep)) + suffix
+ return path.join(base, docname) + suffix
- def find_files(self, config):
+ def relfn2path(self, filename, docname=None):
+ """Return paths to a file referenced from a document, relative to
+ documentation root and absolute.
+
+ Absolute filenames are relative to the source dir, while relative
+ filenames are relative to the dir of the containing document.
"""
- Find all source files in the source dir and put them in self.found_docs.
+ if filename.startswith('/') or filename.startswith(os.sep):
+ rel_fn = filename[1:]
+ else:
+ docdir = path.dirname(self.doc2path(docname or self.docname,
+ base=None))
+ rel_fn = path.join(docdir, filename)
+ try:
+ return rel_fn, path.join(self.srcdir, rel_fn)
+ except UnicodeDecodeError:
+ # the source directory is a bytestring with non-ASCII characters;
+ # let's try to encode the rel_fn in the file system encoding
+ enc_rel_fn = rel_fn.encode(sys.getfilesystemencoding())
+ return rel_fn, path.join(self.srcdir, enc_rel_fn)
+
+ def find_files(self, config):
+ """Find all source files in the source dir and put them in
+ self.found_docs.
"""
matchers = compile_matchers(
config.exclude_patterns[:] +
@@ -407,9 +469,7 @@ class BuildEnvironment:
self.srcdir, config.source_suffix, exclude_matchers=matchers))
def get_outdated_files(self, config_changed):
- """
- Return (added, changed, removed) sets.
- """
+ """Return (added, changed, removed) sets."""
# clear all files no longer present
removed = set(self.all_docs) - self.found_docs
@@ -455,12 +515,12 @@ class BuildEnvironment:
return added, changed, removed
def update(self, config, srcdir, doctreedir, app=None):
- """
- (Re-)read all files new or changed since last update. Returns a
- summary, the total count of documents to reread and an iterator that
- yields docnames as it processes them. Store all environment docnames in
- the canonical format (ie using SEP as a separator in place of
- os.path.sep).
+ """(Re-)read all files new or changed since last update.
+
+ Returns a summary, the total count of documents to reread and an
+ iterator that yields docnames as it processes them. Store all
+ environment docnames in the canonical format (ie using SEP as a
+ separator in place of os.path.sep).
"""
config_changed = False
if self.config is None:
@@ -591,8 +651,8 @@ class BuildEnvironment:
roles.role = role
def read_doc(self, docname, src_path=None, save_parsed=True, app=None):
- """
- Parse a file and add/update inventory entries for the doctree.
+ """Parse a file and add/update inventory entries for the doctree.
+
If srcpath is given, read from a different source file.
"""
# remove all inventory entries for that file
@@ -628,6 +688,8 @@ class BuildEnvironment:
class SphinxSourceClass(FileInput):
def decode(self_, data):
+ if isinstance(data, unicode):
+ return data
return data.decode(self_.encoding, 'sphinx')
def read(self_):
@@ -649,7 +711,7 @@ class BuildEnvironment:
destination_class=NullOutput)
pub.set_components(None, 'restructuredtext', None)
pub.process_programmatic_settings(None, self.settings, None)
- pub.set_source(None, src_path)
+ pub.set_source(None, src_path.encode(fs_encoding))
pub.set_destination(None, None)
try:
pub.publish()
@@ -742,18 +804,15 @@ class BuildEnvironment:
# post-processing of read doctrees
def filter_messages(self, doctree):
- """
- Filter system messages from a doctree.
- """
+ """Filter system messages from a doctree."""
filterlevel = self.config.keep_warnings and 2 or 5
for node in doctree.traverse(nodes.system_message):
if node['level'] < filterlevel:
node.parent.remove(node)
+
def process_dependencies(self, docname, doctree):
- """
- Process docutils-generated dependency info.
- """
+ """Process docutils-generated dependency info."""
cwd = os.getcwd()
frompath = path.join(path.normpath(self.srcdir), 'dummy')
deps = doctree.settings.record_dependencies
@@ -767,30 +826,20 @@ class BuildEnvironment:
self.dependencies.setdefault(docname, set()).add(relpath)
def process_downloads(self, docname, doctree):
- """
- Process downloadable file paths.
- """
- docdir = path.dirname(self.doc2path(docname, base=None))
+ """Process downloadable file paths."""
for node in doctree.traverse(addnodes.download_reference):
targetname = node['reftarget']
- if targetname.startswith('/') or targetname.startswith(os.sep):
- # absolute
- filepath = targetname[1:]
- else:
- filepath = path.normpath(path.join(docdir, node['reftarget']))
- self.dependencies.setdefault(docname, set()).add(filepath)
- if not os.access(path.join(self.srcdir, filepath), os.R_OK):
- self.warn(docname, 'download file not readable: %s' % filepath,
+ rel_filename, filename = self.relfn2path(targetname, docname)
+ self.dependencies.setdefault(docname, set()).add(rel_filename)
+ if not os.access(filename, os.R_OK):
+ self.warn(docname, 'download file not readable: %s' % filename,
getattr(node, 'line', None))
continue
- uniquename = self.dlfiles.add_file(docname, filepath)
+ uniquename = self.dlfiles.add_file(docname, filename)
node['filename'] = uniquename
def process_images(self, docname, doctree):
- """
- Process and rewrite image URIs.
- """
- docdir = path.dirname(self.doc2path(docname, base=None))
+ """Process and rewrite image URIs."""
for node in doctree.traverse(nodes.image):
# Map the mimetype to the corresponding image. The writer may
# choose the best image from these candidates. The special key * is
@@ -803,16 +852,11 @@ class BuildEnvironment:
node.line)
candidates['?'] = imguri
continue
- # imgpath is the image path *from srcdir*
- if imguri.startswith('/') or imguri.startswith(os.sep):
- # absolute path (= relative to srcdir)
- imgpath = path.normpath(imguri[1:])
- else:
- imgpath = path.normpath(path.join(docdir, imguri))
+ rel_imgpath, full_imgpath = self.relfn2path(imguri, docname)
# set imgpath as default URI
- node['uri'] = imgpath
- if imgpath.endswith(os.extsep + '*'):
- for filename in glob(path.join(self.srcdir, imgpath)):
+ node['uri'] = rel_imgpath
+ if rel_imgpath.endswith(os.extsep + '*'):
+ for filename in glob(full_imgpath):
new_imgpath = relative_path(self.srcdir, filename)
if filename.lower().endswith('.pdf'):
candidates['application/pdf'] = new_imgpath
@@ -832,7 +876,7 @@ class BuildEnvironment:
if imgtype:
candidates['image/' + imgtype] = new_imgpath
else:
- candidates['*'] = imgpath
+ candidates['*'] = rel_imgpath
# map image paths to unique image names (so that they can be put
# into a single directory)
for imgpath in candidates.itervalues():
@@ -844,8 +888,8 @@ class BuildEnvironment:
self.images.add_file(docname, imgpath)
def process_metadata(self, docname, doctree):
- """
- Process the docinfo part of the doctree as metadata.
+ """Process the docinfo part of the doctree as metadata.
+
Keep processing minimal -- just return what docutils says.
"""
self.metadata[docname] = md = {}
@@ -930,8 +974,7 @@ class BuildEnvironment:
item.replace(para, compact_para)
def create_title_from(self, docname, document):
- """
- Add a title node to the document (just copy the first section title),
+ """Add a title node to the document (just copy the first section title),
and store that title in the environment.
"""
titlenode = nodes.title()
@@ -969,7 +1012,8 @@ class BuildEnvironment:
def note_toctree(self, docname, toctreenode):
"""Note a TOC tree directive in a document and gather information about
- file relations from it."""
+ file relations from it.
+ """
if toctreenode['glob']:
self.glob_toctrees.add(docname)
if toctreenode.get('numbered'):
@@ -1075,7 +1119,9 @@ class BuildEnvironment:
def get_domain(self, domainname):
"""Return the domain instance with the specified name.
- Raises an ExtensionError if the domain is not registered."""
+
+ Raises an ExtensionError if the domain is not registered.
+ """
try:
return self.domains[domainname]
except KeyError:
@@ -1100,7 +1146,8 @@ class BuildEnvironment:
def get_and_resolve_doctree(self, docname, builder, doctree=None,
prune_toctrees=True):
"""Read the doctree from the pickle, resolve cross-references and
- toctrees and return it."""
+ toctrees and return it.
+ """
if doctree is None:
doctree = self.get_doctree(docname)
@@ -1120,8 +1167,7 @@ class BuildEnvironment:
def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
- """
- Resolve a *toctree* node into individual bullet lists with titles
+ """Resolve a *toctree* node into individual bullet lists with titles
as items, returning None (if no containing titles are found) or
a new node.
@@ -1380,46 +1426,54 @@ class BuildEnvironment:
old_secnumbers = self.toc_secnumbers
self.toc_secnumbers = {}
- def _walk_toc(node, secnums, titlenode=None):
+ def _walk_toc(node, secnums, depth, titlenode=None):
# titlenode is the title of the document, it will get assigned a
# secnumber too, so that it shows up in next/prev/parent rellinks
for subnode in node.children:
if isinstance(subnode, nodes.bullet_list):
numstack.append(0)
- _walk_toc(subnode, secnums, titlenode)
+ _walk_toc(subnode, secnums, depth-1, titlenode)
numstack.pop()
titlenode = None
elif isinstance(subnode, nodes.list_item):
- _walk_toc(subnode, secnums, titlenode)
+ _walk_toc(subnode, secnums, depth, titlenode)
titlenode = None
elif isinstance(subnode, addnodes.compact_paragraph):
numstack[-1] += 1
+ if depth > 0:
+ number = tuple(numstack)
+ else:
+ number = None
secnums[subnode[0]['anchorname']] = \
- subnode[0]['secnumber'] = tuple(numstack)
+ subnode[0]['secnumber'] = number
if titlenode:
- titlenode['secnumber'] = tuple(numstack)
+ titlenode['secnumber'] = number
titlenode = None
elif isinstance(subnode, addnodes.toctree):
- _walk_toctree(subnode)
+ _walk_toctree(subnode, depth)
- def _walk_toctree(toctreenode):
+ def _walk_toctree(toctreenode, depth):
+ if depth == 0:
+ return
for (title, ref) in toctreenode['entries']:
if url_re.match(ref) or ref == 'self':
# don't mess with those
continue
if ref in self.tocs:
secnums = self.toc_secnumbers[ref] = {}
- _walk_toc(self.tocs[ref], secnums, self.titles.get(ref))
+ _walk_toc(self.tocs[ref], secnums, depth,
+ self.titles.get(ref))
if secnums != old_secnumbers.get(ref):
rewrite_needed.append(ref)
for docname in self.numbered_toctrees:
doctree = self.get_doctree(docname)
for toctreenode in doctree.traverse(addnodes.toctree):
- if toctreenode.get('numbered'):
+ depth = toctreenode.get('numbered', 0)
+ if depth:
# every numbered toctree gets new numbering
numstack = [0]
- _walk_toctree(toctreenode)
+ _walk_toctree(toctreenode, depth)
return rewrite_needed
@@ -1518,8 +1572,9 @@ class BuildEnvironment:
i += 1
# group the entries by letter
- def keyfunc2((k, v), letters=string.ascii_uppercase + '_'):
+ def keyfunc2(item, letters=string.ascii_uppercase + '_'):
# hack: mutating the subitems dicts to a list in the keyfunc
+ k, v = item
v[1] = sorted((si, se) for (si, (se, void)) in v[1].iteritems())
# now calculate the key
letter = k[0].upper()
@@ -1578,7 +1633,6 @@ class BuildEnvironment:
def check_consistency(self):
"""Do consistency checks."""
-
for docname in sorted(self.all_docs):
if docname not in self.files_to_rebuild:
if docname == self.config.master_doc:
diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py
index adf08bcd..3a2476a6 100644
--- a/sphinx/ext/autodoc.py
+++ b/sphinx/ext/autodoc.py
@@ -14,7 +14,7 @@
import re
import sys
import inspect
-from types import FunctionType, BuiltinFunctionType, MethodType, ClassType
+from types import FunctionType, BuiltinFunctionType, MethodType
from docutils import nodes
from docutils.utils import assemble_option_dict
@@ -27,15 +27,10 @@ from sphinx.application import ExtensionError
from sphinx.util.nodes import nested_parse_with_titles
from sphinx.util.compat import Directive
from sphinx.util.inspect import isdescriptor, safe_getmembers, safe_getattr
+from sphinx.util.pycompat import base_exception, class_types
from sphinx.util.docstrings import prepare_docstring
-try:
- base_exception = BaseException
-except NameError:
- base_exception = Exception
-
-
#: extended signature RE: with explicit module name separated by ::
py_ext_sig_re = re.compile(
r'''^ ([\w.]+::)? # explicit module name
@@ -90,7 +85,8 @@ def members_set_option(arg):
def bool_option(arg):
"""Used to convert flag options to auto directives. (Instead of
- directives.flag(), which returns None.)"""
+ directives.flag(), which returns None).
+ """
return True
@@ -138,8 +134,7 @@ class AutodocReporter(object):
# Some useful event listener factories for autodoc-process-docstring.
def cut_lines(pre, post=0, what=None):
- """
- Return a listener that removes the first *pre* and last *post*
+ """Return a listener that removes the first *pre* and last *post*
lines of every docstring. If *what* is a sequence of strings,
only docstrings of a type in *what* will be processed.
@@ -165,9 +160,8 @@ def cut_lines(pre, post=0, what=None):
return process
def between(marker, what=None, keepempty=False, exclude=False):
- """
- Return a listener that either keeps, or if *exclude* is True excludes, lines
- between lines that match the *marker* regular expression. If no line
+ """Return a listener that either keeps, or if *exclude* is True excludes,
+ lines between lines that match the *marker* regular expression. If no line
matches, the resulting docstring would be empty, so no change will be made
unless *keepempty* is true.
@@ -256,6 +250,9 @@ class Documenter(object):
self.retann = None
# the object to document (set after import_object succeeds)
self.object = None
+ self.object_name = None
+ # the parent/owner of the object to document
+ self.parent = None
# the module analyzer to get at attribute docs, or None
self.analyzer = None
@@ -264,8 +261,7 @@ class Documenter(object):
self.directive.result.append(self.indent + line, source, *lineno)
def resolve_name(self, modname, parents, path, base):
- """
- Resolve the module and name of the object to document given by the
+ """Resolve the module and name of the object to document given by the
arguments and the current module/class.
Must return a pair of the module name and a chain of attributes; for
@@ -275,8 +271,7 @@ class Documenter(object):
raise NotImplementedError('must be implemented in subclasses')
def parse_name(self):
- """
- Determine what module to import and what attribute to document.
+ """Determine what module to import and what attribute to document.
Returns True and sets *self.modname*, *self.objpath*, *self.fullname*,
*self.args* and *self.retann* if parsing and resolving was successful.
@@ -313,17 +308,20 @@ class Documenter(object):
return True
def import_object(self):
- """
- Import the object given by *self.modname* and *self.objpath* and sets
+ """Import the object given by *self.modname* and *self.objpath* and set
it as *self.object*.
Returns True if successful, False if an error occurred.
"""
try:
__import__(self.modname)
+ parent = None
obj = self.module = sys.modules[self.modname]
for part in self.objpath:
+ parent = obj
obj = self.get_attr(obj, part)
+ self.object_name = part
+ self.parent = parent
self.object = obj
return True
# this used to only catch SyntaxError, ImportError and AttributeError,
@@ -336,15 +334,15 @@ class Documenter(object):
return False
def get_real_modname(self):
- """
- Get the real module name of an object to document. (It can differ
- from the name of the module through which the object was imported.)
+ """Get the real module name of an object to document.
+
+ It can differ from the name of the module through which the object was
+ imported.
"""
return self.get_attr(self.object, '__module__', None) or self.modname
def check_module(self):
- """
- Check if *self.object* is really defined in the module given by
+ """Check if *self.object* is really defined in the module given by
*self.modname*.
"""
modname = self.get_attr(self.object, '__module__', None)
@@ -353,25 +351,26 @@ class Documenter(object):
return True
def format_args(self):
- """
- Format the argument signature of *self.object*. Should return None if
- the object does not have a signature.
+ """Format the argument signature of *self.object*.
+
+ Should return None if the object does not have a signature.
"""
return None
def format_name(self):
- """
- Format the name of *self.object*. This normally should be something
- that can be parsed by the generated directive, but doesn't need to be
- (Sphinx will display it unparsed then).
+ """Format the name of *self.object*.
+
+ This normally should be something that can be parsed by the generated
+ directive, but doesn't need to be (Sphinx will display it unparsed
+ then).
"""
# normally the name doesn't contain the module (except for module
# directives of course)
return '.'.join(self.objpath) or self.modname
def format_signature(self):
- """
- Format the signature (arguments and return annotation) of the object.
+ """Format the signature (arguments and return annotation) of the object.
+
Let the user process it via the ``autodoc-process-signature`` event.
"""
if self.args is not None:
@@ -416,9 +415,11 @@ class Documenter(object):
def get_doc(self, encoding=None):
"""Decode and return lines of the docstring(s) for the object."""
docstring = self.get_attr(self.object, '__doc__', None)
- if docstring:
- # make sure we have Unicode docstrings, then sanitize and split
- # into lines
+ # make sure we have Unicode docstrings, then sanitize and split
+ # into lines
+ if isinstance(docstring, unicode):
+ return [prepare_docstring(docstring)]
+ elif docstring:
return [prepare_docstring(force_decode(docstring, encoding))]
return []
@@ -438,8 +439,11 @@ class Documenter(object):
# set sourcename and add content from attribute documentation
if self.analyzer:
# prevent encoding errors when the file name is non-ASCII
- filename = unicode(self.analyzer.srcname,
- sys.getfilesystemencoding(), 'replace')
+ if not isinstance(self.analyzer.srcname, unicode):
+ filename = unicode(self.analyzer.srcname,
+ sys.getfilesystemencoding(), 'replace')
+ else:
+ filename = self.analyzer.srcname
sourcename = u'%s:docstring of %s' % (filename, self.fullname)
attr_docs = self.analyzer.find_attr_docs()
@@ -466,8 +470,7 @@ class Documenter(object):
self.add_line(line, src[0], src[1])
def get_object_members(self, want_all):
- """
- Return `(members_check_module, members)` where `members` is a
+ """Return `(members_check_module, members)` where `members` is a
list of `(membername, member)` pairs of the members of *self.object*.
If *want_all* is True, return all members. Else, only return those
@@ -511,8 +514,9 @@ class Documenter(object):
return False, sorted(members)
def filter_members(self, members, want_all):
- """
- Filter the given member list: members are skipped if
+ """Filter the given member list.
+
+ Members are skipped if
- they are private (except if given explicitly)
- they are undocumented (except if undoc-members is given)
@@ -565,9 +569,10 @@ class Documenter(object):
return ret
def document_members(self, all_members=False):
- """
- Generate reST for member documentation. If *all_members* is True,
- do all members, else those given by *self.options.members*.
+ """Generate reST for member documentation.
+
+ If *all_members* is True, do all members, else those given by
+ *self.options.members*.
"""
# set current namespace for finding members
self.env.temp_data['autodoc:module'] = self.modname
@@ -625,8 +630,8 @@ class Documenter(object):
def generate(self, more_content=None, real_modname=None,
check_module=False, all_members=False):
- """
- Generate reST for the object given by *self.name*, and possibly members.
+ """Generate reST for the object given by *self.name*, and possibly for
+ its members.
If *more_content* is given, include that content. If *real_modname* is
given, use that module name to find attribute docs. If *check_module* is
@@ -866,7 +871,7 @@ class ClassDocumenter(ModuleLevelDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- return isinstance(member, (type, ClassType))
+ return isinstance(member, class_types)
def import_object(self):
ret = ModuleLevelDocumenter.import_object(self)
@@ -939,9 +944,12 @@ class ClassDocumenter(ModuleLevelDocumenter):
docstrings = [initdocstring]
else:
docstrings.append(initdocstring)
-
- return [prepare_docstring(force_decode(docstring, encoding))
- for docstring in docstrings]
+ doc = []
+ for docstring in docstrings:
+ if not isinstance(docstring, unicode):
+ docstring = force_decode(docstring, encoding)
+ doc.append(prepare_docstring(docstring))
+ return doc
def add_content(self, more_content, no_docstring=False):
if self.doc_as_attr:
@@ -972,7 +980,7 @@ class ExceptionDocumenter(ClassDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- return isinstance(member, (type, ClassType)) and \
+ return isinstance(member, class_types) and \
issubclass(member, base_exception)
@@ -1004,24 +1012,38 @@ class MethodDocumenter(ClassLevelDocumenter):
return inspect.isroutine(member) and \
not isinstance(parent, ModuleDocumenter)
- def import_object(self):
- ret = ClassLevelDocumenter.import_object(self)
- if isinstance(self.object, classmethod) or \
- (isinstance(self.object, MethodType) and
- self.object.im_self is not None):
- self.directivetype = 'classmethod'
- # document class and static members before ordinary ones
- self.member_order = self.member_order - 1
- elif isinstance(self.object, FunctionType) or \
- (isinstance(self.object, BuiltinFunctionType) and
- hasattr(self.object, '__self__') and
- self.object.__self__ is not None):
- self.directivetype = 'staticmethod'
- # document class and static members before ordinary ones
- self.member_order = self.member_order - 1
- else:
- self.directivetype = 'method'
- return ret
+ if sys.version_info >= (3, 0):
+ def import_object(self):
+ ret = ClassLevelDocumenter.import_object(self)
+ obj_from_parent = self.parent.__dict__.get(self.object_name)
+ if isinstance(obj_from_parent, classmethod):
+ self.directivetype = 'classmethod'
+ self.member_order = self.member_order - 1
+ elif isinstance(obj_from_parent, staticmethod):
+ self.directivetype = 'staticmethod'
+ self.member_order = self.member_order - 1
+ else:
+ self.directivetype = 'method'
+ return ret
+ else:
+ def import_object(self):
+ ret = ClassLevelDocumenter.import_object(self)
+ if isinstance(self.object, classmethod) or \
+ (isinstance(self.object, MethodType) and
+ self.object.im_self is not None):
+ self.directivetype = 'classmethod'
+ # document class and static members before ordinary ones
+ self.member_order = self.member_order - 1
+ elif isinstance(self.object, FunctionType) or \
+ (isinstance(self.object, BuiltinFunctionType) and
+ hasattr(self.object, '__self__') and
+ self.object.__self__ is not None):
+ self.directivetype = 'staticmethod'
+ # document class and static members before ordinary ones
+ self.member_order = self.member_order - 1
+ else:
+ self.directivetype = 'method'
+ return ret
def format_args(self):
if inspect.isbuiltin(self.object) or \
diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py
index cf67c7fb..8186a2e5 100644
--- a/sphinx/ext/autosummary/__init__.py
+++ b/sphinx/ext/autosummary/__init__.py
@@ -73,8 +73,7 @@ class autosummary_toc(nodes.comment):
pass
def process_autosummary_toc(app, doctree):
- """
- Insert items described in autosummary:: to the TOC tree, but do
+ """Insert items described in autosummary:: to the TOC tree, but do
not generate the toctree:: list.
"""
env = app.builder.env
@@ -135,8 +134,8 @@ except AttributeError:
isgetsetdescriptor = ismemberdescriptor
def get_documenter(obj):
- """
- Get an autodoc.Documenter class suitable for documenting the given object
+ """Get an autodoc.Documenter class suitable for documenting the given
+ object.
"""
import sphinx.ext.autodoc as autodoc
@@ -218,8 +217,7 @@ class Autosummary(Directive):
return self.warnings + nodes
def get_items(self, names):
- """
- Try to import the given names, and return a list of
+ """Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
env = self.state.document.settings.env
@@ -287,8 +285,7 @@ class Autosummary(Directive):
return items
def get_table(self, items):
- """
- Generate a proper list of table nodes for autosummary:: directive.
+ """Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
"""
@@ -351,8 +348,7 @@ def mangle_signature(sig, max_chars=30):
return u"(%s)" % sig
def limited_join(sep, items, max_chars=30, overflow_marker="..."):
- """
- Join a number of strings to one, limiting the length to *max_chars*.
+ """Join a number of strings to one, limiting the length to *max_chars*.
If the string overflows this limit, replace the last fitting item by
*overflow_marker*.
@@ -377,8 +373,7 @@ def limited_join(sep, items, max_chars=30, overflow_marker="..."):
# -- Importing items -----------------------------------------------------------
def import_by_name(name, prefixes=[None]):
- """
- Import a Python object that has the given *name*, under one of the
+ """Import a Python object that has the given *name*, under one of the
*prefixes*. The first name that succeeds is used.
"""
tried = []
@@ -435,8 +430,7 @@ def _import_by_name(name):
def autolink_role(typ, rawtext, etext, lineno, inliner,
options={}, content=[]):
- """
- Smart linking role.
+ """Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
otherwise expands to '*text*'.
diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py
index 66a124d2..4b6348b5 100644
--- a/sphinx/ext/autosummary/generate.py
+++ b/sphinx/ext/autosummary/generate.py
@@ -17,6 +17,7 @@
:copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+
import os
import re
import sys
@@ -193,8 +194,8 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
# -- Finding documented entries in files ---------------------------------------
def find_autosummary_in_files(filenames):
- """
- Find out what items are documented in source/*.rst.
+ """Find out what items are documented in source/*.rst.
+
See `find_autosummary_in_lines`.
"""
documented = []
@@ -206,8 +207,8 @@ def find_autosummary_in_files(filenames):
return documented
def find_autosummary_in_docstring(name, module=None, filename=None):
- """
- Find out what items are documented in the given object's docstring.
+ """Find out what items are documented in the given object's docstring.
+
See `find_autosummary_in_lines`.
"""
try:
@@ -221,8 +222,8 @@ def find_autosummary_in_docstring(name, module=None, filename=None):
return []
def find_autosummary_in_lines(lines, module=None, filename=None):
- """
- Find out what items appear in autosummary:: directives in the given lines.
+ """Find out what items appear in autosummary:: directives in the
+ given lines.
Returns a list of (name, toctree, template) where *name* is a name
of an object and *toctree* the :toctree: path of the corresponding
diff --git a/sphinx/ext/coverage.py b/sphinx/ext/coverage.py
index 4924d30b..f41820e2 100644
--- a/sphinx/ext/coverage.py
+++ b/sphinx/ext/coverage.py
@@ -173,8 +173,11 @@ class CoverageBuilder(Builder):
attrs = []
+ for attr_name in dir(obj):
+ attr = getattr(obj, attr_name)
for attr_name, attr in inspect.getmembers(
- obj, inspect.ismethod):
+ obj, lambda x: inspect.ismethod(x) or \
+ inspect.isfunction(x)):
if attr_name[0] == '_':
# starts with an underscore, ignore it
continue
diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py
index 9d681f90..62fbfdff 100644
--- a/sphinx/ext/doctest.py
+++ b/sphinx/ext/doctest.py
@@ -149,14 +149,14 @@ class TestCode(object):
class SphinxDocTestRunner(doctest.DocTestRunner):
def summarize(self, out, verbose=None):
- io = StringIO.StringIO()
+ string_io = StringIO.StringIO()
old_stdout = sys.stdout
- sys.stdout = io
+ sys.stdout = string_io
try:
res = doctest.DocTestRunner.summarize(self, verbose)
finally:
sys.stdout = old_stdout
- out(io.getvalue())
+ out(string_io.getvalue())
return res
def _DocTestRunner__patched_linecache_getlines(self, filename,
diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py
index 106de7a6..19dcd951 100644
--- a/sphinx/ext/graphviz.py
+++ b/sphinx/ext/graphviz.py
@@ -11,6 +11,7 @@
"""
import re
+import codecs
import posixpath
from os import path
from math import ceil
@@ -46,18 +47,38 @@ class Graphviz(Directive):
"""
has_content = True
required_arguments = 0
- optional_arguments = 0
+ optional_arguments = 1
final_argument_whitespace = False
option_spec = {
'alt': directives.unchanged,
}
def run(self):
- dotcode = '\n'.join(self.content)
- if not dotcode.strip():
- return [self.state_machine.reporter.warning(
- 'Ignoring "graphviz" directive without content.',
- line=self.lineno)]
+ if self.arguments:
+ document = self.state.document
+ if self.content:
+ return [document.reporter.warning(
+ 'Graphviz directive cannot have both content and '
+ 'a filename argument', line=self.lineno)]
+ env = self.state.document.settings.env
+ rel_filename, filename = env.relfn2path(self.arguments[0])
+ env.note_dependency(rel_filename)
+ try:
+ fp = codecs.open(filename, 'r', 'utf-8')
+ try:
+ dotcode = fp.read()
+ finally:
+ fp.close()
+ except (IOError, OSError):
+ return [document.reporter.warning(
+ 'External Graphviz file %r not found or reading '
+ 'it failed' % filename, line=self.lineno)]
+ else:
+ dotcode = '\n'.join(self.content)
+ if not dotcode.strip():
+ return [self.state_machine.reporter.warning(
+ 'Ignoring "graphviz" directive without content.',
+ line=self.lineno)]
node = graphviz()
node['code'] = dotcode
node['options'] = []
@@ -89,10 +110,9 @@ class GraphvizSimple(Directive):
def render_dot(self, code, options, format, prefix='graphviz'):
- """
- Render graphviz code into a PNG or PDF output file.
- """
+ """Render graphviz code into a PNG or PDF output file."""
hashkey = code.encode('utf-8') + str(options) + \
+ str(self.builder.config.graphviz_dot) + \
str(self.builder.config.graphviz_dot_args)
fname = '%s-%s.%s' % (prefix, sha(hashkey).hexdigest(), format)
if hasattr(self.builder, 'imgpath'):
diff --git a/sphinx/ext/inheritance_diagram.py b/sphinx/ext/inheritance_diagram.py
index 22c0e20f..3f6f0b4d 100644
--- a/sphinx/ext/inheritance_diagram.py
+++ b/sphinx/ext/inheritance_diagram.py
@@ -67,8 +67,7 @@ class InheritanceGraph(object):
graphviz dot graph from them.
"""
def __init__(self, class_names, currmodule, show_builtins=False, parts=0):
- """
- *class_names* is a list of child classes to show bases from.
+ """*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
@@ -81,9 +80,7 @@ class InheritanceGraph(object):
'inheritance diagram')
def _import_class_or_module(self, name, currmodule):
- """
- Import a class using its fully-qualified *name*.
- """
+ """Import a class using its fully-qualified *name*."""
try:
path, base = class_sig_re.match(name).groups()
except ValueError:
@@ -182,9 +179,7 @@ class InheritanceGraph(object):
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
- """
- Get all of the class names involved in the graph.
- """
+ """Get all of the class names involved in the graph."""
return [fullname for (_, fullname, _) in self.class_info]
# These are the default attrs for graphviz
@@ -213,9 +208,8 @@ class InheritanceGraph(object):
def generate_dot(self, name, urls={}, env=None,
graph_attrs={}, node_attrs={}, edge_attrs={}):
- """
- Generate a graphviz dot graph from the classes that
- were passed in to __init__.
+ """Generate a graphviz dot graph from the classes that were passed in
+ to __init__.
*name* is the name of the graph.
diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py
index 10015fc1..442617e1 100644
--- a/sphinx/ext/intersphinx.py
+++ b/sphinx/ext/intersphinx.py
@@ -26,6 +26,7 @@
import time
import zlib
+import codecs
import urllib2
import posixpath
from os import path
@@ -33,19 +34,26 @@ from os import path
from docutils import nodes
from sphinx.builders.html import INVENTORY_FILENAME
+from sphinx.util.pycompat import b
+
handlers = [urllib2.ProxyHandler(), urllib2.HTTPRedirectHandler(),
urllib2.HTTPHandler()]
-if hasattr(urllib2, 'HTTPSHandler'):
+try:
handlers.append(urllib2.HTTPSHandler)
+except AttributeError:
+ pass
urllib2.install_opener(urllib2.build_opener(*handlers))
+UTF8StreamReader = codecs.lookup('utf-8')[2]
+
def read_inventory_v1(f, uri, join):
+ f = UTF8StreamReader(f)
invdata = {}
line = f.next()
- projname = line.rstrip()[11:].decode('utf-8')
+ projname = line.rstrip()[11:]
line = f.next()
version = line.rstrip()[11:]
for line in f:
@@ -68,25 +76,25 @@ def read_inventory_v2(f, uri, join, bufsize=16*1024):
projname = line.rstrip()[11:].decode('utf-8')
line = f.readline()
version = line.rstrip()[11:].decode('utf-8')
- line = f.readline()
+ line = f.readline().decode('utf-8')
if 'zlib' not in line:
raise ValueError
def read_chunks():
decompressor = zlib.decompressobj()
- for chunk in iter(lambda: f.read(bufsize), ''):
+ for chunk in iter(lambda: f.read(bufsize), b('')):
yield decompressor.decompress(chunk)
yield decompressor.flush()
def split_lines(iter):
- buf = ''
+ buf = b('')
for chunk in iter:
buf += chunk
- lineend = buf.find('\n')
+ lineend = buf.find(b('\n'))
while lineend != -1:
yield buf[:lineend].decode('utf-8')
buf = buf[lineend+1:]
- lineend = buf.find('\n')
+ lineend = buf.find(b('\n'))
assert not buf
for line in split_lines(read_chunks()):
@@ -109,13 +117,13 @@ def fetch_inventory(app, uri, inv):
if inv.find('://') != -1:
f = urllib2.urlopen(inv)
else:
- f = open(path.join(app.srcdir, inv))
+ f = open(path.join(app.srcdir, inv), 'rb')
except Exception, err:
app.warn('intersphinx inventory %r not fetchable due to '
'%s: %s' % (inv, err.__class__, err))
return
try:
- line = f.readline().rstrip()
+ line = f.readline().rstrip().decode('utf-8')
try:
if line == '# Sphinx inventory version 1':
invdata = read_inventory_v1(f, uri, join)
diff --git a/sphinx/ext/oldcmarkup.py b/sphinx/ext/oldcmarkup.py
index 00ac3749..bc921a23 100644
--- a/sphinx/ext/oldcmarkup.py
+++ b/sphinx/ext/oldcmarkup.py
@@ -18,6 +18,7 @@ WARNING_MSG = 'using old C markup; please migrate to new-style markup ' \
'(e.g. c:function instead of cfunction), see ' \
'http://sphinx.pocoo.org/domains.html'
+
class OldCDirective(Directive):
has_content = True
required_arguments = 1
diff --git a/sphinx/ext/pngmath.py b/sphinx/ext/pngmath.py
index 7f399754..e4e7c2d0 100644
--- a/sphinx/ext/pngmath.py
+++ b/sphinx/ext/pngmath.py
@@ -26,6 +26,7 @@ from docutils import nodes
from sphinx.errors import SphinxError
from sphinx.util.png import read_png_depth, write_png_depth
from sphinx.util.osutil import ensuredir, ENOENT
+from sphinx.util.pycompat import b
from sphinx.ext.mathbase import setup_math as mathbase_setup, wrap_displaymath
class MathExtError(SphinxError):
@@ -58,11 +59,10 @@ DOC_BODY_PREVIEW = r'''
\end{document}
'''
-depth_re = re.compile(r'\[\d+ depth=(-?\d+)\]')
+depth_re = re.compile(b(r'\[\d+ depth=(-?\d+)\]'))
def render_math(self, math):
- """
- Render the LaTeX math expression *math* using latex and dvipng.
+ """Render the LaTeX math expression *math* using latex and dvipng.
Return the filename relative to the built document and the "depth",
that is, the distance of image bottom and baseline in pixels, if the
diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py
index 81881beb..b9bb9d77 100644
--- a/sphinx/ext/viewcode.py
+++ b/sphinx/ext/viewcode.py
@@ -31,7 +31,11 @@ def doctree_read(app, doctree):
env._viewcode_modules[modname] = False
return
analyzer.find_tags()
- entry = analyzer.code.decode(analyzer.encoding), analyzer.tags, {}
+ if not isinstance(analyzer.code, unicode):
+ code = analyzer.code.decode(analyzer.encoding)
+ else:
+ code = analyzer.code
+ entry = code, analyzer.tags, {}
env._viewcode_modules[modname] = entry
elif entry is False:
return
@@ -47,7 +51,7 @@ def doctree_read(app, doctree):
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
- modname = signode['module']
+ modname = signode.get('module')
if not modname:
continue
fullname = signode['fullname']
diff --git a/sphinx/highlighting.py b/sphinx/highlighting.py
index 0dcbc021..6d710919 100644
--- a/sphinx/highlighting.py
+++ b/sphinx/highlighting.py
@@ -156,7 +156,7 @@ class PygmentsBridge(object):
if sys.version_info >= (2, 5):
src = 'from __future__ import with_statement\n' + src
- if isinstance(src, unicode):
+ if sys.version_info < (3, 0) and isinstance(src, unicode):
# Non-ASCII chars will only occur in string literals
# and comments. If we wanted to give them to the parser
# correctly, we'd have to find out the correct source
@@ -175,7 +175,7 @@ class PygmentsBridge(object):
return True
def highlight_block(self, source, lang, linenos=False, warn=None):
- if isinstance(source, str):
+ if not isinstance(source, unicode):
source = source.decode()
if not pygments:
return self.unhighlighted(source)
diff --git a/sphinx/jinja2glue.py b/sphinx/jinja2glue.py
index a6f1a853..29ee334e 100644
--- a/sphinx/jinja2glue.py
+++ b/sphinx/jinja2glue.py
@@ -37,8 +37,10 @@ def accesskey(context, key):
class SphinxFileSystemLoader(FileSystemLoader):
- """FileSystemLoader subclass that is not so strict about '..'
- entries in template names."""
+ """
+ FileSystemLoader subclass that is not so strict about '..' entries in
+ template names.
+ """
def get_source(self, environment, template):
for searchpath in self.searchpath:
diff --git a/sphinx/locale/__init__.py b/sphinx/locale/__init__.py
index b0b89720..126a37b5 100644
--- a/sphinx/locale/__init__.py
+++ b/sphinx/locale/__init__.py
@@ -8,13 +8,16 @@
:copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+
+import sys
import gettext
import UserString
class _TranslationProxy(UserString.UserString, object):
- """Class for proxy strings from gettext translations. This is a helper
- for the lazy_* functions from this module.
+ """
+ Class for proxy strings from gettext translations. This is a helper for the
+ lazy_* functions from this module.
The proxy implementation attempts to be as complete as possible, so that
the lazy objects should mostly work as expected, for example for sorting.
@@ -135,7 +138,8 @@ class _TranslationProxy(UserString.UserString, object):
def mygettext(string):
"""Used instead of _ when creating TranslationProxies, because _ is
- not bound yet at that time."""
+ not bound yet at that time.
+ """
return _(string)
def lazy_gettext(string):
@@ -176,18 +180,32 @@ pairindextypes = {
'builtin': l_('built-in function'),
}
-translator = None
+translators = {}
-def _(message):
- return translator.ugettext(message)
+if sys.version_info >= (3, 0):
+ def _(message):
+ return translators['sphinx'].gettext(message)
+else:
+ def _(message):
+ return translators['sphinx'].ugettext(message)
-def init(locale_dirs, language):
- global translator
+
+def init(locale_dirs, language, catalog='sphinx'):
+ """Look for message catalogs in `locale_dirs` and *ensure* that there is at
+ least a NullTranslations catalog set in `translators`. If called multiple
+ times or if several ``.mo`` files are found, their contents are merged
+ together (thus making ``init`` reentrable).
+ """
+ global translators
+ translator = translators.get(catalog)
+ # ignore previously failed attempts to find message catalogs
+ if isinstance(translator, gettext.NullTranslations):
+ translator = None
# the None entry is the system's default locale path
has_translation = True
for dir_ in locale_dirs:
try:
- trans = gettext.translation('sphinx', localedir=dir_,
+ trans = gettext.translation(catalog, localedir=dir_,
languages=[language])
if translator is None:
translator = trans
@@ -196,7 +214,11 @@ def init(locale_dirs, language):
except Exception:
# Language couldn't be found in the specified path
pass
+ # guarantee translations[catalog] exists
if translator is None:
translator = gettext.NullTranslations()
has_translation = False
+ translators[catalog] = translator
+ if hasattr(translator, 'ugettext'):
+ translator.gettext = translator.ugettext
return translator, has_translation
diff --git a/sphinx/locale/sv/LC_MESSAGES/sphinx.js b/sphinx/locale/sv/LC_MESSAGES/sphinx.js
new file mode 100644
index 00000000..0cedfb45
--- /dev/null
+++ b/sphinx/locale/sv/LC_MESSAGES/sphinx.js
@@ -0,0 +1 @@
+Documentation.addTranslations({"locale": "sv", "plural_expr": "(n != 1)", "messages": {"Search Results": "S\u00f6kresultat", "Preparing search...": "F\u00f6rbereder s\u00f6kning...", "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories.": "Din s\u00f6kning gav inga resultat. Kolla stavning och att du valt tillr\u00e4ckligt med kategorier.", "Search finished, found %s page(s) matching the search query.": "S\u00f6kning f\u00e4rdig, hittade %s tr\u00e4ffar.", ", in ": ", i ", "Expand sidebar": "Expandera sidolist", "Permalink to this headline": "Permalink till denna rubrik", "Searching": "S\u00f6ker", "Collapse sidebar": "D\u00f6lj sidolist", "Permalink to this definition": "Permalink till denna definition", "Hide Search Matches": "D\u00f6lj S\u00f6kresultat"}}); \ No newline at end of file
diff --git a/sphinx/locale/sv/LC_MESSAGES/sphinx.mo b/sphinx/locale/sv/LC_MESSAGES/sphinx.mo
new file mode 100644
index 00000000..8cf76751
--- /dev/null
+++ b/sphinx/locale/sv/LC_MESSAGES/sphinx.mo
Binary files differ
diff --git a/sphinx/locale/sv/LC_MESSAGES/sphinx.po b/sphinx/locale/sv/LC_MESSAGES/sphinx.po
new file mode 100644
index 00000000..f449e8f6
--- /dev/null
+++ b/sphinx/locale/sv/LC_MESSAGES/sphinx.po
@@ -0,0 +1,797 @@
+msgid ""
+msgstr ""
+"Project-Id-Version: \n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2010-05-24 23:53+0200\n"
+"PO-Revision-Date: 2010-08-25 12:36+0200\n"
+"Last-Translator: Henrik Holmboe <henrik@holmboe.se>\n"
+"Language-Team: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Poedit-Language: Swedish\n"
+"X-Poedit-Country: SWEDEN\n"
+
+# Translators (rev. chron. order):
+# Ludvig Ericson <ludvig@lericson.se>
+# Henrik Holmboe <henrik@holmboe.se>
+
+#: sphinx/environment.py:106
+#: sphinx/writers/latex.py:184
+#: sphinx/writers/manpage.py:67
+#, python-format
+msgid "%B %d, %Y"
+msgstr "%B %d, %Y"
+
+#: sphinx/roles.py:174
+#, python-format
+msgid "Python Enhancement Proposals!PEP %s"
+msgstr "Python Enhancement Proposals!PEP %s"
+
+#: sphinx/builders/changes.py:72
+msgid "Builtins"
+msgstr "Inbyggda"
+
+#: sphinx/builders/changes.py:74
+msgid "Module level"
+msgstr "Modulnivå"
+
+#: sphinx/builders/html.py:266
+#, python-format
+msgid "%b %d, %Y"
+msgstr "%b %d, %Y"
+
+#: sphinx/builders/html.py:285
+#: sphinx/themes/basic/defindex.html:30
+msgid "General Index"
+msgstr "Huvudindex"
+
+#: sphinx/builders/html.py:285
+msgid "index"
+msgstr "index"
+
+#: sphinx/builders/html.py:345
+msgid "next"
+msgstr "nästa"
+
+#: sphinx/builders/html.py:354
+msgid "previous"
+msgstr "föregående"
+
+#: sphinx/builders/latex.py:151
+msgid " (in "
+msgstr " (i "
+
+#: sphinx/directives/other.py:127
+msgid "Section author: "
+msgstr "Sektionsförfattare: "
+
+#: sphinx/directives/other.py:129
+msgid "Module author: "
+msgstr "Modulförfattare: "
+
+#: sphinx/directives/other.py:131
+msgid "Code author: "
+msgstr "Källkodsförfattare: "
+
+#: sphinx/directives/other.py:133
+msgid "Author: "
+msgstr "Upphovsman: "
+
+#: sphinx/directives/other.py:238
+msgid "See also"
+msgstr "Se även"
+
+#: sphinx/domains/__init__.py:253
+#, python-format
+msgid "%s %s"
+msgstr "%s %s"
+
+#: sphinx/domains/c.py:51
+#: sphinx/domains/python.py:49
+msgid "Parameters"
+msgstr "Parametrar"
+
+#: sphinx/domains/c.py:54
+#: sphinx/domains/javascript.py:137
+#: sphinx/domains/python.py:59
+msgid "Returns"
+msgstr "Returnerar"
+
+#: sphinx/domains/c.py:56
+#: sphinx/domains/python.py:61
+msgid "Return type"
+msgstr "Returtyp"
+
+#: sphinx/domains/c.py:133
+#, python-format
+msgid "%s (C function)"
+msgstr "%s (C-funktion)"
+
+#: sphinx/domains/c.py:135
+#, python-format
+msgid "%s (C member)"
+msgstr "%s (C-medlem)"
+
+#: sphinx/domains/c.py:137
+#, python-format
+msgid "%s (C macro)"
+msgstr "%s (C-makro)"
+
+#: sphinx/domains/c.py:139
+#, python-format
+msgid "%s (C type)"
+msgstr "%s (C-typ)"
+
+#: sphinx/domains/c.py:141
+#, python-format
+msgid "%s (C variable)"
+msgstr "%s (C-variabel)"
+
+#: sphinx/domains/c.py:171
+#: sphinx/domains/cpp.py:1031
+#: sphinx/domains/javascript.py:166
+#: sphinx/domains/python.py:497
+msgid "function"
+msgstr "funktion"
+
+#: sphinx/domains/c.py:172
+#: sphinx/domains/cpp.py:1032
+msgid "member"
+msgstr "medlem"
+
+#: sphinx/domains/c.py:173
+msgid "macro"
+msgstr "makro"
+
+#: sphinx/domains/c.py:174
+#: sphinx/domains/cpp.py:1033
+msgid "type"
+msgstr "typ"
+
+#: sphinx/domains/c.py:175
+msgid "variable"
+msgstr "variabel"
+
+#: sphinx/domains/cpp.py:876
+#, python-format
+msgid "%s (C++ class)"
+msgstr "%s (C++-klass)"
+
+#: sphinx/domains/cpp.py:891
+#, python-format
+msgid "%s (C++ type)"
+msgstr "%s (C++-typ)"
+
+#: sphinx/domains/cpp.py:910
+#, python-format
+msgid "%s (C++ member)"
+msgstr "%s (C++-medlem)"
+
+#: sphinx/domains/cpp.py:962
+#, python-format
+msgid "%s (C++ function)"
+msgstr "%s (C++-funktion)"
+
+#: sphinx/domains/cpp.py:1030
+#: sphinx/domains/python.py:499
+msgid "class"
+msgstr "klass"
+
+#: sphinx/domains/javascript.py:117
+#: sphinx/domains/python.py:221
+#, python-format
+msgid "%s() (built-in function)"
+msgstr "%s() (inbyggd funktion)"
+
+#: sphinx/domains/javascript.py:118
+#: sphinx/domains/python.py:285
+#, python-format
+msgid "%s() (%s method)"
+msgstr "%s() (%s metod)"
+
+#: sphinx/domains/javascript.py:120
+#, python-format
+msgid "%s (global variable or constant)"
+msgstr "%s (global variabel eller konstant)"
+
+#: sphinx/domains/javascript.py:122
+#: sphinx/domains/python.py:323
+#, python-format
+msgid "%s (%s attribute)"
+msgstr "%s (%s attribut)"
+
+#: sphinx/domains/javascript.py:131
+msgid "Arguments"
+msgstr "Argument"
+
+#: sphinx/domains/javascript.py:134
+msgid "Throws"
+msgstr "Kastar"
+
+#: sphinx/domains/javascript.py:167
+#: sphinx/domains/python.py:498
+msgid "data"
+msgstr "data"
+
+#: sphinx/domains/javascript.py:168
+#: sphinx/domains/python.py:504
+msgid "attribute"
+msgstr "attribut"
+
+#: sphinx/domains/python.py:53
+msgid "Variables"
+msgstr "Variabler"
+
+#: sphinx/domains/python.py:56
+msgid "Raises"
+msgstr "Väcker"
+
+#: sphinx/domains/python.py:222
+#: sphinx/domains/python.py:279
+#: sphinx/domains/python.py:291
+#: sphinx/domains/python.py:304
+#, python-format
+msgid "%s() (in module %s)"
+msgstr "%s() (i modul %s)"
+
+#: sphinx/domains/python.py:225
+#, python-format
+msgid "%s (built-in variable)"
+msgstr "%s (inbyggd variabel)"
+
+#: sphinx/domains/python.py:226
+#: sphinx/domains/python.py:317
+#, python-format
+msgid "%s (in module %s)"
+msgstr "%s (i modul %s)"
+
+#: sphinx/domains/python.py:242
+#, python-format
+msgid "%s (built-in class)"
+msgstr "%s (inbyggd klass)"
+
+#: sphinx/domains/python.py:243
+#, python-format
+msgid "%s (class in %s)"
+msgstr "%s (klass i %s)"
+
+#: sphinx/domains/python.py:283
+#, python-format
+msgid "%s() (%s.%s method)"
+msgstr "%s() (%s.%s metod)"
+
+#: sphinx/domains/python.py:295
+#, python-format
+msgid "%s() (%s.%s static method)"
+msgstr "%s() (%s.%s statisk metod)"
+
+#: sphinx/domains/python.py:298
+#, python-format
+msgid "%s() (%s static method)"
+msgstr "%s() (%s statisk metod)"
+
+#: sphinx/domains/python.py:308
+#, python-format
+msgid "%s() (%s.%s class method)"
+msgstr "%s() (%s.%s klassmetod)"
+
+#: sphinx/domains/python.py:311
+#, python-format
+msgid "%s() (%s class method)"
+msgstr "%s() (%s klassmetod)"
+
+#: sphinx/domains/python.py:321
+#, python-format
+msgid "%s (%s.%s attribute)"
+msgstr "%s (%s.%s attribut)"
+
+#: sphinx/domains/python.py:366
+msgid "Platforms: "
+msgstr "Plattformar: "
+
+#: sphinx/domains/python.py:372
+#, python-format
+msgid "%s (module)"
+msgstr "%s (modul)"
+
+#: sphinx/domains/python.py:429
+msgid "Python Module Index"
+msgstr "Python Modulindex"
+
+#: sphinx/domains/python.py:430
+msgid "modules"
+msgstr "moduler"
+
+#: sphinx/domains/python.py:475
+msgid "Deprecated"
+msgstr "Ersatt"
+
+#: sphinx/domains/python.py:500
+#: sphinx/locale/__init__.py:162
+msgid "exception"
+msgstr "undantag"
+
+#: sphinx/domains/python.py:501
+msgid "method"
+msgstr "metod"
+
+#: sphinx/domains/python.py:502
+msgid "class method"
+msgstr "klassmetod"
+
+#: sphinx/domains/python.py:503
+msgid "static method"
+msgstr "statisk metod"
+
+#: sphinx/domains/python.py:505
+#: sphinx/locale/__init__.py:158
+msgid "module"
+msgstr "modul"
+
+#: sphinx/domains/rst.py:53
+#, python-format
+msgid "%s (directive)"
+msgstr "%s (direktiv)"
+
+#: sphinx/domains/rst.py:55
+#, python-format
+msgid "%s (role)"
+msgstr "%s (roll)"
+
+#: sphinx/domains/rst.py:103
+msgid "directive"
+msgstr "direktiv"
+
+#: sphinx/domains/rst.py:104
+msgid "role"
+msgstr "roll"
+
+#: sphinx/domains/std.py:68
+#: sphinx/domains/std.py:84
+#, python-format
+msgid "environment variable; %s"
+msgstr "miljövariabel; %s"
+
+#: sphinx/domains/std.py:160
+#, python-format
+msgid "%scommand line option; %s"
+msgstr "%skommandorad växel; %s"
+
+#: sphinx/domains/std.py:328
+msgid "glossary term"
+msgstr "ordlista"
+
+#: sphinx/domains/std.py:329
+msgid "grammar token"
+msgstr "grammatisk token"
+
+#: sphinx/domains/std.py:330
+msgid "reference label"
+msgstr "referensetikett"
+
+#: sphinx/domains/std.py:331
+msgid "environment variable"
+msgstr "miljövariabel"
+
+#: sphinx/domains/std.py:332
+msgid "program option"
+msgstr "programväxel"
+
+#: sphinx/domains/std.py:360
+#: sphinx/themes/basic/genindex-single.html:11
+#: sphinx/themes/basic/genindex-split.html:11
+#: sphinx/themes/basic/genindex-split.html:14
+#: sphinx/themes/basic/genindex.html:11
+#: sphinx/themes/basic/genindex.html:14
+#: sphinx/themes/basic/genindex.html:50
+#: sphinx/themes/basic/layout.html:125
+#: sphinx/writers/latex.py:173
+msgid "Index"
+msgstr "Index"
+
+#: sphinx/domains/std.py:361
+msgid "Module Index"
+msgstr "Modulindex"
+
+#: sphinx/domains/std.py:362
+#: sphinx/themes/basic/defindex.html:25
+msgid "Search Page"
+msgstr "Söksida"
+
+#: sphinx/ext/autodoc.py:917
+#, python-format
+msgid " Bases: %s"
+msgstr " Baserad: %s"
+
+#: sphinx/ext/autodoc.py:950
+#, python-format
+msgid "alias of :class:`%s`"
+msgstr "alias för :class:`%s`"
+
+#: sphinx/ext/todo.py:41
+msgid "Todo"
+msgstr "Att göra"
+
+#: sphinx/ext/todo.py:109
+#, python-format
+msgid "(The <<original entry>> is located in %s, line %d.)"
+msgstr "(<<Ursprunget>> finns i %s, på rad %d.)"
+
+#: sphinx/ext/todo.py:117
+msgid "original entry"
+msgstr "ursprungsvärde"
+
+#: sphinx/ext/viewcode.py:66
+msgid "[source]"
+msgstr "[source]"
+
+#: sphinx/ext/viewcode.py:109
+msgid "[docs]"
+msgstr "[docs]"
+
+#: sphinx/ext/viewcode.py:123
+msgid "Module code"
+msgstr "Modulkällkod"
+
+#: sphinx/ext/viewcode.py:129
+#, python-format
+msgid "<h1>Source code for %s</h1>"
+msgstr "<h1>Källkod för %s</h1>"
+
+#: sphinx/ext/viewcode.py:156
+msgid "Overview: module code"
+msgstr "Översikt: modulkällkod"
+
+#: sphinx/ext/viewcode.py:157
+msgid "<h1>All modules for which code is available</h1>"
+msgstr "<h1>Alla moduler där källkod finns</h1>"
+
+#: sphinx/locale/__init__.py:139
+msgid "Attention"
+msgstr "Uppmärksamma"
+
+#: sphinx/locale/__init__.py:140
+msgid "Caution"
+msgstr "Varning"
+
+#: sphinx/locale/__init__.py:141
+msgid "Danger"
+msgstr "Risk"
+
+#: sphinx/locale/__init__.py:142
+msgid "Error"
+msgstr "Fel"
+
+#: sphinx/locale/__init__.py:143
+msgid "Hint"
+msgstr "Råd"
+
+#: sphinx/locale/__init__.py:144
+msgid "Important"
+msgstr "Viktigt"
+
+#: sphinx/locale/__init__.py:145
+msgid "Note"
+msgstr "Observera"
+
+#: sphinx/locale/__init__.py:146
+msgid "See Also"
+msgstr "Se även"
+
+#: sphinx/locale/__init__.py:147
+msgid "Tip"
+msgstr "Tips"
+
+#: sphinx/locale/__init__.py:148
+msgid "Warning"
+msgstr "Varning"
+
+#: sphinx/locale/__init__.py:152
+#, python-format
+msgid "New in version %s"
+msgstr "Nyheter i version %s"
+
+#: sphinx/locale/__init__.py:153
+#, python-format
+msgid "Changed in version %s"
+msgstr "Förändrat i version %s"
+
+#: sphinx/locale/__init__.py:154
+#, python-format
+msgid "Deprecated since version %s"
+msgstr "Ersatt sedan version %s"
+
+#: sphinx/locale/__init__.py:159
+msgid "keyword"
+msgstr "nyckelord"
+
+#: sphinx/locale/__init__.py:160
+msgid "operator"
+msgstr "operator"
+
+#: sphinx/locale/__init__.py:161
+msgid "object"
+msgstr "objekt"
+
+#: sphinx/locale/__init__.py:163
+msgid "statement"
+msgstr "uttryck"
+
+#: sphinx/locale/__init__.py:164
+msgid "built-in function"
+msgstr "inbyggda funktioner"
+
+#: sphinx/themes/agogo/layout.html:45
+#: sphinx/themes/basic/globaltoc.html:10
+#: sphinx/themes/basic/localtoc.html:11
+msgid "Table Of Contents"
+msgstr "Innehållsförteckning"
+
+#: sphinx/themes/agogo/layout.html:49
+#: sphinx/themes/basic/layout.html:128
+#: sphinx/themes/basic/search.html:11
+#: sphinx/themes/basic/search.html:14
+msgid "Search"
+msgstr "Sök"
+
+#: sphinx/themes/agogo/layout.html:52
+#: sphinx/themes/basic/searchbox.html:15
+msgid "Go"
+msgstr "Gå"
+
+#: sphinx/themes/agogo/layout.html:57
+#: sphinx/themes/basic/searchbox.html:20
+msgid "Enter search terms or a module, class or function name."
+msgstr "Ange sökord eller modul-, klass- eller funktionsnamn."
+
+#: sphinx/themes/agogo/layout.html:78
+#: sphinx/themes/basic/sourcelink.html:14
+msgid "Show Source"
+msgstr "Visa källfil"
+
+#: sphinx/themes/basic/defindex.html:11
+msgid "Overview"
+msgstr "Översikt"
+
+#: sphinx/themes/basic/defindex.html:20
+msgid "Indices and tables:"
+msgstr "Index och tabeller:"
+
+#: sphinx/themes/basic/defindex.html:23
+msgid "Complete Table of Contents"
+msgstr "Komplett Innehållsförteckning"
+
+#: sphinx/themes/basic/defindex.html:24
+msgid "lists all sections and subsections"
+msgstr "lista över alla paragrafer och underparagrafer"
+
+#: sphinx/themes/basic/defindex.html:26
+msgid "search this documentation"
+msgstr "sök i det här dokumentet"
+
+#: sphinx/themes/basic/defindex.html:28
+msgid "Global Module Index"
+msgstr "Global Modulindex"
+
+#: sphinx/themes/basic/defindex.html:29
+msgid "quick access to all modules"
+msgstr "genväg till alla moduler"
+
+#: sphinx/themes/basic/defindex.html:31
+msgid "all functions, classes, terms"
+msgstr "alla funktioner, klasser, villkor"
+
+#: sphinx/themes/basic/genindex-single.html:14
+#, python-format
+msgid "Index &ndash; %(key)s"
+msgstr "Index &ndash; %(key)s"
+
+#: sphinx/themes/basic/genindex-single.html:46
+#: sphinx/themes/basic/genindex-split.html:24
+#: sphinx/themes/basic/genindex-split.html:38
+#: sphinx/themes/basic/genindex.html:56
+msgid "Full index on one page"
+msgstr "Hela innehållsförteckningen på en sida"
+
+#: sphinx/themes/basic/genindex-split.html:16
+msgid "Index pages by letter"
+msgstr "Innehållsförteckning per inledande bokstav"
+
+#: sphinx/themes/basic/genindex-split.html:25
+msgid "can be huge"
+msgstr "kan bli stort"
+
+#: sphinx/themes/basic/layout.html:23
+msgid "Navigation"
+msgstr "Navigation"
+
+#: sphinx/themes/basic/layout.html:113
+#, python-format
+msgid "Search within %(docstitle)s"
+msgstr "Sök bland %(docstitle)s"
+
+#: sphinx/themes/basic/layout.html:122
+msgid "About these documents"
+msgstr "Om dessa dokument"
+
+#: sphinx/themes/basic/layout.html:131
+msgid "Copyright"
+msgstr "Copyright"
+
+#: sphinx/themes/basic/layout.html:180
+#, python-format
+msgid "&copy; <a href=\"%(path)s\">Copyright</a> %(copyright)s."
+msgstr "&copy; <a href=\"%(path)s\">Copyright</a> %(copyright)s."
+
+#: sphinx/themes/basic/layout.html:182
+#, python-format
+msgid "&copy; Copyright %(copyright)s."
+msgstr "&copy; Copyright %(copyright)s."
+
+#: sphinx/themes/basic/layout.html:186
+#, python-format
+msgid "Last updated on %(last_updated)s."
+msgstr "Senast uppdaterad %(last_updated)s."
+
+#: sphinx/themes/basic/layout.html:189
+#, python-format
+msgid "Created using <a href=\"http://sphinx.pocoo.org/\">Sphinx</a> %(sphinx_version)s."
+msgstr "Skapad med <a href=\"http://sphinx.pocoo.org/\">Sphinx</a> %(sphinx_version)s."
+
+#: sphinx/themes/basic/opensearch.xml:4
+#, python-format
+msgid "Search %(docstitle)s"
+msgstr "Sök %(docstitle)s"
+
+#: sphinx/themes/basic/relations.html:11
+msgid "Previous topic"
+msgstr "Föregående titel"
+
+#: sphinx/themes/basic/relations.html:13
+msgid "previous chapter"
+msgstr "Föregående kapitel"
+
+#: sphinx/themes/basic/relations.html:16
+msgid "Next topic"
+msgstr "Nästa titel"
+
+#: sphinx/themes/basic/relations.html:18
+msgid "next chapter"
+msgstr "Nästa kapitel"
+
+#: sphinx/themes/basic/search.html:18
+msgid ""
+"Please activate JavaScript to enable the search\n"
+" functionality."
+msgstr "Var god aktivera JavaScript för sökfunktionalitet."
+
+#: sphinx/themes/basic/search.html:23
+msgid ""
+"From here you can search these documents. Enter your search\n"
+" words into the box below and click \"search\". Note that the search\n"
+" function will automatically search for all of the words. Pages\n"
+" containing fewer words won't appear in the result list."
+msgstr ""
+"Här kan du söka bland dessa dokument. Ange sökord nedan och klicka \"sök\".\n"
+" Sökningen måste träffa på samtliga angivna sökord."
+
+#: sphinx/themes/basic/search.html:30
+msgid "search"
+msgstr "sök"
+
+#: sphinx/themes/basic/search.html:34
+#: sphinx/themes/basic/static/searchtools.js:489
+msgid "Search Results"
+msgstr "Sökresultat"
+
+#: sphinx/themes/basic/search.html:36
+msgid "Your search did not match any results."
+msgstr "Din sökning gav inga resultat."
+
+#: sphinx/themes/basic/searchbox.html:12
+msgid "Quick search"
+msgstr "Snabbsök"
+
+#: sphinx/themes/basic/sourcelink.html:11
+msgid "This Page"
+msgstr "Denna Sida"
+
+#: sphinx/themes/basic/changes/frameset.html:5
+#: sphinx/themes/basic/changes/versionchanges.html:12
+#, python-format
+msgid "Changes in Version %(version)s &mdash; %(docstitle)s"
+msgstr "Förändringar i Version %(version)s &mdash; %(docstitle)s"
+
+#: sphinx/themes/basic/changes/rstsource.html:5
+#, python-format
+msgid "%(filename)s &mdash; %(docstitle)s"
+msgstr "%(filename)s &mdash; %(docstitle)s"
+
+#: sphinx/themes/basic/changes/versionchanges.html:17
+#, python-format
+msgid "Automatically generated list of changes in version %(version)s"
+msgstr "Automatiskt genererad lista över förändringar i version %(version)s"
+
+#: sphinx/themes/basic/changes/versionchanges.html:18
+msgid "Library changes"
+msgstr "Förändringar i bibliotek"
+
+#: sphinx/themes/basic/changes/versionchanges.html:23
+msgid "C API changes"
+msgstr "Förändringar i C-API"
+
+#: sphinx/themes/basic/changes/versionchanges.html:25
+msgid "Other changes"
+msgstr "Övriga förändringar"
+
+#: sphinx/themes/basic/static/doctools.js:154
+#: sphinx/writers/html.py:482
+#: sphinx/writers/html.py:487
+msgid "Permalink to this headline"
+msgstr "Permalink till denna rubrik"
+
+#: sphinx/themes/basic/static/doctools.js:160
+#: sphinx/writers/html.py:87
+msgid "Permalink to this definition"
+msgstr "Permalink till denna definition"
+
+#: sphinx/themes/basic/static/doctools.js:189
+msgid "Hide Search Matches"
+msgstr "Dölj Sökresultat"
+
+#: sphinx/themes/basic/static/searchtools.js:285
+msgid "Searching"
+msgstr "Söker"
+
+#: sphinx/themes/basic/static/searchtools.js:290
+msgid "Preparing search..."
+msgstr "Förbereder sökning..."
+
+#: sphinx/themes/basic/static/searchtools.js:364
+msgid ", in "
+msgstr ", i "
+
+#: sphinx/themes/basic/static/searchtools.js:491
+msgid "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories."
+msgstr "Din sökning gav inga resultat. Kolla stavning och att du valt tillräckligt med kategorier."
+
+#: sphinx/themes/basic/static/searchtools.js:493
+#, python-format
+msgid "Search finished, found %s page(s) matching the search query."
+msgstr "Sökning färdig, hittade %s träffar."
+
+#: sphinx/themes/default/static/sidebar.js:66
+msgid "Expand sidebar"
+msgstr "Expandera sidolist"
+
+#: sphinx/themes/default/static/sidebar.js:79
+#: sphinx/themes/default/static/sidebar.js:106
+msgid "Collapse sidebar"
+msgstr "Dölj sidolist"
+
+#: sphinx/themes/haiku/layout.html:26
+msgid "Contents"
+msgstr "Innehåll"
+
+#: sphinx/writers/latex.py:171
+msgid "Release"
+msgstr "Utgåva"
+
+#: sphinx/writers/latex.py:572
+#: sphinx/writers/manpage.py:178
+msgid "Footnotes"
+msgstr "Fotnoter"
+
+#: sphinx/writers/latex.py:641
+msgid "continued from previous page"
+msgstr "fortsättning från föregående sida"
+
+#: sphinx/writers/latex.py:646
+msgid "Continued on next page"
+msgstr "Fortsätter på nästa sida"
+
+#: sphinx/writers/text.py:422
+msgid "[image]"
+msgstr "[image]"
+
diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py
index b8e2fded..ef92297c 100644
--- a/sphinx/pycode/__init__.py
+++ b/sphinx/pycode/__init__.py
@@ -18,6 +18,7 @@ from sphinx.errors import PycodeError
from sphinx.pycode import nodes
from sphinx.pycode.pgen2 import driver, token, tokenize, parse, literals
from sphinx.util import get_module_source
+from sphinx.util.pycompat import next
from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc
@@ -98,7 +99,8 @@ class AttrDocVisitor(nodes.NodeVisitor):
if not pnode or pnode.type not in (token.INDENT, token.DEDENT):
break
prefix = pnode.get_prefix()
- prefix = prefix.decode(self.encoding)
+ if not isinstance(prefix, unicode):
+ prefix = prefix.decode(self.encoding)
docstring = prepare_commentdoc(prefix)
self.add_docstring(node, docstring)
@@ -278,7 +280,7 @@ class ModuleAnalyzer(object):
result[fullname] = (dtype, startline, endline)
expect_indent = False
if tok in ('def', 'class'):
- name = tokeniter.next()[1]
+ name = next(tokeniter)[1]
namespace.append(name)
fullname = '.'.join(namespace)
stack.append((tok, fullname, spos[0], indent))
diff --git a/sphinx/pycode/nodes.py b/sphinx/pycode/nodes.py
index e7184677..fc6eb93a 100644
--- a/sphinx/pycode/nodes.py
+++ b/sphinx/pycode/nodes.py
@@ -29,6 +29,8 @@ class BaseNode(object):
return NotImplemented
return not self._eq(other)
+ __hash__ = None
+
def get_prev_sibling(self):
"""Return previous child in parent's children, or None."""
if self.parent is None:
diff --git a/sphinx/pycode/pgen2/literals.py b/sphinx/pycode/pgen2/literals.py
index 31900291..d4893702 100644
--- a/sphinx/pycode/pgen2/literals.py
+++ b/sphinx/pycode/pgen2/literals.py
@@ -66,7 +66,7 @@ uni_escape_re = re.compile(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3}|"
def evalString(s, encoding=None):
regex = escape_re
repl = escape
- if encoding:
+ if encoding and not isinstance(s, unicode):
s = s.decode(encoding)
if s.startswith('u') or s.startswith('U'):
regex = uni_escape_re
diff --git a/sphinx/pycode/pgen2/tokenize.py b/sphinx/pycode/pgen2/tokenize.py
index 4489db89..7ad9f012 100644
--- a/sphinx/pycode/pgen2/tokenize.py
+++ b/sphinx/pycode/pgen2/tokenize.py
@@ -143,7 +143,9 @@ class TokenError(Exception): pass
class StopTokenizing(Exception): pass
-def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
+def printtoken(type, token, scell, ecell, line): # for testing
+ srow, scol = scell
+ erow, ecol = ecell
print "%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token))
diff --git a/sphinx/quickstart.py b/sphinx/quickstart.py
index 884caca7..fdac4cbe 100644
--- a/sphinx/quickstart.py
+++ b/sphinx/quickstart.py
@@ -9,8 +9,9 @@
:license: BSD, see LICENSE for details.
"""
-import sys, os, time
+import sys, os, time, re
from os import path
+from codecs import open
TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
@@ -20,10 +21,23 @@ from sphinx.util.console import purple, bold, red, turquoise, \
nocolor, color_terminal
from sphinx.util import texescape
+# function to get input from terminal -- overridden by the test suite
+try:
+ # this raw_input is not converted by 2to3
+ term_input = raw_input
+except NameError:
+ term_input = input
+
PROMPT_PREFIX = '> '
-QUICKSTART_CONF = '''\
+if sys.version_info >= (3, 0):
+ # prevents that the file is checked for being written in Python 2.x syntax
+ QUICKSTART_CONF = '#!/usr/bin/env python3\n'
+else:
+ QUICKSTART_CONF = ''
+
+QUICKSTART_CONF += '''\
# -*- coding: utf-8 -*-
#
# %(project)s documentation build configuration file, created by
@@ -330,7 +344,7 @@ ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) \
$(SPHINXOPTS) %(rsrcdir)s
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp \
-epub latex latexpdf text man changes linkcheck doctest
+epub latex latexpdf text man changes linkcheck doctest gettext
help:
\t@echo "Please use \\`make <target>' where <target> is one of"
@@ -347,6 +361,7 @@ help:
\t@echo " latexpdf to make LaTeX files and run them through pdflatex"
\t@echo " text to make text files"
\t@echo " man to make manual pages"
+\t@echo " gettext to make PO message catalogs"
\t@echo " changes to make an overview of all changed/added/deprecated items"
\t@echo " linkcheck to check all external links for integrity"
\t@echo " doctest to run all doctests embedded in the documentation \
@@ -433,6 +448,11 @@ man:
\t@echo
\t@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+gettext:
+\t$(SPHINXBUILD) -b gettext $(ALLSPHINXOPTS) $(BUILDDIR)/locale
+\t@echo
+\t@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
changes:
\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
\t@echo
@@ -481,6 +501,7 @@ if "%%1" == "help" (
\techo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
\techo. text to make text files
\techo. man to make manual pages
+\techo. gettext to make PO message catalogs
\techo. changes to make an overview over all changed/added/deprecated items
\techo. linkcheck to check all external links for integrity
\techo. doctest to run all doctests embedded in the documentation if enabled
@@ -582,6 +603,13 @@ if "%%1" == "man" (
\tgoto end
)
+if "%%1" == "gettext" (
+\t%%SPHINXBUILD%% -b gettext %%ALLSPHINXOPTS%% %%BUILDDIR%%/locale
+\techo.
+\techo.Build finished. The message catalogs are in %%BUILDDIR%%/locale.
+\tgoto end
+)
+
if "%%1" == "changes" (
\t%%SPHINXBUILD%% -b changes %%ALLSPHINXOPTS%% %%BUILDDIR%%/changes
\techo.
@@ -656,20 +684,22 @@ def do_prompt(d, key, text, default=None, validator=nonempty):
prompt = purple(PROMPT_PREFIX + '%s [%s]: ' % (text, default))
else:
prompt = purple(PROMPT_PREFIX + text + ': ')
- x = raw_input(prompt)
+ x = term_input(prompt)
if default and not x:
x = default
- if x.decode('ascii', 'replace').encode('ascii', 'replace') != x:
- if TERM_ENCODING:
- x = x.decode(TERM_ENCODING)
- else:
- print turquoise('* Note: non-ASCII characters entered '
- 'and terminal encoding unknown -- assuming '
- 'UTF-8 or Latin-1.')
- try:
- x = x.decode('utf-8')
- except UnicodeDecodeError:
- x = x.decode('latin1')
+ if not isinstance(x, unicode):
+ # for Python 2.x, try to get a Unicode string out of it
+ if x.decode('ascii', 'replace').encode('ascii', 'replace') != x:
+ if TERM_ENCODING:
+ x = x.decode(TERM_ENCODING)
+ else:
+ print turquoise('* Note: non-ASCII characters entered '
+ 'and terminal encoding unknown -- assuming '
+ 'UTF-8 or Latin-1.')
+ try:
+ x = x.decode('utf-8')
+ except UnicodeDecodeError:
+ x = x.decode('latin1')
try:
x = validator(x)
except ValidationError, err:
@@ -679,6 +709,18 @@ def do_prompt(d, key, text, default=None, validator=nonempty):
d[key] = x
+if sys.version_info >= (3, 0):
+ # remove Unicode literal prefixes
+ _unicode_string_re = re.compile(r"[uU]('.*?')")
+ def _convert_python_source(source):
+ return _unicode_string_re.sub('\\1', source)
+
+ for f in ['QUICKSTART_CONF', 'EPUB_CONFIG', 'INTERSPHINX_CONFIG']:
+ globals()[f] = _convert_python_source(globals()[f])
+
+ del _unicode_string_re, _convert_python_source
+
+
def inner_main(args):
d = {}
texescape.init()
@@ -834,28 +876,28 @@ directly.'''
if d['ext_intersphinx']:
conf_text += INTERSPHINX_CONFIG
- f = open(path.join(srcdir, 'conf.py'), 'w')
- f.write(conf_text.encode('utf-8'))
+ f = open(path.join(srcdir, 'conf.py'), 'w', encoding='utf-8')
+ f.write(conf_text)
f.close()
masterfile = path.join(srcdir, d['master'] + d['suffix'])
- f = open(masterfile, 'w')
- f.write((MASTER_FILE % d).encode('utf-8'))
+ f = open(masterfile, 'w', encoding='utf-8')
+ f.write(MASTER_FILE % d)
f.close()
if d['makefile']:
d['rsrcdir'] = d['sep'] and 'source' or '.'
d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
# use binary mode, to avoid writing \r\n on Windows
- f = open(path.join(d['path'], 'Makefile'), 'wb')
- f.write((MAKEFILE % d).encode('utf-8'))
+ f = open(path.join(d['path'], 'Makefile'), 'wb', encoding='utf-8')
+ f.write(MAKEFILE % d)
f.close()
if d['batchfile']:
d['rsrcdir'] = d['sep'] and 'source' or '.'
d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
- f = open(path.join(d['path'], 'make.bat'), 'w')
- f.write((BATCHFILE % d).encode('utf-8'))
+ f = open(path.join(d['path'], 'make.bat'), 'w', encoding='utf-8')
+ f.write(BATCHFILE % d)
f.close()
print
diff --git a/sphinx/roles.py b/sphinx/roles.py
index 0ea0ec48..b44868e6 100644
--- a/sphinx/roles.py
+++ b/sphinx/roles.py
@@ -18,7 +18,7 @@ from docutils.parsers.rst import roles
from sphinx import addnodes
from sphinx.locale import _
from sphinx.util import ws_re
-from sphinx.util.nodes import split_explicit_title
+from sphinx.util.nodes import split_explicit_title, process_index_entry
generic_docroles = {
@@ -139,16 +139,15 @@ class XRefRole(object):
# methods that can be overwritten
def process_link(self, env, refnode, has_explicit_title, title, target):
- """
- Called after parsing title and target text, and creating the reference
- node (given in *refnode*). This method can alter the reference node and
- must return a new (or the same) ``(title, target)`` tuple.
+ """Called after parsing title and target text, and creating the
+ reference node (given in *refnode*). This method can alter the
+ reference node and must return a new (or the same) ``(title, target)``
+ tuple.
"""
return title, ws_re.sub(' ', target)
def result_nodes(self, document, env, node, is_ref):
- """
- Called before returning the finished nodes. *node* is the reference
+ """Called before returning the finished nodes. *node* is the reference
node if one was created (*is_ref* is then true), else the content node.
This method can add other nodes and must return a ``(nodes, messages)``
tuple (the usual return value of a role function).
@@ -269,6 +268,27 @@ def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
return [addnodes.abbreviation(abbr, abbr, explanation=expl)], []
+def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
+ # create new reference target
+ env = inliner.document.settings.env
+ targetid = 'index-%s' % env.new_serialno('index')
+ targetnode = nodes.target('', '', ids=[targetid])
+ # split text and target in role content
+ has_explicit_title, title, target = split_explicit_title(text)
+ title = utils.unescape(title)
+ target = utils.unescape(target)
+ # if an explicit target is given, we can process it as a full entry
+ if has_explicit_title:
+ entries = process_index_entry(target, targetid)
+ # otherwise we just create a "single" entry
+ else:
+ entries = [('single', target, targetid, target)]
+ indexnode = addnodes.index()
+ indexnode['entries'] = entries
+ textnode = nodes.Text(title, title)
+ return [indexnode, targetnode, textnode], []
+
+
specific_docroles = {
# links to download references
'download': XRefRole(nodeclass=addnodes.download_reference),
@@ -282,6 +302,7 @@ specific_docroles = {
'file': emph_literal_role,
'samp': emph_literal_role,
'abbr': abbr_role,
+ 'index': index_role,
}
for rolename, func in specific_docroles.iteritems():
diff --git a/sphinx/setup_command.py b/sphinx/setup_command.py
index 939fbb21..5fe34f18 100644
--- a/sphinx/setup_command.py
+++ b/sphinx/setup_command.py
@@ -22,7 +22,8 @@ from sphinx.util.console import darkred, nocolor, color_terminal
class BuildDoc(Command):
- """Distutils command to build Sphinx documentation.
+ """
+ Distutils command to build Sphinx documentation.
The Sphinx build can then be triggered from distutils, and some Sphinx
options can be set in ``setup.py`` or ``setup.cfg`` instead of Sphinx own
diff --git a/sphinx/themes/basic/searchresults.html b/sphinx/themes/basic/searchresults.html
new file mode 100644
index 00000000..4b5da1a3
--- /dev/null
+++ b/sphinx/themes/basic/searchresults.html
@@ -0,0 +1,36 @@
+{#
+ basic/searchresults.html
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Template for the body of the search results page.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+#}
+<h1 id="search-documentation">Search</h1>
+<p>
+ From here you can search these documents. Enter your search
+ words into the box below and click "search".
+</p>
+<form action="" method="get">
+ <input type="text" name="q" value="" />
+ <input type="submit" value="search" />
+ <span id="search-progress" style="padding-left: 10px"></span>
+</form>
+{% if search_performed %}
+<h2>Search Results</h2>
+{% if not search_results %}
+<p>Your search did not match any results.</p>
+{% endif %}
+{% endif %}
+<div id="search-results">
+ {% if search_results %}
+ <ul class="search">
+ {% for href, caption, context in search_results %}
+ <li><a href="{{ href }}?highlight={{ q }}">{{ caption }}</a>
+ <div class="context">{{ context|e }}</div>
+ </li>
+ {% endfor %}
+ </ul>
+ {% endif %}
+</div>
diff --git a/sphinx/themes/basic/static/ajax-loader.gif b/sphinx/themes/basic/static/ajax-loader.gif
new file mode 100644
index 00000000..61faf8ca
--- /dev/null
+++ b/sphinx/themes/basic/static/ajax-loader.gif
Binary files differ
diff --git a/sphinx/themes/basic/static/comment-bright.png b/sphinx/themes/basic/static/comment-bright.png
new file mode 100644
index 00000000..551517b8
--- /dev/null
+++ b/sphinx/themes/basic/static/comment-bright.png
Binary files differ
diff --git a/sphinx/themes/basic/static/comment-close.png b/sphinx/themes/basic/static/comment-close.png
new file mode 100644
index 00000000..09b54be4
--- /dev/null
+++ b/sphinx/themes/basic/static/comment-close.png
Binary files differ
diff --git a/sphinx/themes/basic/static/comment.png b/sphinx/themes/basic/static/comment.png
new file mode 100644
index 00000000..92feb52b
--- /dev/null
+++ b/sphinx/themes/basic/static/comment.png
Binary files differ
diff --git a/sphinx/themes/basic/static/down-pressed.png b/sphinx/themes/basic/static/down-pressed.png
new file mode 100644
index 00000000..6f7ad782
--- /dev/null
+++ b/sphinx/themes/basic/static/down-pressed.png
Binary files differ
diff --git a/sphinx/themes/basic/static/down.png b/sphinx/themes/basic/static/down.png
new file mode 100644
index 00000000..3003a887
--- /dev/null
+++ b/sphinx/themes/basic/static/down.png
Binary files differ
diff --git a/sphinx/themes/basic/static/up-pressed.png b/sphinx/themes/basic/static/up-pressed.png
new file mode 100644
index 00000000..8bd587af
--- /dev/null
+++ b/sphinx/themes/basic/static/up-pressed.png
Binary files differ
diff --git a/sphinx/themes/basic/static/up.png b/sphinx/themes/basic/static/up.png
new file mode 100644
index 00000000..b9462568
--- /dev/null
+++ b/sphinx/themes/basic/static/up.png
Binary files differ
diff --git a/sphinx/themes/basic/static/websupport.js b/sphinx/themes/basic/static/websupport.js
new file mode 100644
index 00000000..870b0cdc
--- /dev/null
+++ b/sphinx/themes/basic/static/websupport.js
@@ -0,0 +1,762 @@
+(function($) {
+ $.fn.autogrow = function(){
+ return this.each(function(){
+ var textarea = this;
+
+ $.fn.autogrow.resize(textarea);
+
+ $(textarea)
+ .focus(function() {
+ textarea.interval = setInterval(function() {
+ $.fn.autogrow.resize(textarea);
+ }, 500);
+ })
+ .blur(function() {
+ clearInterval(textarea.interval);
+ });
+ });
+ };
+
+ $.fn.autogrow.resize = function(textarea) {
+ var lineHeight = parseInt($(textarea).css('line-height'), 10);
+ var lines = textarea.value.split('\n');
+ var columns = textarea.cols;
+ var lineCount = 0;
+ $.each(lines, function() {
+ lineCount += Math.ceil(this.length / columns) || 1;
+ });
+ var height = lineHeight * (lineCount + 1);
+ $(textarea).css('height', height);
+ };
+})(jQuery);
+
+(function($) {
+ var comp, by;
+
+ function init() {
+ initEvents();
+ initComparator();
+ }
+
+ function initEvents() {
+ $('a.comment_close').live("click", function(event) {
+ hide($(this).attr('id').substring(2));
+ return false;
+ });
+ $('.vote').live("click", function() {
+ handleVote($(this));
+ return false;
+ });
+ $('a.reply').live("click", function() {
+ openReply($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.close_reply').live("click", function() {
+ closeReply($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.sort_option').live("click", function(event) {
+ handleReSort($(this));
+ return false;
+ });
+ $('a.show_proposal').live("click", function() {
+ showProposal($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.hide_proposal').live("click", function() {
+ hideProposal($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.show_propose_change').live("click", function() {
+ showProposeChange($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.hide_propose_change').live("click", function() {
+ hideProposeChange($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.accept_comment').live("click", function() {
+ acceptComment($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.reject_comment').live("click", function() {
+ rejectComment($(this).attr('id').substring(2));
+ return false;
+ });
+ $('a.delete_comment').live("click", function() {
+ deleteComment($(this).attr('id').substring(2));
+ return false;
+ });
+ }
+
+ /*
+ Set comp, which is a comparator function used for sorting and
+ inserting comments into the list.
+ */
+ function setComparator() {
+ // If the first three letters are "asc", sort in ascending order
+ // and remove the prefix.
+ if (by.substring(0,3) == 'asc') {
+ var i = by.substring(3);
+ comp = function(a, b) { return a[i] - b[i]; };
+ } else {
+ // Otherwise sort in descending order.
+ comp = function(a, b) { return b[by] - a[by]; };
+ }
+
+ // Reset link styles and format the selected sort option.
+ $('a.sel').attr('href', '#').removeClass('sel');
+ $('a.' + by).removeAttr('href').addClass('sel');
+ }
+
+ /*
+ Create a comp function. If the user has preferences stored in
+ the sortBy cookie, use those, otherwise use the default.
+ */
+ function initComparator() {
+ by = 'rating'; // Default to sort by rating.
+ // If the sortBy cookie is set, use that instead.
+ if (document.cookie.length > 0) {
+ var start = document.cookie.indexOf('sortBy=');
+ if (start != -1) {
+ start = start + 7;
+ var end = document.cookie.indexOf(";", start);
+ if (end == -1) {
+ end = document.cookie.length;
+ by = unescape(document.cookie.substring(start, end));
+ }
+ }
+ }
+ setComparator();
+ }
+
+ /*
+ Show a comment div.
+ */
+ function show(id) {
+ $('#ao' + id).hide();
+ $('#ah' + id).show();
+ var context = $.extend({id: id}, opts);
+ var popup = $(renderTemplate(popupTemplate, context)).hide();
+ popup.find('textarea[name="proposal"]').hide();
+ popup.find('a.' + by).addClass('sel');
+ var form = popup.find('#cf' + id);
+ form.submit(function(event) {
+ event.preventDefault();
+ addComment(form);
+ });
+ $('#s' + id).after(popup);
+ popup.slideDown('fast', function() {
+ getComments(id);
+ });
+ }
+
+ /*
+ Hide a comment div.
+ */
+ function hide(id) {
+ $('#ah' + id).hide();
+ $('#ao' + id).show();
+ var div = $('#sc' + id);
+ div.slideUp('fast', function() {
+ div.remove();
+ });
+ }
+
+ /*
+ Perform an ajax request to get comments for a node
+ and insert the comments into the comments tree.
+ */
+ function getComments(id) {
+ $.ajax({
+ type: 'GET',
+ url: opts.getCommentsURL,
+ data: {node: id},
+ success: function(data, textStatus, request) {
+ var ul = $('#cl' + id);
+ var speed = 100;
+ $('#cf' + id)
+ .find('textarea[name="proposal"]')
+ .data('source', data.source);
+
+ if (data.comments.length === 0) {
+ ul.html('<li>No comments yet.</li>');
+ ul.data('empty', true);
+ } else {
+ // If there are comments, sort them and put them in the list.
+ var comments = sortComments(data.comments);
+ speed = data.comments.length * 100;
+ appendComments(comments, ul);
+ ul.data('empty', false);
+ }
+ $('#cn' + id).slideUp(speed + 200);
+ ul.slideDown(speed);
+ },
+ error: function(request, textStatus, error) {
+ showError('Oops, there was a problem retrieving the comments.');
+ },
+ dataType: 'json'
+ });
+ }
+
+ /*
+ Add a comment via ajax and insert the comment into the comment tree.
+ */
+ function addComment(form) {
+ // Disable the form that is being submitted.
+ form.find('textarea,input').attr('disabled', 'disabled');
+ var node_id = form.find('input[name="node"]').val();
+ var parent_id = form.find('input[name="parent"]').val();
+
+ // Send the comment to the server.
+ $.ajax({
+ type: "POST",
+ url: opts.addCommentURL,
+ dataType: 'json',
+ data: {
+ node: node_id,
+ parent: parent_id,
+ text: form.find('textarea[name="comment"]').val(),
+ proposal: form.find('textarea[name="proposal"]').val()
+ },
+ success: function(data, textStatus, error) {
+ // Reset the form.
+ if (node_id) {
+ hideProposeChange(node_id);
+ }
+ form.find('textarea')
+ .val('')
+ .add(form.find('input'))
+ .removeAttr('disabled');
+ var ul = $('#cl' + (node_id || parent_id));
+ if (ul.data('empty')) {
+ $(ul).empty();
+ ul.data('empty', false);
+ }
+ insertComment(data.comment);
+ },
+ error: function(request, textStatus, error) {
+ form.find('textarea,input').removeAttr('disabled');
+ showError('Oops, there was a problem adding the comment.');
+ }
+ });
+ }
+
+ /*
+ Recursively append comments to the main comment list and children
+ lists, creating the comment tree.
+ */
+ function appendComments(comments, ul) {
+ $.each(comments, function() {
+ var div = createCommentDiv(this);
+ ul.append($(document.createElement('li')).html(div));
+ appendComments(this.children, div.find('ul.children'));
+ // To avoid keeping stale data, don't store the comment's children in data.
+ this.children = null;
+ div.data('comment', this);
+ });
+ }
+
+ /*
+ After adding a new comment, it must be inserted in the correct
+ location in the comment tree.
+ */
+ function insertComment(comment) {
+ var div = createCommentDiv(comment);
+
+ // To avoid keeping stale data, don't store the comment's children in data.
+ comment.children = null;
+ div.data('comment', comment);
+
+ var ul = $('#cl' + (comment.node || comment.parent));
+ var siblings = getChildren(ul);
+
+ var li = $(document.createElement('li'));
+ li.hide();
+
+ // Determine where in the parent's children list to insert this comment.
+ for(i=0; i < siblings.length; i++) {
+ if (comp(comment, siblings[i]) <= 0) {
+ $('#cd' + siblings[i].id)
+ .parent()
+ .before(li.html(div));
+ li.slideDown('fast');
+ return;
+ }
+ }
+
+ // If we get here, this comment rates lower than all the others,
+ // or it is the only comment in the list.
+ ul.append(li.html(div));
+ li.slideDown('fast');
+ }
+
+ function acceptComment(id) {
+ $.ajax({
+ type: 'POST',
+ url: opts.acceptCommentURL,
+ data: {id: id},
+ success: function(data, textStatus, request) {
+ $('#cm' + id).fadeOut('fast');
+ },
+ error: function(request, textStatus, error) {
+ showError("Oops, there was a problem accepting the comment.");
+ }
+ });
+ }
+
+ function rejectComment(id) {
+ $.ajax({
+ type: 'POST',
+ url: opts.rejectCommentURL,
+ data: {id: id},
+ success: function(data, textStatus, request) {
+ var div = $('#cd' + id);
+ div.slideUp('fast', function() {
+ div.remove();
+ });
+ },
+ error: function(request, textStatus, error) {
+ showError("Oops, there was a problem rejecting the comment.");
+ }
+ });
+ }
+
+ function deleteComment(id) {
+ $.ajax({
+ type: 'POST',
+ url: opts.deleteCommentURL,
+ data: {id: id},
+ success: function(data, textStatus, request) {
+ var div = $('#cd' + id);
+ div
+ .find('span.user_id:first')
+ .text('[deleted]').end()
+ .find('p.comment_text:first')
+ .text('[deleted]').end()
+ .find('#cm' + id + ', #dc' + id + ', #ac' + id + ', #rc' + id +
+ ', #sp' + id + ', #hp' + id + ', #cr' + id + ', #rl' + id)
+ .remove();
+ var comment = div.data('comment');
+ comment.username = '[deleted]';
+ comment.text = '[deleted]';
+ div.data('comment', comment);
+ },
+ error: function(request, textStatus, error) {
+ showError("Oops, there was a problem deleting the comment.");
+ }
+ });
+ }
+
+ function showProposal(id) {
+ $('#sp' + id).hide();
+ $('#hp' + id).show();
+ $('#pr' + id).slideDown('fast');
+ }
+
+ function hideProposal(id) {
+ $('#hp' + id).hide();
+ $('#sp' + id).show();
+ $('#pr' + id).slideUp('fast');
+ }
+
+ function showProposeChange(id) {
+ $('#pc' + id).hide();
+ $('#hc' + id).show();
+ var textarea = $('#pt' + id);
+ textarea.val(textarea.data('source'));
+ $.fn.autogrow.resize(textarea[0]);
+ textarea.slideDown('fast');
+ }
+
+ function hideProposeChange(id) {
+ $('#hc' + id).hide();
+ $('#pc' + id).show();
+ var textarea = $('#pt' + id);
+ textarea.val('').removeAttr('disabled');
+ textarea.slideUp('fast');
+ }
+
+ /*
+ Handle when the user clicks on a sort by link.
+ */
+ function handleReSort(link) {
+ var classes = link.attr('class').split(/\s+/);
+ for (var i=0; i<classes.length; i++) {
+ if (classes[i] != 'sort_option') {
+ by = classes[i];
+ }
+ }
+ setComparator();
+ // Save/update the sortBy cookie.
+ var expiration = new Date();
+ expiration.setDate(expiration.getDate() + 365);
+ document.cookie= 'sortBy=' + escape(by) +
+ ';expires=' + expiration.toUTCString();
+ $('ul.comment_ul').each(function(index, ul) {
+ var comments = getChildren($(ul), true);
+ comments = sortComments(comments);
+ appendComments(comments, $(ul).empty());
+ });
+ }
+
+ /*
+ Function to process a vote when a user clicks an arrow.
+ */
+ function handleVote(link) {
+ if (!opts.voting) {
+ showError("You'll need to login to vote.");
+ return;
+ }
+
+ var id = link.attr('id');
+ // If it is an unvote, the new vote value is 0,
+ // Otherwise it's 1 for an upvote, or -1 for a downvote.
+ var value = 0;
+ if (id.charAt(1) != 'u') {
+ value = id.charAt(0) == 'u' ? 1 : -1;
+ }
+ // The data to be sent to the server.
+ var d = {
+ comment_id: id.substring(2),
+ value: value
+ };
+
+ // Swap the vote and unvote links.
+ link.hide();
+ $('#' + id.charAt(0) + (id.charAt(1) == 'u' ? 'v' : 'u') + d.comment_id)
+ .show();
+
+ // The div the comment is displayed in.
+ var div = $('div#cd' + d.comment_id);
+ var data = div.data('comment');
+
+ // If this is not an unvote, and the other vote arrow has
+ // already been pressed, unpress it.
+ if ((d.value !== 0) && (data.vote === d.value * -1)) {
+ $('#' + (d.value == 1 ? 'd' : 'u') + 'u' + d.comment_id).hide();
+ $('#' + (d.value == 1 ? 'd' : 'u') + 'v' + d.comment_id).show();
+ }
+
+ // Update the comments rating in the local data.
+ data.rating += (data.vote === 0) ? d.value : (d.value - data.vote);
+ data.vote = d.value;
+ div.data('comment', data);
+
+ // Change the rating text.
+ div.find('.rating:first')
+ .text(data.rating + ' point' + (data.rating == 1 ? '' : 's'));
+
+ // Send the vote information to the server.
+ $.ajax({
+ type: "POST",
+ url: opts.processVoteURL,
+ data: d,
+ error: function(request, textStatus, error) {
+ showError("Oops, there was a problem casting that vote.");
+ }
+ });
+ }
+
+ /*
+ Open a reply form used to reply to an existing comment.
+ */
+ function openReply(id) {
+ // Swap out the reply link for the hide link
+ $('#rl' + id).hide();
+ $('#cr' + id).show();
+
+ // Add the reply li to the children ul.
+ var div = $(renderTemplate(replyTemplate, {id: id})).hide();
+ $('#cl' + id)
+ .prepend(div)
+ // Setup the submit handler for the reply form.
+ .find('#rf' + id)
+ .submit(function(event) {
+ event.preventDefault();
+ addComment($('#rf' + id));
+ closeReply(id);
+ });
+ div.slideDown('fast');
+ }
+
+ /*
+ Close the reply form opened with openReply.
+ */
+ function closeReply(id) {
+ // Remove the reply div from the DOM.
+ $('#rd' + id).slideUp('fast', function() {
+ $(this).remove();
+ });
+
+ // Swap out the hide link for the reply link
+ $('#cr' + id).hide();
+ $('#rl' + id).show();
+ }
+
+ /*
+ Recursively sort a tree of comments using the comp comparator.
+ */
+ function sortComments(comments) {
+ comments.sort(comp);
+ $.each(comments, function() {
+ this.children = sortComments(this.children);
+ });
+ return comments;
+ }
+
+ /*
+ Get the children comments from a ul. If recursive is true,
+ recursively include children's children.
+ */
+ function getChildren(ul, recursive) {
+ var children = [];
+ ul.children().children("[id^='cd']")
+ .each(function() {
+ var comment = $(this).data('comment');
+ if (recursive) {
+ comment.children = getChildren($(this).find('#cl' + comment.id), true);
+ }
+ children.push(comment);
+ });
+ return children;
+ }
+
+ /*
+ Create a div to display a comment in.
+ */
+ function createCommentDiv(comment) {
+ // Prettify the comment rating.
+ comment.pretty_rating = comment.rating + ' point' +
+ (comment.rating == 1 ? '' : 's');
+ // Create a div for this comment.
+ var context = $.extend({}, opts, comment);
+ var div = $(renderTemplate(commentTemplate, context));
+
+ // If the user has voted on this comment, highlight the correct arrow.
+ if (comment.vote) {
+ var direction = (comment.vote == 1) ? 'u' : 'd';
+ div.find('#' + direction + 'v' + comment.id).hide();
+ div.find('#' + direction + 'u' + comment.id).show();
+ }
+
+ if (comment.text != '[deleted]') {
+ div.find('a.reply').show();
+ if (comment.proposal_diff) {
+ div.find('#sp' + comment.id).show();
+ }
+ if (opts.moderator && !comment.displayed) {
+ div.find('#cm' + comment.id).show();
+ }
+ if (opts.moderator || (opts.username == comment.username)) {
+ div.find('#dc' + comment.id).show();
+ }
+ }
+ return div;
+ }
+
+ /*
+ A simple template renderer. Placeholders such as <%id%> are replaced
+ by context['id'] with items being escaped. Placeholders such as <#id#>
+ are not escaped.
+ */
+ function renderTemplate(template, context) {
+ var esc = $(document.createElement('div'));
+
+ function handle(ph, escape) {
+ var cur = context;
+ $.each(ph.split('.'), function() {
+ cur = cur[this];
+ });
+ return escape ? esc.text(cur || "").html() : cur;
+ }
+
+ return template.replace(/<([%#])([\w\.]*)\1>/g, function(){
+ return handle(arguments[2], arguments[1] == '%' ? true : false);
+ });
+ }
+
+ function showError(message) {
+ $(document.createElement('div')).attr({'class': 'popup_error'})
+ .append($(document.createElement('h1')).text(message))
+ .appendTo('body')
+ .fadeIn("slow")
+ .delay(2000)
+ .fadeOut("slow");
+ }
+
+ /*
+ Add a link the user uses to open the comments popup.
+ */
+ $.fn.comment = function() {
+ return this.each(function() {
+ var id = $(this).attr('id').substring(1);
+ var count = COMMENT_METADATA[id];
+ var title = count + ' comment' + (count == 1 ? '' : 's');
+ var image = count > 0 ? opts.commentBrightImage : opts.commentImage;
+ $(this)
+ .append(
+ $(document.createElement('a')).attr({
+ href: '#',
+ 'class': 'sphinx_comment',
+ id: 'ao' + id
+ })
+ .append($(document.createElement('img')).attr({
+ src: image,
+ alt: 'comment',
+ title: title
+ }))
+ .click(function(event) {
+ event.preventDefault();
+ show($(this).attr('id').substring(2));
+ })
+ )
+ .append(
+ $(document.createElement('a')).attr({
+ href: '#',
+ 'class': 'sphinx_comment_close hidden',
+ id: 'ah' + id
+ })
+ .append($(document.createElement('img')).attr({
+ src: opts.closeCommentImage,
+ alt: 'close',
+ title: 'close'
+ }))
+ .click(function(event) {
+ event.preventDefault();
+ hide($(this).attr('id').substring(2));
+ })
+ );
+ });
+ };
+
+ var opts = jQuery.extend({
+ processVoteURL: '/process_vote',
+ addCommentURL: '/add_comment',
+ getCommentsURL: '/get_comments',
+ acceptCommentURL: '/accept_comment',
+ rejectCommentURL: '/reject_comment',
+ deleteCommentURL: '/delete_comment',
+ commentImage: '/static/_static/comment.png',
+ closeCommentImage: '/static/_static/comment-close.png',
+ loadingImage: '/static/_static/ajax-loader.gif',
+ commentBrightImage: '/static/_static/comment-bright.png',
+ upArrow: '/static/_static/up.png',
+ downArrow: '/static/_static/down.png',
+ upArrowPressed: '/static/_static/up-pressed.png',
+ downArrowPressed: '/static/_static/down-pressed.png',
+ voting: false,
+ moderator: false
+ }, COMMENT_OPTIONS);
+
+ var replyTemplate = '\
+ <li>\
+ <div class="reply_div" id="rd<%id%>">\
+ <form id="rf<%id%>">\
+ <textarea name="comment" cols="80"></textarea>\
+ <input type="submit" value="add reply" />\
+ <input type="hidden" name="parent" value="<%id%>" />\
+ <input type="hidden" name="node" value="" />\
+ </form>\
+ </div>\
+ </li>';
+
+ var commentTemplate = '\
+ <div id="cd<%id%>" class="spxcdiv">\
+ <div class="vote">\
+ <div class="arrow">\
+ <a href="#" id="uv<%id%>" class="vote">\
+ <img src="<%upArrow%>" />\
+ </a>\
+ <a href="#" id="uu<%id%>" class="un vote">\
+ <img src="<%upArrowPressed%>" />\
+ </a>\
+ </div>\
+ <div class="arrow">\
+ <a href="#" id="dv<%id%>" class="vote">\
+ <img src="<%downArrow%>" id="da<%id%>" />\
+ </a>\
+ <a href="#" id="du<%id%>" class="un vote">\
+ <img src="<%downArrowPressed%>" />\
+ </a>\
+ </div>\
+ </div>\
+ <div class="comment_content">\
+ <p class="tagline comment">\
+ <span class="user_id"><%username%></span>\
+ <span class="rating"><%pretty_rating%></span>\
+ <span class="delta"><%time.delta%></span>\
+ </p>\
+ <p class="comment_text comment"><%text%></p>\
+ <p class="comment_opts comment">\
+ <a href="#" class="reply hidden" id="rl<%id%>">reply &#9657;</a>\
+ <a href="#" class="close_reply" id="cr<%id%>">reply &#9663;</a>\
+ <a href="#" id="sp<%id%>" class="show_proposal">\
+ proposal &#9657;\
+ </a>\
+ <a href="#" id="hp<%id%>" class="hide_proposal">\
+ proposal &#9663;\
+ </a>\
+ <a href="#" id="dc<%id%>" class="delete_comment hidden">\
+ delete\
+ </a>\
+ <span id="cm<%id%>" class="moderation hidden">\
+ <a href="#" id="ac<%id%>" class="accept_comment">accept</a>\
+ <a href="#" id="rc<%id%>" class="reject_comment">reject</a>\
+ </span>\
+ </p>\
+ <pre class="proposal" id="pr<%id%>">\
+<#proposal_diff#>\
+ </pre>\
+ <ul class="children" id="cl<%id%>"></ul>\
+ </div>\
+ <div class="clearleft"></div>\
+ </div>\
+ </div>';
+
+ var popupTemplate = '\
+ <div class="sphinx_comments" id="sc<%id%>">\
+ <h1>Comments</h1>\
+ <form method="post" id="cf<%id%>" class="comment_form" action="/docs/add_comment">\
+ <textarea name="comment" cols="80"></textarea>\
+ <p class="propose_button">\
+ <a href="#" id="pc<%id%>" class="show_propose_change">\
+ Propose a change &#9657;\
+ </a>\
+ <a href="#" id="hc<%id%>" class="hide_propose_change">\
+ Propose a change &#9663;\
+ </a>\
+ </p>\
+ <textarea name="proposal" id="pt<%id%>" cols="80" spellcheck="false"></textarea>\
+ <input type="submit" value="add comment" />\
+ <input type="hidden" name="node" value="<%id%>" />\
+ <input type="hidden" name="parent" value="" />\
+ <p class="sort_options">\
+ Sort by:\
+ <a href="#" class="sort_option rating">top</a>\
+ <a href="#" class="sort_option ascage">newest</a>\
+ <a href="#" class="sort_option age">oldest</a>\
+ </p>\
+ </form>\
+ <h3 id="cn<%id%>">loading comments... <img src="<%loadingImage%>" alt="" /></h3>\
+ <ul id="cl<%id%>" class="comment_ul"></ul>\
+ </div>';
+
+ $(document).ready(function() {
+ init();
+ });
+})(jQuery);
+
+$(document).ready(function() {
+ $('.spxcmt').comment();
+
+ /** Highlight search words in search results. */
+ $("div.context").each(function() {
+ var params = $.getQueryParameters();
+ var terms = (params.q) ? params.q[0].split(/\s+/) : [];
+ var result = $(this);
+ $.each(terms, function() {
+ result.highlightText(this.toLowerCase(), 'highlighted');
+ });
+ });
+});
diff --git a/sphinx/theming.py b/sphinx/theming.py
index 0d0f2863..92e63f31 100644
--- a/sphinx/theming.py
+++ b/sphinx/theming.py
@@ -98,8 +98,7 @@ class Theme(object):
self.base = Theme(inherit)
def get_confstr(self, section, name, default=NODEFAULT):
- """
- Return the value for a theme configuration setting, searching the
+ """Return the value for a theme configuration setting, searching the
base theme chain.
"""
try:
@@ -114,9 +113,7 @@ class Theme(object):
return default
def get_options(self, overrides):
- """
- Return a dictionary of theme options and their values.
- """
+ """Return a dictionary of theme options and their values."""
chain = [self.themeconf]
base = self.base
while base is not None:
@@ -135,8 +132,7 @@ class Theme(object):
return options
def get_dirchain(self):
- """
- Return a list of theme directories, beginning with this theme's,
+ """Return a list of theme directories, beginning with this theme's,
then the base theme's, then that one's base theme's, etc.
"""
chain = [self.themedir]
@@ -147,9 +143,7 @@ class Theme(object):
return chain
def cleanup(self):
- """
- Remove temporary directories.
- """
+ """Remove temporary directories."""
if self.themedir_created:
try:
shutil.rmtree(self.themedir)
diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py
index 8d1298cd..a3d30d9d 100644
--- a/sphinx/util/__init__.py
+++ b/sphinx/util/__init__.py
@@ -18,6 +18,8 @@ import tempfile
import posixpath
import traceback
from os import path
+from codecs import open
+from collections import deque
import docutils
from docutils.utils import relative_path
@@ -48,8 +50,7 @@ def docname_join(basedocname, docname):
def get_matching_files(dirname, exclude_matchers=()):
- """
- Get all file names in a directory, recursively.
+ """Get all file names in a directory, recursively.
Exclude files and dirs matching some matcher in *exclude_matchers*.
"""
@@ -75,9 +76,8 @@ def get_matching_files(dirname, exclude_matchers=()):
def get_matching_docs(dirname, suffix, exclude_matchers=()):
- """
- Get all file names (without suffix) matching a suffix in a
- directory, recursively.
+ """Get all file names (without suffix) matching a suffix in a directory,
+ recursively.
Exclude files and dirs matching a pattern in *exclude_patterns*.
"""
@@ -140,8 +140,8 @@ def copy_static_entry(source, targetdir, builder, context={},
target = path.join(targetdir, path.basename(source))
if source.lower().endswith('_t') and builder.templates:
# templated!
- fsrc = open(source, 'rb')
- fdst = open(target[:-2], 'wb')
+ fsrc = open(source, 'r', encoding='utf-8')
+ fdst = open(target[:-2], 'w', encoding='utf-8')
fdst.write(builder.templates.render_string(fsrc.read(), context))
fsrc.close()
fdst.close()
@@ -162,17 +162,21 @@ def copy_static_entry(source, targetdir, builder, context={},
shutil.copytree(source, target)
+_DEBUG_HEADER = '''\
+# Sphinx version: %s
+# Docutils version: %s %s
+# Jinja2 version: %s
+'''
+
def save_traceback():
- """
- Save the current exception's traceback in a temporary file.
- """
+ """Save the current exception's traceback in a temporary file."""
exc = traceback.format_exc()
fd, path = tempfile.mkstemp('.log', 'sphinx-err-')
- os.write(fd, '# Sphinx version: %s\n' % sphinx.__version__)
- os.write(fd, '# Docutils version: %s %s\n' % (docutils.__version__,
- docutils.__version_details__))
- os.write(fd, '# Jinja2 version: %s\n' % jinja2.__version__)
- os.write(fd, exc)
+ os.write(fd, (_DEBUG_HEADER %
+ (sphinx.__version__,
+ docutils.__version__, docutils.__version_details__,
+ jinja2.__version__)).encode('utf-8'))
+ os.write(fd, exc.encode('utf-8'))
os.close(fd)
return path
@@ -225,8 +229,7 @@ class Tee(object):
def parselinenos(spec, total):
- """
- Parse a line number spec (such as "1,2,4-6") and return a list of
+ """Parse a line number spec (such as "1,2,4-6") and return a list of
wanted line numbers.
"""
items = list()
@@ -280,9 +283,7 @@ def rpartition(s, t):
def format_exception_cut_frames(x=1):
- """
- Format an exception with traceback, but only the last x frames.
- """
+ """Format an exception with traceback, but only the last x frames."""
typ, val, tb = sys.exc_info()
#res = ['Traceback (most recent call last):\n']
res = []
@@ -290,3 +291,34 @@ def format_exception_cut_frames(x=1):
res += tbres[-x:]
res += traceback.format_exception_only(typ, val)
return ''.join(res)
+
+
+class PeekableIterator(object):
+ """
+ An iterator which wraps any iterable and makes it possible to peek to see
+ what's the next item.
+ """
+ def __init__(self, iterable):
+ self.remaining = deque()
+ self._iterator = iter(iterable)
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ """Return the next item from the iterator."""
+ if self.remaining:
+ return self.remaining.popleft()
+ return self._iterator.next()
+
+ def push(self, item):
+ """Push the `item` on the internal stack, it will be returned on the
+ next :meth:`next` call.
+ """
+ self.remaining.append(item)
+
+ def peek(self):
+ """Return the next item without changing the state of the iterator."""
+ item = self.next()
+ self.push(item)
+ return item
diff --git a/sphinx/util/docstrings.py b/sphinx/util/docstrings.py
index 538af653..d1a2ff8d 100644
--- a/sphinx/util/docstrings.py
+++ b/sphinx/util/docstrings.py
@@ -13,11 +13,11 @@ import sys
def prepare_docstring(s):
- """
- Convert a docstring into lines of parseable reST. Return it as a list of
- lines usable for inserting into a docutils ViewList (used as argument
- of nested_parse().) An empty line is added to act as a separator between
- this docstring and following content.
+ """Convert a docstring into lines of parseable reST.
+
+ Return it as a list of lines usable for inserting into a docutils ViewList
+ (used as argument of nested_parse().) An empty line is added to act as a
+ separator between this docstring and following content.
"""
lines = s.expandtabs().splitlines()
# Find minimum indentation of any non-blank lines after first line.
@@ -42,9 +42,8 @@ def prepare_docstring(s):
def prepare_commentdoc(s):
- """
- Extract documentation comment lines (starting with #:) and return them as a
- list of lines. Returns an empty list if there is no documentation.
+ """Extract documentation comment lines (starting with #:) and return them
+ as a list of lines. Returns an empty list if there is no documentation.
"""
result = []
lines = [line.strip() for line in s.expandtabs().splitlines()]
diff --git a/sphinx/util/jsonimpl.py b/sphinx/util/jsonimpl.py
index fda85b5e..f654ef22 100644
--- a/sphinx/util/jsonimpl.py
+++ b/sphinx/util/jsonimpl.py
@@ -13,7 +13,7 @@ import UserString
try:
import json
- # json-py's json module has not JSONEncoder; this will raise AttributeError
+ # json-py's json module has no JSONEncoder; this will raise AttributeError
# if json-py is imported instead of the built-in json module
JSONEncoder = json.JSONEncoder
except (ImportError, AttributeError):
diff --git a/sphinx/util/matching.py b/sphinx/util/matching.py
index c459aca2..3746c87c 100644
--- a/sphinx/util/matching.py
+++ b/sphinx/util/matching.py
@@ -13,8 +13,7 @@ import re
def _translate_pattern(pat):
- """
- Translate a shell-style glob pattern to a regular expression.
+ """Translate a shell-style glob pattern to a regular expression.
Adapted from the fnmatch module, but enhanced so that single stars don't
match slashes.
@@ -65,16 +64,14 @@ def compile_matchers(patterns):
_pat_cache = {}
def patmatch(name, pat):
- """
- Return if name matches pat. Adapted from fnmatch module.
- """
+ """Return if name matches pat. Adapted from fnmatch module."""
if pat not in _pat_cache:
_pat_cache[pat] = re.compile(_translate_pattern(pat))
return _pat_cache[pat].match(name)
def patfilter(names, pat):
- """
- Return the subset of the list NAMES that match PAT.
+ """Return the subset of the list NAMES that match PAT.
+
Adapted from fnmatch module.
"""
if pat not in _pat_cache:
diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py
index 97b58569..adce565c 100644
--- a/sphinx/util/nodes.py
+++ b/sphinx/util/nodes.py
@@ -10,11 +10,12 @@
"""
import re
-import types
from docutils import nodes
from sphinx import addnodes
+from sphinx.locale import pairindextypes
+from sphinx.util.pycompat import class_types
# \x00 means the "<" was backslash-escaped
@@ -22,7 +23,28 @@ explicit_title_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)
caption_ref_re = explicit_title_re # b/w compat alias
+def extract_messages(doctree):
+ """Extract translatable messages from a document tree."""
+ for node in doctree.traverse(nodes.TextElement):
+ if isinstance(node, (nodes.Invisible, nodes.Inline)):
+ continue
+ # <field_name>orphan</field_name>
+ # XXX ignore all metadata (== docinfo)
+ if isinstance(node, nodes.field_name) and node.children[0] == 'orphan':
+ continue
+ msg = node.rawsource.replace('\n', ' ').strip()
+ # XXX nodes rendering empty are likely a bug in sphinx.addnodes
+ if msg:
+ yield node, msg
+
+
def nested_parse_with_titles(state, content, node):
+ """Version of state.nested_parse() that allows titles and does not require
+ titles to have the same decoration as the calling document.
+
+ This is useful when the parsed content comes from a completely different
+ context, such as docstrings.
+ """
# hack around title style bookkeeping
surrounding_title_styles = state.memo.title_styles
surrounding_section_level = state.memo.section_level
@@ -51,6 +73,37 @@ def split_explicit_title(text):
return False, text, text
+indextypes = [
+ 'single', 'pair', 'double', 'triple',
+]
+
+def process_index_entry(entry, targetid):
+ indexentries = []
+ entry = entry.strip()
+ for type in pairindextypes:
+ if entry.startswith(type+':'):
+ value = entry[len(type)+1:].strip()
+ value = pairindextypes[type] + '; ' + value
+ indexentries.append(('pair', value, targetid, value))
+ break
+ else:
+ for type in indextypes:
+ if entry.startswith(type+':'):
+ value = entry[len(type)+1:].strip()
+ if type == 'double':
+ type = 'pair'
+ indexentries.append((type, value, targetid, value))
+ break
+ # shorthand notation for single entries
+ else:
+ for value in entry.split(','):
+ value = value.strip()
+ if not value:
+ continue
+ indexentries.append(('single', value, targetid, value))
+ return indexentries
+
+
def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc):
"""Inline all toctrees in the *tree*.
@@ -115,7 +168,7 @@ def _new_traverse(self, condition=None,
if include_self and descend and not siblings and not ascend:
if condition is None:
return self._all_traverse([])
- elif isinstance(condition, (types.ClassType, type)):
+ elif isinstance(condition, class_types):
return self._fast_traverse(condition, [])
return self._old_traverse(condition, include_self,
descend, siblings, ascend)
diff --git a/sphinx/util/osutil.py b/sphinx/util/osutil.py
index beab38cb..464e56ed 100644
--- a/sphinx/util/osutil.py
+++ b/sphinx/util/osutil.py
@@ -11,6 +11,7 @@
import os
import re
+import sys
import time
import errno
import shutil
@@ -58,8 +59,8 @@ def ensuredir(path):
def walk(top, topdown=True, followlinks=False):
- """
- Backport of os.walk from 2.6, where the followlinks argument was added.
+ """Backport of os.walk from 2.6, where the *followlinks* argument was
+ added.
"""
names = os.listdir(top)
@@ -124,7 +125,10 @@ no_fn_re = re.compile(r'[^a-zA-Z0-9_-]')
def make_filename(string):
return no_fn_re.sub('', string)
-
-def ustrftime(format, *args):
- # strftime for unicode strings
- return time.strftime(unicode(format).encode('utf-8'), *args).decode('utf-8')
+if sys.version_info < (3, 0):
+ def ustrftime(format, *args):
+ # strftime for unicode strings
+ return time.strftime(unicode(format).encode('utf-8'), *args) \
+ .decode('utf-8')
+else:
+ ustrftime = time.strftime
diff --git a/sphinx/util/png.py b/sphinx/util/png.py
index 2cb2aa9d..59c32715 100644
--- a/sphinx/util/png.py
+++ b/sphinx/util/png.py
@@ -12,18 +12,18 @@
import struct
import binascii
+from sphinx.util.pycompat import b
+
LEN_IEND = 12
LEN_DEPTH = 22
DEPTH_CHUNK_LEN = struct.pack('!i', 10)
-DEPTH_CHUNK_START = 'tEXtDepth\x00'
-IEND_CHUNK = '\x00\x00\x00\x00IEND\xAE\x42\x60\x82'
+DEPTH_CHUNK_START = b('tEXtDepth\x00')
+IEND_CHUNK = b('\x00\x00\x00\x00IEND\xAE\x42\x60\x82')
def read_png_depth(filename):
- """
- Read the special tEXt chunk indicating the depth from a PNG file.
- """
+ """Read the special tEXt chunk indicating the depth from a PNG file."""
result = None
f = open(filename, 'rb')
try:
@@ -39,8 +39,8 @@ def read_png_depth(filename):
def write_png_depth(filename, depth):
- """
- Write the special tEXt chunk indicating the depth to a PNG file.
+ """Write the special tEXt chunk indicating the depth to a PNG file.
+
The chunk is placed immediately before the special IEND chunk.
"""
data = struct.pack('!i', depth)
diff --git a/sphinx/util/pycompat.py b/sphinx/util/pycompat.py
index bdd9507d..319312a7 100644
--- a/sphinx/util/pycompat.py
+++ b/sphinx/util/pycompat.py
@@ -13,11 +13,101 @@ import sys
import codecs
import encodings
-
-try:
+# ------------------------------------------------------------------------------
+# Python 2/3 compatibility
+
+if sys.version_info >= (3, 0):
+ # Python 3
+ class_types = (type,)
+ # the ubiquitous "bytes" helper functions
+ def b(s):
+ return s.encode('utf-8')
+ bytes = bytes
+ # support for running 2to3 over config files
+ def convert_with_2to3(filepath):
+ from lib2to3.refactor import RefactoringTool, get_fixers_from_package
+ from lib2to3.pgen2.parse import ParseError
+ fixers = get_fixers_from_package('lib2to3.fixes')
+ refactoring_tool = RefactoringTool(fixers)
+ source = refactoring_tool._read_python_source(filepath)[0]
+ try:
+ tree = refactoring_tool.refactor_string(source, 'conf.py')
+ except ParseError, err:
+ # do not propagate lib2to3 exceptions
+ lineno, offset = err.context[1]
+ # try to match ParseError details with SyntaxError details
+ raise SyntaxError(err.msg, (filepath, lineno, offset, err.value))
+ return unicode(tree)
+
+else:
+ # Python 2
+ from types import ClassType
+ class_types = (type, ClassType)
+ b = str
+ bytes = str
+ # no need to refactor on 2.x versions
+ convert_with_2to3 = None
+
+
+# ------------------------------------------------------------------------------
+# Missing builtins and itertools in Python < 2.6
+
+if sys.version_info >= (2, 6):
+ # Python >= 2.6
+ next = next
+
+ from itertools import product
+ try:
+ from itertools import zip_longest # Python 3 name
+ except ImportError:
+ from itertools import izip_longest as zip_longest
+
+else:
+ # Python < 2.6
+ from itertools import izip, repeat, chain
+
+ # this is used on Python 2, where the method is called "next" (it is refactored
+ # to __next__ by 2to3, but in that case never executed)
+ def next(iterator):
+ return iterator.next()
+
+ # These replacement functions have been taken from the Python 2.6
+ # itertools documentation.
+ def product(*args, **kwargs):
+ pools = map(tuple, args) * kwargs.get('repeat', 1)
+ result = [[]]
+ for pool in pools:
+ result = [x + [y] for x in result for y in pool]
+ for prod in result:
+ yield tuple(prod)
+
+ def zip_longest(*args, **kwds):
+ # zip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-
+ fillvalue = kwds.get('fillvalue')
+ def sentinel(counter = ([fillvalue]*(len(args)-1)).pop):
+ yield counter() # yields the fillvalue, or raises IndexError
+ fillers = repeat(fillvalue)
+ iters = [chain(it, sentinel(), fillers) for it in args]
+ try:
+ for tup in izip(*iters):
+ yield tup
+ except IndexError:
+ pass
+
+
+# ------------------------------------------------------------------------------
+# Missing builtins and codecs in Python < 2.5
+
+if sys.version_info >= (2, 5):
+ # Python >= 2.5
+ base_exception = BaseException
any = any
all = all
-except NameError:
+
+else:
+ # Python 2.4
+ base_exception = Exception
+
def all(gen):
for i in gen:
if not i:
@@ -30,8 +120,6 @@ except NameError:
return True
return False
-
-if sys.version_info < (2, 5):
# Python 2.4 doesn't know the utf-8-sig encoding, so deliver it here
def my_search_function(encoding):
diff --git a/sphinx/util/websupport.py b/sphinx/util/websupport.py
new file mode 100644
index 00000000..510ecbe0
--- /dev/null
+++ b/sphinx/util/websupport.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.util.websupport
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+def is_commentable(node):
+ return node.__class__.__name__ in ('paragraph', 'literal_block')
diff --git a/sphinx/versioning.py b/sphinx/versioning.py
new file mode 100644
index 00000000..5b0b2127
--- /dev/null
+++ b/sphinx/versioning.py
@@ -0,0 +1,128 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.versioning
+ ~~~~~~~~~~~~~~~~~
+
+ Implements the low-level algorithms Sphinx uses for the versioning of
+ doctrees.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+from uuid import uuid4
+from operator import itemgetter
+from collections import defaultdict
+
+from sphinx.util.pycompat import product, zip_longest
+
+
+# anything below that ratio is considered equal/changed
+VERSIONING_RATIO = 65
+
+
+def add_uids(doctree, condition):
+ """Add a unique id to every node in the `doctree` which matches the
+ condition and yield the nodes.
+
+ :param doctree:
+ A :class:`docutils.nodes.document` instance.
+
+ :param condition:
+ A callable which returns either ``True`` or ``False`` for a given node.
+ """
+ for node in doctree.traverse(condition):
+ node.uid = uuid4().hex
+ yield node
+
+
+def merge_doctrees(old, new, condition):
+ """Merge the `old` doctree with the `new` one while looking at nodes
+ matching the `condition`.
+
+ Each node which replaces another one or has been added to the `new` doctree
+ will be yielded.
+
+ :param condition:
+ A callable which returns either ``True`` or ``False`` for a given node.
+ """
+ old_iter = old.traverse(condition)
+ new_iter = new.traverse(condition)
+ old_nodes = []
+ new_nodes = []
+ ratios = defaultdict(list)
+ seen = set()
+ # compare the nodes of each doctree in order
+ for old_node, new_node in zip_longest(old_iter, new_iter):
+ if old_node is None:
+ new_nodes.append(new_node)
+ continue
+ if new_node is None:
+ old_nodes.append(old_node)
+ continue
+ ratio = get_ratio(old_node.rawsource, new_node.rawsource)
+ if ratio == 0:
+ new_node.uid = old_node.uid
+ seen.add(new_node)
+ else:
+ ratios[old_node, new_node] = ratio
+ old_nodes.append(old_node)
+ new_nodes.append(new_node)
+ # calculate the ratios for each unequal pair of nodes, should we stumble
+ # on a pair which is equal we set the uid and add it to the seen ones
+ for old_node, new_node in product(old_nodes, new_nodes):
+ if new_node in seen or (old_node, new_node) in ratios:
+ continue
+ ratio = get_ratio(old_node.rawsource, new_node.rawsource)
+ if ratio == 0:
+ new_node.uid = old_node.uid
+ seen.add(new_node)
+ else:
+ ratios[old_node, new_node] = ratio
+ # choose the old node with the best ratio for each new node and set the uid
+ # as long as the ratio is under a certain value, in which case we consider
+ # them not changed but different
+ ratios = sorted(ratios.iteritems(), key=itemgetter(1))
+ for (old_node, new_node), ratio in ratios:
+ if new_node in seen:
+ continue
+ else:
+ seen.add(new_node)
+ if ratio < VERSIONING_RATIO:
+ new_node.uid = old_node.uid
+ else:
+ new_node.uid = uuid4().hex
+ yield new_node
+ # create new uuids for any new node we left out earlier, this happens
+ # if one or more nodes are simply added.
+ for new_node in set(new_nodes) - seen:
+ new_node.uid = uuid4().hex
+ yield new_node
+
+
+def get_ratio(old, new):
+ """Return a "similiarity ratio" (in percent) representing the similarity
+ between the two strings where 0 is equal and anything above less than equal.
+ """
+ if not all([old, new]):
+ return VERSIONING_RATIO
+ return levenshtein_distance(old, new) / (len(old) / 100.0)
+
+
+def levenshtein_distance(a, b):
+ """Return the Levenshtein edit distance between two strings *a* and *b*."""
+ if a == b:
+ return 0
+ if len(a) < len(b):
+ a, b = b, a
+ if not a:
+ return len(b)
+ previous_row = xrange(len(b) + 1)
+ for i, column1 in enumerate(a):
+ current_row = [i + 1]
+ for j, column2 in enumerate(b):
+ insertions = previous_row[j + 1] + 1
+ deletions = current_row[j] + 1
+ substitutions = previous_row[j] + (column1 != column2)
+ current_row.append(min(insertions, deletions, substitutions))
+ previous_row = current_row
+ return previous_row[-1]
diff --git a/sphinx/websupport/__init__.py b/sphinx/websupport/__init__.py
new file mode 100644
index 00000000..30303132
--- /dev/null
+++ b/sphinx/websupport/__init__.py
@@ -0,0 +1,414 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport
+ ~~~~~~~~~~~~~~~~~
+
+ Base Module for web support functions.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import sys
+import cPickle as pickle
+import posixpath
+from os import path
+
+from jinja2 import Environment, FileSystemLoader
+
+from sphinx.application import Sphinx
+from sphinx.util.osutil import ensuredir
+from sphinx.util.jsonimpl import dumps as dump_json
+from sphinx.websupport.search import BaseSearch, SEARCH_ADAPTERS
+from sphinx.websupport.storage import StorageBackend
+from sphinx.websupport.errors import *
+
+
+class WebSupportApp(Sphinx):
+ def __init__(self, *args, **kwargs):
+ self.staticdir = kwargs.pop('staticdir', None)
+ self.builddir = kwargs.pop('builddir', None)
+ self.search = kwargs.pop('search', None)
+ self.storage = kwargs.pop('storage', None)
+ Sphinx.__init__(self, *args, **kwargs)
+
+
+class WebSupport(object):
+ """The main API class for the web support package. All interactions
+ with the web support package should occur through this class.
+ """
+ def __init__(self, srcdir='', builddir='', datadir='', search=None,
+ storage=None, status=sys.stdout, warning=sys.stderr,
+ moderation_callback=None, staticdir='static',
+ docroot=''):
+ self.srcdir = srcdir
+ self.builddir = builddir
+ self.outdir = path.join(builddir, 'data')
+ self.datadir = datadir or self.outdir
+ self.staticdir = staticdir.strip('/')
+ self.docroot = docroot.strip('/')
+ self.status = status
+ self.warning = warning
+ self.moderation_callback = moderation_callback
+
+ self._init_templating()
+ self._init_search(search)
+ self._init_storage(storage)
+
+ self._make_base_comment_options()
+
+ def _init_storage(self, storage):
+ if isinstance(storage, StorageBackend):
+ self.storage = storage
+ else:
+ # If a StorageBackend isn't provided, use the default
+ # SQLAlchemy backend.
+ from sphinx.websupport.storage.sqlalchemystorage \
+ import SQLAlchemyStorage
+ if not storage:
+ # no explicit DB path given; create default sqlite database
+ db_path = path.join(self.datadir, 'db', 'websupport.db')
+ ensuredir(path.dirname(db_path))
+ storage = 'sqlite:///' + db_path
+ self.storage = SQLAlchemyStorage(storage)
+
+ def _init_templating(self):
+ import sphinx
+ template_path = path.join(path.dirname(sphinx.__file__),
+ 'themes', 'basic')
+ loader = FileSystemLoader(template_path)
+ self.template_env = Environment(loader=loader)
+
+ def _init_search(self, search):
+ if isinstance(search, BaseSearch):
+ self.search = search
+ else:
+ mod, cls = SEARCH_ADAPTERS[search or 'null']
+ mod = 'sphinx.websupport.search.' + mod
+ SearchClass = getattr(__import__(mod, None, None, [cls]), cls)
+ search_path = path.join(self.datadir, 'search')
+ self.search = SearchClass(search_path)
+ self.results_template = \
+ self.template_env.get_template('searchresults.html')
+
+ def build(self):
+ """Build the documentation. Places the data into the `outdir`
+ directory. Use it like this::
+
+ support = WebSupport(srcdir, builddir, search='xapian')
+ support.build()
+
+ This will read reStructuredText files from `srcdir`. Then it will
+ build the pickles and search index, placing them into `builddir`.
+ It will also save node data to the database.
+ """
+ if not self.srcdir:
+ raise SrcdirNotSpecifiedError( \
+ 'No srcdir associated with WebSupport object')
+ doctreedir = path.join(self.outdir, 'doctrees')
+ app = WebSupportApp(self.srcdir, self.srcdir,
+ self.outdir, doctreedir, 'websupport',
+ search=self.search, status=self.status,
+ warning=self.warning, storage=self.storage,
+ staticdir=self.staticdir, builddir=self.builddir)
+
+ self.storage.pre_build()
+ app.build()
+ self.storage.post_build()
+
+ def get_document(self, docname, username='', moderator=False):
+ """Load and return a document from a pickle. The document will
+ be a dict object which can be used to render a template::
+
+ support = WebSupport(datadir=datadir)
+ support.get_document('index', username, moderator)
+
+ In most cases `docname` will be taken from the request path and
+ passed directly to this function. In Flask, that would be something
+ like this::
+
+ @app.route('/<path:docname>')
+ def index(docname):
+ username = g.user.name if g.user else ''
+ moderator = g.user.moderator if g.user else False
+ try:
+ document = support.get_document(docname, username,
+ moderator)
+ except DocumentNotFoundError:
+ abort(404)
+ render_template('doc.html', document=document)
+
+ The document dict that is returned contains the following items
+ to be used during template rendering.
+
+ * **body**: The main body of the document as HTML
+ * **sidebar**: The sidebar of the document as HTML
+ * **relbar**: A div containing links to related documents
+ * **title**: The title of the document
+ * **css**: Links to css files used by Sphinx
+ * **js**: Javascript containing comment options
+
+ This raises :class:`~sphinx.websupport.errors.DocumentNotFoundError`
+ if a document matching `docname` is not found.
+
+ :param docname: the name of the document to load.
+ """
+ infilename = path.join(self.datadir, 'pickles', docname + '.fpickle')
+
+ try:
+ f = open(infilename, 'rb')
+ except IOError:
+ raise DocumentNotFoundError(
+ 'The document "%s" could not be found' % docname)
+
+ document = pickle.load(f)
+ comment_opts = self._make_comment_options(username, moderator)
+ comment_metadata = self.storage.get_metadata(docname, moderator)
+
+ document['js'] = '\n'.join([comment_opts,
+ self._make_metadata(comment_metadata),
+ document['js']])
+ return document
+
+ def get_search_results(self, q):
+ """Perform a search for the query `q`, and create a set
+ of search results. Then render the search results as html and
+ return a context dict like the one created by
+ :meth:`get_document`::
+
+ document = support.get_search_results(q)
+
+ :param q: the search query
+ """
+ results = self.search.query(q)
+ ctx = {'search_performed': True,
+ 'search_results': results,
+ 'q': q}
+ document = self.get_document('search')
+ document['body'] = self.results_template.render(ctx)
+ document['title'] = 'Search Results'
+ return document
+
+ def get_data(self, node_id, username=None, moderator=False):
+ """Get the comments and source associated with `node_id`. If
+ `username` is given vote information will be included with the
+ returned comments. The default CommentBackend returns a dict with
+ two keys, *source*, and *comments*. *source* is raw source of the
+ node and is used as the starting point for proposals a user can
+ add. *comments* is a list of dicts that represent a comment, each
+ having the following items:
+
+ ============= ======================================================
+ Key Contents
+ ============= ======================================================
+ text The comment text.
+ username The username that was stored with the comment.
+ id The comment's unique identifier.
+ rating The comment's current rating.
+ age The time in seconds since the comment was added.
+ time A dict containing time information. It contains the
+ following keys: year, month, day, hour, minute, second,
+ iso, and delta. `iso` is the time formatted in ISO
+ 8601 format. `delta` is a printable form of how old
+ the comment is (e.g. "3 hours ago").
+ vote If `user_id` was given, this will be an integer
+ representing the vote. 1 for an upvote, -1 for a
+ downvote, or 0 if unvoted.
+ node The id of the node that the comment is attached to.
+ If the comment's parent is another comment rather than
+ a node, this will be null.
+ parent The id of the comment that this comment is attached
+ to if it is not attached to a node.
+ children A list of all children, in this format.
+ proposal_diff An HTML representation of the differences between
+ the current source and the user's proposed source.
+ ============= ======================================================
+
+ :param node_id: the id of the node to get comments for.
+ :param username: the username of the user viewing the comments.
+ :param moderator: whether the user is a moderator.
+ """
+ return self.storage.get_data(node_id, username, moderator)
+
+ def delete_comment(self, comment_id, username='', moderator=False):
+ """Delete a comment. Doesn't actually delete the comment, but
+ instead replaces the username and text fields with "[deleted]" so
+ as not to leave any comments orphaned.
+
+ If `moderator` is True, the comment will always be deleted. If
+ `moderator` is False, the comment will only be deleted if the
+ `username` matches the `username` on the comment.
+
+ This raises :class:`~sphinx.websupport.errors.UserNotAuthorizedError`
+ if moderator is False and `username` doesn't match username on the
+ comment.
+
+ :param comment_id: the id of the comment to delete.
+ :param username: the username requesting the deletion.
+ :param moderator: whether the requestor is a moderator.
+ """
+ self.storage.delete_comment(comment_id, username, moderator)
+
+ def add_comment(self, text, node_id='', parent_id='', displayed=True,
+ username=None, time=None, proposal=None,
+ moderator=False):
+ """Add a comment to a node or another comment. Returns the comment
+ in the same format as :meth:`get_comments`. If the comment is being
+ attached to a node, pass in the node's id (as a string) with the
+ node keyword argument::
+
+ comment = support.add_comment(text, node_id=node_id)
+
+ If the comment is the child of another comment, provide the parent's
+ id (as a string) with the parent keyword argument::
+
+ comment = support.add_comment(text, parent_id=parent_id)
+
+ If you would like to store a username with the comment, pass
+ in the optional `username` keyword argument::
+
+ comment = support.add_comment(text, node=node_id,
+ username=username)
+
+ :param parent_id: the prefixed id of the comment's parent.
+ :param text: the text of the comment.
+ :param displayed: for moderation purposes
+ :param username: the username of the user making the comment.
+ :param time: the time the comment was created, defaults to now.
+ """
+ comment = self.storage.add_comment(text, displayed, username,
+ time, proposal, node_id,
+ parent_id, moderator)
+ if not displayed and self.moderation_callback:
+ self.moderation_callback(comment)
+ return comment
+
+ def process_vote(self, comment_id, username, value):
+ """Process a user's vote. The web support package relies
+ on the API user to perform authentication. The API user will
+ typically receive a comment_id and value from a form, and then
+ make sure the user is authenticated. A unique username must be
+ passed in, which will also be used to retrieve the user's past
+ voting data. An example, once again in Flask::
+
+ @app.route('/docs/process_vote', methods=['POST'])
+ def process_vote():
+ if g.user is None:
+ abort(401)
+ comment_id = request.form.get('comment_id')
+ value = request.form.get('value')
+ if value is None or comment_id is None:
+ abort(400)
+ support.process_vote(comment_id, g.user.name, value)
+ return "success"
+
+ :param comment_id: the comment being voted on
+ :param username: the unique username of the user voting
+ :param value: 1 for an upvote, -1 for a downvote, 0 for an unvote.
+ """
+ value = int(value)
+ if not -1 <= value <= 1:
+ raise ValueError('vote value %s out of range (-1, 1)' % value)
+ self.storage.process_vote(comment_id, username, value)
+
+ def update_username(self, old_username, new_username):
+ """To remain decoupled from a webapp's authentication system, the
+ web support package stores a user's username with each of their
+ comments and votes. If the authentication system allows a user to
+ change their username, this can lead to stale data in the web
+ support system. To avoid this, each time a username is changed, this
+ method should be called.
+
+ :param old_username: The original username.
+ :param new_username: The new username.
+ """
+ self.storage.update_username(old_username, new_username)
+
+ def accept_comment(self, comment_id, moderator=False):
+ """Accept a comment that is pending moderation.
+
+ This raises :class:`~sphinx.websupport.errors.UserNotAuthorizedError`
+ if moderator is False.
+
+ :param comment_id: The id of the comment that was accepted.
+ :param moderator: Whether the user making the request is a moderator.
+ """
+ if not moderator:
+ raise UserNotAuthorizedError()
+ self.storage.accept_comment(comment_id)
+
+ def reject_comment(self, comment_id, moderator=False):
+ """Reject a comment that is pending moderation.
+
+ This raises :class:`~sphinx.websupport.errors.UserNotAuthorizedError`
+ if moderator is False.
+
+ :param comment_id: The id of the comment that was accepted.
+ :param moderator: Whether the user making the request is a moderator.
+ """
+ if not moderator:
+ raise UserNotAuthorizedError()
+ self.storage.reject_comment(comment_id)
+
+ def _make_base_comment_options(self):
+ """Helper method to create the part of the COMMENT_OPTIONS javascript
+ that remains the same throughout the lifetime of the
+ :class:`~sphinx.websupport.WebSupport` object.
+ """
+ self.base_comment_opts = {}
+
+ if self.docroot is not '':
+ comment_urls = [
+ ('addCommentURL', 'add_comment'),
+ ('getCommentsURL', 'get_comments'),
+ ('processVoteURL', 'process_vote'),
+ ('acceptCommentURL', 'accept_comment'),
+ ('rejectCommentURL', 'reject_comment'),
+ ('deleteCommentURL', 'delete_comment')
+ ]
+ for key, value in comment_urls:
+ self.base_comment_opts[key] = \
+ '/' + posixpath.join(self.docroot, value)
+ if self.staticdir != 'static':
+ static_urls = [
+ ('commentImage', 'comment.png'),
+ ('closeCommentImage', 'comment-close.png'),
+ ('loadingImage', 'ajax-loader.gif'),
+ ('commentBrightImage', 'comment-bright.png'),
+ ('upArrow', 'up.png'),
+ ('upArrowPressed', 'up-pressed.png'),
+ ('downArrow', 'down.png'),
+ ('downArrowPressed', 'down-pressed.png')
+ ]
+ for key, value in static_urls:
+ self.base_comment_opts[key] = \
+ '/' + posixpath.join(self.staticdir, '_static', value)
+
+ def _make_comment_options(self, username, moderator):
+ """Helper method to create the parts of the COMMENT_OPTIONS
+ javascript that are unique to each request.
+
+ :param username: The username of the user making the request.
+ :param moderator: Whether the user making the request is a moderator.
+ """
+ # XXX parts is not used?
+ #parts = [self.base_comment_opts]
+ rv = self.base_comment_opts.copy()
+ if username:
+ rv.update({
+ 'voting': True,
+ 'username': username,
+ 'moderator': moderator,
+ })
+ return '\n'.join([
+ '<script type="text/javascript">',
+ 'var COMMENT_OPTIONS = %s;' % dump_json(rv),
+ '</script>'
+ ])
+
+ def _make_metadata(self, data):
+ return '\n'.join([
+ '<script type="text/javascript">',
+ 'var COMMENT_METADATA = %s;' % dump_json(data),
+ '</script>'
+ ])
diff --git a/sphinx/websupport/errors.py b/sphinx/websupport/errors.py
new file mode 100644
index 00000000..53106dfb
--- /dev/null
+++ b/sphinx/websupport/errors.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.errors
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Contains Error classes for the web support package.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+__all__ = ['DocumentNotFoundError', 'SrcdirNotSpecifiedError',
+ 'UserNotAuthorizedError', 'CommentNotAllowedError',
+ 'NullSearchException']
+
+class DocumentNotFoundError(Exception):
+ pass
+
+
+class SrcdirNotSpecifiedError(Exception):
+ pass
+
+
+class UserNotAuthorizedError(Exception):
+ pass
+
+
+class CommentNotAllowedError(Exception):
+ pass
+
+
+class NullSearchException(Exception):
+ pass
diff --git a/sphinx/websupport/search/__init__.py b/sphinx/websupport/search/__init__.py
new file mode 100644
index 00000000..0cba0f77
--- /dev/null
+++ b/sphinx/websupport/search/__init__.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.search
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Server side search support for the web support package.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+
+class BaseSearch(object):
+ def __init__(self, path):
+ pass
+
+ def init_indexing(self, changed=[]):
+ """Called by the builder to initialize the search indexer. `changed`
+ is a list of pagenames that will be reindexed. You may want to remove
+ these from the search index before indexing begins.
+
+ :param changed: a list of pagenames that will be re-indexed
+ """
+ pass
+
+ def finish_indexing(self):
+ """Called by the builder when writing has been completed. Use this
+ to perform any finalization or cleanup actions after indexing is
+ complete.
+ """
+ pass
+
+ def feed(self, pagename, title, doctree):
+ """Called by the builder to add a doctree to the index. Converts the
+ `doctree` to text and passes it to :meth:`add_document`. You probably
+ won't want to override this unless you need access to the `doctree`.
+ Override :meth:`add_document` instead.
+
+ :param pagename: the name of the page to be indexed
+ :param title: the title of the page to be indexed
+ :param doctree: is the docutils doctree representation of the page
+ """
+ self.add_document(pagename, title, doctree.astext())
+
+ def add_document(self, pagename, title, text):
+ """Called by :meth:`feed` to add a document to the search index.
+ This method should do everything necessary to add a single
+ document to the search index.
+
+ `pagename` is name of the page being indexed. It is the combination
+ of the source files relative path and filename,
+ minus the extension. For example, if the source file is
+ "ext/builders.rst", the `pagename` would be "ext/builders". This
+ will need to be returned with search results when processing a
+ query.
+
+ :param pagename: the name of the page being indexed
+ :param title: the page's title
+ :param text: the full text of the page
+ """
+ raise NotImplementedError()
+
+ def query(self, q):
+ """Called by the web support api to get search results. This method
+ compiles the regular expression to be used when :meth:`extracting
+ context <extract_context>`, then calls :meth:`handle_query`. You
+ won't want to override this unless you don't want to use the included
+ :meth:`extract_context` method. Override :meth:`handle_query` instead.
+
+ :param q: the search query string.
+ """
+ self.context_re = re.compile('|'.join(q.split()), re.I)
+ return self.handle_query(q)
+
+ def handle_query(self, q):
+ """Called by :meth:`query` to retrieve search results for a search
+ query `q`. This should return an iterable containing tuples of the
+ following format::
+
+ (<path>, <title>, <context>)
+
+ `path` and `title` are the same values that were passed to
+ :meth:`add_document`, and `context` should be a short text snippet
+ of the text surrounding the search query in the document.
+
+ The :meth:`extract_context` method is provided as a simple way
+ to create the `context`.
+
+ :param q: the search query
+ """
+ raise NotImplementedError()
+
+ def extract_context(self, text, length=240):
+ """Extract the context for the search query from the document's
+ full `text`.
+
+ :param text: the full text of the document to create the context for
+ :param length: the length of the context snippet to return.
+ """
+ res = self.context_re.search(text)
+ if res is None:
+ return ''
+ context_start = max(res.start() - length/2, 0)
+ context_end = context_start + length
+ context = ''.join(['...' if context_start > 0 else '',
+ text[context_start:context_end],
+ '...' if context_end < len(text) else ''])
+
+ try:
+ return unicode(context, errors='ignore')
+ except TypeError:
+ return context
+
+# The built-in search adapters.
+SEARCH_ADAPTERS = {
+ 'xapian': ('xapiansearch', 'XapianSearch'),
+ 'whoosh': ('whooshsearch', 'WhooshSearch'),
+ 'null': ('nullsearch', 'NullSearch'),
+}
diff --git a/sphinx/websupport/search/nullsearch.py b/sphinx/websupport/search/nullsearch.py
new file mode 100644
index 00000000..fd6d4dcf
--- /dev/null
+++ b/sphinx/websupport/search/nullsearch.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.search.nullsearch
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ The default search adapter, does nothing.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from sphinx.websupport.search import BaseSearch
+from sphinx.websupport.errors import NullSearchException
+
+
+class NullSearch(BaseSearch):
+ """A search adapter that does nothing. Used when no search adapter
+ is specified.
+ """
+ def feed(self, pagename, title, doctree):
+ pass
+
+ def query(self, q):
+ raise NullSearchException('No search adapter specified.')
diff --git a/sphinx/websupport/search/whooshsearch.py b/sphinx/websupport/search/whooshsearch.py
new file mode 100644
index 00000000..e58c7342
--- /dev/null
+++ b/sphinx/websupport/search/whooshsearch.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.search.whooshsearch
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Whoosh search adapter.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from whoosh import index
+from whoosh.fields import Schema, ID, TEXT
+from whoosh.qparser import QueryParser
+from whoosh.analysis import StemmingAnalyzer
+
+from sphinx.util.osutil import ensuredir
+from sphinx.websupport.search import BaseSearch
+
+
+class WhooshSearch(BaseSearch):
+ """The whoosh search adapter for sphinx web support."""
+
+ # Define the Whoosh Schema for the search index.
+ schema = Schema(path=ID(stored=True, unique=True),
+ title=TEXT(field_boost=2.0, stored=True),
+ text=TEXT(analyzer=StemmingAnalyzer(), stored=True))
+
+ def __init__(self, db_path):
+ ensuredir(db_path)
+ if index.exists_in(db_path):
+ self.index = index.open_dir(db_path)
+ else:
+ self.index = index.create_in(db_path, schema=self.schema)
+ self.qparser = QueryParser('text', self.schema)
+
+ def init_indexing(self, changed=[]):
+ for changed_path in changed:
+ self.index.delete_by_term('path', changed_path)
+ self.index_writer = self.index.writer()
+
+ def finish_indexing(self):
+ self.index_writer.commit()
+
+ def add_document(self, pagename, title, text):
+ self.index_writer.add_document(path=unicode(pagename),
+ title=title,
+ text=text)
+
+ def handle_query(self, q):
+ searcher = self.index.searcher()
+ whoosh_results = searcher.search(self.qparser.parse(q))
+ results = []
+ for result in whoosh_results:
+ context = self.extract_context(result['text'])
+ results.append((result['path'],
+ result.get('title', ''),
+ context))
+ return results
diff --git a/sphinx/websupport/search/xapiansearch.py b/sphinx/websupport/search/xapiansearch.py
new file mode 100644
index 00000000..b0475435
--- /dev/null
+++ b/sphinx/websupport/search/xapiansearch.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.search.xapiansearch
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Xapian search adapter.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import xapian
+
+from sphinx.util.osutil import ensuredir
+from sphinx.websupport.search import BaseSearch
+
+
+class XapianSearch(BaseSearch):
+ # Adapted from the GSOC 2009 webapp project.
+
+ # Xapian metadata constants
+ DOC_PATH = 0
+ DOC_TITLE = 1
+
+ def __init__(self, db_path):
+ self.db_path = db_path
+
+ def init_indexing(self, changed=[]):
+ ensuredir(self.db_path)
+ self.database = xapian.WritableDatabase(self.db_path,
+ xapian.DB_CREATE_OR_OPEN)
+ self.indexer = xapian.TermGenerator()
+ stemmer = xapian.Stem("english")
+ self.indexer.set_stemmer(stemmer)
+
+ def finish_indexing(self):
+ # Ensure the db lock is removed.
+ del self.database
+
+ def add_document(self, path, title, text):
+ self.database.begin_transaction()
+ # sphinx_page_path is used to easily retrieve documents by path.
+ sphinx_page_path = '"sphinxpagepath%s"' % path.replace('/', '_')
+ # Delete the old document if it exists.
+ self.database.delete_document(sphinx_page_path)
+
+ doc = xapian.Document()
+ doc.set_data(text)
+ doc.add_value(self.DOC_PATH, path)
+ doc.add_value(self.DOC_TITLE, title)
+ self.indexer.set_document(doc)
+ self.indexer.index_text(text)
+ doc.add_term(sphinx_page_path)
+ for word in text.split():
+ doc.add_posting(word, 1)
+ self.database.add_document(doc)
+ self.database.commit_transaction()
+
+ def handle_query(self, q):
+ database = xapian.Database(self.db_path)
+ enquire = xapian.Enquire(database)
+ qp = xapian.QueryParser()
+ stemmer = xapian.Stem("english")
+ qp.set_stemmer(stemmer)
+ qp.set_database(database)
+ qp.set_stemming_strategy(xapian.QueryParser.STEM_SOME)
+ query = qp.parse_query(q)
+
+ # Find the top 100 results for the query.
+ enquire.set_query(query)
+ matches = enquire.get_mset(0, 100)
+
+ results = []
+
+ for m in matches:
+ context = self.extract_context(m.document.get_data())
+ results.append((m.document.get_value(self.DOC_PATH),
+ m.document.get_value(self.DOC_TITLE),
+ context))
+
+ return results
diff --git a/sphinx/websupport/storage/__init__.py b/sphinx/websupport/storage/__init__.py
new file mode 100644
index 00000000..3d8a9ab5
--- /dev/null
+++ b/sphinx/websupport/storage/__init__.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.storage
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Storage for the websupport package.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+class StorageBackend(object):
+ def pre_build(self):
+ """Called immediately before the build process begins. Use this
+ to prepare the StorageBackend for the addition of nodes.
+ """
+ pass
+
+ def has_node(self, id):
+ """Check to see if a node exists.
+
+ :param id: the id to check for.
+ """
+ raise NotImplementedError()
+
+ def add_node(self, id, document, source):
+ """Add a node to the StorageBackend.
+
+ :param id: a unique id for the comment.
+ :param document: the name of the document the node belongs to.
+ :param source: the source file's name.
+ """
+ raise NotImplementedError()
+
+ def post_build(self):
+ """Called after a build has completed. Use this to finalize the
+ addition of nodes if needed.
+ """
+ pass
+
+ def add_comment(self, text, displayed, username, time,
+ proposal, node_id, parent_id, moderator):
+ """Called when a comment is being added.
+
+ :param text: the text of the comment
+ :param displayed: whether the comment should be displayed
+ :param username: the name of the user adding the comment
+ :param time: a date object with the time the comment was added
+ :param proposal: the text of the proposal the user made
+ :param node_id: the id of the node that the comment is being added to
+ :param parent_id: the id of the comment's parent comment.
+ :param moderator: whether the user adding the comment is a moderator
+ """
+ raise NotImplementedError()
+
+ def delete_comment(self, comment_id, username, moderator):
+ """Delete a comment.
+
+ Raises :class:`~sphinx.websupport.errors.UserNotAuthorizedError`
+ if moderator is False and `username` doesn't match the username
+ on the comment.
+
+ :param comment_id: The id of the comment being deleted.
+ :param username: The username of the user requesting the deletion.
+ :param moderator: Whether the user is a moderator.
+ """
+ raise NotImplementedError()
+
+ def get_metadata(self, docname, moderator):
+ """Get metadata for a document. This is currently just a dict
+ of node_id's with associated comment counts.
+
+ :param docname: the name of the document to get metadata for.
+ :param moderator: whether the requester is a moderator.
+ """
+ raise NotImplementedError()
+
+ def get_data(self, node_id, username, moderator):
+ """Called to retrieve all data for a node. This should return a
+ dict with two keys, *source* and *comments* as described by
+ :class:`~sphinx.websupport.WebSupport`'s
+ :meth:`~sphinx.websupport.WebSupport.get_data` method.
+
+ :param node_id: The id of the node to get data for.
+ :param username: The name of the user requesting the data.
+ :param moderator: Whether the requester is a moderator.
+ """
+ raise NotImplementedError()
+
+ def process_vote(self, comment_id, username, value):
+ """Process a vote that is being cast. `value` will be either -1, 0,
+ or 1.
+
+ :param comment_id: The id of the comment being voted on.
+ :param username: The username of the user casting the vote.
+ :param value: The value of the vote being cast.
+ """
+ raise NotImplementedError()
+
+ def update_username(self, old_username, new_username):
+ """If a user is allowed to change their username this method should
+ be called so that there is no stagnant data in the storage system.
+
+ :param old_username: The username being changed.
+ :param new_username: What the username is being changed to.
+ """
+ raise NotImplementedError()
+
+ def accept_comment(self, comment_id):
+ """Called when a moderator accepts a comment. After the method is
+ called the comment should be displayed to all users.
+
+ :param comment_id: The id of the comment being accepted.
+ """
+ raise NotImplementedError()
+
+ def reject_comment(self, comment_id):
+ """Called when a moderator rejects a comment. The comment should
+ then be deleted.
+
+ :param comment_id: The id of the comment being rejected.
+ """
+ raise NotImplementedError()
diff --git a/sphinx/websupport/storage/differ.py b/sphinx/websupport/storage/differ.py
new file mode 100644
index 00000000..d5225071
--- /dev/null
+++ b/sphinx/websupport/storage/differ.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.storage.differ
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ A differ for creating an HTML representations of proposal diffs
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+from cgi import escape
+from difflib import Differ
+
+
+class CombinedHtmlDiff(object):
+ """Create an HTML representation of the differences between two pieces
+ of text.
+ """
+ highlight_regex = re.compile(r'([\+\-\^]+)')
+
+ def make_html(self, source, proposal):
+ """Return the HTML representation of the differences between
+ `source` and `proposal`.
+
+ :param source: the original text
+ :param proposal: the proposed text
+ """
+ proposal = escape(proposal)
+
+ differ = Differ()
+ diff = list(differ.compare(source.splitlines(1),
+ proposal.splitlines(1)))
+ html = []
+ line = diff.pop(0)
+ next = diff.pop(0)
+ while True:
+ html.append(self._handle_line(line, next))
+ line = next
+ try:
+ next = diff.pop(0)
+ except IndexError:
+ html.append(self._handle_line(line))
+ break
+ return ''.join(html).rstrip()
+
+ def _handle_line(self, line, next=None):
+ """Handle an individual line in a diff."""
+ prefix = line[0]
+ text = line[2:]
+
+ if prefix == ' ':
+ return text
+ elif prefix == '?':
+ return ''
+
+ if next is not None and next[0] == '?':
+ tag = 'ins' if prefix == '+' else 'del'
+ text = self._highlight_text(text, next, tag)
+ css_class = 'prop_added' if prefix == '+' else 'prop_removed'
+
+ return '<span class="%s">%s</span>\n' % (css_class, text.rstrip())
+
+ def _highlight_text(self, text, next, tag):
+ """Highlight the specific changes made to a line by adding
+ <ins> and <del> tags.
+ """
+ next = next[2:]
+ new_text = []
+ start = 0
+ for match in self.highlight_regex.finditer(next):
+ new_text.append(text[start:match.start()])
+ new_text.append('<%s>' % tag)
+ new_text.append(text[match.start():match.end()])
+ new_text.append('</%s>' % tag)
+ start = match.end()
+ new_text.append(text[start:])
+ return ''.join(new_text)
diff --git a/sphinx/websupport/storage/sqlalchemy_db.py b/sphinx/websupport/storage/sqlalchemy_db.py
new file mode 100644
index 00000000..4e2757a9
--- /dev/null
+++ b/sphinx/websupport/storage/sqlalchemy_db.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.storage.sqlalchemy_db
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ SQLAlchemy table and mapper definitions used by the
+ :class:`sphinx.websupport.storage.sqlalchemystorage.SQLAlchemyStorage`.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from datetime import datetime
+
+from sqlalchemy import Column, Integer, Text, String, Boolean, \
+ ForeignKey, DateTime
+from sqlalchemy.orm import relation, sessionmaker, aliased
+from sqlalchemy.ext.declarative import declarative_base
+
+Base = declarative_base()
+Session = sessionmaker()
+
+db_prefix = 'sphinx_'
+
+
+class Node(Base):
+ """Data about a Node in a doctree."""
+ __tablename__ = db_prefix + 'nodes'
+
+ id = Column(String(32), primary_key=True)
+ document = Column(String(256), nullable=False)
+ source = Column(Text, nullable=False)
+
+ def nested_comments(self, username, moderator):
+ """Create a tree of comments. First get all comments that are
+ descendants of this node, then convert them to a tree form.
+
+ :param username: the name of the user to get comments for.
+ :param moderator: whether the user is moderator.
+ """
+ session = Session()
+
+ if username:
+ # If a username is provided, create a subquery to retrieve all
+ # votes by this user. We will outerjoin with the comment query
+ # with this subquery so we have a user's voting information.
+ sq = session.query(CommentVote).\
+ filter(CommentVote.username == username).subquery()
+ cvalias = aliased(CommentVote, sq)
+ q = session.query(Comment, cvalias.value).outerjoin(cvalias)
+ else:
+ # If a username is not provided, we don't need to join with
+ # CommentVote.
+ q = session.query(Comment)
+
+ # Filter out all comments not descending from this node.
+ q = q.filter(Comment.path.like(str(self.id) + '.%'))
+
+ if not moderator:
+ q = q.filter(Comment.displayed == True)
+
+ # Retrieve all results. Results must be ordered by Comment.path
+ # so that we can easily transform them from a flat list to a tree.
+ results = q.order_by(Comment.path).all()
+ session.close()
+
+ return self._nest_comments(results, username)
+
+ def _nest_comments(self, results, username):
+ """Given the flat list of results, convert the list into a
+ tree.
+
+ :param results: the flat list of comments
+ :param username: the name of the user requesting the comments.
+ """
+ comments = []
+ list_stack = [comments]
+ for r in results:
+ comment, vote = r if username else (r, 0)
+
+ inheritance_chain = comment.path.split('.')[1:]
+
+ if len(inheritance_chain) == len(list_stack) + 1:
+ parent = list_stack[-1][-1]
+ list_stack.append(parent['children'])
+ elif len(inheritance_chain) < len(list_stack):
+ while len(inheritance_chain) < len(list_stack):
+ list_stack.pop()
+
+ list_stack[-1].append(comment.serializable(vote=vote))
+
+ return comments
+
+ def __init__(self, id, document, source):
+ self.id = id
+ self.document = document
+ self.source = source
+
+
+class Comment(Base):
+ """An individual Comment being stored."""
+ __tablename__ = db_prefix + 'comments'
+
+ id = Column(Integer, primary_key=True)
+ rating = Column(Integer, nullable=False)
+ time = Column(DateTime, nullable=False)
+ text = Column(Text, nullable=False)
+ displayed = Column(Boolean, index=True, default=False)
+ username = Column(String(64))
+ proposal = Column(Text)
+ proposal_diff = Column(Text)
+ path = Column(String(256), index=True)
+
+ node_id = Column(String, ForeignKey(db_prefix + 'nodes.id'))
+ node = relation(Node, backref="comments")
+
+ def __init__(self, text, displayed, username, rating, time,
+ proposal, proposal_diff):
+ self.text = text
+ self.displayed = displayed
+ self.username = username
+ self.rating = rating
+ self.time = time
+ self.proposal = proposal
+ self.proposal_diff = proposal_diff
+
+ def set_path(self, node_id, parent_id):
+ """Set the materialized path for this comment."""
+ # This exists because the path can't be set until the session has
+ # been flushed and this Comment has an id.
+ if node_id:
+ self.node_id = node_id
+ self.path = '%s.%s' % (node_id, self.id)
+ else:
+ session = Session()
+ parent_path = session.query(Comment.path).\
+ filter(Comment.id == parent_id).one().path
+ session.close()
+ self.node_id = parent_path.split('.')[0]
+ self.path = '%s.%s' % (parent_path, self.id)
+
+ def serializable(self, vote=0):
+ """Creates a serializable representation of the comment. This is
+ converted to JSON, and used on the client side.
+ """
+ delta = datetime.now() - self.time
+
+ time = {'year': self.time.year,
+ 'month': self.time.month,
+ 'day': self.time.day,
+ 'hour': self.time.hour,
+ 'minute': self.time.minute,
+ 'second': self.time.second,
+ 'iso': self.time.isoformat(),
+ 'delta': self.pretty_delta(delta)}
+
+ path = self.path.split('.')
+ node = path[0] if len(path) == 2 else None
+ parent = path[-2] if len(path) > 2 else None
+
+ return {'text': self.text,
+ 'username': self.username or 'Anonymous',
+ 'id': self.id,
+ 'node': node,
+ 'parent': parent,
+ 'rating': self.rating,
+ 'displayed': self.displayed,
+ 'age': delta.seconds,
+ 'time': time,
+ 'vote': vote or 0,
+ 'proposal_diff': self.proposal_diff,
+ 'children': []}
+
+ def pretty_delta(self, delta):
+ """Create a pretty representation of the Comment's age.
+ (e.g. 2 minutes).
+ """
+ days = delta.days
+ seconds = delta.seconds
+ hours = seconds / 3600
+ minutes = seconds / 60
+
+ if days == 0:
+ dt = (minutes, 'minute') if hours == 0 else (hours, 'hour')
+ else:
+ dt = (days, 'day')
+
+ return '%s %s ago' % dt if dt[0] == 1 else '%s %ss ago' % dt
+
+
+class CommentVote(Base):
+ """A vote a user has made on a Comment."""
+ __tablename__ = db_prefix + 'commentvote'
+
+ username = Column(String(64), primary_key=True)
+ comment_id = Column(Integer, ForeignKey(db_prefix + 'comments.id'),
+ primary_key=True)
+ comment = relation(Comment, backref="votes")
+ # -1 if downvoted, +1 if upvoted, 0 if voted then unvoted.
+ value = Column(Integer, nullable=False)
+
+ def __init__(self, comment_id, username, value):
+ self.comment_id = comment_id
+ self.username = username
+ self.value = value
diff --git a/sphinx/websupport/storage/sqlalchemystorage.py b/sphinx/websupport/storage/sqlalchemystorage.py
new file mode 100644
index 00000000..6f13c91b
--- /dev/null
+++ b/sphinx/websupport/storage/sqlalchemystorage.py
@@ -0,0 +1,174 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.websupport.storage.sqlalchemystorage
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ An SQLAlchemy storage backend.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from datetime import datetime
+
+import sqlalchemy
+from sqlalchemy.orm import aliased
+from sqlalchemy.sql import func
+
+if sqlalchemy.__version__[:3] < '0.5':
+ raise ImportError('SQLAlchemy version 0.5 or greater is required for this '
+ 'storage backend; you have version %s' % sqlalchemy.__version__)
+
+from sphinx.websupport.errors import CommentNotAllowedError, \
+ UserNotAuthorizedError
+from sphinx.websupport.storage import StorageBackend
+from sphinx.websupport.storage.sqlalchemy_db import Base, Node, \
+ Comment, CommentVote, Session
+from sphinx.websupport.storage.differ import CombinedHtmlDiff
+
+
+class SQLAlchemyStorage(StorageBackend):
+ """
+ A :class:`.StorageBackend` using SQLAlchemy.
+ """
+
+ def __init__(self, uri):
+ self.engine = sqlalchemy.create_engine(uri)
+ Base.metadata.bind = self.engine
+ Base.metadata.create_all()
+ Session.configure(bind=self.engine)
+
+ def pre_build(self):
+ self.build_session = Session()
+
+ def has_node(self, id):
+ session = Session()
+ node = session.query(Node).filter(Node.id == id).first()
+ session.close()
+ return True if node else False
+
+ def add_node(self, id, document, source):
+ node = Node(id, document, source)
+ self.build_session.add(node)
+ self.build_session.flush()
+
+ def post_build(self):
+ self.build_session.commit()
+ self.build_session.close()
+
+ def add_comment(self, text, displayed, username, time,
+ proposal, node_id, parent_id, moderator):
+ session = Session()
+ proposal_diff = None
+
+ if node_id and proposal:
+ node = session.query(Node).filter(Node.id == node_id).one()
+ differ = CombinedHtmlDiff()
+ proposal_diff = differ.make_html(node.source, proposal)
+ elif parent_id:
+ parent = session.query(Comment.displayed).\
+ filter(Comment.id == parent_id).one()
+ if not parent.displayed:
+ raise CommentNotAllowedError(
+ "Can't add child to a parent that is not displayed")
+
+ comment = Comment(text, displayed, username, 0,
+ time or datetime.now(), proposal, proposal_diff)
+ session.add(comment)
+ session.flush()
+ # We have to flush the session before setting the path so the
+ # Comment has an id.
+ comment.set_path(node_id, parent_id)
+ session.commit()
+ d = comment.serializable()
+ session.close()
+ return d
+
+ def delete_comment(self, comment_id, username, moderator):
+ session = Session()
+ comment = session.query(Comment).\
+ filter(Comment.id == comment_id).one()
+ if moderator or comment.username == username:
+ comment.username = '[deleted]'
+ comment.text = '[deleted]'
+ comment.proposal = ''
+ session.commit()
+ session.close()
+ else:
+ session.close()
+ raise UserNotAuthorizedError()
+
+ def get_metadata(self, docname, moderator):
+ session = Session()
+ subquery = session.query(
+ Comment.id, Comment.node_id,
+ func.count('*').label('comment_count')).group_by(
+ Comment.node_id).subquery()
+ nodes = session.query(Node.id, subquery.c.comment_count).outerjoin(
+ (subquery, Node.id==subquery.c.node_id)).filter(
+ Node.document==docname)
+ results = dict([(k, v or 0) for k, v in nodes])
+ session.close()
+ return results
+
+ def get_data(self, node_id, username, moderator):
+ session = Session()
+ node = session.query(Node).filter(Node.id == node_id).one()
+ session.close()
+ comments = node.nested_comments(username, moderator)
+ return {'source': node.source,
+ 'comments': comments}
+
+ def process_vote(self, comment_id, username, value):
+ session = Session()
+
+ subquery = session.query(CommentVote).filter(
+ CommentVote.username == username).subquery()
+ vote_alias = aliased(CommentVote, subquery)
+ q = session.query(Comment, vote_alias).outerjoin(vote_alias).filter(
+ Comment.id == comment_id)
+ comment, vote = q.one()
+
+ if vote is None:
+ vote = CommentVote(comment_id, username, value)
+ comment.rating += value
+ else:
+ comment.rating += value - vote.value
+ vote.value = value
+
+ session.add(vote)
+ session.commit()
+ session.close()
+
+ def update_username(self, old_username, new_username):
+ session = Session()
+
+ session.query(Comment).filter(Comment.username == old_username).\
+ update({Comment.username: new_username})
+ session.query(CommentVote).\
+ filter(CommentVote.username == old_username).\
+ update({CommentVote.username: new_username})
+
+ session.commit()
+ session.close()
+
+ def accept_comment(self, comment_id):
+ session = Session()
+
+ # Bulk update; the return value (matched row count) is not needed.
+ session.query(Comment).filter(
+ Comment.id == comment_id).update(
+ {Comment.displayed: True})
+
+ session.commit()
+ session.close()
+
+ def reject_comment(self, comment_id):
+ session = Session()
+
+ comment = session.query(Comment).\
+ filter(Comment.id == comment_id).one()
+ session.delete(comment)
+
+ session.commit()
+ session.close()
diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py
index f206e479..33d90c91 100644
--- a/sphinx/writers/html.py
+++ b/sphinx/writers/html.py
@@ -180,7 +180,7 @@ class HTMLTranslator(BaseTranslator):
atts['title'] = node['reftitle']
self.body.append(self.starttag(node, 'a', '', **atts))
- if node.hasattr('secnumber'):
+ if node.get('secnumber'):
self.body.append(('%s' + self.secnumber_suffix) %
'.'.join(map(str, node['secnumber'])))
@@ -202,14 +202,14 @@ class HTMLTranslator(BaseTranslator):
self.depart_admonition(node)
def add_secnumber(self, node):
- if node.hasattr('secnumber'):
+ if node.get('secnumber'):
self.body.append('.'.join(map(str, node['secnumber'])) +
self.secnumber_suffix)
elif isinstance(node.parent, nodes.section):
anchorname = '#' + node.parent['ids'][0]
if anchorname not in self.builder.secnumbers:
anchorname = '' # try first heading which has no anchor
- if anchorname in self.builder.secnumbers:
+ if self.builder.secnumbers.get(anchorname):
numbers = self.builder.secnumbers[anchorname]
self.body.append('.'.join(map(str, numbers)) +
self.secnumber_suffix)
diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py
index 75676f03..de57e35a 100644
--- a/sphinx/writers/latex.py
+++ b/sphinx/writers/latex.py
@@ -317,7 +317,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
# ... and all others are the appendices
self.body.append(u'\n\\appendix\n')
self.first_document = -1
- if node.has_key('docname'):
+ if 'docname' in node:
self.body.append(self.hypertarget(':doc'))
# "- 1" because the level is increased before the title is visited
self.sectionlevel = self.top_sectionlevel - 1
@@ -701,7 +701,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.table.rowcount += 1
def visit_entry(self, node):
- if node.has_key('morerows') or node.has_key('morecols'):
+ if 'morerows' in node or 'morecols' in node:
raise UnsupportedError('%s:%s: column or row spanning cells are '
'not yet implemented.' %
(self.curfilestack[-1], node.line or ''))
@@ -758,7 +758,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
def visit_term(self, node):
ctx = '}] \\leavevmode'
- if node.has_key('ids') and node['ids']:
+ if node.get('ids'):
ctx += self.hypertarget(node['ids'][0])
self.body.append('\\item[{')
self.context.append(ctx)
@@ -840,20 +840,20 @@ class LaTeXTranslator(nodes.NodeVisitor):
post = []
include_graphics_options = []
is_inline = self.is_inline(node)
- if attrs.has_key('scale'):
+ if 'scale' in attrs:
# Could also be done with ``scale`` option to
# ``\includegraphics``; doing it this way for consistency.
pre.append('\\scalebox{%f}{' % (attrs['scale'] / 100.0,))
post.append('}')
- if attrs.has_key('width'):
+ if 'width' in attrs:
w = self.latex_image_length(attrs['width'])
if w:
include_graphics_options.append('width=%s' % w)
- if attrs.has_key('height'):
+ if 'height' in attrs:
h = self.latex_image_length(attrs['height'])
if h:
include_graphics_options.append('height=%s' % h)
- if attrs.has_key('align'):
+ if 'align' in attrs:
align_prepost = {
# By default latex aligns the top of an image.
(1, 'top'): ('', ''),
@@ -898,13 +898,13 @@ class LaTeXTranslator(nodes.NodeVisitor):
for id in self.next_figure_ids:
ids += self.hypertarget(id, anchor=False)
self.next_figure_ids.clear()
- if node.has_key('width') and node.get('align', '') in ('left', 'right'):
+ if 'width' in node and node.get('align', '') in ('left', 'right'):
self.body.append('\\begin{wrapfigure}{%s}{%s}\n\\centering' %
(node['align'] == 'right' and 'r' or 'l',
node['width']))
self.context.append(ids + '\\end{wrapfigure}\n')
else:
- if (not node.attributes.has_key('align') or
+ if ('align' not in node.attributes or
node.attributes['align'] == 'center'):
# centering does not add vertical space like center.
align = '\n\\centering'
@@ -1181,7 +1181,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.no_contractions -= 1
if self.in_title:
self.body.append(r'\texttt{%s}' % content)
- elif node.has_key('role') and node['role'] == 'samp':
+ elif node.get('role') == 'samp':
self.body.append(r'\samp{%s}' % content)
else:
self.body.append(r'\code{%s}' % content)
@@ -1210,10 +1210,10 @@ class LaTeXTranslator(nodes.NodeVisitor):
code = self.verbatim.rstrip('\n')
lang = self.hlsettingstack[-1][0]
linenos = code.count('\n') >= self.hlsettingstack[-1][1] - 1
- if node.has_key('language'):
+ if 'language' in node:
# code-block directives
lang = node['language']
- if node.has_key('linenos'):
+ if 'linenos' in node:
linenos = node['linenos']
hlcode = self.highlighter.highlight_block(code, lang, linenos)
# workaround for Unicode issue
diff --git a/sphinx/writers/text.py b/sphinx/writers/text.py
index 98528d5b..b28b2379 100644
--- a/sphinx/writers/text.py
+++ b/sphinx/writers/text.py
@@ -390,7 +390,7 @@ class TextTranslator(nodes.NodeVisitor):
self.add_text(''.join(out) + '\n')
def writerow(row):
- lines = map(None, *row)
+ lines = zip(*row)
for line in lines:
out = ['|']
for i, cell in enumerate(line):
diff --git a/sphinx/writers/websupport.py b/sphinx/writers/websupport.py
new file mode 100644
index 00000000..bb80fb7e
--- /dev/null
+++ b/sphinx/writers/websupport.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.writers.websupport
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ sphinx.websupport writer that adds comment-related annotations.
+
+ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from sphinx.writers.html import HTMLTranslator
+from sphinx.util.websupport import is_commentable
+
+
+class WebSupportTranslator(HTMLTranslator):
+ """
+ Our custom HTML translator.
+ """
+
+ def __init__(self, builder, *args, **kwargs):
+ HTMLTranslator.__init__(self, builder, *args, **kwargs)
+ self.comment_class = 'spxcmt'
+
+ def dispatch_visit(self, node):
+ if is_commentable(node):
+ self.handle_visit_commentable(node)
+ HTMLTranslator.dispatch_visit(self, node)
+
+ def handle_visit_commentable(self, node):
+ # We will place the node in the HTML id attribute. If the node
+ # already has an id (for indexing purposes) put an empty
+ # span with the existing id directly before this node's HTML.
+ self.add_db_node(node)
+ if node.attributes['ids']:
+ self.body.append('<span id="%s"></span>'
+ % node.attributes['ids'][0])
+ node.attributes['ids'] = ['s%s' % node.uid]
+ node.attributes['classes'].append(self.comment_class)
+
+ def add_db_node(self, node):
+ storage = self.builder.app.storage
+ if not storage.has_node(node.uid):
+ storage.add_node(id=node.uid,
+ document=self.builder.cur_docname,
+ source=node.rawsource or node.astext())