From 7f5fda7bf8cc9f4a5d8093636fb738d021faeb09 Mon Sep 17 00:00:00 2001 From: cperkins1 Date: Tue, 4 Nov 2008 10:29:50 -0700 Subject: initial code brought over from numpy. Now we need to do some simplification of the numpy code. --- setup.py | 3 +- sphinx/ext/autosummary/__init__.py | 328 +++++++++++++++++++ sphinx/ext/autosummary/docscrape.py | 500 +++++++++++++++++++++++++++++ sphinx/ext/autosummary/docscrape_sphinx.py | 133 ++++++++ sphinx/scripts/__init__.py | 0 sphinx/scripts/autosummary_generate.py | 198 ++++++++++++ sphinx/templates/autosummary-module.html | 39 +++ 7 files changed, 1200 insertions(+), 1 deletion(-) create mode 100644 sphinx/ext/autosummary/__init__.py create mode 100644 sphinx/ext/autosummary/docscrape.py create mode 100644 sphinx/ext/autosummary/docscrape_sphinx.py create mode 100644 sphinx/scripts/__init__.py create mode 100755 sphinx/scripts/autosummary_generate.py create mode 100644 sphinx/templates/autosummary-module.html diff --git a/setup.py b/setup.py index abe82198..aa4d2c21 100644 --- a/setup.py +++ b/setup.py @@ -177,7 +177,8 @@ setup( entry_points={ 'console_scripts': [ 'sphinx-build = sphinx:main', - 'sphinx-quickstart = sphinx.quickstart:main' + 'sphinx-quickstart = sphinx.quickstart:main', + 'sphinx-autogen = sphinx.scripts.autosummary_generate:main', ], 'distutils.commands': [ 'build_sphinx = sphinx.setup_command:BuildDoc', diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py new file mode 100644 index 00000000..6b74190c --- /dev/null +++ b/sphinx/ext/autosummary/__init__.py @@ -0,0 +1,328 @@ +""" +=========== +autosummary +=========== + +Sphinx extension that adds an autosummary:: directive, which can be +used to generate function/method/attribute/etc. summary lists, similar +to those output eg. by Epydoc and other API doc generation tools. + +An :autolink: role is also provided. + +autosummary directive +--------------------- + +The autosummary directive has the form:: + + .. 
autosummary:: + :nosignatures: + :toctree: generated/ + + module.function_1 + module.function_2 + ... + +and it generates an output table (containing signatures, optionally) + + ======================== ============================================= + module.function_1(args) Summary line from the docstring of function_1 + module.function_2(args) Summary line from the docstring + ... + ======================== ============================================= + +If the :toctree: option is specified, files matching the function names +are inserted to the toctree with the given prefix: + + generated/module.function_1 + generated/module.function_2 + ... + +Note: The file names contain the module:: or currentmodule:: prefixes. + +.. seealso:: autosummary_generate.py + + +autolink role +------------- + +The autolink role functions as ``:obj:`` when the name referred can be +resolved to a Python object, and otherwise it becomes simple emphasis. +This can be used as the default role to make links 'smart'. 
+ +""" +import sys, os, posixpath, re + +from docutils.parsers.rst import directives +from docutils.statemachine import ViewList +from docutils import nodes + +import sphinx.addnodes, sphinx.roles, sphinx.builder +from sphinx.util import patfilter + +from docscrape_sphinx import get_doc_object +import inspect + +def setup(app): + app.add_directive('autosummary', autosummary_directive, True, (0, 0, False), + toctree=directives.unchanged, + nosignatures=directives.flag) + app.add_role('autolink', autolink_role) + + app.add_node(autosummary_toc, + html=(autosummary_toc_visit_html, autosummary_toc_depart_noop), + latex=(autosummary_toc_visit_latex, autosummary_toc_depart_noop)) + app.connect('doctree-read', process_autosummary_toc) + +#------------------------------------------------------------------------------ +# autosummary_toc node +#------------------------------------------------------------------------------ + +class autosummary_toc(nodes.comment): + pass + +def process_autosummary_toc(app, doctree): + """ + Insert items described in autosummary:: to the TOC tree, but do + not generate the toctree:: list. + + """ + env = app.builder.env + crawled = {} + def crawl_toc(node, depth=1): + crawled[node] = True + for j, subnode in enumerate(node): + try: + if (isinstance(subnode, autosummary_toc) + and isinstance(subnode[0], sphinx.addnodes.toctree)): + env.note_toctree(env.docname, subnode[0]) + continue + except IndexError: + continue + if not isinstance(subnode, nodes.section): + continue + if subnode not in crawled: + crawl_toc(subnode, depth+1) + crawl_toc(doctree) + +def autosummary_toc_visit_html(self, node): + """Hide autosummary toctree list in HTML output""" + raise nodes.SkipNode + +def autosummary_toc_visit_latex(self, node): + """Show autosummary toctree (= put the referenced pages here) in Latex""" + pass + +def autosummary_toc_depart_noop(self, node): + pass + +#------------------------------------------------------------------------------ +# .. 
autosummary:: +#------------------------------------------------------------------------------ + +def autosummary_directive(dirname, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + """ + Pretty table containing short signatures and summaries of functions etc. + + autosummary also generates a (hidden) toctree:: node. + + """ + + names = [] + names += [x.strip() for x in content if x.strip()] + + table, warnings, real_names = get_autosummary(names, state, + 'nosignatures' in options) + node = table + + env = state.document.settings.env + suffix = env.config.source_suffix + all_docnames = env.found_docs.copy() + dirname = posixpath.dirname(env.docname) + + if 'toctree' in options: + tree_prefix = options['toctree'].strip() + docnames = [] + for name in names: + name = real_names.get(name, name) + + docname = tree_prefix + name + if docname.endswith(suffix): + docname = docname[:-len(suffix)] + docname = posixpath.normpath(posixpath.join(dirname, docname)) + if docname not in env.found_docs: + warnings.append(state.document.reporter.warning( + 'toctree references unknown document %r' % docname, + line=lineno)) + docnames.append(docname) + + tocnode = sphinx.addnodes.toctree() + tocnode['includefiles'] = docnames + tocnode['maxdepth'] = -1 + tocnode['glob'] = None + + tocnode = autosummary_toc('', '', tocnode) + return warnings + [node] + [tocnode] + else: + return warnings + [node] + +def get_autosummary(names, state, no_signatures=False): + """ + Generate a proper table node for autosummary:: directive. + + Parameters + ---------- + names : list of str + Names of Python objects to be imported and added to the table. 
+ document : document + Docutils document object + + """ + document = state.document + + real_names = {} + warnings = [] + + prefixes = [''] + prefixes.insert(0, document.settings.env.currmodule) + + table = nodes.table('') + group = nodes.tgroup('', cols=2) + table.append(group) + group.append(nodes.colspec('', colwidth=30)) + group.append(nodes.colspec('', colwidth=70)) + body = nodes.tbody('') + group.append(body) + + def append_row(*column_texts): + row = nodes.row('') + for text in column_texts: + node = nodes.paragraph('') + vl = ViewList() + vl.append(text, '') + state.nested_parse(vl, 0, node) + row.append(nodes.entry('', node)) + body.append(row) + + for name in names: + try: + obj, real_name = import_by_name(name, prefixes=prefixes) + except ImportError: + warnings.append(document.reporter.warning( + 'failed to import %s' % name)) + append_row(":obj:`%s`" % name, "") + continue + + real_names[name] = real_name + + doc = get_doc_object(obj) + + if doc['Summary']: + title = " ".join(doc['Summary']) + else: + title = "" + qualifier = 'obj' + if inspect.ismodule(obj): + qualifier = 'mod' + col1 = ":"+qualifier+":`%s <%s>`" % (name, real_name) + if doc['Signature']: + sig = re.sub('^[a-zA-Z_0-9.-]*', '', + doc['Signature'].replace('*', r'\*')) + if '=' in sig: + # abbreviate optional arguments + sig = re.sub(r', ([a-zA-Z0-9_]+)=', r'[, \1=', sig, count=1) + sig = re.sub(r'\(([a-zA-Z0-9_]+)=', r'([\1=', sig, count=1) + sig = re.sub(r'=[^,)]+,', ',', sig) + sig = re.sub(r'=[^,)]+\)$', '])', sig) + # shorten long strings + sig = re.sub(r'(\[.{16,16}[^,)]*?),.*?\]\)', r'\1, ...])', sig) + else: + sig = re.sub(r'(\(.{16,16}[^,)]*?),.*?\)', r'\1, ...)', sig) + col1 += " " + sig + col2 = title + append_row(col1, col2) + + return table, warnings, real_names + +def import_by_name(name, prefixes=[None]): + """ + Import a Python object that has the given name, under one of the prefixes. + + Parameters + ---------- + name : str + Name of a Python object, eg. 
'numpy.ndarray.view' + prefixes : list of (str or None), optional + Prefixes to prepend to the name (None implies no prefix). + The first prefixed name that results to successful import is used. + + Returns + ------- + obj + The imported object + name + Name of the imported object (useful if `prefixes` was used) + + """ + for prefix in prefixes: + try: + if prefix: + prefixed_name = '.'.join([prefix, name]) + else: + prefixed_name = name + return _import_by_name(prefixed_name), prefixed_name + except ImportError: + pass + raise ImportError + +def _import_by_name(name): + """Import a Python object given its full name""" + try: + name_parts = name.split('.') + last_j = 0 + modname = None + for j in reversed(range(1, len(name_parts)+1)): + last_j = j + modname = '.'.join(name_parts[:j]) + try: + __import__(modname) + except ImportError: + continue + if modname in sys.modules: + break + + if last_j < len(name_parts): + obj = sys.modules[modname] + for obj_name in name_parts[last_j:]: + obj = getattr(obj, obj_name) + return obj + else: + return sys.modules[modname] + except (ValueError, ImportError, AttributeError, KeyError), e: + raise ImportError(e) + +#------------------------------------------------------------------------------ +# :autolink: (smart default role) +#------------------------------------------------------------------------------ + +def autolink_role(typ, rawtext, etext, lineno, inliner, + options={}, content=[]): + """ + Smart linking role. + + Expands to ":obj:`text`" if `text` is an object that can be imported; + otherwise expands to "*text*". 
+ """ + r = sphinx.roles.xfileref_role('obj', rawtext, etext, lineno, inliner, + options, content) + pnode = r[0][0] + + prefixes = [None] + #prefixes.insert(0, inliner.document.settings.env.currmodule) + try: + obj, name = import_by_name(pnode['reftarget'], prefixes) + except ImportError: + content = pnode[0] + r[0][0] = nodes.emphasis(rawtext, content[0].astext(), + classes=content['classes']) + return r diff --git a/sphinx/ext/autosummary/docscrape.py b/sphinx/ext/autosummary/docscrape.py new file mode 100644 index 00000000..beb4a24e --- /dev/null +++ b/sphinx/ext/autosummary/docscrape.py @@ -0,0 +1,500 @@ +"""Extract reference documentation from the NumPy source tree. + +""" + +import inspect +import textwrap +import re +import pydoc +from StringIO import StringIO +from warnings import warn + +class Reader(object): + """A line-based string reader. + + """ + def __init__(self, data): + """ + Parameters + ---------- + data : str + String with lines separated by '\n'. + + """ + if isinstance(data,list): + self._str = data + else: + self._str = data.split('\n') # store string as list of lines + + self.reset() + + def __getitem__(self, n): + return self._str[n] + + def reset(self): + self._l = 0 # current line nr + + def read(self): + if not self.eof(): + out = self[self._l] + self._l += 1 + return out + else: + return '' + + def seek_next_non_empty_line(self): + for l in self[self._l:]: + if l.strip(): + break + else: + self._l += 1 + + def eof(self): + return self._l >= len(self._str) + + def read_to_condition(self, condition_func): + start = self._l + for line in self[start:]: + if condition_func(line): + return self[start:self._l] + self._l += 1 + if self.eof(): + return self[start:self._l+1] + return [] + + def read_to_next_empty_line(self): + self.seek_next_non_empty_line() + def is_empty(line): + return not line.strip() + return self.read_to_condition(is_empty) + + def read_to_next_unindented_line(self): + def is_unindented(line): + return (line.strip() and 
(len(line.lstrip()) == len(line))) + return self.read_to_condition(is_unindented) + + def peek(self,n=0): + if self._l + n < len(self._str): + return self[self._l + n] + else: + return '' + + def is_empty(self): + return not ''.join(self._str).strip() + + +class NumpyDocString(object): + def __init__(self,docstring): + docstring = docstring.split('\n') + + # De-indent paragraph + try: + indent = min(len(s) - len(s.lstrip()) for s in docstring + if s.strip()) + except ValueError: + indent = 0 + + for n,line in enumerate(docstring): + docstring[n] = docstring[n][indent:] + + self._doc = Reader(docstring) + self._parsed_data = { + 'Signature': '', + 'Summary': '', + 'Extended Summary': [], + 'Parameters': [], + 'Returns': [], + 'Raises': [], + 'Warns': [], + 'Other Parameters': [], + 'Attributes': [], + 'Methods': [], + 'See Also': [], + 'Notes': [], + 'Warnings': [], + 'References': '', + 'Examples': '', + 'index': {} + } + + self._parse() + + def __getitem__(self,key): + return self._parsed_data[key] + + def __setitem__(self,key,val): + if not self._parsed_data.has_key(key): + warn("Unknown section %s" % key) + else: + self._parsed_data[key] = val + + def _is_at_section(self): + self._doc.seek_next_non_empty_line() + + if self._doc.eof(): + return False + + l1 = self._doc.peek().strip() # e.g. Parameters + + if l1.startswith('.. 
index::'): + return True + + l2 = self._doc.peek(1).strip() # ---------- or ========== + return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) + + def _strip(self,doc): + i = 0 + j = 0 + for i,line in enumerate(doc): + if line.strip(): break + + for j,line in enumerate(doc[::-1]): + if line.strip(): break + + return doc[i:len(doc)-j] + + def _read_to_next_section(self): + section = self._doc.read_to_next_empty_line() + + while not self._is_at_section() and not self._doc.eof(): + if not self._doc.peek(-1).strip(): # previous line was empty + section += [''] + + section += self._doc.read_to_next_empty_line() + + return section + + def _read_sections(self): + while not self._doc.eof(): + data = self._read_to_next_section() + name = data[0].strip() + + if name.startswith('..'): # index section + yield name, data[1:] + elif len(data) < 2: + yield StopIteration + else: + yield name, self._strip(data[2:]) + + def _parse_param_list(self,content): + r = Reader(content) + params = [] + while not r.eof(): + header = r.read().strip() + if ' : ' in header: + arg_name, arg_type = header.split(' : ')[:2] + else: + arg_name, arg_type = header, '' + + desc = r.read_to_next_unindented_line() + for n,line in enumerate(desc): + desc[n] = line.strip() + desc = desc #'\n'.join(desc) + + params.append((arg_name,arg_type,desc)) + + return params + + + _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" + r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) + def _parse_see_also(self, content): + """ + func_name : Descriptive text + continued text + another_func_name : Descriptive text + func_name1, func_name2, :meth:`func_name`, func_name3 + + """ + items = [] + + def parse_item_name(text): + """Match ':role:`name`' or 'name'""" + m = self._name_rgx.match(text) + if m: + g = m.groups() + if g[1] is None: + return g[3], None + else: + return g[2], g[1] + raise ValueError("%s is not a item name" % text) + + def push_item(name, rest): + if not name: + return + name, role = 
parse_item_name(name) + items.append((name, list(rest), role)) + del rest[:] + + current_func = None + rest = [] + + for line in content: + if not line.strip(): continue + + m = self._name_rgx.match(line) + if m and line[m.end():].strip().startswith(':'): + push_item(current_func, rest) + current_func, line = line[:m.end()], line[m.end():] + rest = [line.split(':', 1)[1].strip()] + if not rest[0]: + rest = [] + elif not line.startswith(' '): + push_item(current_func, rest) + current_func = None + if ',' in line: + for func in line.split(','): + push_item(func, []) + elif line.strip(): + current_func = line + elif current_func is not None: + rest.append(line.strip()) + push_item(current_func, rest) + return items + + def _parse_index(self, section, content): + """ + .. index: default + :refguide: something, else, and more + + """ + def strip_each_in(lst): + return [s.strip() for s in lst] + + out = {} + section = section.split('::') + if len(section) > 1: + out['default'] = strip_each_in(section[1].split(','))[0] + for line in content: + line = line.split(':') + if len(line) > 2: + out[line[1]] = strip_each_in(line[2].split(',')) + return out + + def _parse_summary(self): + """Grab signature (if given) and summary""" + if self._is_at_section(): + return + + summary = self._doc.read_to_next_empty_line() + summary_str = " ".join([s.strip() for s in summary]).strip() + if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): + self['Signature'] = summary_str + if not self._is_at_section(): + self['Summary'] = self._doc.read_to_next_empty_line() + else: + self['Summary'] = summary + + if not self._is_at_section(): + self['Extended Summary'] = self._read_to_next_section() + + def _parse(self): + self._doc.reset() + self._parse_summary() + + for (section,content) in self._read_sections(): + if not section.startswith('..'): + section = ' '.join([s.capitalize() for s in section.split(' ')]) + if section in ('Parameters', 'Attributes', 'Methods', + 'Returns', 
'Raises', 'Warns'): + self[section] = self._parse_param_list(content) + elif section.startswith('.. index::'): + self['index'] = self._parse_index(section, content) + elif section == 'See Also': + self['See Also'] = self._parse_see_also(content) + else: + self[section] = content + + # string conversion routines + + def _str_header(self, name, symbol='-'): + return [name, len(name)*symbol] + + def _str_indent(self, doc, indent=4): + out = [] + for line in doc: + out += [' '*indent + line] + return out + + def _str_signature(self): + if self['Signature']: + return [self['Signature'].replace('*','\*')] + [''] + else: + return [''] + + def _str_summary(self): + if self['Summary']: + return self['Summary'] + [''] + else: + return [] + + def _str_extended_summary(self): + if self['Extended Summary']: + return self['Extended Summary'] + [''] + else: + return [] + + def _str_param_list(self, name): + out = [] + if self[name]: + out += self._str_header(name) + for param,param_type,desc in self[name]: + out += ['%s : %s' % (param, param_type)] + out += self._str_indent(desc) + out += [''] + return out + + def _str_section(self, name): + out = [] + if self[name]: + out += self._str_header(name) + out += self[name] + out += [''] + return out + + def _str_see_also(self, func_role): + if not self['See Also']: return [] + out = [] + out += self._str_header("See Also") + last_had_desc = True + for func, desc, role in self['See Also']: + if role: + link = ':%s:`%s`' % (role, func) + elif func_role: + link = ':%s:`%s`' % (func_role, func) + else: + link = "`%s`_" % func + if desc or last_had_desc: + out += [''] + out += [link] + else: + out[-1] += ", %s" % link + if desc: + out += self._str_indent([' '.join(desc)]) + last_had_desc = True + else: + last_had_desc = False + out += [''] + return out + + def _str_index(self): + idx = self['index'] + out = [] + out += ['.. 
index:: %s' % idx.get('default','')] + for section, references in idx.iteritems(): + if section == 'default': + continue + out += [' :%s: %s' % (section, ', '.join(references))] + return out + + def __str__(self, func_role=''): + out = [] + out += self._str_signature() + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters','Returns','Raises'): + out += self._str_param_list(param_list) + out += self._str_section('Warnings') + out += self._str_see_also(func_role) + for s in ('Notes','References','Examples'): + out += self._str_section(s) + out += self._str_index() + return '\n'.join(out) + + +def indent(str,indent=4): + indent_str = ' '*indent + if str is None: + return indent_str + lines = str.split('\n') + return '\n'.join(indent_str + l for l in lines) + +def header(text, style='-'): + return text + '\n' + style*len(text) + '\n' + + +class FunctionDoc(NumpyDocString): + def __init__(self, func, role='func'): + self._f = func + self._role = role # e.g. 
"func" or "meth" + try: + NumpyDocString.__init__(self,inspect.getdoc(func) or '') + except ValueError, e: + print '*'*78 + print "ERROR: '%s' while parsing `%s`" % (e, self._f) + print '*'*78 + #print "Docstring follows:" + #print doclines + #print '='*78 + + if not self['Signature']: + func, func_name = self.get_func() + try: + # try to read signature + argspec = inspect.getargspec(func) + argspec = inspect.formatargspec(*argspec) + argspec = argspec.replace('*','\*') + signature = '%s%s' % (func_name, argspec) + except TypeError, e: + signature = '%s()' % func_name + self['Signature'] = signature + + def get_func(self): + func_name = getattr(self._f, '__name__', self.__class__.__name__) + if inspect.isclass(self._f): + func = getattr(self._f, '__call__', self._f.__init__) + else: + func = self._f + return func, func_name + + def __str__(self): + out = '' + + func, func_name = self.get_func() + signature = self['Signature'].replace('*', '\*') + + roles = {'func': 'function', + 'meth': 'method'} + + if self._role: + if not roles.has_key(self._role): + print "Warning: invalid role %s" % self._role + out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''), + func_name) + + out += super(FunctionDoc, self).__str__(func_role=self._role) + return out + + +class ClassDoc(NumpyDocString): + def __init__(self,cls,modulename='',func_doc=FunctionDoc): + if not inspect.isclass(cls): + raise ValueError("Initialise using a class. Got %r" % cls) + self._cls = cls + + if modulename and not modulename.endswith('.'): + modulename += '.' 
+ self._mod = modulename + self._name = cls.__name__ + self._func_doc = func_doc + + NumpyDocString.__init__(self, pydoc.getdoc(cls)) + + @property + def methods(self): + return [name for name,func in inspect.getmembers(self._cls) + if not name.startswith('_') and callable(func)] + + def __str__(self): + out = '' + out += super(ClassDoc, self).__str__() + out += "\n\n" + + #for m in self.methods: + # print "Parsing `%s`" % m + # out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n' + # out += '.. index::\n single: %s; %s\n\n' % (self._name, m) + + return out + + diff --git a/sphinx/ext/autosummary/docscrape_sphinx.py b/sphinx/ext/autosummary/docscrape_sphinx.py new file mode 100644 index 00000000..d431ecd3 --- /dev/null +++ b/sphinx/ext/autosummary/docscrape_sphinx.py @@ -0,0 +1,133 @@ +import re, inspect, textwrap, pydoc +from docscrape import NumpyDocString, FunctionDoc, ClassDoc + +class SphinxDocString(NumpyDocString): + # string conversion routines + def _str_header(self, name, symbol='`'): + return ['.. 
rubric:: ' + name, ''] + + def _str_field_list(self, name): + return [':' + name + ':'] + + def _str_indent(self, doc, indent=4): + out = [] + for line in doc: + out += [' '*indent + line] + return out + + def _str_signature(self): + return [''] + if self['Signature']: + return ['``%s``' % self['Signature']] + [''] + else: + return [''] + + def _str_summary(self): + return self['Summary'] + [''] + + def _str_extended_summary(self): + return self['Extended Summary'] + [''] + + def _str_param_list(self, name): + out = [] + if self[name]: + out += self._str_field_list(name) + out += [''] + for param,param_type,desc in self[name]: + out += self._str_indent(['**%s** : %s' % (param.strip(), + param_type)]) + out += [''] + out += self._str_indent(desc,8) + out += [''] + return out + + def _str_section(self, name): + out = [] + if self[name]: + out += self._str_header(name) + out += [''] + content = textwrap.dedent("\n".join(self[name])).split("\n") + out += content + out += [''] + return out + + def _str_see_also(self, func_role): + out = [] + if self['See Also']: + see_also = super(SphinxDocString, self)._str_see_also(func_role) + out = ['.. seealso::', ''] + out += self._str_indent(see_also[2:]) + return out + + def _str_warnings(self): + out = [] + if self['Warnings']: + out = ['.. warning::', ''] + out += self._str_indent(self['Warnings']) + return out + + def _str_index(self): + idx = self['index'] + out = [] + if len(idx) == 0: + return out + + out += ['.. 
index:: %s' % idx.get('default','')] + for section, references in idx.iteritems(): + if section == 'default': + continue + elif section == 'refguide': + out += [' single: %s' % (', '.join(references))] + else: + out += [' %s: %s' % (section, ','.join(references))] + return out + + def _str_references(self): + out = [] + if self['References']: + out += self._str_header('References') + if isinstance(self['References'], str): + self['References'] = [self['References']] + out.extend(self['References']) + out += [''] + return out + + def __str__(self, indent=0, func_role="obj"): + out = [] + out += self._str_signature() + out += self._str_index() + [''] + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Attributes', 'Methods', + 'Returns','Raises'): + out += self._str_param_list(param_list) + out += self._str_warnings() + out += self._str_see_also(func_role) + out += self._str_section('Notes') + out += self._str_references() + out += self._str_section('Examples') + out = self._str_indent(out,indent) + return '\n'.join(out) + +class SphinxFunctionDoc(SphinxDocString, FunctionDoc): + pass + +class SphinxClassDoc(SphinxDocString, ClassDoc): + pass + +def get_doc_object(obj, what=None): + if what is None: + if inspect.isclass(obj): + what = 'class' + elif inspect.ismodule(obj): + what = 'module' + elif callable(obj): + what = 'function' + else: + what = 'object' + if what == 'class': + return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc) + elif what in ('function', 'method'): + return SphinxFunctionDoc(obj, '') + else: + return SphinxDocString(pydoc.getdoc(obj)) diff --git a/sphinx/scripts/__init__.py b/sphinx/scripts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/sphinx/scripts/autosummary_generate.py b/sphinx/scripts/autosummary_generate.py new file mode 100755 index 00000000..acab57d3 --- /dev/null +++ b/sphinx/scripts/autosummary_generate.py @@ -0,0 +1,198 @@ +#!/usr/bin/env python +r""" 
+autosummary_generate.py OPTIONS FILES + +Generate automatic RST source files for items referred to in +autosummary:: directives. + +Each generated RST file contains a single auto*:: directive which +extracts the docstring of the referred item. + +Example Makefile rule:: + + generate: + ./ext/autosummary_generate.py -o source/generated source/*.rst + +""" +import glob, re, inspect, os, optparse +from sphinx.ext.autosummary import import_by_name + +from jinja import Environment, PackageLoader +env = Environment(loader=PackageLoader('numpyext', 'templates')) + +def main(): + p = optparse.OptionParser(__doc__.strip()) + p.add_option("-o", "--output-dir", action="store", type="string", + dest="output_dir", default=None, + help=("Write all output files to the given directory (instead " + "of writing them as specified in the autosummary:: " + "directives)")) + options, args = p.parse_args() + + if len(args) == 0: + p.error("wrong number of arguments") + + # read + names = {} + for name, loc in get_documented(args).items(): + for (filename, sec_title, keyword, toctree) in loc: + if toctree is not None: + path = os.path.join(os.path.dirname(filename), toctree) + names[name] = os.path.abspath(path) + + # write + for name, path in sorted(names.items()): + if options.output_dir is not None: + path = options.output_dir + + if not os.path.isdir(path): + os.makedirs(path) + + try: + obj, name = import_by_name(name) + except ImportError, e: + print "Failed to import '%s': %s" % (name, e) + continue + + fn = os.path.join(path, '%s.rst' % name) + + if os.path.exists(fn): + # skip + continue + + f = open(fn, 'w') + + + try: + + if inspect.ismodule(obj): + tmpl = env.get_template('module.html') + functions = [getattr(obj, item).__name__ for item in dir(obj) if inspect.isfunction(getattr(obj, item))] + classes = [getattr(obj, item).__name__ for item in dir(obj) if inspect.isclass(getattr(obj, item)) and not issubclass(getattr(obj, item), Exception)] + exceptions = [getattr(obj, 
item).__name__ for item in dir(obj) if inspect.isclass(getattr(obj, item)) and issubclass(getattr(obj, item), Exception)] + rendered = tmpl.render(name=name, + functions=functions, + classes=classes, + exceptions=exceptions, + len_functions=len(functions), + len_classes=len(classes), + len_exceptions=len(exceptions) + + ) + f.write(rendered) + else: + f.write('%s\n%s\n\n' % (name, '='*len(name))) + + if inspect.isclass(obj): + if issubclass(obj, Exception): + f.write(format_modulemember(name, 'autoexception')) + else: + f.write(format_modulemember(name, 'autoclass')) + elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj): + f.write(format_classmember(name, 'automethod')) + elif callable(obj): + f.write(format_modulemember(name, 'autofunction')) + elif hasattr(obj, '__get__'): + f.write(format_classmember(name, 'autoattribute')) + else: + f.write(format_modulemember(name, 'autofunction')) + finally: + f.close() + +def format_modulemember(name, directive): + parts = name.split('.') + mod, name = '.'.join(parts[:-1]), parts[-1] + return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name) + +def format_classmember(name, directive): + parts = name.split('.') + mod, name = '.'.join(parts[:-2]), '.'.join(parts[-2:]) + return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name) + +def get_documented(filenames): + """ + Find out what items are documented in source/*.rst + + Returns + ------- + documented : dict of list of (filename, title, keyword, toctree) + Dictionary whose keys are documented names of objects. + The value is a list of locations where the object was documented. + Each location is a tuple of filename, the current section title, + the name of the directive, and the value of the :toctree: argument + (if present) of the directive. + + """ + + title_underline_re = re.compile("^[-=*_^#]{3,}\s*$") + autodoc_re = re.compile(".. 
auto(function|method|attribute|class|exception|module)::\s*([A-Za-z0-9_.]+)\s*$") + autosummary_re = re.compile(r'^\.\.\s+autosummary::\s*') + module_re = re.compile(r'^\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$') + autosummary_item_re = re.compile(r'^\s+([_a-zA-Z][a-zA-Z0-9_.]*)\s*') + toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$') + + documented = {} + + for filename in filenames: + current_title = [] + last_line = None + toctree = None + current_module = None + in_autosummary = False + + f = open(filename, 'r') + for line in f: + try: + if in_autosummary: + m = toctree_arg_re.match(line) + if m: + toctree = m.group(1) + continue + + if line.strip().startswith(':'): + continue # skip options + + m = autosummary_item_re.match(line) + + if m: + name = m.group(1).strip() + if current_module and not name.startswith(current_module + '.'): + name = "%s.%s" % (current_module, name) + documented.setdefault(name, []).append( + (filename, current_title, 'autosummary', toctree)) + continue + if line.strip() == '': + continue + in_autosummary = False + + m = autosummary_re.match(line) + if m: + in_autosummary = True + continue + + m = autodoc_re.search(line) + if m: + name = m.group(2).strip() + if current_module and not name.startswith(current_module + '.'): + name = "%s.%s" % (current_module, name) + if m.group(1) == "module": + current_module = name + documented.setdefault(name, []).append( + (filename, current_title, "auto" + m.group(1), None)) + continue + + m = title_underline_re.match(line) + if m and last_line: + current_title = last_line.strip() + continue + + m = module_re.match(line) + if m: + current_module = m.group(2) + continue + finally: + last_line = line + return documented + +if __name__ == "__main__": + main() diff --git a/sphinx/templates/autosummary-module.html b/sphinx/templates/autosummary-module.html new file mode 100644 index 00000000..34dd8100 --- /dev/null +++ b/sphinx/templates/autosummary-module.html @@ -0,0 +1,39 @@ 
+:mod:`{{name}}` +=============================================================================================================================================== + + +.. automodule:: {{name}} + +{% if len_functions > 0 %} +Functions +---------- +{% for item in functions %} +.. autofunction:: {{item}} +{% endfor %} +{% endif %} + +{% if len_classes > 0 %} +Classes +-------- +{% for item in classes %} +.. autoclass:: {{item}} + :show-inheritance: + :members: + :inherited-members: + :undoc-members: + +{% endfor %} +{% endif %} + +{% if len_exceptions > 0 %} +Exceptions +------------ +{% for item in exceptions %} +.. autoclass:: {{item}} + :show-inheritance: + :members: + :inherited-members: + :undoc-members: + +{% endfor %} +{% endif %} \ No newline at end of file -- cgit v1.2.1 From 98bd7c4775810b502a1bd47c6a8f1a53f50f7535 Mon Sep 17 00:00:00 2001 From: percious Date: Tue, 4 Nov 2008 10:40:42 -0700 Subject: brought over license file from numpy --- sphinx/ext/autosummary/LICENSE.txt | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 sphinx/ext/autosummary/LICENSE.txt diff --git a/sphinx/ext/autosummary/LICENSE.txt b/sphinx/ext/autosummary/LICENSE.txt new file mode 100644 index 00000000..035a2095 --- /dev/null +++ b/sphinx/ext/autosummary/LICENSE.txt @@ -0,0 +1,32 @@ + The files + - __init__.py + - docscrape.py + - doscrape-sphinx.py + - ../scripts/autosummary_generate.py + + have the following license: + +Copyright (C) 2008 Stefan van der Walt , Pauli Virtanen , Christopher Perkins + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. -- cgit v1.2.1 From f2b1348908e73a94b063fb2c2fb703ab93b07c1f Mon Sep 17 00:00:00 2001 From: percious Date: Tue, 4 Nov 2008 11:07:58 -0700 Subject: reorganized code to fit within the ext.autosummary module --- setup.py | 2 +- sphinx/ext/autosummary/LICENSE.txt | 3 +- sphinx/ext/autosummary/generate.py | 195 ++++++++++++++++++++++++++ sphinx/ext/autosummary/templates/module.html | 39 ++++++ sphinx/scripts/__init__.py | 0 sphinx/scripts/autosummary_generate.py | 198 --------------------------- sphinx/templates/autosummary-module.html | 39 ------ 7 files changed, 237 insertions(+), 239 deletions(-) create mode 100644 sphinx/ext/autosummary/generate.py create mode 100644 sphinx/ext/autosummary/templates/module.html delete mode 100644 sphinx/scripts/__init__.py delete mode 100755 sphinx/scripts/autosummary_generate.py delete mode 100644 sphinx/templates/autosummary-module.html diff --git a/setup.py b/setup.py index aa4d2c21..13e70690 100644 --- a/setup.py +++ b/setup.py @@ -178,7 +178,7 @@ setup( 'console_scripts': [ 'sphinx-build = sphinx:main', 
'sphinx-quickstart = sphinx.quickstart:main', - 'sphinx-autogen = sphinx.scripts.autosummary_generate:main', + 'sphinx-autogen = sphinx.ext.autosummary.generate:main', ], 'distutils.commands': [ 'build_sphinx = sphinx.setup_command:BuildDoc', diff --git a/sphinx/ext/autosummary/LICENSE.txt b/sphinx/ext/autosummary/LICENSE.txt index 035a2095..da27afd4 100644 --- a/sphinx/ext/autosummary/LICENSE.txt +++ b/sphinx/ext/autosummary/LICENSE.txt @@ -2,7 +2,8 @@ - __init__.py - docscrape.py - doscrape-sphinx.py - - ../scripts/autosummary_generate.py + - generate.py + - templates/module.html have the following license: diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py new file mode 100644 index 00000000..ce9e4c2d --- /dev/null +++ b/sphinx/ext/autosummary/generate.py @@ -0,0 +1,195 @@ +""" +autosummary_generate.py OPTIONS FILES + +Generate automatic RST source files for items referred to in +autosummary:: directives. + +Each generated RST file contains a single auto*:: directive which +extracts the docstring of the referred item. 
+ +Example Makefile rule:: + + generate: + sphinx-autogen source/*.rst source/generated + +""" +import glob, re, inspect, os, optparse +from sphinx.ext.autosummary import import_by_name + +from jinja import Environment, PackageLoader +env = Environment(loader=PackageLoader('sphinx.ext.autosummary', 'templates')) + +def main(): + p = optparse.OptionParser(__doc__.strip()) + options, args = p.parse_args() + + if len(args) <2: + p.error("wrong number of arguments") + + print 'generating docs from:', args[:-1] + generate_autosummary_docs(args[:-1], args[-1]) + +def generate_autosummary_docs(source_dir, output_dir): + # read + names = {} + for name, loc in get_documented(source_dir).items(): + for (filename, sec_title, keyword, toctree) in loc: + if toctree is not None: + path = os.path.join(os.path.dirname(filename), toctree) + names[name] = os.path.abspath(path) + + # write + for name, path in sorted(names.items()): + path = output_dir + + if not os.path.isdir(path): + os.makedirs(path) + + try: + obj, name = import_by_name(name) + except ImportError, e: + print "Failed to import '%s': %s" % (name, e) + continue + + fn = os.path.join(path, '%s.rst' % name) + + if os.path.exists(fn): + # skip + continue + + f = open(fn, 'w') + + + try: + + if inspect.ismodule(obj): + tmpl = env.get_template('module.html') + functions = [getattr(obj, item).__name__ for item in dir(obj) if inspect.isfunction(getattr(obj, item))] + classes = [getattr(obj, item).__name__ for item in dir(obj) if inspect.isclass(getattr(obj, item)) and not issubclass(getattr(obj, item), Exception)] + exceptions = [getattr(obj, item).__name__ for item in dir(obj) if inspect.isclass(getattr(obj, item)) and issubclass(getattr(obj, item), Exception)] + rendered = tmpl.render(name=name, + functions=functions, + classes=classes, + exceptions=exceptions, + len_functions=len(functions), + len_classes=len(classes), + len_exceptions=len(exceptions) + + ) + f.write(rendered) + else: + f.write('%s\n%s\n\n' % (name, 
'='*len(name))) + + if inspect.isclass(obj): + if issubclass(obj, Exception): + f.write(format_modulemember(name, 'autoexception')) + else: + f.write(format_modulemember(name, 'autoclass')) + elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj): + f.write(format_classmember(name, 'automethod')) + elif callable(obj): + f.write(format_modulemember(name, 'autofunction')) + elif hasattr(obj, '__get__'): + f.write(format_classmember(name, 'autoattribute')) + else: + f.write(format_modulemember(name, 'autofunction')) + finally: + f.close() + +def format_modulemember(name, directive): + parts = name.split('.') + mod, name = '.'.join(parts[:-1]), parts[-1] + return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name) + +def format_classmember(name, directive): + parts = name.split('.') + mod, name = '.'.join(parts[:-2]), '.'.join(parts[-2:]) + return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name) + +def get_documented(filenames): + """ + Find out what items are documented in source/*.rst + + Returns + ------- + documented : dict of list of (filename, title, keyword, toctree) + Dictionary whose keys are documented names of objects. + The value is a list of locations where the object was documented. + Each location is a tuple of filename, the current section title, + the name of the directive, and the value of the :toctree: argument + (if present) of the directive. + + """ + + title_underline_re = re.compile("^[-=*_^#]{3,}\s*$") + autodoc_re = re.compile(".. 
auto(function|method|attribute|class|exception|module)::\s*([A-Za-z0-9_.]+)\s*$") + autosummary_re = re.compile(r'^\.\.\s+autosummary::\s*') + module_re = re.compile(r'^\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$') + autosummary_item_re = re.compile(r'^\s+([_a-zA-Z][a-zA-Z0-9_.]*)\s*') + toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$') + + documented = {} + + for filename in filenames: + current_title = [] + last_line = None + toctree = None + current_module = None + in_autosummary = False + + f = open(filename, 'r') + for line in f: + try: + if in_autosummary: + m = toctree_arg_re.match(line) + if m: + toctree = m.group(1) + continue + + if line.strip().startswith(':'): + continue # skip options + + m = autosummary_item_re.match(line) + + if m: + name = m.group(1).strip() + if current_module and not name.startswith(current_module + '.'): + name = "%s.%s" % (current_module, name) + documented.setdefault(name, []).append( + (filename, current_title, 'autosummary', toctree)) + continue + if line.strip() == '': + continue + in_autosummary = False + + m = autosummary_re.match(line) + if m: + in_autosummary = True + continue + + m = autodoc_re.search(line) + if m: + name = m.group(2).strip() + if current_module and not name.startswith(current_module + '.'): + name = "%s.%s" % (current_module, name) + if m.group(1) == "module": + current_module = name + documented.setdefault(name, []).append( + (filename, current_title, "auto" + m.group(1), None)) + continue + + m = title_underline_re.match(line) + if m and last_line: + current_title = last_line.strip() + continue + + m = module_re.match(line) + if m: + current_module = m.group(2) + continue + finally: + last_line = line + return documented + +if __name__ == "__main__": + main() diff --git a/sphinx/ext/autosummary/templates/module.html b/sphinx/ext/autosummary/templates/module.html new file mode 100644 index 00000000..34dd8100 --- /dev/null +++ b/sphinx/ext/autosummary/templates/module.html @@ -0,0 +1,39 @@ 
+:mod:`{{name}}` +=============================================================================================================================================== + + +.. automodule:: {{name}} + +{% if len_functions > 0 %} +Functions +---------- +{% for item in functions %} +.. autofunction:: {{item}} +{% endfor %} +{% endif %} + +{% if len_classes > 0 %} +Classes +-------- +{% for item in classes %} +.. autoclass:: {{item}} + :show-inheritance: + :members: + :inherited-members: + :undoc-members: + +{% endfor %} +{% endif %} + +{% if len_exceptions > 0 %} +Exceptions +------------ +{% for item in exceptions %} +.. autoclass:: {{item}} + :show-inheritance: + :members: + :inherited-members: + :undoc-members: + +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/sphinx/scripts/__init__.py b/sphinx/scripts/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/sphinx/scripts/autosummary_generate.py b/sphinx/scripts/autosummary_generate.py deleted file mode 100755 index acab57d3..00000000 --- a/sphinx/scripts/autosummary_generate.py +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/env python -r""" -autosummary_generate.py OPTIONS FILES - -Generate automatic RST source files for items referred to in -autosummary:: directives. - -Each generated RST file contains a single auto*:: directive which -extracts the docstring of the referred item. 
- -Example Makefile rule:: - - generate: - ./ext/autosummary_generate.py -o source/generated source/*.rst - -""" -import glob, re, inspect, os, optparse -from sphinx.ext.autosummary import import_by_name - -from jinja import Environment, PackageLoader -env = Environment(loader=PackageLoader('numpyext', 'templates')) - -def main(): - p = optparse.OptionParser(__doc__.strip()) - p.add_option("-o", "--output-dir", action="store", type="string", - dest="output_dir", default=None, - help=("Write all output files to the given directory (instead " - "of writing them as specified in the autosummary:: " - "directives)")) - options, args = p.parse_args() - - if len(args) == 0: - p.error("wrong number of arguments") - - # read - names = {} - for name, loc in get_documented(args).items(): - for (filename, sec_title, keyword, toctree) in loc: - if toctree is not None: - path = os.path.join(os.path.dirname(filename), toctree) - names[name] = os.path.abspath(path) - - # write - for name, path in sorted(names.items()): - if options.output_dir is not None: - path = options.output_dir - - if not os.path.isdir(path): - os.makedirs(path) - - try: - obj, name = import_by_name(name) - except ImportError, e: - print "Failed to import '%s': %s" % (name, e) - continue - - fn = os.path.join(path, '%s.rst' % name) - - if os.path.exists(fn): - # skip - continue - - f = open(fn, 'w') - - - try: - - if inspect.ismodule(obj): - tmpl = env.get_template('module.html') - functions = [getattr(obj, item).__name__ for item in dir(obj) if inspect.isfunction(getattr(obj, item))] - classes = [getattr(obj, item).__name__ for item in dir(obj) if inspect.isclass(getattr(obj, item)) and not issubclass(getattr(obj, item), Exception)] - exceptions = [getattr(obj, item).__name__ for item in dir(obj) if inspect.isclass(getattr(obj, item)) and issubclass(getattr(obj, item), Exception)] - rendered = tmpl.render(name=name, - functions=functions, - classes=classes, - exceptions=exceptions, - 
len_functions=len(functions), - len_classes=len(classes), - len_exceptions=len(exceptions) - - ) - f.write(rendered) - else: - f.write('%s\n%s\n\n' % (name, '='*len(name))) - - if inspect.isclass(obj): - if issubclass(obj, Exception): - f.write(format_modulemember(name, 'autoexception')) - else: - f.write(format_modulemember(name, 'autoclass')) - elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj): - f.write(format_classmember(name, 'automethod')) - elif callable(obj): - f.write(format_modulemember(name, 'autofunction')) - elif hasattr(obj, '__get__'): - f.write(format_classmember(name, 'autoattribute')) - else: - f.write(format_modulemember(name, 'autofunction')) - finally: - f.close() - -def format_modulemember(name, directive): - parts = name.split('.') - mod, name = '.'.join(parts[:-1]), parts[-1] - return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name) - -def format_classmember(name, directive): - parts = name.split('.') - mod, name = '.'.join(parts[:-2]), '.'.join(parts[-2:]) - return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name) - -def get_documented(filenames): - """ - Find out what items are documented in source/*.rst - - Returns - ------- - documented : dict of list of (filename, title, keyword, toctree) - Dictionary whose keys are documented names of objects. - The value is a list of locations where the object was documented. - Each location is a tuple of filename, the current section title, - the name of the directive, and the value of the :toctree: argument - (if present) of the directive. - - """ - - title_underline_re = re.compile("^[-=*_^#]{3,}\s*$") - autodoc_re = re.compile(".. 
auto(function|method|attribute|class|exception|module)::\s*([A-Za-z0-9_.]+)\s*$") - autosummary_re = re.compile(r'^\.\.\s+autosummary::\s*') - module_re = re.compile(r'^\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$') - autosummary_item_re = re.compile(r'^\s+([_a-zA-Z][a-zA-Z0-9_.]*)\s*') - toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$') - - documented = {} - - for filename in filenames: - current_title = [] - last_line = None - toctree = None - current_module = None - in_autosummary = False - - f = open(filename, 'r') - for line in f: - try: - if in_autosummary: - m = toctree_arg_re.match(line) - if m: - toctree = m.group(1) - continue - - if line.strip().startswith(':'): - continue # skip options - - m = autosummary_item_re.match(line) - - if m: - name = m.group(1).strip() - if current_module and not name.startswith(current_module + '.'): - name = "%s.%s" % (current_module, name) - documented.setdefault(name, []).append( - (filename, current_title, 'autosummary', toctree)) - continue - if line.strip() == '': - continue - in_autosummary = False - - m = autosummary_re.match(line) - if m: - in_autosummary = True - continue - - m = autodoc_re.search(line) - if m: - name = m.group(2).strip() - if current_module and not name.startswith(current_module + '.'): - name = "%s.%s" % (current_module, name) - if m.group(1) == "module": - current_module = name - documented.setdefault(name, []).append( - (filename, current_title, "auto" + m.group(1), None)) - continue - - m = title_underline_re.match(line) - if m and last_line: - current_title = last_line.strip() - continue - - m = module_re.match(line) - if m: - current_module = m.group(2) - continue - finally: - last_line = line - return documented - -if __name__ == "__main__": - main() diff --git a/sphinx/templates/autosummary-module.html b/sphinx/templates/autosummary-module.html deleted file mode 100644 index 34dd8100..00000000 --- a/sphinx/templates/autosummary-module.html +++ /dev/null @@ -1,39 +0,0 @@ 
-:mod:`{{name}}` -=============================================================================================================================================== - - -.. automodule:: {{name}} - -{% if len_functions > 0 %} -Functions ----------- -{% for item in functions %} -.. autofunction:: {{item}} -{% endfor %} -{% endif %} - -{% if len_classes > 0 %} -Classes --------- -{% for item in classes %} -.. autoclass:: {{item}} - :show-inheritance: - :members: - :inherited-members: - :undoc-members: - -{% endfor %} -{% endif %} - -{% if len_exceptions > 0 %} -Exceptions ------------- -{% for item in exceptions %} -.. autoclass:: {{item}} - :show-inheritance: - :members: - :inherited-members: - :undoc-members: - -{% endfor %} -{% endif %} \ No newline at end of file -- cgit v1.2.1 From bd4d482e749d592f9745f0f2b84133fdb3b3a4ba Mon Sep 17 00:00:00 2001 From: percious Date: Tue, 4 Nov 2008 11:25:03 -0700 Subject: modified sphinx-build script to allow for auto-generation of docs --- sphinx/__init__.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/sphinx/__init__.py b/sphinx/__init__.py index d53fdafd..2c96cdd9 100644 --- a/sphinx/__init__.py +++ b/sphinx/__init__.py @@ -42,6 +42,7 @@ Options: -b -- builder to use; default is html -N -- do not do colored output -q -- no output on stdout, just warnings on stderr -P -- run Pdb on exception + -g -- create autogenerated files Modi: * without -a and without filenames, write new and changed files. * with -a, write all files. @@ -59,7 +60,7 @@ def main(argv=sys.argv): nocolor() try: - opts, args = getopt.getopt(argv[1:], 'ab:d:c:D:NEqP') + opts, args = getopt.getopt(argv[1:], 'ab:d:c:D:g:NEqP') srcdir = confdir = path.abspath(args[0]) if not path.isdir(srcdir): print >>sys.stderr, 'Error: Cannot find source directory.' 
@@ -84,6 +85,7 @@ def main(argv=sys.argv): err = 1 if err: return 1 + buildername = all_files = None freshenv = use_pdb = False @@ -91,7 +93,19 @@ def main(argv=sys.argv): confoverrides = {} doctreedir = path.join(outdir, '.doctrees') for opt, val in opts: - if opt == '-b': + + if opt == '-g': + print 'in here' + source_filenames =[srcdir+'/'+f for f in os.listdir(srcdir) if f.endswith('.rst')] + if val is None: + print >>sys.stderr, \ + 'Error: you must provide a destination directory for autodoc generation.' + return 1 + p = path.abspath(val) + from sphinx.ext.autosummary.generate import generate_autosummary_docs + generate_autosummary_docs(source_filenames, p) + + elif opt == '-b': buildername = val elif opt == '-a': if filenames: -- cgit v1.2.1 From d5943afe33490828fd8dfeee969b25e8b49d0314 Mon Sep 17 00:00:00 2001 From: percious Date: Tue, 4 Nov 2008 13:58:17 -0700 Subject: added -g option to sphinx-build and cleaned out numpy code. --- sphinx/__init__.py | 1 - sphinx/ext/autosummary/__init__.py | 22 +- sphinx/ext/autosummary/docscrape.py | 500 ----------------------------- sphinx/ext/autosummary/docscrape_sphinx.py | 133 -------- 4 files changed, 1 insertion(+), 655 deletions(-) delete mode 100644 sphinx/ext/autosummary/docscrape.py delete mode 100644 sphinx/ext/autosummary/docscrape_sphinx.py diff --git a/sphinx/__init__.py b/sphinx/__init__.py index 2c96cdd9..bda0efb0 100644 --- a/sphinx/__init__.py +++ b/sphinx/__init__.py @@ -95,7 +95,6 @@ def main(argv=sys.argv): for opt, val in opts: if opt == '-g': - print 'in here' source_filenames =[srcdir+'/'+f for f in os.listdir(srcdir) if f.endswith('.rst')] if val is None: print >>sys.stderr, \ diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py index 6b74190c..be11309e 100644 --- a/sphinx/ext/autosummary/__init__.py +++ b/sphinx/ext/autosummary/__init__.py @@ -59,7 +59,6 @@ from docutils import nodes import sphinx.addnodes, sphinx.roles, sphinx.builder from sphinx.util import 
patfilter -from docscrape_sphinx import get_doc_object import inspect def setup(app): @@ -215,30 +214,11 @@ def get_autosummary(names, state, no_signatures=False): real_names[name] = real_name - doc = get_doc_object(obj) - - if doc['Summary']: - title = " ".join(doc['Summary']) - else: - title = "" qualifier = 'obj' if inspect.ismodule(obj): qualifier = 'mod' col1 = ":"+qualifier+":`%s <%s>`" % (name, real_name) - if doc['Signature']: - sig = re.sub('^[a-zA-Z_0-9.-]*', '', - doc['Signature'].replace('*', r'\*')) - if '=' in sig: - # abbreviate optional arguments - sig = re.sub(r', ([a-zA-Z0-9_]+)=', r'[, \1=', sig, count=1) - sig = re.sub(r'\(([a-zA-Z0-9_]+)=', r'([\1=', sig, count=1) - sig = re.sub(r'=[^,)]+,', ',', sig) - sig = re.sub(r'=[^,)]+\)$', '])', sig) - # shorten long strings - sig = re.sub(r'(\[.{16,16}[^,)]*?),.*?\]\)', r'\1, ...])', sig) - else: - sig = re.sub(r'(\(.{16,16}[^,)]*?),.*?\)', r'\1, ...)', sig) - col1 += " " + sig + col2 = title append_row(col1, col2) diff --git a/sphinx/ext/autosummary/docscrape.py b/sphinx/ext/autosummary/docscrape.py deleted file mode 100644 index beb4a24e..00000000 --- a/sphinx/ext/autosummary/docscrape.py +++ /dev/null @@ -1,500 +0,0 @@ -"""Extract reference documentation from the NumPy source tree. - -""" - -import inspect -import textwrap -import re -import pydoc -from StringIO import StringIO -from warnings import warn - -class Reader(object): - """A line-based string reader. - - """ - def __init__(self, data): - """ - Parameters - ---------- - data : str - String with lines separated by '\n'. 
- - """ - if isinstance(data,list): - self._str = data - else: - self._str = data.split('\n') # store string as list of lines - - self.reset() - - def __getitem__(self, n): - return self._str[n] - - def reset(self): - self._l = 0 # current line nr - - def read(self): - if not self.eof(): - out = self[self._l] - self._l += 1 - return out - else: - return '' - - def seek_next_non_empty_line(self): - for l in self[self._l:]: - if l.strip(): - break - else: - self._l += 1 - - def eof(self): - return self._l >= len(self._str) - - def read_to_condition(self, condition_func): - start = self._l - for line in self[start:]: - if condition_func(line): - return self[start:self._l] - self._l += 1 - if self.eof(): - return self[start:self._l+1] - return [] - - def read_to_next_empty_line(self): - self.seek_next_non_empty_line() - def is_empty(line): - return not line.strip() - return self.read_to_condition(is_empty) - - def read_to_next_unindented_line(self): - def is_unindented(line): - return (line.strip() and (len(line.lstrip()) == len(line))) - return self.read_to_condition(is_unindented) - - def peek(self,n=0): - if self._l + n < len(self._str): - return self[self._l + n] - else: - return '' - - def is_empty(self): - return not ''.join(self._str).strip() - - -class NumpyDocString(object): - def __init__(self,docstring): - docstring = docstring.split('\n') - - # De-indent paragraph - try: - indent = min(len(s) - len(s.lstrip()) for s in docstring - if s.strip()) - except ValueError: - indent = 0 - - for n,line in enumerate(docstring): - docstring[n] = docstring[n][indent:] - - self._doc = Reader(docstring) - self._parsed_data = { - 'Signature': '', - 'Summary': '', - 'Extended Summary': [], - 'Parameters': [], - 'Returns': [], - 'Raises': [], - 'Warns': [], - 'Other Parameters': [], - 'Attributes': [], - 'Methods': [], - 'See Also': [], - 'Notes': [], - 'Warnings': [], - 'References': '', - 'Examples': '', - 'index': {} - } - - self._parse() - - def __getitem__(self,key): - 
return self._parsed_data[key] - - def __setitem__(self,key,val): - if not self._parsed_data.has_key(key): - warn("Unknown section %s" % key) - else: - self._parsed_data[key] = val - - def _is_at_section(self): - self._doc.seek_next_non_empty_line() - - if self._doc.eof(): - return False - - l1 = self._doc.peek().strip() # e.g. Parameters - - if l1.startswith('.. index::'): - return True - - l2 = self._doc.peek(1).strip() # ---------- or ========== - return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) - - def _strip(self,doc): - i = 0 - j = 0 - for i,line in enumerate(doc): - if line.strip(): break - - for j,line in enumerate(doc[::-1]): - if line.strip(): break - - return doc[i:len(doc)-j] - - def _read_to_next_section(self): - section = self._doc.read_to_next_empty_line() - - while not self._is_at_section() and not self._doc.eof(): - if not self._doc.peek(-1).strip(): # previous line was empty - section += [''] - - section += self._doc.read_to_next_empty_line() - - return section - - def _read_sections(self): - while not self._doc.eof(): - data = self._read_to_next_section() - name = data[0].strip() - - if name.startswith('..'): # index section - yield name, data[1:] - elif len(data) < 2: - yield StopIteration - else: - yield name, self._strip(data[2:]) - - def _parse_param_list(self,content): - r = Reader(content) - params = [] - while not r.eof(): - header = r.read().strip() - if ' : ' in header: - arg_name, arg_type = header.split(' : ')[:2] - else: - arg_name, arg_type = header, '' - - desc = r.read_to_next_unindented_line() - for n,line in enumerate(desc): - desc[n] = line.strip() - desc = desc #'\n'.join(desc) - - params.append((arg_name,arg_type,desc)) - - return params - - - _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" - r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) - def _parse_see_also(self, content): - """ - func_name : Descriptive text - continued text - another_func_name : Descriptive text - func_name1, func_name2, 
:meth:`func_name`, func_name3 - - """ - items = [] - - def parse_item_name(text): - """Match ':role:`name`' or 'name'""" - m = self._name_rgx.match(text) - if m: - g = m.groups() - if g[1] is None: - return g[3], None - else: - return g[2], g[1] - raise ValueError("%s is not a item name" % text) - - def push_item(name, rest): - if not name: - return - name, role = parse_item_name(name) - items.append((name, list(rest), role)) - del rest[:] - - current_func = None - rest = [] - - for line in content: - if not line.strip(): continue - - m = self._name_rgx.match(line) - if m and line[m.end():].strip().startswith(':'): - push_item(current_func, rest) - current_func, line = line[:m.end()], line[m.end():] - rest = [line.split(':', 1)[1].strip()] - if not rest[0]: - rest = [] - elif not line.startswith(' '): - push_item(current_func, rest) - current_func = None - if ',' in line: - for func in line.split(','): - push_item(func, []) - elif line.strip(): - current_func = line - elif current_func is not None: - rest.append(line.strip()) - push_item(current_func, rest) - return items - - def _parse_index(self, section, content): - """ - .. 
index: default - :refguide: something, else, and more - - """ - def strip_each_in(lst): - return [s.strip() for s in lst] - - out = {} - section = section.split('::') - if len(section) > 1: - out['default'] = strip_each_in(section[1].split(','))[0] - for line in content: - line = line.split(':') - if len(line) > 2: - out[line[1]] = strip_each_in(line[2].split(',')) - return out - - def _parse_summary(self): - """Grab signature (if given) and summary""" - if self._is_at_section(): - return - - summary = self._doc.read_to_next_empty_line() - summary_str = " ".join([s.strip() for s in summary]).strip() - if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): - self['Signature'] = summary_str - if not self._is_at_section(): - self['Summary'] = self._doc.read_to_next_empty_line() - else: - self['Summary'] = summary - - if not self._is_at_section(): - self['Extended Summary'] = self._read_to_next_section() - - def _parse(self): - self._doc.reset() - self._parse_summary() - - for (section,content) in self._read_sections(): - if not section.startswith('..'): - section = ' '.join([s.capitalize() for s in section.split(' ')]) - if section in ('Parameters', 'Attributes', 'Methods', - 'Returns', 'Raises', 'Warns'): - self[section] = self._parse_param_list(content) - elif section.startswith('.. 
index::'): - self['index'] = self._parse_index(section, content) - elif section == 'See Also': - self['See Also'] = self._parse_see_also(content) - else: - self[section] = content - - # string conversion routines - - def _str_header(self, name, symbol='-'): - return [name, len(name)*symbol] - - def _str_indent(self, doc, indent=4): - out = [] - for line in doc: - out += [' '*indent + line] - return out - - def _str_signature(self): - if self['Signature']: - return [self['Signature'].replace('*','\*')] + [''] - else: - return [''] - - def _str_summary(self): - if self['Summary']: - return self['Summary'] + [''] - else: - return [] - - def _str_extended_summary(self): - if self['Extended Summary']: - return self['Extended Summary'] + [''] - else: - return [] - - def _str_param_list(self, name): - out = [] - if self[name]: - out += self._str_header(name) - for param,param_type,desc in self[name]: - out += ['%s : %s' % (param, param_type)] - out += self._str_indent(desc) - out += [''] - return out - - def _str_section(self, name): - out = [] - if self[name]: - out += self._str_header(name) - out += self[name] - out += [''] - return out - - def _str_see_also(self, func_role): - if not self['See Also']: return [] - out = [] - out += self._str_header("See Also") - last_had_desc = True - for func, desc, role in self['See Also']: - if role: - link = ':%s:`%s`' % (role, func) - elif func_role: - link = ':%s:`%s`' % (func_role, func) - else: - link = "`%s`_" % func - if desc or last_had_desc: - out += [''] - out += [link] - else: - out[-1] += ", %s" % link - if desc: - out += self._str_indent([' '.join(desc)]) - last_had_desc = True - else: - last_had_desc = False - out += [''] - return out - - def _str_index(self): - idx = self['index'] - out = [] - out += ['.. 
index:: %s' % idx.get('default','')] - for section, references in idx.iteritems(): - if section == 'default': - continue - out += [' :%s: %s' % (section, ', '.join(references))] - return out - - def __str__(self, func_role=''): - out = [] - out += self._str_signature() - out += self._str_summary() - out += self._str_extended_summary() - for param_list in ('Parameters','Returns','Raises'): - out += self._str_param_list(param_list) - out += self._str_section('Warnings') - out += self._str_see_also(func_role) - for s in ('Notes','References','Examples'): - out += self._str_section(s) - out += self._str_index() - return '\n'.join(out) - - -def indent(str,indent=4): - indent_str = ' '*indent - if str is None: - return indent_str - lines = str.split('\n') - return '\n'.join(indent_str + l for l in lines) - -def header(text, style='-'): - return text + '\n' + style*len(text) + '\n' - - -class FunctionDoc(NumpyDocString): - def __init__(self, func, role='func'): - self._f = func - self._role = role # e.g. 
"func" or "meth" - try: - NumpyDocString.__init__(self,inspect.getdoc(func) or '') - except ValueError, e: - print '*'*78 - print "ERROR: '%s' while parsing `%s`" % (e, self._f) - print '*'*78 - #print "Docstring follows:" - #print doclines - #print '='*78 - - if not self['Signature']: - func, func_name = self.get_func() - try: - # try to read signature - argspec = inspect.getargspec(func) - argspec = inspect.formatargspec(*argspec) - argspec = argspec.replace('*','\*') - signature = '%s%s' % (func_name, argspec) - except TypeError, e: - signature = '%s()' % func_name - self['Signature'] = signature - - def get_func(self): - func_name = getattr(self._f, '__name__', self.__class__.__name__) - if inspect.isclass(self._f): - func = getattr(self._f, '__call__', self._f.__init__) - else: - func = self._f - return func, func_name - - def __str__(self): - out = '' - - func, func_name = self.get_func() - signature = self['Signature'].replace('*', '\*') - - roles = {'func': 'function', - 'meth': 'method'} - - if self._role: - if not roles.has_key(self._role): - print "Warning: invalid role %s" % self._role - out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''), - func_name) - - out += super(FunctionDoc, self).__str__(func_role=self._role) - return out - - -class ClassDoc(NumpyDocString): - def __init__(self,cls,modulename='',func_doc=FunctionDoc): - if not inspect.isclass(cls): - raise ValueError("Initialise using a class. Got %r" % cls) - self._cls = cls - - if modulename and not modulename.endswith('.'): - modulename += '.' 
- self._mod = modulename - self._name = cls.__name__ - self._func_doc = func_doc - - NumpyDocString.__init__(self, pydoc.getdoc(cls)) - - @property - def methods(self): - return [name for name,func in inspect.getmembers(self._cls) - if not name.startswith('_') and callable(func)] - - def __str__(self): - out = '' - out += super(ClassDoc, self).__str__() - out += "\n\n" - - #for m in self.methods: - # print "Parsing `%s`" % m - # out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n' - # out += '.. index::\n single: %s; %s\n\n' % (self._name, m) - - return out - - diff --git a/sphinx/ext/autosummary/docscrape_sphinx.py b/sphinx/ext/autosummary/docscrape_sphinx.py deleted file mode 100644 index d431ecd3..00000000 --- a/sphinx/ext/autosummary/docscrape_sphinx.py +++ /dev/null @@ -1,133 +0,0 @@ -import re, inspect, textwrap, pydoc -from docscrape import NumpyDocString, FunctionDoc, ClassDoc - -class SphinxDocString(NumpyDocString): - # string conversion routines - def _str_header(self, name, symbol='`'): - return ['.. 
rubric:: ' + name, ''] - - def _str_field_list(self, name): - return [':' + name + ':'] - - def _str_indent(self, doc, indent=4): - out = [] - for line in doc: - out += [' '*indent + line] - return out - - def _str_signature(self): - return [''] - if self['Signature']: - return ['``%s``' % self['Signature']] + [''] - else: - return [''] - - def _str_summary(self): - return self['Summary'] + [''] - - def _str_extended_summary(self): - return self['Extended Summary'] + [''] - - def _str_param_list(self, name): - out = [] - if self[name]: - out += self._str_field_list(name) - out += [''] - for param,param_type,desc in self[name]: - out += self._str_indent(['**%s** : %s' % (param.strip(), - param_type)]) - out += [''] - out += self._str_indent(desc,8) - out += [''] - return out - - def _str_section(self, name): - out = [] - if self[name]: - out += self._str_header(name) - out += [''] - content = textwrap.dedent("\n".join(self[name])).split("\n") - out += content - out += [''] - return out - - def _str_see_also(self, func_role): - out = [] - if self['See Also']: - see_also = super(SphinxDocString, self)._str_see_also(func_role) - out = ['.. seealso::', ''] - out += self._str_indent(see_also[2:]) - return out - - def _str_warnings(self): - out = [] - if self['Warnings']: - out = ['.. warning::', ''] - out += self._str_indent(self['Warnings']) - return out - - def _str_index(self): - idx = self['index'] - out = [] - if len(idx) == 0: - return out - - out += ['.. 
index:: %s' % idx.get('default','')] - for section, references in idx.iteritems(): - if section == 'default': - continue - elif section == 'refguide': - out += [' single: %s' % (', '.join(references))] - else: - out += [' %s: %s' % (section, ','.join(references))] - return out - - def _str_references(self): - out = [] - if self['References']: - out += self._str_header('References') - if isinstance(self['References'], str): - self['References'] = [self['References']] - out.extend(self['References']) - out += [''] - return out - - def __str__(self, indent=0, func_role="obj"): - out = [] - out += self._str_signature() - out += self._str_index() + [''] - out += self._str_summary() - out += self._str_extended_summary() - for param_list in ('Parameters', 'Attributes', 'Methods', - 'Returns','Raises'): - out += self._str_param_list(param_list) - out += self._str_warnings() - out += self._str_see_also(func_role) - out += self._str_section('Notes') - out += self._str_references() - out += self._str_section('Examples') - out = self._str_indent(out,indent) - return '\n'.join(out) - -class SphinxFunctionDoc(SphinxDocString, FunctionDoc): - pass - -class SphinxClassDoc(SphinxDocString, ClassDoc): - pass - -def get_doc_object(obj, what=None): - if what is None: - if inspect.isclass(obj): - what = 'class' - elif inspect.ismodule(obj): - what = 'module' - elif callable(obj): - what = 'function' - else: - what = 'object' - if what == 'class': - return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc) - elif what in ('function', 'method'): - return SphinxFunctionDoc(obj, '') - else: - return SphinxDocString(pydoc.getdoc(obj)) -- cgit v1.2.1 From 5f4c4a17c9691abff90d8f1cbf028a9a67967621 Mon Sep 17 00:00:00 2001 From: percious Date: Tue, 4 Nov 2008 13:59:02 -0700 Subject: modified license file to reflect removal of numpy-specific code. 
--- sphinx/ext/autosummary/LICENSE.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/sphinx/ext/autosummary/LICENSE.txt b/sphinx/ext/autosummary/LICENSE.txt index da27afd4..aa1bf333 100644 --- a/sphinx/ext/autosummary/LICENSE.txt +++ b/sphinx/ext/autosummary/LICENSE.txt @@ -1,7 +1,5 @@ The files - __init__.py - - docscrape.py - - doscrape-sphinx.py - generate.py - templates/module.html -- cgit v1.2.1 From b3d241f948b0f0022dd2eb23600ae60555a5ab16 Mon Sep 17 00:00:00 2001 From: percious Date: Tue, 4 Nov 2008 15:01:46 -0700 Subject: fixed bug in autosummary. --- sphinx/ext/autosummary/__init__.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py index be11309e..f26c4676 100644 --- a/sphinx/ext/autosummary/__init__.py +++ b/sphinx/ext/autosummary/__init__.py @@ -214,11 +214,9 @@ def get_autosummary(names, state, no_signatures=False): real_names[name] = real_name + title = "" qualifier = 'obj' - if inspect.ismodule(obj): - qualifier = 'mod' col1 = ":"+qualifier+":`%s <%s>`" % (name, real_name) - col2 = title append_row(col1, col2) -- cgit v1.2.1 From 11b7c42eac7391e60cfe6d523a988c5410b431fd Mon Sep 17 00:00:00 2001 From: Sebastian Wiesner Date: Thu, 20 Nov 2008 20:05:45 +0100 Subject: Switched templating to jinja2 --- babel.cfg | 6 +-- setup.py | 4 +- sphinx/_jinja2.py | 90 ++++++++++++++++++++++++++++++++++++++++++ sphinx/builder.py | 2 +- sphinx/templates/layout.html | 6 +-- sphinx/templates/modindex.html | 2 +- 6 files changed, 99 insertions(+), 11 deletions(-) create mode 100644 sphinx/_jinja2.py diff --git a/babel.cfg b/babel.cfg index 5f5188b1..e53a462d 100644 --- a/babel.cfg +++ b/babel.cfg @@ -1,6 +1,4 @@ -[extractors] -jinja = sphinx._jinja.babel_extract [python: **.py] -[jinja: **/templates/**.html] -[jinja: **/templates/**.xml] +[jinja2: **/templates/**.html] +[jinja2: **/templates/**.xml] [javascript: **.js] diff --git a/setup.py b/setup.py index 
abe82198..ba80ea23 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +0;115;0c# -*- coding: utf-8 -*- import ez_setup ez_setup.use_setuptools() @@ -36,7 +36,7 @@ are already present, work fine and can be seen "in action" in the Python docs: and inclusion of appropriately formatted docstrings. ''' -requires = ['Pygments>=0.8', 'Jinja>=1.1', 'docutils>=0.4'] +requires = ['Pygments>=0.8', 'Jinja2>=2.0', 'docutils>=0.4'] if sys.version_info < (2, 4): print 'ERROR: Sphinx requires at least Python 2.4 to run.' diff --git a/sphinx/_jinja2.py b/sphinx/_jinja2.py new file mode 100644 index 00000000..a6f23e28 --- /dev/null +++ b/sphinx/_jinja2.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- + +""" + sphinx._jinja2 + ============== + + Glue code for jinja2. + + :author: Sebastian Wiesner + :contact: basti.wiesner@gmx.net + :copyright: 2008 by Sebastian Wiesner + :license: MIT +""" + +import codecs +from os import path + +import jinja2 + +from sphinx.util import mtimes_of_files +from sphinx.application import TemplateBridge + + +class SphinxLoader(jinja2.BaseLoader): + """ + A jinja2 reimplementation of `sphinx._jinja.SphinxFileSystemLoader`. + """ + + def __init__(self, basepath, extpaths, encoding='utf-8'): + """ + Creates a new loader for sphinx. + + ``extpaths`` is a list of directories, which provide additional + templates to sphinx. + + ``encoding`` is used to decode the templates into unicode strings. + Defaults to utf-8. + + If ``basepath`` is set, this path is used to load sphinx core + templates. If False, these templates are loaded from the sphinx + package. 
+ """ + self.core_loader = jinja2.FileSystemLoader(basepath) + self.all_loaders = jinja2.ChoiceLoader( + [jinja2.FileSystemLoader(extpath) for extpath in extpaths] + + [self.core_loader]) + + def get_source(self, environment, template): + # exclamation mark forces loading from core + if template.startswith('!'): + return self.core_loader.get_source(environment, template[1:]) + # check if the template is probably an absolute path + fs_path = template.replace('/', path.sep) + if path.isabs(fs_path): + if not path.exists(fs_path): + raise jinja2.TemplateNotFound(template) + f = codecs.open(fs_path, 'r', self.encoding) + try: + mtime = path.getmtime(path) + return (f.read(), fs_path, + lambda: mtime == path.getmtime(path)) + finally: + f.close() + # finally try to load from custom templates + return self.all_loaders.get_source(environment, template) + + +class BuiltinTemplates(TemplateBridge): + """ + Interfaces the rendering environment of jinja2 for use in sphinx. + """ + + def init(self, builder): + base_templates_path = path.join(path.dirname(__file__), 'templates') + ext_templates_path = [path.join(builder.confdir, dir) + for dir in builder.config.templates_path] + self.templates_path = [base_templates_path] + ext_templates_path + loader = SphinxLoader(base_templates_path, ext_templates_path) + use_i18n = builder.translator is not None + extensions = use_i18n and ['jinja2.ext.i18n'] or [] + self.environment = jinja2.Environment(loader=loader, + extensions=extensions) + if use_i18n: + self.environment.install_gettext_translations(builder.translator) + + def render(self, template, context): + return self.environment.get_template(template).render(context) + + def newest_template_mtime(self): + return max(mtimes_of_files(self.templates_path, '.html')) diff --git a/sphinx/builder.py b/sphinx/builder.py index 159fe803..24c42d2b 100644 --- a/sphinx/builder.py +++ b/sphinx/builder.py @@ -98,7 +98,7 @@ class Builder(object): self.templates = self.app.import_object( 
self.config.template_bridge, 'template_bridge setting')() else: - from sphinx._jinja import BuiltinTemplates + from sphinx._jinja2 import BuiltinTemplates self.templates = BuiltinTemplates() self.templates.init(self) diff --git a/sphinx/templates/layout.html b/sphinx/templates/layout.html index 5ad4f8dd..e6374b6d 100644 --- a/sphinx/templates/layout.html +++ b/sphinx/templates/layout.html @@ -4,7 +4,7 @@ {%- endblock %} {%- set reldelim1 = reldelim1 is not defined and ' »' or reldelim1 %} {%- set reldelim2 = reldelim2 is not defined and ' |' or reldelim2 %} -{%- macro relbar %} +{%- macro relbar() %} {%- endmacro %} -{%- macro sidebar %} +{%- macro sidebar() %} {%- if builder != 'htmlhelp' %}
@@ -64,7 +64,7 @@ {%- endif %} {%- if customsidebar %} - {{ rendertemplate(customsidebar) }} + {% include customsidebar %} {%- endif %} {%- block sidebarsearch %} {%- if pagename != "search" %} diff --git a/sphinx/templates/modindex.html b/sphinx/templates/modindex.html index d6b505da..a2d2bb9a 100644 --- a/sphinx/templates/modindex.html +++ b/sphinx/templates/modindex.html @@ -52,7 +52,7 @@ {% if fname %}{% endif -%} {{ modname|e }} {%- if fname %}{% endif %} - {%- if pform[0] %} ({{ pform|join(', ') }}){% endif -%} + {%- if pform and pform[0] %} ({{ pform|join(', ') }}){% endif -%} {% if dep %}{{ _('Deprecated')}}:{% endif %} {{ synops|e }} {%- endif -%} -- cgit v1.2.1 From 71355f44f63d6fba8767d08d6d3ba0c15267006d Mon Sep 17 00:00:00 2001 From: Sebastian Wiesner Date: Thu, 20 Nov 2008 20:06:36 +0100 Subject: Fixed encoding issue in pngmath hashing --- sphinx/ext/pngmath.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/ext/pngmath.py b/sphinx/ext/pngmath.py index 77d08eef..aa0d0c2e 100644 --- a/sphinx/ext/pngmath.py +++ b/sphinx/ext/pngmath.py @@ -75,7 +75,7 @@ def render_math(self, math): """ use_preview = self.builder.config.pngmath_use_preview - shasum = "%s.png" % sha(math).hexdigest() + shasum = "%s.png" % sha(math.encode('utf-8')).hexdigest() relfn = posixpath.join(self.builder.imgpath, 'math', shasum) outfn = path.join(self.builder.outdir, '_images', 'math', shasum) if path.isfile(outfn): -- cgit v1.2.1 From 72e0a250c7d891171e2c648ee585e6ec8b453aa7 Mon Sep 17 00:00:00 2001 From: Sebastian Wiesner Date: Thu, 20 Nov 2008 20:12:41 +0100 Subject: Fixed syntax error in setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index ba80ea23..c3fbcf2d 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -0;115;0c# -*- coding: utf-8 -*- +# -*- coding: utf-8 -*- import ez_setup ez_setup.use_setuptools() -- cgit v1.2.1 From 35b9921582ff30eb1b7c343cfcf45cc41208a045 Mon Sep 17 00:00:00 2001 
From: Vsevolod Solovyov Date: Mon, 24 Nov 2008 16:55:06 +0200 Subject: Fixes #32. sphinx-quickstart adapted for windows --- sphinx/quickstart.py | 130 +++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 125 insertions(+), 5 deletions(-) diff --git a/sphinx/quickstart.py b/sphinx/quickstart.py index 03024535..998d93fb 100644 --- a/sphinx/quickstart.py +++ b/sphinx/quickstart.py @@ -318,6 +318,109 @@ linkcheck: \t "or in %(rbuilddir)s/linkcheck/output.txt." ''' +BATCHFILE = '''\ +@ECHO OFF + +REM Command file for Sphinx documentation + +set SPHINXBUILD=sphinx-build +set ALLSPHINXOPTS=-d %(rbuilddir)s/doctrees %%SPHINXOPTS%% %(rsrcdir)s +if NOT "%%PAPER%%" == "" ( +\tset ALLSPHINXOPTS=-D latex_paper_size=%%PAPER%% %%ALLSPHINXOPTS%% +) + +if "%%1" == "" goto help + +if "%%1" == "help" ( +\t:help +\techo.Please use `make-docs ^` where ^ is one of +\techo. html to make standalone HTML files +\techo. pickle to make pickle files +\techo. json to make JSON files +\techo. htmlhelp to make HTML files and a HTML help project +\techo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter +\techo. changes to make an overview over all changed/added/deprecated items +\techo. linkcheck to check all external links for integrity +\tgoto end +) + +if "%%1" == "clean" ( +\tfor /d %%%%i in (%(rbuilddir)s\*) do rmdir /q /s %%%%i +\tdel /q /s %(rbuilddir)s\* +\tgoto end +) + +if "%%1" == "html" ( +\tcall :mkdir %(rbuilddir)s\html %(rbuilddir)s\doctrees +\t%%SPHINXBUILD%% -b html %%ALLSPHINXOPTS%% %(rbuilddir)s/html +\techo. +\techo.Build finished. The HTML pages are in %(rbuilddir)s/html. +\tgoto end +) + +if "%%1" == "web" goto pickle + +if "%%1" == "pickle" ( +\t:pickle +\tcall :mkdir %(rbuilddir)s\pickle %(rbuilddir)s\doctrees +\t%%SPHINXBUILD%% -b pickle %%ALLSPHINXOPTS%% %(rbuilddir)s/pickle +\techo. +\techo.Build finished; now you can process the pickle files. 
+\tgoto end +) + +if "%%1" == "json" ( +\tcall :mkdir %(rbuilddir)s\json %(rbuilddir)s\doctrees +\t%%SPHINXBUILD%% -b json %%ALLSPHINXOPTS%% %(rbuilddir)s/json +\techo. +\techo.Build finished; now you can process the JSON files. +\tgoto end +) + +if "%%1" == "htmlhelp" ( +\tcall :mkdir %(rbuilddir)s\htmlhelp %(rbuilddir)s\doctrees +\t%%SPHINXBUILD%% -b htmlhelp %%ALLSPHINXOPTS%% %(rbuilddir)s/htmlhelp +\techo. +\techo.Build finished; now you can run HTML Help Workshop with the ^ +.hhp project file in %(rbuilddir)s/htmlhelp. +\tgoto end +) + +if "%%1" == "latex" ( +\tcall :mkdir %(rbuilddir)s\latex %(rbuilddir)s\doctrees +\t%%SPHINXBUILD%% -b latex %%ALLSPHINXOPTS%% %(rbuilddir)s/latex +\techo. +\techo.Build finished; the LaTeX files are in %(rbuilddir)s/latex. +\tgoto end +) + +if "%%1" == "changes" ( +\tcall :mkdir %(rbuilddir)s\changes %(rbuilddir)s\doctrees +\t%%SPHINXBUILD%% -b changes %%ALLSPHINXOPTS%% %(rbuilddir)s/changes +\techo. +\techo.The overview file is in %(rbuilddir)s/changes. +\tgoto end +) + +if "%%1" == "linkcheck" ( +\tcall :mkdir %(rbuilddir)s\linkcheck %(rbuilddir)s\doctrees +\t%%SPHINXBUILD%% -b linkcheck %%ALLSPHINXOPTS%% %(rbuilddir)s/linkcheck +\techo. +\techo.Link check complete; look for any errors in the above output ^ +or in %(rbuilddir)s/linkcheck/output.txt. +\tgoto end +) + +goto end + +:mkdir %%1 %%2 +\tIF NOT EXIST %%1 mkdir %%1 +\tIF NOT EXIST %%2 mkdir %%2 +\texit /b + +:end +''' + def mkdir_p(dir): if path.isdir(dir): @@ -410,12 +513,18 @@ Either, you use a directory ".build" within the root path, or you separate "source" and "build" directories within the root path.''' do_prompt(d, 'sep', 'Separate source and build directories (y/N)', 'n', boolean) - print ''' + if os.name == 'nt': + print ''' +Inside the root directory, two more directories will be created; "_templates" +for custom HTML templates and "_static" for custom stylesheets and other +static files. 
You can enter another prefix (such as ".") to replace the underscore.''' + do_prompt(d, 'dot', 'Name prefix for templates and static dir', '_', ok) + else: + print ''' Inside the root directory, two more directories will be created; ".templates" for custom HTML templates and ".static" for custom stylesheets and other -static files. Since the leading dot may be inconvenient for Windows users, -you can enter another prefix (such as "_") to replace the dot.''' - do_prompt(d, 'dot', 'Name prefix for templates and static dir', '.', ok) +static files. You can enter another prefix (such as "_") to replace the dot.''' + do_prompt(d, 'dot', 'Name prefix for templates and static dir', '.', ok) print ''' The project name will occur in several places in the built documentation.''' @@ -454,6 +563,8 @@ only have to run e.g. `make html' instead of invoking sphinx-build directly.''' do_prompt(d, 'makefile', 'Create Makefile? (Y/n)', os.name == 'posix' and 'y' or 'n', boolean) + do_prompt(d, 'batchfile', 'Create Windows command file? (Y/n)', + os.name == 'nt' and 'y' or 'n', boolean) d['project_fn'] = make_filename(d['project']) d['now'] = time.asctime() @@ -505,12 +616,21 @@ directly.''' f.write((MAKEFILE % d).encode('utf-8')) f.close() + create_batch = d['batchfile'].upper() in ('Y', 'YES') + if create_batch: + d['rsrcdir'] = separate and 'source' or '.' + d['rbuilddir'] = separate and 'build' or d['dot'] + 'build' + f = open(path.join(d['path'], 'make.bat'), 'w') + f.write((BATCHFILE % d).encode('utf-8')) + f.close() + + print print bold('Finished: An initial directory structure has been created.') print ''' You should now populate your master file %s and create other documentation source files. 
Use the sphinx-build script to build the docs, like so: -''' % masterfile + (create_makefile and ''' +''' % masterfile + ((create_makefile or create_batch) and ''' make ''' or ''' sphinx-build -b %s %s -- cgit v1.2.1 From 61f7e12faeca94a847aa32084bc4f1933d32c89a Mon Sep 17 00:00:00 2001 From: Sebastian Wiesner Date: Thu, 27 Nov 2008 00:31:43 +0100 Subject: Fixed markup escaping issue --- sphinx/templates/layout.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/templates/layout.html b/sphinx/templates/layout.html index eb691455..f2314f41 100644 --- a/sphinx/templates/layout.html +++ b/sphinx/templates/layout.html @@ -89,7 +89,7 @@ {{ metatags }} {%- if builder != 'htmlhelp' %} - {%- set titlesuffix = " — " + docstitle|e %} + {%- set titlesuffix = " — "|safe + docstitle|e %} {%- endif %} {{ title|striptags }}{{ titlesuffix }} {%- if builder == 'web' %} -- cgit v1.2.1 From 10c994f344a0ecee8b43e94d0190b95d8d1d4aea Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sat, 29 Nov 2008 19:56:58 +0100 Subject: Move builders and writers into new packages. 
--- sphinx/__init__.py | 3 + sphinx/_jinja.py | 3 +- sphinx/application.py | 24 +- sphinx/builder.py | 1274 +----------------------------------------- sphinx/builders/__init__.py | 328 +++++++++++ sphinx/builders/changes.py | 137 +++++ sphinx/builders/html.py | 607 ++++++++++++++++++++ sphinx/builders/htmlhelp.py | 245 ++++++++ sphinx/builders/latex.py | 185 ++++++ sphinx/builders/linkcheck.py | 130 +++++ sphinx/builders/text.py | 68 +++ sphinx/ext/intersphinx.py | 2 +- sphinx/htmlhelp.py | 220 -------- sphinx/htmlwriter.py | 457 --------------- sphinx/latexwriter.py | 1185 --------------------------------------- sphinx/linkcheck.py | 130 ----- sphinx/textwriter.py | 679 ---------------------- sphinx/writers/__init__.py | 10 + sphinx/writers/html.py | 457 +++++++++++++++ sphinx/writers/latex.py | 1185 +++++++++++++++++++++++++++++++++++++++ sphinx/writers/text.py | 679 ++++++++++++++++++++++ tests/test_build.py | 2 +- tests/test_markup.py | 4 +- 23 files changed, 4070 insertions(+), 3944 deletions(-) create mode 100644 sphinx/builders/__init__.py create mode 100644 sphinx/builders/changes.py create mode 100644 sphinx/builders/html.py create mode 100644 sphinx/builders/htmlhelp.py create mode 100644 sphinx/builders/latex.py create mode 100644 sphinx/builders/linkcheck.py create mode 100644 sphinx/builders/text.py delete mode 100644 sphinx/htmlhelp.py delete mode 100644 sphinx/htmlwriter.py delete mode 100644 sphinx/latexwriter.py delete mode 100644 sphinx/linkcheck.py delete mode 100644 sphinx/textwriter.py create mode 100644 sphinx/writers/__init__.py create mode 100644 sphinx/writers/html.py create mode 100644 sphinx/writers/latex.py create mode 100644 sphinx/writers/text.py diff --git a/sphinx/__init__.py b/sphinx/__init__.py index aa4398e0..2df41707 100644 --- a/sphinx/__init__.py +++ b/sphinx/__init__.py @@ -10,11 +10,14 @@ """ import sys +from os import path __revision__ = '$Revision$' __version__ = '0.5' __released__ = '0.5' +package_dir = 
path.abspath(path.dirname(__file__)) + def main(argv=sys.argv): if sys.version_info[:3] < (2, 4, 0): diff --git a/sphinx/_jinja.py b/sphinx/_jinja.py index d6e98b21..654e0c52 100644 --- a/sphinx/_jinja.py +++ b/sphinx/_jinja.py @@ -12,6 +12,7 @@ import codecs from os import path +from sphinx import package_dir from sphinx.util import mtimes_of_files from sphinx.application import TemplateBridge @@ -88,7 +89,7 @@ class TranslatorEnvironment(Environment): class BuiltinTemplates(TemplateBridge): def init(self, builder): self.templates = {} - base_templates_path = path.join(path.dirname(__file__), 'templates') + base_templates_path = path.join(package_dir, 'templates') ext_templates_path = [path.join(builder.confdir, dir) for dir in builder.config.templates_path] self.templates_path = [base_templates_path] + ext_templates_path diff --git a/sphinx/application.py b/sphinx/application.py index 888d7567..6c644bbe 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -22,7 +22,7 @@ from docutils.parsers.rst import directives, roles import sphinx from sphinx.roles import xfileref_role, innernodetypes from sphinx.config import Config -from sphinx.builder import builtin_builders, StandaloneHTMLBuilder +from sphinx.builders import BUILTIN_BUILDERS from sphinx.directives import desc_directive, target_directive, additional_xref_types from sphinx.environment import SphinxStandaloneReader from sphinx.util.console import bold @@ -77,7 +77,7 @@ class Sphinx(object): confoverrides, status, warning=sys.stderr, freshenv=False): self.next_listener_id = 0 self._listeners = {} - self.builderclasses = builtin_builders.copy() + self.builderclasses = BUILTIN_BUILDERS.copy() self.builder = None self.srcdir = srcdir @@ -125,6 +125,11 @@ class Sphinx(object): buildername))) builderclass = self.builderclasses[buildername] + if isinstance(builderclass, tuple): + # builtin builder + mod, cls = builderclass + builderclass = getattr( + __import__('sphinx.builders.' 
+ mod, None, None, [cls]), cls) self.builder = builderclass(self, freshenv=freshenv) self.emit('builder-inited') @@ -220,8 +225,12 @@ class Sphinx(object): if not hasattr(builder, 'name'): raise ExtensionError('Builder class %s has no "name" attribute' % builder) if builder.name in self.builderclasses: - raise ExtensionError('Builder %r already exists (in module %s)' % ( - builder.name, self.builderclasses[builder.name].__module__)) + if isinstance(self.builderclasses[builder.name], tuple): + raise ExtensionError('Builder %r is a builtin builder' % + builder.name) + else: + raise ExtensionError('Builder %r already exists (in module %s)' % ( + builder.name, self.builderclasses[builder.name].__module__)) self.builderclasses[builder.name] = builder def add_config_value(self, name, default, rebuild_env): @@ -243,11 +252,11 @@ class Sphinx(object): raise ExtensionError('Value for key %r must be a (visit, depart) ' 'function tuple' % key) if key == 'html': - from sphinx.htmlwriter import HTMLTranslator as translator + from sphinx.writers.html import HTMLTranslator as translator elif key == 'latex': - from sphinx.latexwriter import LaTeXTranslator as translator + from sphinx.writers.latex import LaTeXTranslator as translator elif key == 'text': - from sphinx.textwriter import TextTranslator as translator + from sphinx.writers.text import TextTranslator as translator else: # ignore invalid keys for compatibility continue @@ -284,6 +293,7 @@ class Sphinx(object): SphinxStandaloneReader.transforms.append(transform) def add_javascript(self, filename): + from sphinx.builders.html import StandaloneHTMLBuilder StandaloneHTMLBuilder.script_files.append( posixpath.join('_static', filename)) diff --git a/sphinx/builder.py b/sphinx/builder.py index 159fe803..2a19b751 100644 --- a/sphinx/builder.py +++ b/sphinx/builder.py @@ -3,1268 +3,20 @@ sphinx.builder ~~~~~~~~~~~~~~ - Builder classes for different output formats. + .. 
warning:: - :copyright: 2007-2008 by Georg Brandl, Sebastian Wiesner, Horst Gutmann. + This module is only kept for API compatibility; new code should + import these classes directly from the sphinx.builders package. + + :copyright: 2008 by Georg Brandl. :license: BSD. """ -import os -import time -import codecs -import shutil -import gettext -import cPickle as pickle -from os import path -from cgi import escape - -from docutils import nodes -from docutils.io import StringOutput, FileOutput, DocTreeInput -from docutils.core import publish_parts -from docutils.utils import new_document -from docutils.frontend import OptionParser -from docutils.readers.doctree import Reader as DoctreeReader - -from sphinx import addnodes, locale, __version__ -from sphinx.util import ensuredir, relative_uri, SEP, os_path, texescape, ustrftime -from sphinx.htmlhelp import build_hhx -from sphinx.htmlwriter import HTMLWriter, HTMLTranslator, SmartyPantsHTMLTranslator -from sphinx.textwriter import TextWriter -from sphinx.latexwriter import LaTeXWriter -from sphinx.environment import BuildEnvironment, NoUri -from sphinx.highlighting import PygmentsBridge -from sphinx.util.console import bold, purple, darkgreen -from sphinx.search import js_index - -try: - import json -except ImportError: - try: - import simplejson as json - except ImportError: - json = None - -# side effect: registers roles and directives -from sphinx import roles -from sphinx import directives - -ENV_PICKLE_FILENAME = 'environment.pickle' -LAST_BUILD_FILENAME = 'last_build' -INVENTORY_FILENAME = 'objects.inv' - - -class Builder(object): - """ - Builds target formats from the reST sources. 
- """ - - # builder's name, for the -b command line options - name = '' - - def __init__(self, app, env=None, freshenv=False): - self.srcdir = app.srcdir - self.confdir = app.confdir - self.outdir = app.outdir - self.doctreedir = app.doctreedir - if not path.isdir(self.doctreedir): - os.makedirs(self.doctreedir) - - self.app = app - self.warn = app.warn - self.info = app.info - self.config = app.config - - self.load_i18n() - - # images that need to be copied over (source -> dest) - self.images = {} - - # if None, this is set in load_env() - self.env = env - self.freshenv = freshenv - - self.init() - self.load_env() - - # helper methods - - def init(self): - """Load necessary templates and perform initialization.""" - raise NotImplementedError - - def init_templates(self): - # Call this from init() if you need templates. - if self.config.template_bridge: - self.templates = self.app.import_object( - self.config.template_bridge, 'template_bridge setting')() - else: - from sphinx._jinja import BuiltinTemplates - self.templates = BuiltinTemplates() - self.templates.init(self) - - def get_target_uri(self, docname, typ=None): - """ - Return the target URI for a document name (typ can be used to qualify - the link characteristic for individual builders). - """ - raise NotImplementedError - - def get_relative_uri(self, from_, to, typ=None): - """ - Return a relative URI between two source filenames. May raise environment.NoUri - if there's no way to return a sensible URI. - """ - return relative_uri(self.get_target_uri(from_), - self.get_target_uri(to, typ)) - - def get_outdated_docs(self): - """ - Return an iterable of output files that are outdated, or a string describing - what an update build will build. 
- """ - raise NotImplementedError - - def status_iterator(self, iterable, summary, colorfunc=darkgreen): - l = -1 - for item in iterable: - if l == -1: - self.info(bold(summary), nonl=1) - l = 0 - self.info(colorfunc(item) + ' ', nonl=1) - yield item - if l == 0: - self.info() - - supported_image_types = [] - - def post_process_images(self, doctree): - """ - Pick the best candidate for all image URIs. - """ - for node in doctree.traverse(nodes.image): - if '?' in node['candidates']: - # don't rewrite nonlocal image URIs - continue - if '*' not in node['candidates']: - for imgtype in self.supported_image_types: - candidate = node['candidates'].get(imgtype, None) - if candidate: - break - else: - self.warn('%s:%s: no matching candidate for image URI %r' % - (node.source, getattr(node, 'lineno', ''), node['uri'])) - continue - node['uri'] = candidate - else: - candidate = node['uri'] - if candidate not in self.env.images: - # non-existing URI; let it alone - continue - self.images[candidate] = self.env.images[candidate][1] - - # build methods - - def load_i18n(self): - """ - Load translated strings from the configured localedirs if - enabled in the configuration. - """ - self.translator = None - if self.config.language is not None: - self.info(bold('loading translations [%s]... 
' % self.config.language), - nonl=True) - locale_dirs = [path.join(path.dirname(__file__), 'locale')] + \ - [path.join(self.srcdir, x) for x in self.config.locale_dirs] - for dir_ in locale_dirs: - try: - trans = gettext.translation('sphinx', localedir=dir_, - languages=[self.config.language]) - if self.translator is None: - self.translator = trans - else: - self.translator._catalog.update(trans.catalog) - except Exception: - # Language couldn't be found in the specified path - pass - if self.translator is not None: - self.info('done') - else: - self.info('locale not available') - if self.translator is None: - self.translator = gettext.NullTranslations() - self.translator.install(unicode=True) - locale.init() # translate common labels - - def load_env(self): - """Set up the build environment.""" - if self.env: - return - if not self.freshenv: - try: - self.info(bold('loading pickled environment... '), nonl=True) - self.env = BuildEnvironment.frompickle(self.config, - path.join(self.doctreedir, ENV_PICKLE_FILENAME)) - self.info('done') - except Exception, err: - if type(err) is IOError and err.errno == 2: - self.info('not found') - else: - self.info('failed: %s' % err) - self.env = BuildEnvironment(self.srcdir, self.doctreedir, self.config) - self.env.find_files(self.config) - else: - self.env = BuildEnvironment(self.srcdir, self.doctreedir, self.config) - self.env.find_files(self.config) - self.env.set_warnfunc(self.warn) - - def build_all(self): - """Build all source files.""" - self.build(None, summary='all source files', method='all') - - def build_specific(self, filenames): - """Only rebuild as much as needed for changes in the source_filenames.""" - # bring the filenames to the canonical format, that is, - # relative to the source directory and without source_suffix. 
- dirlen = len(self.srcdir) + 1 - to_write = [] - suffix = self.config.source_suffix - for filename in filenames: - filename = path.abspath(filename)[dirlen:] - if filename.endswith(suffix): - filename = filename[:-len(suffix)] - filename = filename.replace(os.path.sep, SEP) - to_write.append(filename) - self.build(to_write, method='specific', - summary='%d source files given on command ' - 'line' % len(to_write)) - - def build_update(self): - """Only rebuild files changed or added since last build.""" - to_build = self.get_outdated_docs() - if isinstance(to_build, str): - self.build(['__all__'], to_build) - else: - to_build = list(to_build) - self.build(to_build, - summary='targets for %d source files that are ' - 'out of date' % len(to_build)) - - def build(self, docnames, summary=None, method='update'): - if summary: - self.info(bold('building [%s]: ' % self.name), nonl=1) - self.info(summary) - - updated_docnames = [] - # while reading, collect all warnings from docutils - warnings = [] - self.env.set_warnfunc(warnings.append) - self.info(bold('updating environment: '), nonl=1) - iterator = self.env.update(self.config, self.srcdir, self.doctreedir, self.app) - # the first item in the iterator is a summary message - self.info(iterator.next()) - for docname in self.status_iterator(iterator, 'reading sources... ', purple): - updated_docnames.append(docname) - # nothing further to do, the environment has already done the reading - for warning in warnings: - if warning.strip(): - self.warn(warning) - self.env.set_warnfunc(self.warn) - - if updated_docnames: - # save the environment - self.info(bold('pickling environment... '), nonl=True) - self.env.topickle(path.join(self.doctreedir, ENV_PICKLE_FILENAME)) - self.info('done') - - # global actions - self.info(bold('checking consistency... 
'), nonl=True) - self.env.check_consistency() - self.info('done') - else: - if method == 'update' and not docnames: - self.info(bold('no targets are out of date.')) - return - - # another indirection to support methods which don't build files - # individually - self.write(docnames, updated_docnames, method) - - # finish (write static files etc.) - self.finish() - if self.app._warncount: - self.info(bold('build succeeded, %s warning%s.' % - (self.app._warncount, - self.app._warncount != 1 and 's' or ''))) - else: - self.info(bold('build succeeded.')) - - def write(self, build_docnames, updated_docnames, method='update'): - if build_docnames is None or build_docnames == ['__all__']: - # build_all - build_docnames = self.env.found_docs - if method == 'update': - # build updated ones as well - docnames = set(build_docnames) | set(updated_docnames) - else: - docnames = set(build_docnames) - - # add all toctree-containing files that may have changed - for docname in list(docnames): - for tocdocname in self.env.files_to_rebuild.get(docname, []): - docnames.add(tocdocname) - docnames.add(self.config.master_doc) - - self.info(bold('preparing documents... '), nonl=True) - self.prepare_writing(docnames) - self.info('done') - - # write target files - warnings = [] - self.env.set_warnfunc(warnings.append) - for docname in self.status_iterator(sorted(docnames), - 'writing output... ', darkgreen): - doctree = self.env.get_and_resolve_doctree(docname, self) - self.write_doc(docname, doctree) - for warning in warnings: - if warning.strip(): - self.warn(warning) - self.env.set_warnfunc(self.warn) - - def prepare_writing(self, docnames): - raise NotImplementedError - - def write_doc(self, docname, doctree): - raise NotImplementedError - - def finish(self): - raise NotImplementedError - - -class StandaloneHTMLBuilder(Builder): - """ - Builds standalone HTML docs. 
- """ - name = 'html' - copysource = True - out_suffix = '.html' - indexer_format = js_index - supported_image_types = ['image/svg+xml', 'image/png', 'image/gif', - 'image/jpeg'] - searchindex_filename = 'searchindex.js' - add_header_links = True - add_definition_links = True - - # This is a class attribute because it is mutated by Sphinx.add_javascript. - script_files = ['_static/jquery.js', '_static/doctools.js'] - - def init(self): - """Load templates.""" - self.init_templates() - self.init_translator_class() - if self.config.html_file_suffix: - self.out_suffix = self.config.html_file_suffix - - if self.config.language is not None: - jsfile = path.join(path.dirname(__file__), 'locale', self.config.language, - 'LC_MESSAGES', 'sphinx.js') - if path.isfile(jsfile): - self.script_files.append('_static/translations.js') - - def init_translator_class(self): - if self.config.html_translator_class: - self.translator_class = self.app.import_object( - self.config.html_translator_class, 'html_translator_class setting') - elif self.config.html_use_smartypants: - self.translator_class = SmartyPantsHTMLTranslator - else: - self.translator_class = HTMLTranslator - - def render_partial(self, node): - """Utility: Render a lone doctree node.""" - doc = new_document('') - doc.append(node) - return publish_parts( - doc, - source_class=DocTreeInput, - reader=DoctreeReader(), - writer=HTMLWriter(self), - settings_overrides={'output_encoding': 'unicode'} - ) - - def prepare_writing(self, docnames): - from sphinx.search import IndexBuilder - - self.indexer = IndexBuilder(self.env) - self.load_indexer(docnames) - self.docwriter = HTMLWriter(self) - self.docsettings = OptionParser( - defaults=self.env.settings, - components=(self.docwriter,)).get_default_values() - - # format the "last updated on" string, only once is enough since it - # typically doesn't include the time of day - lufmt = self.config.html_last_updated_fmt - if lufmt is not None: - self.last_updated = ustrftime(lufmt or 
_('%b %d, %Y')) - else: - self.last_updated = None - - logo = self.config.html_logo and \ - path.basename(self.config.html_logo) or '' - - favicon = self.config.html_favicon and \ - path.basename(self.config.html_favicon) or '' - if favicon and os.path.splitext(favicon)[1] != '.ico': - self.warn('html_favicon is not an .ico file') - - if not isinstance(self.config.html_use_opensearch, basestring): - self.warn('html_use_opensearch config value must now be a string') - - self.relations = self.env.collect_relations() - - rellinks = [] - if self.config.html_use_index: - rellinks.append(('genindex', _('General Index'), 'I', _('index'))) - if self.config.html_use_modindex and self.env.modules: - rellinks.append(('modindex', _('Global Module Index'), 'M', _('modules'))) - - self.globalcontext = dict( - project = self.config.project, - release = self.config.release, - version = self.config.version, - last_updated = self.last_updated, - copyright = self.config.copyright, - master_doc = self.config.master_doc, - style = self.config.html_style, - use_opensearch = self.config.html_use_opensearch, - docstitle = self.config.html_title, - shorttitle = self.config.html_short_title, - show_sphinx = self.config.html_show_sphinx, - file_suffix = self.out_suffix, - script_files = self.script_files, - sphinx_version = __version__, - rellinks = rellinks, - builder = self.name, - parents = [], - logo = logo, - favicon = favicon, - ) - self.globalcontext.update(self.config.html_context) - - def get_doc_context(self, docname, body, metatags): - """Collect items for the template context of a page.""" - # find out relations - prev = next = None - parents = [] - rellinks = self.globalcontext['rellinks'][:] - related = self.relations.get(docname) - titles = self.env.titles - if related and related[2]: - try: - next = {'link': self.get_relative_uri(docname, related[2]), - 'title': self.render_partial(titles[related[2]])['title']} - rellinks.append((related[2], next['title'], 'N', _('next'))) - 
except KeyError: - next = None - if related and related[1]: - try: - prev = {'link': self.get_relative_uri(docname, related[1]), - 'title': self.render_partial(titles[related[1]])['title']} - rellinks.append((related[1], prev['title'], 'P', _('previous'))) - except KeyError: - # the relation is (somehow) not in the TOC tree, handle that gracefully - prev = None - while related and related[0]: - try: - parents.append( - {'link': self.get_relative_uri(docname, related[0]), - 'title': self.render_partial(titles[related[0]])['title']}) - except KeyError: - pass - related = self.relations.get(related[0]) - if parents: - parents.pop() # remove link to the master file; we have a generic - # "back to index" link already - parents.reverse() - - # title rendered as HTML - title = titles.get(docname) - title = title and self.render_partial(title)['title'] or '' - # the name for the copied source - sourcename = self.config.html_copy_source and docname + '.txt' or '' - - # metadata for the document - meta = self.env.metadata.get(docname) - - return dict( - parents = parents, - prev = prev, - next = next, - title = title, - meta = meta, - body = body, - metatags = metatags, - rellinks = rellinks, - sourcename = sourcename, - toc = self.render_partial(self.env.get_toc_for(docname))['fragment'], - # only display a TOC if there's more than one item to show - display_toc = (self.env.toc_num_entries[docname] > 1), - ) - - def write_doc(self, docname, doctree): - self.post_process_images(doctree) - destination = StringOutput(encoding='utf-8') - doctree.settings = self.docsettings - - self.imgpath = relative_uri(self.get_target_uri(docname), '_images') - self.docwriter.write(doctree, destination) - self.docwriter.assemble_parts() - body = self.docwriter.parts['fragment'] - metatags = self.docwriter.clean_meta - - ctx = self.get_doc_context(docname, body, metatags) - self.index_page(docname, doctree, ctx.get('title', '')) - self.handle_page(docname, ctx, event_arg=doctree) - - def 
finish(self): - self.info(bold('writing additional files...'), nonl=1) - - # the global general index - - if self.config.html_use_index: - # the total count of lines for each index letter, used to distribute - # the entries into two columns - genindex = self.env.create_index(self) - indexcounts = [] - for _, entries in genindex: - indexcounts.append(sum(1 + len(subitems) - for _, (_, subitems) in entries)) - - genindexcontext = dict( - genindexentries = genindex, - genindexcounts = indexcounts, - split_index = self.config.html_split_index, - ) - self.info(' genindex', nonl=1) - - if self.config.html_split_index: - self.handle_page('genindex', genindexcontext, 'genindex-split.html') - self.handle_page('genindex-all', genindexcontext, 'genindex.html') - for (key, entries), count in zip(genindex, indexcounts): - ctx = {'key': key, 'entries': entries, 'count': count, - 'genindexentries': genindex} - self.handle_page('genindex-' + key, ctx, 'genindex-single.html') - else: - self.handle_page('genindex', genindexcontext, 'genindex.html') - - # the global module index - - if self.config.html_use_modindex and self.env.modules: - # the sorted list of all modules, for the global module index - modules = sorted(((mn, (self.get_relative_uri('modindex', fn) + - '#module-' + mn, sy, pl, dep)) - for (mn, (fn, sy, pl, dep)) in - self.env.modules.iteritems()), - key=lambda x: x[0].lower()) - # collect all platforms - platforms = set() - # sort out collapsable modules - modindexentries = [] - letters = [] - pmn = '' - num_toplevels = 0 - num_collapsables = 0 - cg = 0 # collapse group - fl = '' # first letter - for mn, (fn, sy, pl, dep) in modules: - pl = pl and pl.split(', ') or [] - platforms.update(pl) - if fl != mn[0].lower() and mn[0] != '_': - # heading - modindexentries.append(['', False, 0, False, - mn[0].upper(), '', [], False]) - letters.append(mn[0].upper()) - tn = mn.split('.')[0] - if tn != mn: - # submodule - if pmn == tn: - # first submodule - make parent collapsable - 
modindexentries[-1][1] = True - num_collapsables += 1 - elif not pmn.startswith(tn): - # submodule without parent in list, add dummy entry - cg += 1 - modindexentries.append([tn, True, cg, False, '', '', [], False]) - else: - num_toplevels += 1 - cg += 1 - modindexentries.append([mn, False, cg, (tn != mn), fn, sy, pl, dep]) - pmn = mn - fl = mn[0].lower() - platforms = sorted(platforms) - - # apply heuristics when to collapse modindex at page load: - # only collapse if number of toplevel modules is larger than - # number of submodules - collapse = len(modules) - num_toplevels < num_toplevels - - modindexcontext = dict( - modindexentries = modindexentries, - platforms = platforms, - letters = letters, - collapse_modindex = collapse, - ) - self.info(' modindex', nonl=1) - self.handle_page('modindex', modindexcontext, 'modindex.html') - - # the search page - if self.name != 'htmlhelp': - self.info(' search', nonl=1) - self.handle_page('search', {}, 'search.html') - - # additional pages from conf.py - for pagename, template in self.config.html_additional_pages.items(): - self.info(' '+pagename, nonl=1) - self.handle_page(pagename, {}, template) - - if self.config.html_use_opensearch and self.name != 'htmlhelp': - self.info(' opensearch', nonl=1) - fn = path.join(self.outdir, '_static', 'opensearch.xml') - self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn) - - self.info() - - # copy image files - if self.images: - self.info(bold('copying images...'), nonl=True) - ensuredir(path.join(self.outdir, '_images')) - for src, dest in self.images.iteritems(): - self.info(' '+src, nonl=1) - shutil.copyfile(path.join(self.srcdir, src), - path.join(self.outdir, '_images', dest)) - self.info() - - # copy static files - self.info(bold('copying static files... 
'), nonl=True) - ensuredir(path.join(self.outdir, '_static')) - # first, create pygments style file - f = open(path.join(self.outdir, '_static', 'pygments.css'), 'w') - f.write(PygmentsBridge('html', self.config.pygments_style).get_stylesheet()) - f.close() - # then, copy translations JavaScript file - if self.config.language is not None: - jsfile = path.join(path.dirname(__file__), 'locale', self.config.language, - 'LC_MESSAGES', 'sphinx.js') - if path.isfile(jsfile): - shutil.copyfile(jsfile, path.join(self.outdir, '_static', - 'translations.js')) - # then, copy over all user-supplied static files - staticdirnames = [path.join(path.dirname(__file__), 'static')] + \ - [path.join(self.confdir, spath) - for spath in self.config.html_static_path] - for staticdirname in staticdirnames: - for filename in os.listdir(staticdirname): - if filename.startswith('.'): - continue - fullname = path.join(staticdirname, filename) - targetname = path.join(self.outdir, '_static', filename) - if path.isfile(fullname): - shutil.copyfile(fullname, targetname) - elif path.isdir(fullname): - if filename in self.config.exclude_dirnames: - continue - if path.exists(targetname): - shutil.rmtree(targetname) - shutil.copytree(fullname, targetname) - # last, copy logo file (handled differently) - if self.config.html_logo: - logobase = path.basename(self.config.html_logo) - shutil.copyfile(path.join(self.confdir, self.config.html_logo), - path.join(self.outdir, '_static', logobase)) - self.info('done') - - # dump the search index - self.handle_finish() - - def get_outdated_docs(self): - if self.templates: - template_mtime = self.templates.newest_template_mtime() - else: - template_mtime = 0 - for docname in self.env.found_docs: - if docname not in self.env.all_docs: - yield docname - continue - targetname = self.env.doc2path(docname, self.outdir, self.out_suffix) - try: - targetmtime = path.getmtime(targetname) - except Exception: - targetmtime = 0 - try: - srcmtime = 
max(path.getmtime(self.env.doc2path(docname)), - template_mtime) - if srcmtime > targetmtime: - yield docname - except EnvironmentError: - # source doesn't exist anymore - pass - - def load_indexer(self, docnames): - keep = set(self.env.all_docs) - set(docnames) - try: - f = open(path.join(self.outdir, self.searchindex_filename), 'rb') - try: - self.indexer.load(f, self.indexer_format) - finally: - f.close() - except (IOError, OSError, ValueError): - if keep: - self.warn("search index couldn't be loaded, but not all documents " - "will be built: the index will be incomplete.") - # delete all entries for files that will be rebuilt - self.indexer.prune(keep) - - def index_page(self, pagename, doctree, title): - # only index pages with title - if self.indexer is not None and title: - self.indexer.feed(pagename, title, doctree) - - # --------- these are overwritten by the serialization builder - - def get_target_uri(self, docname, typ=None): - return docname + self.out_suffix - - def handle_page(self, pagename, addctx, templatename='page.html', - outfilename=None, event_arg=None): - ctx = self.globalcontext.copy() - # current_page_name is backwards compatibility - ctx['pagename'] = ctx['current_page_name'] = pagename - - def pathto(otheruri, resource=False, - baseuri=self.get_target_uri(pagename)): - if not resource: - otheruri = self.get_target_uri(otheruri) - return relative_uri(baseuri, otheruri) - ctx['pathto'] = pathto - ctx['hasdoc'] = lambda name: name in self.env.all_docs - ctx['customsidebar'] = self.config.html_sidebars.get(pagename) - ctx.update(addctx) - - self.app.emit('html-page-context', pagename, templatename, ctx, event_arg) - - output = self.templates.render(templatename, ctx) - if not outfilename: - outfilename = path.join(self.outdir, os_path(pagename) + self.out_suffix) - ensuredir(path.dirname(outfilename)) # normally different from self.outdir - try: - f = codecs.open(outfilename, 'w', 'utf-8') - try: - f.write(output) - finally: - f.close() - 
except (IOError, OSError), err: - self.warn("Error writing file %s: %s" % (outfilename, err)) - if self.copysource and ctx.get('sourcename'): - # copy the source file for the "show source" link - source_name = path.join(self.outdir, '_sources', os_path(ctx['sourcename'])) - ensuredir(path.dirname(source_name)) - shutil.copyfile(self.env.doc2path(pagename), source_name) - - def handle_finish(self): - self.info(bold('dumping search index... '), nonl=True) - self.indexer.prune(self.env.all_docs) - f = open(path.join(self.outdir, self.searchindex_filename), 'wb') - try: - self.indexer.dump(f, self.indexer_format) - finally: - f.close() - self.info('done') - - self.info(bold('dumping object inventory... '), nonl=True) - f = open(path.join(self.outdir, INVENTORY_FILENAME), 'w') - try: - f.write('# Sphinx inventory version 1\n') - f.write('# Project: %s\n' % self.config.project.encode('utf-8')) - f.write('# Version: %s\n' % self.config.version) - for modname, info in self.env.modules.iteritems(): - f.write('%s mod %s\n' % (modname, self.get_target_uri(info[0]))) - for refname, (docname, desctype) in self.env.descrefs.iteritems(): - f.write('%s %s %s\n' % (refname, desctype, self.get_target_uri(docname))) - finally: - f.close() - self.info('done') - - -class SerializingHTMLBuilder(StandaloneHTMLBuilder): - """ - An abstract builder that serializes the HTML generated. - """ - #: the serializing implementation to use. Set this to a module that - #: implements a `dump`, `load`, `dumps` and `loads` functions - #: (pickle, simplejson etc.) 
- implementation = None - - #: the filename for the global context file - globalcontext_filename = None - - supported_image_types = ('image/svg+xml', 'image/png', 'image/gif', - 'image/jpeg') - - def init(self): - self.init_translator_class() - self.templates = None # no template bridge necessary - - def get_target_uri(self, docname, typ=None): - if docname == 'index': - return '' - if docname.endswith(SEP + 'index'): - return docname[:-5] # up to sep - return docname + SEP - - def handle_page(self, pagename, ctx, templatename='page.html', - outfilename=None, event_arg=None): - ctx['current_page_name'] = pagename - sidebarfile = self.config.html_sidebars.get(pagename) - if sidebarfile: - ctx['customsidebar'] = sidebarfile - - if not outfilename: - outfilename = path.join(self.outdir, os_path(pagename) + self.out_suffix) - - self.app.emit('html-page-context', pagename, templatename, ctx, event_arg) - - ensuredir(path.dirname(outfilename)) - f = open(outfilename, 'wb') - try: - self.implementation.dump(ctx, f, 2) - finally: - f.close() - - # if there is a source file, copy the source file for the - # "show source" link - if ctx.get('sourcename'): - source_name = path.join(self.outdir, '_sources', - os_path(ctx['sourcename'])) - ensuredir(path.dirname(source_name)) - shutil.copyfile(self.env.doc2path(pagename), source_name) - - def handle_finish(self): - # dump the global context - outfilename = path.join(self.outdir, self.globalcontext_filename) - f = open(outfilename, 'wb') - try: - self.implementation.dump(self.globalcontext, f, 2) - finally: - f.close() - - # super here to dump the search index - StandaloneHTMLBuilder.handle_finish(self) - - # copy the environment file from the doctree dir to the output dir - # as needed by the web app - shutil.copyfile(path.join(self.doctreedir, ENV_PICKLE_FILENAME), - path.join(self.outdir, ENV_PICKLE_FILENAME)) - - # touch 'last build' file, used by the web application to determine - # when to reload its environment and clear 
the cache - open(path.join(self.outdir, LAST_BUILD_FILENAME), 'w').close() - - -class PickleHTMLBuilder(SerializingHTMLBuilder): - """ - A Builder that dumps the generated HTML into pickle files. - """ - implementation = pickle - indexer_format = pickle - name = 'pickle' - out_suffix = '.fpickle' - globalcontext_filename = 'globalcontext.pickle' - searchindex_filename = 'searchindex.pickle' - - -class JSONHTMLBuilder(SerializingHTMLBuilder): - """ - A builder that dumps the generated HTML into JSON files. - """ - implementation = json - indexer_format = json - name = 'json' - out_suffix = '.fjson' - globalcontext_filename = 'globalcontext.json' - searchindex_filename = 'searchindex.json' - - def init(self): - if json is None: - from sphinx.application import SphinxError - raise SphinxError('The module simplejson (or json in Python >= 2.6) ' - 'is not available. The JSONHTMLBuilder builder ' - 'will not work.') - SerializingHTMLBuilder.init(self) - - -class HTMLHelpBuilder(StandaloneHTMLBuilder): - """ - Builder that also outputs Windows HTML help project, contents and index files. - Adapted from the original Doc/tools/prechm.py. - """ - name = 'htmlhelp' - - # don't copy the reST source - copysource = False - supported_image_types = ['image/png', 'image/gif', 'image/jpeg'] - - # don't add links - add_header_links = False - add_definition_links = False - - def init(self): - StandaloneHTMLBuilder.init(self) - # the output files for HTML help must be .html only - self.out_suffix = '.html' - - def handle_finish(self): - build_hhx(self, self.outdir, self.config.htmlhelp_basename) - - -class LaTeXBuilder(Builder): - """ - Builds LaTeX output to create PDF. 
- """ - name = 'latex' - supported_image_types = ['application/pdf', 'image/png', 'image/gif', - 'image/jpeg'] - - def init(self): - self.docnames = [] - self.document_data = [] - texescape.init() - - def get_outdated_docs(self): - return 'all documents' # for now - - def get_target_uri(self, docname, typ=None): - if typ == 'token': - # token references are always inside production lists and must be - # replaced by \token{} in LaTeX - return '@token' - if docname not in self.docnames: - raise NoUri - else: - return '' - - def init_document_data(self): - preliminary_document_data = map(list, self.config.latex_documents) - if not preliminary_document_data: - self.warn('No "latex_documents" config value found; no documents ' - 'will be written.') - return - # assign subdirs to titles - self.titles = [] - for entry in preliminary_document_data: - docname = entry[0] - if docname not in self.env.all_docs: - self.warn('"latex_documents" config value references unknown ' - 'document %s' % docname) - continue - self.document_data.append(entry) - if docname.endswith(SEP+'index'): - docname = docname[:-5] - self.titles.append((docname, entry[2])) - - def write(self, *ignored): - # first, assemble the "appendix" docs that are in every PDF - appendices = [] - for fname in self.config.latex_appendices: - appendices.append(self.env.get_doctree(fname)) - - docwriter = LaTeXWriter(self) - docsettings = OptionParser( - defaults=self.env.settings, - components=(docwriter,)).get_default_values() - - self.init_document_data() - - for entry in self.document_data: - docname, targetname, title, author, docclass = entry[:5] - toctree_only = False - if len(entry) > 5: - toctree_only = entry[5] - destination = FileOutput( - destination_path=path.join(self.outdir, targetname), - encoding='utf-8') - self.info("processing " + targetname + "... 
", nonl=1) - doctree = self.assemble_doctree(docname, toctree_only, - appendices=(docclass == 'manual') and appendices or []) - self.post_process_images(doctree) - self.info("writing... ", nonl=1) - doctree.settings = docsettings - doctree.settings.author = author - doctree.settings.title = title - doctree.settings.docname = docname - doctree.settings.docclass = docclass - docwriter.write(doctree, destination) - self.info("done") - - def assemble_doctree(self, indexfile, toctree_only, appendices): - self.docnames = set([indexfile] + appendices) - self.info(darkgreen(indexfile) + " ", nonl=1) - def process_tree(docname, tree): - tree = tree.deepcopy() - for toctreenode in tree.traverse(addnodes.toctree): - newnodes = [] - includefiles = map(str, toctreenode['includefiles']) - for includefile in includefiles: - try: - self.info(darkgreen(includefile) + " ", nonl=1) - subtree = process_tree(includefile, - self.env.get_doctree(includefile)) - self.docnames.add(includefile) - except Exception: - self.warn('%s: toctree contains ref to nonexisting file %r' % - (docname, includefile)) - else: - sof = addnodes.start_of_file() - sof.children = subtree.children - newnodes.append(sof) - toctreenode.parent.replace(toctreenode, newnodes) - return tree - tree = self.env.get_doctree(indexfile) - if toctree_only: - # extract toctree nodes from the tree and put them in a fresh document - new_tree = new_document('') - new_sect = nodes.section() - new_sect += nodes.title(u'', u'') - new_tree += new_sect - for node in tree.traverse(addnodes.toctree): - new_sect += node - tree = new_tree - largetree = process_tree(indexfile, tree) - largetree.extend(appendices) - self.info() - self.info("resolving references...") - self.env.resolve_references(largetree, indexfile, self) - # resolve :ref:s to distant tex files -- we can't add a cross-reference, - # but append the document name - for pendingnode in largetree.traverse(addnodes.pending_xref): - docname = pendingnode['refdocname'] - sectname 
= pendingnode['refsectname'] - newnodes = [nodes.emphasis(sectname, sectname)] - for subdir, title in self.titles: - if docname.startswith(subdir): - newnodes.append(nodes.Text(_(' (in '), _(' (in '))) - newnodes.append(nodes.emphasis(title, title)) - newnodes.append(nodes.Text(')', ')')) - break - else: - pass - pendingnode.replace_self(newnodes) - return largetree - - def finish(self): - # copy image files - if self.images: - self.info(bold('copying images...'), nonl=1) - for src, dest in self.images.iteritems(): - self.info(' '+src, nonl=1) - shutil.copyfile(path.join(self.srcdir, src), - path.join(self.outdir, dest)) - self.info() - - # the logo is handled differently - if self.config.latex_logo: - logobase = path.basename(self.config.latex_logo) - shutil.copyfile(path.join(self.confdir, self.config.latex_logo), - path.join(self.outdir, logobase)) - - self.info(bold('copying TeX support files... '), nonl=True) - staticdirname = path.join(path.dirname(__file__), 'texinputs') - for filename in os.listdir(staticdirname): - if not filename.startswith('.'): - shutil.copyfile(path.join(staticdirname, filename), - path.join(self.outdir, filename)) - self.info('done') - - -class ChangesBuilder(Builder): - """ - Write a summary with all versionadded/changed directives. 
- """ - name = 'changes' - - def init(self): - self.init_templates() - - def get_outdated_docs(self): - return self.outdir - - typemap = { - 'versionadded': 'added', - 'versionchanged': 'changed', - 'deprecated': 'deprecated', - } - - def write(self, *ignored): - version = self.config.version - libchanges = {} - apichanges = [] - otherchanges = {} - if version not in self.env.versionchanges: - self.info(bold('no changes in this version.')) - return - self.info(bold('writing summary file...')) - for type, docname, lineno, module, descname, content in \ - self.env.versionchanges[version]: - ttext = self.typemap[type] - context = content.replace('\n', ' ') - if descname and docname.startswith('c-api'): - if not descname: - continue - if context: - entry = '%s: %s: %s' % (descname, ttext, context) - else: - entry = '%s: %s.' % (descname, ttext) - apichanges.append((entry, docname, lineno)) - elif descname or module: - if not module: - module = _('Builtins') - if not descname: - descname = _('Module level') - if context: - entry = '%s: %s: %s' % (descname, ttext, context) - else: - entry = '%s: %s.' 
% (descname, ttext) - libchanges.setdefault(module, []).append((entry, docname, lineno)) - else: - if not context: - continue - entry = '%s: %s' % (ttext.capitalize(), context) - title = self.env.titles[docname].astext() - otherchanges.setdefault((docname, title), []).append( - (entry, docname, lineno)) - - ctx = { - 'project': self.config.project, - 'version': version, - 'docstitle': self.config.html_title, - 'shorttitle': self.config.html_short_title, - 'libchanges': sorted(libchanges.iteritems()), - 'apichanges': sorted(apichanges), - 'otherchanges': sorted(otherchanges.iteritems()), - 'show_sphinx': self.config.html_show_sphinx, - } - f = open(path.join(self.outdir, 'index.html'), 'w') - try: - f.write(self.templates.render('changes/frameset.html', ctx)) - finally: - f.close() - f = open(path.join(self.outdir, 'changes.html'), 'w') - try: - f.write(self.templates.render('changes/versionchanges.html', ctx)) - finally: - f.close() - - hltext = ['.. versionadded:: %s' % version, - '.. versionchanged:: %s' % version, - '.. deprecated:: %s' % version] - - def hl(no, line): - line = ' ' % no + escape(line) - for x in hltext: - if x in line: - line = '%s' % line - break - return line - - self.info(bold('copying source files...')) - for docname in self.env.all_docs: - f = open(self.env.doc2path(docname)) - lines = f.readlines() - targetfn = path.join(self.outdir, 'rst', os_path(docname)) + '.html' - ensuredir(path.dirname(targetfn)) - f = codecs.open(targetfn, 'w', 'utf8') - try: - text = ''.join(hl(i+1, line) for (i, line) in enumerate(lines)) - ctx = {'filename': self.env.doc2path(docname, None), 'text': text} - f.write(self.templates.render('changes/rstsource.html', ctx)) - finally: - f.close() - shutil.copyfile(path.join(path.dirname(__file__), 'static', 'default.css'), - path.join(self.outdir, 'default.css')) - - def hl(self, text, version): - text = escape(text) - for directive in ['versionchanged', 'versionadded', 'deprecated']: - text = text.replace('.. 
%s:: %s' % (directive, version), - '.. %s:: %s' % (directive, version)) - return text - - def finish(self): - pass - - -class TextBuilder(Builder): - name = 'text' - out_suffix = '.txt' - - def init(self): - pass - - def get_outdated_docs(self): - for docname in self.env.found_docs: - if docname not in self.env.all_docs: - yield docname - continue - targetname = self.env.doc2path(docname, self.outdir, self.out_suffix) - try: - targetmtime = path.getmtime(targetname) - except Exception: - targetmtime = 0 - try: - srcmtime = path.getmtime(self.env.doc2path(docname)) - if srcmtime > targetmtime: - yield docname - except EnvironmentError: - # source doesn't exist anymore - pass - - def get_target_uri(self, docname, typ=None): - return '' - - def prepare_writing(self, docnames): - self.writer = TextWriter(self) - - def write_doc(self, docname, doctree): - destination = StringOutput(encoding='utf-8') - self.writer.write(doctree, destination) - outfilename = path.join(self.outdir, os_path(docname) + self.out_suffix) - ensuredir(path.dirname(outfilename)) # normally different from self.outdir - try: - f = codecs.open(outfilename, 'w', 'utf-8') - try: - f.write(self.writer.output) - finally: - f.close() - except (IOError, OSError), err: - self.warn("Error writing file %s: %s" % (outfilename, err)) - - def finish(self): - pass - - -# compatibility alias -WebHTMLBuilder = PickleHTMLBuilder - - -from sphinx.linkcheck import CheckExternalLinksBuilder - -builtin_builders = { - 'html': StandaloneHTMLBuilder, - 'pickle': PickleHTMLBuilder, - 'json': JSONHTMLBuilder, - 'web': PickleHTMLBuilder, - 'htmlhelp': HTMLHelpBuilder, - 'latex': LaTeXBuilder, - 'text': TextBuilder, - 'changes': ChangesBuilder, - 'linkcheck': CheckExternalLinksBuilder, -} +from sphinx.builders import Builder +from sphinx.builders.text import TextBuilder +from sphinx.builders.html import StandaloneHTMLBuilder, WebHTMLBuilder, \ + PickleHTMLBuilder, JSONHTMLBuilder +from sphinx.builders.latex import 
LaTeXBuilder +from sphinx.builders.changes import ChangesBuilder +from sphinx.builders.htmlhelp import HTMLHelpBuilder +from sphinx.builders.linkcheck import CheckExternalLinksBuilder diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py new file mode 100644 index 00000000..09d37de2 --- /dev/null +++ b/sphinx/builders/__init__.py @@ -0,0 +1,328 @@ +# -*- coding: utf-8 -*- +""" + sphinx.builders + ~~~~~~~~~~~~~~~ + + Builder superclass for all builders. + + :copyright: 2007-2008 by Georg Brandl, Sebastian Wiesner, Horst Gutmann. + :license: BSD. +""" + +import os +import gettext +from os import path + +from docutils import nodes + +from sphinx import package_dir, locale +from sphinx.util import SEP, relative_uri +from sphinx.environment import BuildEnvironment +from sphinx.util.console import bold, purple, darkgreen + +# side effect: registers roles and directives +from sphinx import roles +from sphinx import directives + + +ENV_PICKLE_FILENAME = 'environment.pickle' + + +class Builder(object): + """ + Builds target formats from the reST sources. + """ + + # builder's name, for the -b command line options + name = '' + + def __init__(self, app, env=None, freshenv=False): + self.srcdir = app.srcdir + self.confdir = app.confdir + self.outdir = app.outdir + self.doctreedir = app.doctreedir + if not path.isdir(self.doctreedir): + os.makedirs(self.doctreedir) + + self.app = app + self.warn = app.warn + self.info = app.info + self.config = app.config + + self.load_i18n() + + # images that need to be copied over (source -> dest) + self.images = {} + + # if None, this is set in load_env() + self.env = env + self.freshenv = freshenv + + self.init() + self.load_env() + + # helper methods + + def init(self): + """Load necessary templates and perform initialization.""" + raise NotImplementedError + + def init_templates(self): + # Call this from init() if you need templates. 
+ if self.config.template_bridge: + self.templates = self.app.import_object( + self.config.template_bridge, 'template_bridge setting')() + else: + from sphinx._jinja import BuiltinTemplates + self.templates = BuiltinTemplates() + self.templates.init(self) + + def get_target_uri(self, docname, typ=None): + """ + Return the target URI for a document name (typ can be used to qualify + the link characteristic for individual builders). + """ + raise NotImplementedError + + def get_relative_uri(self, from_, to, typ=None): + """ + Return a relative URI between two source filenames. May raise environment.NoUri + if there's no way to return a sensible URI. + """ + return relative_uri(self.get_target_uri(from_), + self.get_target_uri(to, typ)) + + def get_outdated_docs(self): + """ + Return an iterable of output files that are outdated, or a string describing + what an update build will build. + """ + raise NotImplementedError + + def status_iterator(self, iterable, summary, colorfunc=darkgreen): + l = -1 + for item in iterable: + if l == -1: + self.info(bold(summary), nonl=1) + l = 0 + self.info(colorfunc(item) + ' ', nonl=1) + yield item + if l == 0: + self.info() + + supported_image_types = [] + + def post_process_images(self, doctree): + """ + Pick the best candidate for all image URIs. + """ + for node in doctree.traverse(nodes.image): + if '?' 
in node['candidates']: + # don't rewrite nonlocal image URIs + continue + if '*' not in node['candidates']: + for imgtype in self.supported_image_types: + candidate = node['candidates'].get(imgtype, None) + if candidate: + break + else: + self.warn('%s:%s: no matching candidate for image URI %r' % + (node.source, getattr(node, 'lineno', ''), node['uri'])) + continue + node['uri'] = candidate + else: + candidate = node['uri'] + if candidate not in self.env.images: + # non-existing URI; let it alone + continue + self.images[candidate] = self.env.images[candidate][1] + + # build methods + + def load_i18n(self): + """ + Load translated strings from the configured localedirs if + enabled in the configuration. + """ + self.translator = None + if self.config.language is not None: + self.info(bold('loading translations [%s]... ' % self.config.language), + nonl=True) + locale_dirs = [path.join(package_dir, 'locale')] + \ + [path.join(self.srcdir, x) for x in self.config.locale_dirs] + for dir_ in locale_dirs: + try: + trans = gettext.translation('sphinx', localedir=dir_, + languages=[self.config.language]) + if self.translator is None: + self.translator = trans + else: + self.translator._catalog.update(trans.catalog) + except Exception: + # Language couldn't be found in the specified path + pass + if self.translator is not None: + self.info('done') + else: + self.info('locale not available') + if self.translator is None: + self.translator = gettext.NullTranslations() + self.translator.install(unicode=True) + locale.init() # translate common labels + + def load_env(self): + """Set up the build environment.""" + if self.env: + return + if not self.freshenv: + try: + self.info(bold('loading pickled environment... 
'), nonl=True) + self.env = BuildEnvironment.frompickle(self.config, + path.join(self.doctreedir, ENV_PICKLE_FILENAME)) + self.info('done') + except Exception, err: + if type(err) is IOError and err.errno == 2: + self.info('not found') + else: + self.info('failed: %s' % err) + self.env = BuildEnvironment(self.srcdir, self.doctreedir, self.config) + self.env.find_files(self.config) + else: + self.env = BuildEnvironment(self.srcdir, self.doctreedir, self.config) + self.env.find_files(self.config) + self.env.set_warnfunc(self.warn) + + def build_all(self): + """Build all source files.""" + self.build(None, summary='all source files', method='all') + + def build_specific(self, filenames): + """Only rebuild as much as needed for changes in the source_filenames.""" + # bring the filenames to the canonical format, that is, + # relative to the source directory and without source_suffix. + dirlen = len(self.srcdir) + 1 + to_write = [] + suffix = self.config.source_suffix + for filename in filenames: + filename = path.abspath(filename)[dirlen:] + if filename.endswith(suffix): + filename = filename[:-len(suffix)] + filename = filename.replace(os.path.sep, SEP) + to_write.append(filename) + self.build(to_write, method='specific', + summary='%d source files given on command ' + 'line' % len(to_write)) + + def build_update(self): + """Only rebuild files changed or added since last build.""" + to_build = self.get_outdated_docs() + if isinstance(to_build, str): + self.build(['__all__'], to_build) + else: + to_build = list(to_build) + self.build(to_build, + summary='targets for %d source files that are ' + 'out of date' % len(to_build)) + + def build(self, docnames, summary=None, method='update'): + if summary: + self.info(bold('building [%s]: ' % self.name), nonl=1) + self.info(summary) + + updated_docnames = [] + # while reading, collect all warnings from docutils + warnings = [] + self.env.set_warnfunc(warnings.append) + self.info(bold('updating environment: '), nonl=1) + 
iterator = self.env.update(self.config, self.srcdir, self.doctreedir, self.app) + # the first item in the iterator is a summary message + self.info(iterator.next()) + for docname in self.status_iterator(iterator, 'reading sources... ', purple): + updated_docnames.append(docname) + # nothing further to do, the environment has already done the reading + for warning in warnings: + if warning.strip(): + self.warn(warning) + self.env.set_warnfunc(self.warn) + + if updated_docnames: + # save the environment + self.info(bold('pickling environment... '), nonl=True) + self.env.topickle(path.join(self.doctreedir, ENV_PICKLE_FILENAME)) + self.info('done') + + # global actions + self.info(bold('checking consistency... '), nonl=True) + self.env.check_consistency() + self.info('done') + else: + if method == 'update' and not docnames: + self.info(bold('no targets are out of date.')) + return + + # another indirection to support methods which don't build files + # individually + self.write(docnames, updated_docnames, method) + + # finish (write static files etc.) + self.finish() + if self.app._warncount: + self.info(bold('build succeeded, %s warning%s.' % + (self.app._warncount, + self.app._warncount != 1 and 's' or ''))) + else: + self.info(bold('build succeeded.')) + + def write(self, build_docnames, updated_docnames, method='update'): + if build_docnames is None or build_docnames == ['__all__']: + # build_all + build_docnames = self.env.found_docs + if method == 'update': + # build updated ones as well + docnames = set(build_docnames) | set(updated_docnames) + else: + docnames = set(build_docnames) + + # add all toctree-containing files that may have changed + for docname in list(docnames): + for tocdocname in self.env.files_to_rebuild.get(docname, []): + docnames.add(tocdocname) + docnames.add(self.config.master_doc) + + self.info(bold('preparing documents... 
'), nonl=True) + self.prepare_writing(docnames) + self.info('done') + + # write target files + warnings = [] + self.env.set_warnfunc(warnings.append) + for docname in self.status_iterator(sorted(docnames), + 'writing output... ', darkgreen): + doctree = self.env.get_and_resolve_doctree(docname, self) + self.write_doc(docname, doctree) + for warning in warnings: + if warning.strip(): + self.warn(warning) + self.env.set_warnfunc(self.warn) + + def prepare_writing(self, docnames): + raise NotImplementedError + + def write_doc(self, docname, doctree): + raise NotImplementedError + + def finish(self): + raise NotImplementedError + + +BUILTIN_BUILDERS = { + 'html': ('html', 'StandaloneHTMLBuilder'), + 'pickle': ('html', 'PickleHTMLBuilder'), + 'json': ('html', 'JSONHTMLBuilder'), + 'web': ('html', 'PickleHTMLBuilder'), + 'htmlhelp': ('htmlhelp', 'HTMLHelpBuilder'), + 'latex': ('latex', 'LaTeXBuilder'), + 'text': ('text', 'TextBuilder'), + 'changes': ('changes', 'ChangesBuilder'), + 'linkcheck': ('linkcheck', 'CheckExternalLinksBuilder'), +} diff --git a/sphinx/builders/changes.py b/sphinx/builders/changes.py new file mode 100644 index 00000000..28805738 --- /dev/null +++ b/sphinx/builders/changes.py @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- +""" + sphinx.builders.changes + ~~~~~~~~~~~~~~~~~~~~~~~ + + Changelog builder. + + :copyright: 2008 by Georg Brandl. + :license: BSD. +""" + +import codecs +import shutil +from os import path +from cgi import escape + +from sphinx import package_dir +from sphinx.util import ensuredir, os_path +from sphinx.builders import Builder +from sphinx.util.console import bold + + +class ChangesBuilder(Builder): + """ + Write a summary with all versionadded/changed directives. 
+ """ + name = 'changes' + + def init(self): + self.init_templates() + + def get_outdated_docs(self): + return self.outdir + + typemap = { + 'versionadded': 'added', + 'versionchanged': 'changed', + 'deprecated': 'deprecated', + } + + def write(self, *ignored): + version = self.config.version + libchanges = {} + apichanges = [] + otherchanges = {} + if version not in self.env.versionchanges: + self.info(bold('no changes in this version.')) + return + self.info(bold('writing summary file...')) + for type, docname, lineno, module, descname, content in \ + self.env.versionchanges[version]: + ttext = self.typemap[type] + context = content.replace('\n', ' ') + if descname and docname.startswith('c-api'): + if not descname: + continue + if context: + entry = '%s: %s: %s' % (descname, ttext, context) + else: + entry = '%s: %s.' % (descname, ttext) + apichanges.append((entry, docname, lineno)) + elif descname or module: + if not module: + module = _('Builtins') + if not descname: + descname = _('Module level') + if context: + entry = '%s: %s: %s' % (descname, ttext, context) + else: + entry = '%s: %s.' 
% (descname, ttext) + libchanges.setdefault(module, []).append((entry, docname, lineno)) + else: + if not context: + continue + entry = '%s: %s' % (ttext.capitalize(), context) + title = self.env.titles[docname].astext() + otherchanges.setdefault((docname, title), []).append( + (entry, docname, lineno)) + + ctx = { + 'project': self.config.project, + 'version': version, + 'docstitle': self.config.html_title, + 'shorttitle': self.config.html_short_title, + 'libchanges': sorted(libchanges.iteritems()), + 'apichanges': sorted(apichanges), + 'otherchanges': sorted(otherchanges.iteritems()), + 'show_sphinx': self.config.html_show_sphinx, + } + f = open(path.join(self.outdir, 'index.html'), 'w') + try: + f.write(self.templates.render('changes/frameset.html', ctx)) + finally: + f.close() + f = open(path.join(self.outdir, 'changes.html'), 'w') + try: + f.write(self.templates.render('changes/versionchanges.html', ctx)) + finally: + f.close() + + hltext = ['.. versionadded:: %s' % version, + '.. versionchanged:: %s' % version, + '.. deprecated:: %s' % version] + + def hl(no, line): + line = ' ' % no + escape(line) + for x in hltext: + if x in line: + line = '%s' % line + break + return line + + self.info(bold('copying source files...')) + for docname in self.env.all_docs: + f = open(self.env.doc2path(docname)) + lines = f.readlines() + targetfn = path.join(self.outdir, 'rst', os_path(docname)) + '.html' + ensuredir(path.dirname(targetfn)) + f = codecs.open(targetfn, 'w', 'utf8') + try: + text = ''.join(hl(i+1, line) for (i, line) in enumerate(lines)) + ctx = {'filename': self.env.doc2path(docname, None), 'text': text} + f.write(self.templates.render('changes/rstsource.html', ctx)) + finally: + f.close() + shutil.copyfile(path.join(package_dir, 'static', 'default.css'), + path.join(self.outdir, 'default.css')) + + def hl(self, text, version): + text = escape(text) + for directive in ['versionchanged', 'versionadded', 'deprecated']: + text = text.replace('.. 
%s:: %s' % (directive, version), + '.. %s:: %s' % (directive, version)) + return text + + def finish(self): + pass diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py new file mode 100644 index 00000000..adf58be1 --- /dev/null +++ b/sphinx/builders/html.py @@ -0,0 +1,607 @@ +# -*- coding: utf-8 -*- +""" + sphinx.builders.html + ~~~~~~~~~~~~~~~~~~~~ + + Several HTML builders. + + :copyright: 2007-2008 by Georg Brandl, Armin Ronacher. + :license: BSD. +""" + +import os +import codecs +import shutil +import cPickle as pickle +from os import path + +from docutils.io import DocTreeInput, StringOutput +from docutils.core import publish_parts +from docutils.utils import new_document +from docutils.frontend import OptionParser +from docutils.readers.doctree import Reader as DoctreeReader + +from sphinx import package_dir, __version__ +from sphinx.util import SEP, os_path, relative_uri, ensuredir, ustrftime +from sphinx.search import js_index +from sphinx.builders import Builder, ENV_PICKLE_FILENAME +from sphinx.highlighting import PygmentsBridge +from sphinx.util.console import bold +from sphinx.writers.html import HTMLWriter, HTMLTranslator, SmartyPantsHTMLTranslator + +try: + import json +except ImportError: + try: + import simplejson as json + except ImportError: + json = None + + +INVENTORY_FILENAME = 'objects.inv' +LAST_BUILD_FILENAME = 'last_build' + + +class StandaloneHTMLBuilder(Builder): + """ + Builds standalone HTML docs. + """ + name = 'html' + copysource = True + out_suffix = '.html' + indexer_format = js_index + supported_image_types = ['image/svg+xml', 'image/png', 'image/gif', + 'image/jpeg'] + searchindex_filename = 'searchindex.js' + add_header_links = True + add_definition_links = True + + # This is a class attribute because it is mutated by Sphinx.add_javascript. 
+ script_files = ['_static/jquery.js', '_static/doctools.js'] + + def init(self): + """Load templates.""" + self.init_templates() + self.init_translator_class() + if self.config.html_file_suffix: + self.out_suffix = self.config.html_file_suffix + + if self.config.language is not None: + jsfile = path.join(package_dir, 'locale', self.config.language, + 'LC_MESSAGES', 'sphinx.js') + if path.isfile(jsfile): + self.script_files.append('_static/translations.js') + + def init_translator_class(self): + if self.config.html_translator_class: + self.translator_class = self.app.import_object( + self.config.html_translator_class, 'html_translator_class setting') + elif self.config.html_use_smartypants: + self.translator_class = SmartyPantsHTMLTranslator + else: + self.translator_class = HTMLTranslator + + def render_partial(self, node): + """Utility: Render a lone doctree node.""" + doc = new_document('') + doc.append(node) + return publish_parts( + doc, + source_class=DocTreeInput, + reader=DoctreeReader(), + writer=HTMLWriter(self), + settings_overrides={'output_encoding': 'unicode'} + ) + + def prepare_writing(self, docnames): + from sphinx.search import IndexBuilder + + self.indexer = IndexBuilder(self.env) + self.load_indexer(docnames) + self.docwriter = HTMLWriter(self) + self.docsettings = OptionParser( + defaults=self.env.settings, + components=(self.docwriter,)).get_default_values() + + # format the "last updated on" string, only once is enough since it + # typically doesn't include the time of day + lufmt = self.config.html_last_updated_fmt + if lufmt is not None: + self.last_updated = ustrftime(lufmt or _('%b %d, %Y')) + else: + self.last_updated = None + + logo = self.config.html_logo and \ + path.basename(self.config.html_logo) or '' + + favicon = self.config.html_favicon and \ + path.basename(self.config.html_favicon) or '' + if favicon and os.path.splitext(favicon)[1] != '.ico': + self.warn('html_favicon is not an .ico file') + + if not 
isinstance(self.config.html_use_opensearch, basestring): + self.warn('html_use_opensearch config value must now be a string') + + self.relations = self.env.collect_relations() + + rellinks = [] + if self.config.html_use_index: + rellinks.append(('genindex', _('General Index'), 'I', _('index'))) + if self.config.html_use_modindex and self.env.modules: + rellinks.append(('modindex', _('Global Module Index'), 'M', _('modules'))) + + self.globalcontext = dict( + project = self.config.project, + release = self.config.release, + version = self.config.version, + last_updated = self.last_updated, + copyright = self.config.copyright, + master_doc = self.config.master_doc, + style = self.config.html_style, + use_opensearch = self.config.html_use_opensearch, + docstitle = self.config.html_title, + shorttitle = self.config.html_short_title, + show_sphinx = self.config.html_show_sphinx, + file_suffix = self.out_suffix, + script_files = self.script_files, + sphinx_version = __version__, + rellinks = rellinks, + builder = self.name, + parents = [], + logo = logo, + favicon = favicon, + ) + self.globalcontext.update(self.config.html_context) + + def get_doc_context(self, docname, body, metatags): + """Collect items for the template context of a page.""" + # find out relations + prev = next = None + parents = [] + rellinks = self.globalcontext['rellinks'][:] + related = self.relations.get(docname) + titles = self.env.titles + if related and related[2]: + try: + next = {'link': self.get_relative_uri(docname, related[2]), + 'title': self.render_partial(titles[related[2]])['title']} + rellinks.append((related[2], next['title'], 'N', _('next'))) + except KeyError: + next = None + if related and related[1]: + try: + prev = {'link': self.get_relative_uri(docname, related[1]), + 'title': self.render_partial(titles[related[1]])['title']} + rellinks.append((related[1], prev['title'], 'P', _('previous'))) + except KeyError: + # the relation is (somehow) not in the TOC tree, handle that 
gracefully + prev = None + while related and related[0]: + try: + parents.append( + {'link': self.get_relative_uri(docname, related[0]), + 'title': self.render_partial(titles[related[0]])['title']}) + except KeyError: + pass + related = self.relations.get(related[0]) + if parents: + parents.pop() # remove link to the master file; we have a generic + # "back to index" link already + parents.reverse() + + # title rendered as HTML + title = titles.get(docname) + title = title and self.render_partial(title)['title'] or '' + # the name for the copied source + sourcename = self.config.html_copy_source and docname + '.txt' or '' + + # metadata for the document + meta = self.env.metadata.get(docname) + + return dict( + parents = parents, + prev = prev, + next = next, + title = title, + meta = meta, + body = body, + metatags = metatags, + rellinks = rellinks, + sourcename = sourcename, + toc = self.render_partial(self.env.get_toc_for(docname))['fragment'], + # only display a TOC if there's more than one item to show + display_toc = (self.env.toc_num_entries[docname] > 1), + ) + + def write_doc(self, docname, doctree): + self.post_process_images(doctree) + destination = StringOutput(encoding='utf-8') + doctree.settings = self.docsettings + + self.imgpath = relative_uri(self.get_target_uri(docname), '_images') + self.docwriter.write(doctree, destination) + self.docwriter.assemble_parts() + body = self.docwriter.parts['fragment'] + metatags = self.docwriter.clean_meta + + ctx = self.get_doc_context(docname, body, metatags) + self.index_page(docname, doctree, ctx.get('title', '')) + self.handle_page(docname, ctx, event_arg=doctree) + + def finish(self): + self.info(bold('writing additional files...'), nonl=1) + + # the global general index + + if self.config.html_use_index: + # the total count of lines for each index letter, used to distribute + # the entries into two columns + genindex = self.env.create_index(self) + indexcounts = [] + for _, entries in genindex: + 
indexcounts.append(sum(1 + len(subitems) + for _, (_, subitems) in entries)) + + genindexcontext = dict( + genindexentries = genindex, + genindexcounts = indexcounts, + split_index = self.config.html_split_index, + ) + self.info(' genindex', nonl=1) + + if self.config.html_split_index: + self.handle_page('genindex', genindexcontext, 'genindex-split.html') + self.handle_page('genindex-all', genindexcontext, 'genindex.html') + for (key, entries), count in zip(genindex, indexcounts): + ctx = {'key': key, 'entries': entries, 'count': count, + 'genindexentries': genindex} + self.handle_page('genindex-' + key, ctx, 'genindex-single.html') + else: + self.handle_page('genindex', genindexcontext, 'genindex.html') + + # the global module index + + if self.config.html_use_modindex and self.env.modules: + # the sorted list of all modules, for the global module index + modules = sorted(((mn, (self.get_relative_uri('modindex', fn) + + '#module-' + mn, sy, pl, dep)) + for (mn, (fn, sy, pl, dep)) in + self.env.modules.iteritems()), + key=lambda x: x[0].lower()) + # collect all platforms + platforms = set() + # sort out collapsable modules + modindexentries = [] + letters = [] + pmn = '' + num_toplevels = 0 + num_collapsables = 0 + cg = 0 # collapse group + fl = '' # first letter + for mn, (fn, sy, pl, dep) in modules: + pl = pl and pl.split(', ') or [] + platforms.update(pl) + if fl != mn[0].lower() and mn[0] != '_': + # heading + modindexentries.append(['', False, 0, False, + mn[0].upper(), '', [], False]) + letters.append(mn[0].upper()) + tn = mn.split('.')[0] + if tn != mn: + # submodule + if pmn == tn: + # first submodule - make parent collapsable + modindexentries[-1][1] = True + num_collapsables += 1 + elif not pmn.startswith(tn): + # submodule without parent in list, add dummy entry + cg += 1 + modindexentries.append([tn, True, cg, False, '', '', [], False]) + else: + num_toplevels += 1 + cg += 1 + modindexentries.append([mn, False, cg, (tn != mn), fn, sy, pl, dep]) + pmn = 
mn + fl = mn[0].lower() + platforms = sorted(platforms) + + # apply heuristics when to collapse modindex at page load: + # only collapse if number of toplevel modules is larger than + # number of submodules + collapse = len(modules) - num_toplevels < num_toplevels + + modindexcontext = dict( + modindexentries = modindexentries, + platforms = platforms, + letters = letters, + collapse_modindex = collapse, + ) + self.info(' modindex', nonl=1) + self.handle_page('modindex', modindexcontext, 'modindex.html') + + # the search page + if self.name != 'htmlhelp': + self.info(' search', nonl=1) + self.handle_page('search', {}, 'search.html') + + # additional pages from conf.py + for pagename, template in self.config.html_additional_pages.items(): + self.info(' '+pagename, nonl=1) + self.handle_page(pagename, {}, template) + + if self.config.html_use_opensearch and self.name != 'htmlhelp': + self.info(' opensearch', nonl=1) + fn = path.join(self.outdir, '_static', 'opensearch.xml') + self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn) + + self.info() + + # copy image files + if self.images: + self.info(bold('copying images...'), nonl=True) + ensuredir(path.join(self.outdir, '_images')) + for src, dest in self.images.iteritems(): + self.info(' '+src, nonl=1) + shutil.copyfile(path.join(self.srcdir, src), + path.join(self.outdir, '_images', dest)) + self.info() + + # copy static files + self.info(bold('copying static files... 
'), nonl=True) + ensuredir(path.join(self.outdir, '_static')) + # first, create pygments style file + f = open(path.join(self.outdir, '_static', 'pygments.css'), 'w') + f.write(PygmentsBridge('html', self.config.pygments_style).get_stylesheet()) + f.close() + # then, copy translations JavaScript file + if self.config.language is not None: + jsfile = path.join(package_dir, 'locale', self.config.language, + 'LC_MESSAGES', 'sphinx.js') + if path.isfile(jsfile): + shutil.copyfile(jsfile, path.join(self.outdir, '_static', + 'translations.js')) + # then, copy over all user-supplied static files + staticdirnames = [path.join(package_dir, 'static')] + \ + [path.join(self.confdir, spath) + for spath in self.config.html_static_path] + for staticdirname in staticdirnames: + for filename in os.listdir(staticdirname): + if filename.startswith('.'): + continue + fullname = path.join(staticdirname, filename) + targetname = path.join(self.outdir, '_static', filename) + if path.isfile(fullname): + shutil.copyfile(fullname, targetname) + elif path.isdir(fullname): + if filename in self.config.exclude_dirnames: + continue + if path.exists(targetname): + shutil.rmtree(targetname) + shutil.copytree(fullname, targetname) + # last, copy logo file (handled differently) + if self.config.html_logo: + logobase = path.basename(self.config.html_logo) + shutil.copyfile(path.join(self.confdir, self.config.html_logo), + path.join(self.outdir, '_static', logobase)) + self.info('done') + + # dump the search index + self.handle_finish() + + def get_outdated_docs(self): + if self.templates: + template_mtime = self.templates.newest_template_mtime() + else: + template_mtime = 0 + for docname in self.env.found_docs: + if docname not in self.env.all_docs: + yield docname + continue + targetname = self.env.doc2path(docname, self.outdir, self.out_suffix) + try: + targetmtime = path.getmtime(targetname) + except Exception: + targetmtime = 0 + try: + srcmtime = max(path.getmtime(self.env.doc2path(docname)), 
+ template_mtime) + if srcmtime > targetmtime: + yield docname + except EnvironmentError: + # source doesn't exist anymore + pass + + def load_indexer(self, docnames): + keep = set(self.env.all_docs) - set(docnames) + try: + f = open(path.join(self.outdir, self.searchindex_filename), 'rb') + try: + self.indexer.load(f, self.indexer_format) + finally: + f.close() + except (IOError, OSError, ValueError): + if keep: + self.warn("search index couldn't be loaded, but not all documents " + "will be built: the index will be incomplete.") + # delete all entries for files that will be rebuilt + self.indexer.prune(keep) + + def index_page(self, pagename, doctree, title): + # only index pages with title + if self.indexer is not None and title: + self.indexer.feed(pagename, title, doctree) + + # --------- these are overwritten by the serialization builder + + def get_target_uri(self, docname, typ=None): + return docname + self.out_suffix + + def handle_page(self, pagename, addctx, templatename='page.html', + outfilename=None, event_arg=None): + ctx = self.globalcontext.copy() + # current_page_name is backwards compatibility + ctx['pagename'] = ctx['current_page_name'] = pagename + + def pathto(otheruri, resource=False, + baseuri=self.get_target_uri(pagename)): + if not resource: + otheruri = self.get_target_uri(otheruri) + return relative_uri(baseuri, otheruri) + ctx['pathto'] = pathto + ctx['hasdoc'] = lambda name: name in self.env.all_docs + ctx['customsidebar'] = self.config.html_sidebars.get(pagename) + ctx.update(addctx) + + self.app.emit('html-page-context', pagename, templatename, ctx, event_arg) + + output = self.templates.render(templatename, ctx) + if not outfilename: + outfilename = path.join(self.outdir, os_path(pagename) + self.out_suffix) + ensuredir(path.dirname(outfilename)) # normally different from self.outdir + try: + f = codecs.open(outfilename, 'w', 'utf-8') + try: + f.write(output) + finally: + f.close() + except (IOError, OSError), err: + 
self.warn("Error writing file %s: %s" % (outfilename, err)) + if self.copysource and ctx.get('sourcename'): + # copy the source file for the "show source" link + source_name = path.join(self.outdir, '_sources', os_path(ctx['sourcename'])) + ensuredir(path.dirname(source_name)) + shutil.copyfile(self.env.doc2path(pagename), source_name) + + def handle_finish(self): + self.info(bold('dumping search index... '), nonl=True) + self.indexer.prune(self.env.all_docs) + f = open(path.join(self.outdir, self.searchindex_filename), 'wb') + try: + self.indexer.dump(f, self.indexer_format) + finally: + f.close() + self.info('done') + + self.info(bold('dumping object inventory... '), nonl=True) + f = open(path.join(self.outdir, INVENTORY_FILENAME), 'w') + try: + f.write('# Sphinx inventory version 1\n') + f.write('# Project: %s\n' % self.config.project.encode('utf-8')) + f.write('# Version: %s\n' % self.config.version) + for modname, info in self.env.modules.iteritems(): + f.write('%s mod %s\n' % (modname, self.get_target_uri(info[0]))) + for refname, (docname, desctype) in self.env.descrefs.iteritems(): + f.write('%s %s %s\n' % (refname, desctype, self.get_target_uri(docname))) + finally: + f.close() + self.info('done') + + +class SerializingHTMLBuilder(StandaloneHTMLBuilder): + """ + An abstract builder that serializes the HTML generated. + """ + #: the serializing implementation to use. Set this to a module that + #: implements a `dump`, `load`, `dumps` and `loads` functions + #: (pickle, simplejson etc.) 
+ implementation = None + + #: the filename for the global context file + globalcontext_filename = None + + supported_image_types = ('image/svg+xml', 'image/png', 'image/gif', + 'image/jpeg') + + def init(self): + self.init_translator_class() + self.templates = None # no template bridge necessary + + def get_target_uri(self, docname, typ=None): + if docname == 'index': + return '' + if docname.endswith(SEP + 'index'): + return docname[:-5] # up to sep + return docname + SEP + + def handle_page(self, pagename, ctx, templatename='page.html', + outfilename=None, event_arg=None): + ctx['current_page_name'] = pagename + sidebarfile = self.config.html_sidebars.get(pagename) + if sidebarfile: + ctx['customsidebar'] = sidebarfile + + if not outfilename: + outfilename = path.join(self.outdir, os_path(pagename) + self.out_suffix) + + self.app.emit('html-page-context', pagename, templatename, ctx, event_arg) + + ensuredir(path.dirname(outfilename)) + f = open(outfilename, 'wb') + try: + self.implementation.dump(ctx, f, 2) + finally: + f.close() + + # if there is a source file, copy the source file for the + # "show source" link + if ctx.get('sourcename'): + source_name = path.join(self.outdir, '_sources', + os_path(ctx['sourcename'])) + ensuredir(path.dirname(source_name)) + shutil.copyfile(self.env.doc2path(pagename), source_name) + + def handle_finish(self): + # dump the global context + outfilename = path.join(self.outdir, self.globalcontext_filename) + f = open(outfilename, 'wb') + try: + self.implementation.dump(self.globalcontext, f, 2) + finally: + f.close() + + # super here to dump the search index + StandaloneHTMLBuilder.handle_finish(self) + + # copy the environment file from the doctree dir to the output dir + # as needed by the web app + shutil.copyfile(path.join(self.doctreedir, ENV_PICKLE_FILENAME), + path.join(self.outdir, ENV_PICKLE_FILENAME)) + + # touch 'last build' file, used by the web application to determine + # when to reload its environment and clear 
the cache + open(path.join(self.outdir, LAST_BUILD_FILENAME), 'w').close() + + +class PickleHTMLBuilder(SerializingHTMLBuilder): + """ + A Builder that dumps the generated HTML into pickle files. + """ + implementation = pickle + indexer_format = pickle + name = 'pickle' + out_suffix = '.fpickle' + globalcontext_filename = 'globalcontext.pickle' + searchindex_filename = 'searchindex.pickle' + +# compatibility alias +WebHTMLBuilder = PickleHTMLBuilder + + +class JSONHTMLBuilder(SerializingHTMLBuilder): + """ + A builder that dumps the generated HTML into JSON files. + """ + implementation = json + indexer_format = json + name = 'json' + out_suffix = '.fjson' + globalcontext_filename = 'globalcontext.json' + searchindex_filename = 'searchindex.json' + + def init(self): + if json is None: + from sphinx.application import SphinxError + raise SphinxError('The module simplejson (or json in Python >= 2.6) ' + 'is not available. The JSONHTMLBuilder builder ' + 'will not work.') + SerializingHTMLBuilder.init(self) diff --git a/sphinx/builders/htmlhelp.py b/sphinx/builders/htmlhelp.py new file mode 100644 index 00000000..23900f36 --- /dev/null +++ b/sphinx/builders/htmlhelp.py @@ -0,0 +1,245 @@ +# -*- coding: utf-8 -*- +""" + sphinx.builders.htmlhelp + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Build HTML help support files. + Parts adapted from Python's Doc/tools/prechm.py. + + :copyright: 2007-2008 by Georg Brandl. + :license: BSD. +""" + +import os +import cgi +from os import path + +from docutils import nodes + +from sphinx import addnodes +from sphinx.builders.html import StandaloneHTMLBuilder + + +# Project file (*.hhp) template. 'outname' is the file basename (like +# the pythlp in pythlp.hhp); 'version' is the doc version number (like +# the 2.2 in Python 2.2). +# The magical numbers in the long line under [WINDOWS] set most of the +# user-visible features (visible buttons, tabs, etc). +# About 0x10384e: This defines the buttons in the help viewer. 
The +# following defns are taken from htmlhelp.h. Not all possibilities +# actually work, and not all those that work are available from the Help +# Workshop GUI. In particular, the Zoom/Font button works and is not +# available from the GUI. The ones we're using are marked with 'x': +# +# 0x000002 Hide/Show x +# 0x000004 Back x +# 0x000008 Forward x +# 0x000010 Stop +# 0x000020 Refresh +# 0x000040 Home x +# 0x000080 Forward +# 0x000100 Back +# 0x000200 Notes +# 0x000400 Contents +# 0x000800 Locate x +# 0x001000 Options x +# 0x002000 Print x +# 0x004000 Index +# 0x008000 Search +# 0x010000 History +# 0x020000 Favorites +# 0x040000 Jump 1 +# 0x080000 Jump 2 +# 0x100000 Zoom/Font x +# 0x200000 TOC Next +# 0x400000 TOC Prev + +project_template = '''\ +[OPTIONS] +Binary TOC=Yes +Binary Index=No +Compiled file=%(outname)s.chm +Contents file=%(outname)s.hhc +Default Window=%(outname)s +Default topic=index.html +Display compile progress=No +Full text search stop list file=%(outname)s.stp +Full-text search=Yes +Index file=%(outname)s.hhk +Language=0x409 +Title=%(title)s + +[WINDOWS] +%(outname)s="%(title)s","%(outname)s.hhc","%(outname)s.hhk",\ +"index.html","index.html",,,,,0x63520,220,0x10384e,[0,0,1024,768],,,,,,,0 + +[FILES] +''' + +contents_header = '''\ + + + + + + + + + + +
    +''' + +contents_footer = '''\ +
+''' + +object_sitemap = '''\ + + + + +''' + +# List of words the full text search facility shouldn't index. This +# becomes file outname.stp. Note that this list must be pretty small! +# Different versions of the MS docs claim the file has a maximum size of +# 256 or 512 bytes (including \r\n at the end of each line). +# Note that "and", "or", "not" and "near" are operators in the search +# language, so no point indexing them even if we wanted to. +stopwords = """ +a and are as at +be but by +for +if in into is it +near no not +of on or +such +that the their then there these they this to +was will with +""".split() + + +class HTMLHelpBuilder(StandaloneHTMLBuilder): + """ + Builder that also outputs Windows HTML help project, contents and index files. + Adapted from the original Doc/tools/prechm.py. + """ + name = 'htmlhelp' + + # don't copy the reST source + copysource = False + supported_image_types = ['image/png', 'image/gif', 'image/jpeg'] + + # don't add links + add_header_links = False + add_definition_links = False + + def init(self): + StandaloneHTMLBuilder.init(self) + # the output files for HTML help must be .html only + self.out_suffix = '.html' + + def handle_finish(self): + self.build_hhx(self, self.outdir, self.config.htmlhelp_basename) + + def build_hhx(self, outdir, outname): + self.info('dumping stopword list...') + f = open(path.join(outdir, outname+'.stp'), 'w') + try: + for word in sorted(stopwords): + print >>f, word + finally: + f.close() + + self.info('writing project file...') + f = open(path.join(outdir, outname+'.hhp'), 'w') + try: + f.write(project_template % {'outname': outname, + 'title': self.config.html_title, + 'version': self.config.version, + 'project': self.config.project}) + if not outdir.endswith(os.sep): + outdir += os.sep + olen = len(outdir) + for root, dirs, files in os.walk(outdir): + staticdir = (root == path.join(outdir, '_static')) + for fn in files: + if (staticdir and not fn.endswith('.js')) or fn.endswith('.html'): + 
print >>f, path.join(root, fn)[olen:].replace(os.sep, '\\') + finally: + f.close() + + self.info('writing TOC file...') + f = open(path.join(outdir, outname+'.hhc'), 'w') + try: + f.write(contents_header) + # special books + f.write('
  • ' + object_sitemap % (self.config.html_short_title, + 'index.html')) + if self.config.html_use_modindex: + f.write('
  • ' + object_sitemap % (_('Global Module Index'), + 'modindex.html')) + # the TOC + tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc, self, + prune_toctrees=False) + def write_toc(node, ullevel=0): + if isinstance(node, nodes.list_item): + f.write('
  • ') + for subnode in node: + write_toc(subnode, ullevel) + elif isinstance(node, nodes.reference): + link = node['refuri'] + title = cgi.escape(node.astext()).replace('"','"') + item = object_sitemap % (title, link) + f.write(item.encode('ascii', 'xmlcharrefreplace')) + elif isinstance(node, nodes.bullet_list): + if ullevel != 0: + f.write('
      \n') + for subnode in node: + write_toc(subnode, ullevel+1) + if ullevel != 0: + f.write('
    \n') + elif isinstance(node, addnodes.compact_paragraph): + for subnode in node: + write_toc(subnode, ullevel) + istoctree = lambda node: isinstance(node, addnodes.compact_paragraph) and \ + node.has_key('toctree') + for node in tocdoc.traverse(istoctree): + write_toc(node) + f.write(contents_footer) + finally: + f.close() + + self.info('writing index file...') + index = self.env.create_index(self) + f = open(path.join(outdir, outname+'.hhk'), 'w') + try: + f.write('
      \n') + def write_index(title, refs, subitems): + def write_param(name, value): + item = ' \n' % (name, value) + f.write(item.encode('ascii', 'xmlcharrefreplace')) + title = cgi.escape(title) + f.write('
    • \n') + write_param('Keyword', title) + if len(refs) == 0: + write_param('See Also', title) + elif len(refs) == 1: + write_param('Local', refs[0]) + else: + for i, ref in enumerate(refs): + write_param('Name', '[%d] %s' % (i, ref)) # XXX: better title? + write_param('Local', ref) + f.write('\n') + if subitems: + f.write('
        ') + for subitem in subitems: + write_index(subitem[0], subitem[1], []) + f.write('
      ') + for (key, group) in index: + for title, (refs, subitems) in group: + write_index(title, refs, subitems) + f.write('
    \n') + finally: + f.close() diff --git a/sphinx/builders/latex.py b/sphinx/builders/latex.py new file mode 100644 index 00000000..916430db --- /dev/null +++ b/sphinx/builders/latex.py @@ -0,0 +1,185 @@ +# -*- coding: utf-8 -*- +""" + sphinx.builders.latex + ~~~~~~~~~~~~~~~~~~~~~ + + LaTeX builder. + + :copyright: 2008 by Georg Brandl. + :license: BSD. +""" + +import os +import shutil +from os import path + +from docutils import nodes +from docutils.io import FileOutput +from docutils.utils import new_document +from docutils.frontend import OptionParser + +from sphinx import package_dir, addnodes +from sphinx.util import SEP, texescape +from sphinx.builders import Builder +from sphinx.environment import NoUri +from sphinx.util.console import bold, darkgreen +from sphinx.writers.latex import LaTeXWriter + + +class LaTeXBuilder(Builder): + """ + Builds LaTeX output to create PDF. + """ + name = 'latex' + supported_image_types = ['application/pdf', 'image/png', 'image/gif', + 'image/jpeg'] + + def init(self): + self.docnames = [] + self.document_data = [] + texescape.init() + + def get_outdated_docs(self): + return 'all documents' # for now + + def get_target_uri(self, docname, typ=None): + if typ == 'token': + # token references are always inside production lists and must be + # replaced by \token{} in LaTeX + return '@token' + if docname not in self.docnames: + raise NoUri + else: + return '' + + def init_document_data(self): + preliminary_document_data = map(list, self.config.latex_documents) + if not preliminary_document_data: + self.warn('No "latex_documents" config value found; no documents ' + 'will be written.') + return + # assign subdirs to titles + self.titles = [] + for entry in preliminary_document_data: + docname = entry[0] + if docname not in self.env.all_docs: + self.warn('"latex_documents" config value references unknown ' + 'document %s' % docname) + continue + self.document_data.append(entry) + if docname.endswith(SEP+'index'): + docname = 
docname[:-5] + self.titles.append((docname, entry[2])) + + def write(self, *ignored): + # first, assemble the "appendix" docs that are in every PDF + appendices = [] + for fname in self.config.latex_appendices: + appendices.append(self.env.get_doctree(fname)) + + docwriter = LaTeXWriter(self) + docsettings = OptionParser( + defaults=self.env.settings, + components=(docwriter,)).get_default_values() + + self.init_document_data() + + for entry in self.document_data: + docname, targetname, title, author, docclass = entry[:5] + toctree_only = False + if len(entry) > 5: + toctree_only = entry[5] + destination = FileOutput( + destination_path=path.join(self.outdir, targetname), + encoding='utf-8') + self.info("processing " + targetname + "... ", nonl=1) + doctree = self.assemble_doctree(docname, toctree_only, + appendices=(docclass == 'manual') and appendices or []) + self.post_process_images(doctree) + self.info("writing... ", nonl=1) + doctree.settings = docsettings + doctree.settings.author = author + doctree.settings.title = title + doctree.settings.docname = docname + doctree.settings.docclass = docclass + docwriter.write(doctree, destination) + self.info("done") + + def assemble_doctree(self, indexfile, toctree_only, appendices): + self.docnames = set([indexfile] + appendices) + self.info(darkgreen(indexfile) + " ", nonl=1) + def process_tree(docname, tree): + tree = tree.deepcopy() + for toctreenode in tree.traverse(addnodes.toctree): + newnodes = [] + includefiles = map(str, toctreenode['includefiles']) + for includefile in includefiles: + try: + self.info(darkgreen(includefile) + " ", nonl=1) + subtree = process_tree(includefile, + self.env.get_doctree(includefile)) + self.docnames.add(includefile) + except Exception: + self.warn('%s: toctree contains ref to nonexisting file %r' % + (docname, includefile)) + else: + sof = addnodes.start_of_file() + sof.children = subtree.children + newnodes.append(sof) + toctreenode.parent.replace(toctreenode, newnodes) + return 
tree + tree = self.env.get_doctree(indexfile) + if toctree_only: + # extract toctree nodes from the tree and put them in a fresh document + new_tree = new_document('') + new_sect = nodes.section() + new_sect += nodes.title(u'', u'') + new_tree += new_sect + for node in tree.traverse(addnodes.toctree): + new_sect += node + tree = new_tree + largetree = process_tree(indexfile, tree) + largetree.extend(appendices) + self.info() + self.info("resolving references...") + self.env.resolve_references(largetree, indexfile, self) + # resolve :ref:s to distant tex files -- we can't add a cross-reference, + # but append the document name + for pendingnode in largetree.traverse(addnodes.pending_xref): + docname = pendingnode['refdocname'] + sectname = pendingnode['refsectname'] + newnodes = [nodes.emphasis(sectname, sectname)] + for subdir, title in self.titles: + if docname.startswith(subdir): + newnodes.append(nodes.Text(_(' (in '), _(' (in '))) + newnodes.append(nodes.emphasis(title, title)) + newnodes.append(nodes.Text(')', ')')) + break + else: + pass + pendingnode.replace_self(newnodes) + return largetree + + def finish(self): + # copy image files + if self.images: + self.info(bold('copying images...'), nonl=1) + for src, dest in self.images.iteritems(): + self.info(' '+src, nonl=1) + shutil.copyfile(path.join(self.srcdir, src), + path.join(self.outdir, dest)) + self.info() + + # the logo is handled differently + if self.config.latex_logo: + logobase = path.basename(self.config.latex_logo) + shutil.copyfile(path.join(self.confdir, self.config.latex_logo), + path.join(self.outdir, logobase)) + + self.info(bold('copying TeX support files... 
'), nonl=True) + staticdirname = path.join(package_dir, 'texinputs') + for filename in os.listdir(staticdirname): + if not filename.startswith('.'): + shutil.copyfile(path.join(staticdirname, filename), + path.join(self.outdir, filename)) + self.info('done') diff --git a/sphinx/builders/linkcheck.py b/sphinx/builders/linkcheck.py new file mode 100644 index 00000000..9dbfc913 --- /dev/null +++ b/sphinx/builders/linkcheck.py @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- +""" + sphinx.builders.linkcheck + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + The CheckExternalLinksBuilder class. + + :copyright: 2008 by Georg Brandl, Thomas Lamb. + :license: BSD. +""" + +import socket +from os import path +from urllib2 import build_opener, HTTPError + +from docutils import nodes + +from sphinx.builders import Builder +from sphinx.util.console import purple, red, darkgreen + +# create an opener that will simulate a browser user-agent +opener = build_opener() +opener.addheaders = [('User-agent', 'Mozilla/5.0')] + + +class CheckExternalLinksBuilder(Builder): + """ + Checks for broken external links. 
+ """ + name = 'linkcheck' + + def init(self): + self.good = set() + self.broken = {} + self.redirected = {} + # set a timeout for non-responding servers + socket.setdefaulttimeout(5.0) + # create output file + open(path.join(self.outdir, 'output.txt'), 'w').close() + + def get_target_uri(self, docname, typ=None): + return '' + + def get_outdated_docs(self): + return self.env.found_docs + + def prepare_writing(self, docnames): + return + + def write_doc(self, docname, doctree): + self.info() + for node in doctree.traverse(nodes.reference): + try: + self.check(node, docname) + except KeyError: + continue + + def check(self, node, docname): + uri = node['refuri'] + + if '#' in uri: + uri = uri.split('#')[0] + + if uri in self.good: + return + + lineno = None + while lineno is None and node: + node = node.parent + lineno = node.line + + if uri[0:5] == 'http:' or uri[0:6] == 'https:': + self.info(uri, nonl=1) + + if uri in self.broken: + (r, s) = self.broken[uri] + elif uri in self.redirected: + (r, s) = self.redirected[uri] + else: + (r, s) = self.resolve(uri) + + if r == 0: + self.info(' - ' + darkgreen('working')) + self.good.add(uri) + elif r == 2: + self.info(' - ' + red('broken: ') + s) + self.write_entry('broken', docname, lineno, uri + ': ' + s) + self.broken[uri] = (r, s) + if self.app.quiet: + self.warn('%s:%s: broken link: %s' % (docname, lineno, uri)) + else: + self.info(' - ' + purple('redirected') + ' to ' + s) + self.write_entry('redirected', docname, lineno, uri + ' to ' + s) + self.redirected[uri] = (r, s) + elif len(uri) == 0 or uri[0:7] == 'mailto:' or uri[0:4] == 'ftp:': + return + else: + self.warn(uri + ' - ' + red('malformed!')) + self.write_entry('malformed', docname, lineno, uri) + if self.app.quiet: + self.warn('%s:%s: malformed link: %s' % (docname, lineno, uri)) + self.app.statuscode = 1 + + if self.broken: + self.app.statuscode = 1 + + def write_entry(self, what, docname, line, uri): + output = open(path.join(self.outdir, 'output.txt'), 
'a') + output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None), + line, what, uri)) + output.close() + + def resolve(self, uri): + try: + f = opener.open(uri) + f.close() + except HTTPError, err: + #if err.code == 403 and uri.startswith('http://en.wikipedia.org/'): + # # Wikipedia blocks requests from urllib User-Agent + # return (0, 0) + return (2, str(err)) + except Exception, err: + return (2, str(err)) + if f.url.rstrip('/') == uri.rstrip('/'): + return (0, 0) + else: + return (1, f.url) + + def finish(self): + return diff --git a/sphinx/builders/text.py b/sphinx/builders/text.py new file mode 100644 index 00000000..c6f232e8 --- /dev/null +++ b/sphinx/builders/text.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +""" + sphinx.builders.text + ~~~~~~~~~~~~~~~~~~~~ + + Plain-text Sphinx builder. + + :copyright: 2008 by Georg Brandl. + :license: BSD. +""" + +import codecs +from os import path + +from docutils.io import StringOutput + +from sphinx.util import ensuredir, os_path +from sphinx.builders import Builder +from sphinx.writers.text import TextWriter + + +class TextBuilder(Builder): + name = 'text' + out_suffix = '.txt' + + def init(self): + pass + + def get_outdated_docs(self): + for docname in self.env.found_docs: + if docname not in self.env.all_docs: + yield docname + continue + targetname = self.env.doc2path(docname, self.outdir, self.out_suffix) + try: + targetmtime = path.getmtime(targetname) + except Exception: + targetmtime = 0 + try: + srcmtime = path.getmtime(self.env.doc2path(docname)) + if srcmtime > targetmtime: + yield docname + except EnvironmentError: + # source doesn't exist anymore + pass + + def get_target_uri(self, docname, typ=None): + return '' + + def prepare_writing(self, docnames): + self.writer = TextWriter(self) + + def write_doc(self, docname, doctree): + destination = StringOutput(encoding='utf-8') + self.writer.write(doctree, destination) + outfilename = path.join(self.outdir, os_path(docname) + self.out_suffix) + 
ensuredir(path.dirname(outfilename)) # normally different from self.outdir + try: + f = codecs.open(outfilename, 'w', 'utf-8') + try: + f.write(self.writer.output) + finally: + f.close() + except (IOError, OSError), err: + self.warn("Error writing file %s: %s" % (outfilename, err)) + + def finish(self): + pass diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py index 0c034e0d..5d9607c9 100644 --- a/sphinx/ext/intersphinx.py +++ b/sphinx/ext/intersphinx.py @@ -31,7 +31,7 @@ from os import path from docutils import nodes -from sphinx.builder import INVENTORY_FILENAME +from sphinx.builders import INVENTORY_FILENAME def fetch_inventory(app, uri, inv): diff --git a/sphinx/htmlhelp.py b/sphinx/htmlhelp.py deleted file mode 100644 index 4cc68bc9..00000000 --- a/sphinx/htmlhelp.py +++ /dev/null @@ -1,220 +0,0 @@ -# -*- coding: utf-8 -*- -""" - sphinx.htmlhelp - ~~~~~~~~~~~~~~~ - - Build HTML help support files. - Adapted from the original Doc/tools/prechm.py. - - :copyright: 2007-2008 by Georg Brandl. - :license: BSD. -""" - -import os -import cgi -from os import path - -from docutils import nodes - -from sphinx import addnodes - -# Project file (*.hhp) template. 'outname' is the file basename (like -# the pythlp in pythlp.hhp); 'version' is the doc version number (like -# the 2.2 in Python 2.2). -# The magical numbers in the long line under [WINDOWS] set most of the -# user-visible features (visible buttons, tabs, etc). -# About 0x10384e: This defines the buttons in the help viewer. The -# following defns are taken from htmlhelp.h. Not all possibilities -# actually work, and not all those that work are available from the Help -# Workshop GUI. In particular, the Zoom/Font button works and is not -# available from the GUI. 
The ones we're using are marked with 'x': -# -# 0x000002 Hide/Show x -# 0x000004 Back x -# 0x000008 Forward x -# 0x000010 Stop -# 0x000020 Refresh -# 0x000040 Home x -# 0x000080 Forward -# 0x000100 Back -# 0x000200 Notes -# 0x000400 Contents -# 0x000800 Locate x -# 0x001000 Options x -# 0x002000 Print x -# 0x004000 Index -# 0x008000 Search -# 0x010000 History -# 0x020000 Favorites -# 0x040000 Jump 1 -# 0x080000 Jump 2 -# 0x100000 Zoom/Font x -# 0x200000 TOC Next -# 0x400000 TOC Prev - -project_template = '''\ -[OPTIONS] -Binary TOC=Yes -Binary Index=No -Compiled file=%(outname)s.chm -Contents file=%(outname)s.hhc -Default Window=%(outname)s -Default topic=index.html -Display compile progress=No -Full text search stop list file=%(outname)s.stp -Full-text search=Yes -Index file=%(outname)s.hhk -Language=0x409 -Title=%(title)s - -[WINDOWS] -%(outname)s="%(title)s","%(outname)s.hhc","%(outname)s.hhk",\ -"index.html","index.html",,,,,0x63520,220,0x10384e,[0,0,1024,768],,,,,,,0 - -[FILES] -''' - -contents_header = '''\ - - - - - - - - - - -
      -''' - -contents_footer = '''\ -
    -''' - -object_sitemap = '''\ - - - - -''' - -# List of words the full text search facility shouldn't index. This -# becomes file outname.stp. Note that this list must be pretty small! -# Different versions of the MS docs claim the file has a maximum size of -# 256 or 512 bytes (including \r\n at the end of each line). -# Note that "and", "or", "not" and "near" are operators in the search -# language, so no point indexing them even if we wanted to. -stopwords = """ -a and are as at -be but by -for -if in into is it -near no not -of on or -such -that the their then there these they this to -was will with -""".split() - - -def build_hhx(builder, outdir, outname): - builder.info('dumping stopword list...') - f = open(path.join(outdir, outname+'.stp'), 'w') - try: - for word in sorted(stopwords): - print >>f, word - finally: - f.close() - - builder.info('writing project file...') - f = open(path.join(outdir, outname+'.hhp'), 'w') - try: - f.write(project_template % {'outname': outname, - 'title': builder.config.html_title, - 'version': builder.config.version, - 'project': builder.config.project}) - if not outdir.endswith(os.sep): - outdir += os.sep - olen = len(outdir) - for root, dirs, files in os.walk(outdir): - staticdir = (root == path.join(outdir, '_static')) - for fn in files: - if (staticdir and not fn.endswith('.js')) or fn.endswith('.html'): - print >>f, path.join(root, fn)[olen:].replace(os.sep, '\\') - finally: - f.close() - - builder.info('writing TOC file...') - f = open(path.join(outdir, outname+'.hhc'), 'w') - try: - f.write(contents_header) - # special books - f.write('
  • ' + object_sitemap % (builder.config.html_short_title, - 'index.html')) - if builder.config.html_use_modindex: - f.write('
  • ' + object_sitemap % (_('Global Module Index'), - 'modindex.html')) - # the TOC - tocdoc = builder.env.get_and_resolve_doctree(builder.config.master_doc, builder, - prune_toctrees=False) - def write_toc(node, ullevel=0): - if isinstance(node, nodes.list_item): - f.write('
  • ') - for subnode in node: - write_toc(subnode, ullevel) - elif isinstance(node, nodes.reference): - link = node['refuri'] - title = cgi.escape(node.astext()).replace('"','"') - item = object_sitemap % (title, link) - f.write(item.encode('ascii', 'xmlcharrefreplace')) - elif isinstance(node, nodes.bullet_list): - if ullevel != 0: - f.write('
      \n') - for subnode in node: - write_toc(subnode, ullevel+1) - if ullevel != 0: - f.write('
    \n') - elif isinstance(node, addnodes.compact_paragraph): - for subnode in node: - write_toc(subnode, ullevel) - istoctree = lambda node: isinstance(node, addnodes.compact_paragraph) and \ - node.has_key('toctree') - for node in tocdoc.traverse(istoctree): - write_toc(node) - f.write(contents_footer) - finally: - f.close() - - builder.info('writing index file...') - index = builder.env.create_index(builder) - f = open(path.join(outdir, outname+'.hhk'), 'w') - try: - f.write('
      \n') - def write_index(title, refs, subitems): - def write_param(name, value): - item = ' \n' % (name, value) - f.write(item.encode('ascii', 'xmlcharrefreplace')) - title = cgi.escape(title) - f.write('
    • \n') - write_param('Keyword', title) - if len(refs) == 0: - write_param('See Also', title) - elif len(refs) == 1: - write_param('Local', refs[0]) - else: - for i, ref in enumerate(refs): - write_param('Name', '[%d] %s' % (i, ref)) # XXX: better title? - write_param('Local', ref) - f.write('\n') - if subitems: - f.write('
        ') - for subitem in subitems: - write_index(subitem[0], subitem[1], []) - f.write('
      ') - for (key, group) in index: - for title, (refs, subitems) in group: - write_index(title, refs, subitems) - f.write('
    \n') - finally: - f.close() diff --git a/sphinx/htmlwriter.py b/sphinx/htmlwriter.py deleted file mode 100644 index 0505fd08..00000000 --- a/sphinx/htmlwriter.py +++ /dev/null @@ -1,457 +0,0 @@ -# -*- coding: utf-8 -*- -""" - sphinx.htmlwriter - ~~~~~~~~~~~~~~~~~ - - docutils writers handling Sphinx' custom nodes. - - :copyright: 2007-2008 by Georg Brandl. - :license: BSD. -""" - -import sys -import posixpath -import os - -from docutils import nodes -from docutils.writers.html4css1 import Writer, HTMLTranslator as BaseTranslator - -from sphinx.locale import admonitionlabels, versionlabels -from sphinx.highlighting import PygmentsBridge -from sphinx.util.smartypants import sphinx_smarty_pants - -try: - import Image # check for the Python Imaging Library -except ImportError: - Image = None - -class HTMLWriter(Writer): - def __init__(self, builder): - Writer.__init__(self) - self.builder = builder - - def translate(self): - # sadly, this is mostly copied from parent class - self.visitor = visitor = self.builder.translator_class(self.builder, - self.document) - self.document.walkabout(visitor) - self.output = visitor.astext() - for attr in ('head_prefix', 'stylesheet', 'head', 'body_prefix', - 'body_pre_docinfo', 'docinfo', 'body', 'fragment', - 'body_suffix', 'meta', 'title', 'subtitle', 'header', - 'footer', 'html_prolog', 'html_head', 'html_title', - 'html_subtitle', 'html_body', ): - setattr(self, attr, getattr(visitor, attr, None)) - self.clean_meta = ''.join(visitor.meta[2:]) - - -class HTMLTranslator(BaseTranslator): - """ - Our custom HTML translator. 
- """ - - def __init__(self, builder, *args, **kwds): - BaseTranslator.__init__(self, *args, **kwds) - self.highlighter = PygmentsBridge('html', builder.config.pygments_style) - self.no_smarty = 0 - self.builder = builder - self.highlightlang = builder.config.highlight_language - self.highlightlinenothreshold = sys.maxint - self.protect_literal_text = 0 - - def visit_desc(self, node): - self.body.append(self.starttag(node, 'dl', CLASS=node['desctype'])) - def depart_desc(self, node): - self.body.append('\n\n') - - def visit_desc_signature(self, node): - # the id is set automatically - self.body.append(self.starttag(node, 'dt')) - # anchor for per-desc interactive data - if node.parent['desctype'] != 'describe' and node['ids'] and node['first']: - self.body.append('' % node['ids'][0]) - if node.parent['desctype'] in ('class', 'exception'): - self.body.append('%s ' % node.parent['desctype']) - def depart_desc_signature(self, node): - if node['ids'] and self.builder.add_definition_links: - self.body.append(u'\u00B6' % - _('Permalink to this definition')) - self.body.append('\n') - - def visit_desc_addname(self, node): - self.body.append(self.starttag(node, 'tt', '', CLASS='descclassname')) - def depart_desc_addname(self, node): - self.body.append('') - - def visit_desc_type(self, node): - pass - def depart_desc_type(self, node): - pass - - def visit_desc_name(self, node): - self.body.append(self.starttag(node, 'tt', '', CLASS='descname')) - def depart_desc_name(self, node): - self.body.append('') - - def visit_desc_parameterlist(self, node): - self.body.append('(') - self.first_param = 1 - def depart_desc_parameterlist(self, node): - self.body.append(')') - - def visit_desc_parameter(self, node): - if not self.first_param: - self.body.append(', ') - else: - self.first_param = 0 - if not node.hasattr('noemph'): - self.body.append('') - def depart_desc_parameter(self, node): - if not node.hasattr('noemph'): - self.body.append('') - - def visit_desc_optional(self, node): 
- self.body.append('[') - def depart_desc_optional(self, node): - self.body.append(']') - - def visit_desc_annotation(self, node): - self.body.append(self.starttag(node, 'em', CLASS='property')) - def depart_desc_annotation(self, node): - self.body.append('') - - def visit_desc_content(self, node): - self.body.append(self.starttag(node, 'dd', '')) - def depart_desc_content(self, node): - self.body.append('') - - def visit_refcount(self, node): - self.body.append(self.starttag(node, 'em', '', CLASS='refcount')) - def depart_refcount(self, node): - self.body.append('') - - def visit_versionmodified(self, node): - self.body.append(self.starttag(node, 'p')) - text = versionlabels[node['type']] % node['version'] - if len(node): - text += ': ' - else: - text += '.' - self.body.append('%s' % text) - def depart_versionmodified(self, node): - self.body.append('

    \n') - - # overwritten - def visit_reference(self, node): - BaseTranslator.visit_reference(self, node) - if node.hasattr('reftitle'): - # ugly hack to add a title attribute - starttag = self.body[-1] - if not starttag.startswith(' tag - self.section_level += 1 - self.body.append(self.starttag(node, 'div', CLASS='section')) - - def visit_title(self, node): - # don't move the id attribute inside the tag - BaseTranslator.visit_title(self, node, move_ids=0) - - # overwritten - def visit_literal_block(self, node): - if node.rawsource != node.astext(): - # most probably a parsed-literal block -- don't highlight - return BaseTranslator.visit_literal_block(self, node) - lang = self.highlightlang - linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1 - if node.has_key('language'): - # code-block directives - lang = node['language'] - if node.has_key('linenos'): - linenos = node['linenos'] - highlighted = self.highlighter.highlight_block(node.rawsource, lang, linenos) - starttag = self.starttag(node, 'div', suffix='', CLASS='highlight-%s' % lang) - self.body.append(starttag + highlighted + '
  • \n') - raise nodes.SkipNode - - def visit_doctest_block(self, node): - self.visit_literal_block(node) - - # overwritten - def visit_literal(self, node): - if len(node.children) == 1 and \ - node.children[0] in ('None', 'True', 'False'): - node['classes'].append('xref') - self.body.append(self.starttag(node, 'tt', '', CLASS='docutils literal')) - self.protect_literal_text += 1 - def depart_literal(self, node): - self.protect_literal_text -= 1 - self.body.append('') - - def visit_productionlist(self, node): - self.body.append(self.starttag(node, 'pre')) - names = [] - for production in node: - names.append(production['tokenname']) - maxlen = max(len(name) for name in names) - for production in node: - if production['tokenname']: - lastname = production['tokenname'].ljust(maxlen) - self.body.append(self.starttag(production, 'strong', '')) - self.body.append(lastname + ' ::= ') - else: - self.body.append('%s ' % (' '*len(lastname))) - production.walkabout(self) - self.body.append('\n') - self.body.append('\n') - raise nodes.SkipNode - def depart_productionlist(self, node): - pass - - def visit_production(self, node): - pass - def depart_production(self, node): - pass - - def visit_centered(self, node): - self.body.append(self.starttag(node, 'p', CLASS="centered") + '') - def depart_centered(self, node): - self.body.append('

    ') - - def visit_compact_paragraph(self, node): - pass - def depart_compact_paragraph(self, node): - pass - - def visit_highlightlang(self, node): - self.highlightlang = node['lang'] - self.highlightlinenothreshold = node['linenothreshold'] - def depart_highlightlang(self, node): - pass - - # overwritten - def visit_image(self, node): - olduri = node['uri'] - # rewrite the URI if the environment knows about it - if olduri in self.builder.images: - node['uri'] = posixpath.join(self.builder.imgpath, - self.builder.images[olduri]) - - if node.has_key('scale'): - if Image and not (node.has_key('width') - and node.has_key('height')): - try: - im = Image.open(os.path.join(self.builder.srcdir, - olduri)) - except (IOError, # Source image can't be found or opened - UnicodeError): # PIL doesn't like Unicode paths. - print olduri - pass - else: - if not node.has_key('width'): - node['width'] = str(im.size[0]) - if not node.has_key('height'): - node['height'] = str(im.size[1]) - del im - BaseTranslator.visit_image(self, node) - - def visit_toctree(self, node): - # this only happens when formatting a toc from env.tocs -- in this - # case we don't want to include the subtree - raise nodes.SkipNode - - def visit_index(self, node): - raise nodes.SkipNode - - def visit_tabular_col_spec(self, node): - raise nodes.SkipNode - - def visit_glossary(self, node): - pass - def depart_glossary(self, node): - pass - - def visit_acks(self, node): - pass - def depart_acks(self, node): - pass - - def visit_module(self, node): - pass - def depart_module(self, node): - pass - - def bulk_text_processor(self, text): - return text - - # overwritten - def visit_Text(self, node): - text = node.astext() - encoded = self.encode(text) - if self.protect_literal_text: - # moved here from base class's visit_literal to support - # more formatting in literal nodes - for token in self.words_and_spaces.findall(encoded): - if token.strip(): - # protect literal text from line wrapping - self.body.append('%s' 
% token) - elif token in ' \n': - # allow breaks at whitespace - self.body.append(token) - else: - # protect runs of multiple spaces; the last one can wrap - self.body.append(' ' * (len(token)-1) + ' ') - else: - if self.in_mailto and self.settings.cloak_email_addresses: - encoded = self.cloak_email(encoded) - else: - encoded = self.bulk_text_processor(encoded) - self.body.append(encoded) - - # these are all for docutils 0.5 compatibility - - def visit_note(self, node): - self.visit_admonition(node, 'note') - def depart_note(self, node): - self.depart_admonition(node) - - def visit_warning(self, node): - self.visit_admonition(node, 'warning') - def depart_warning(self, node): - self.depart_admonition(node) - - def visit_attention(self, node): - self.visit_admonition(node, 'attention') - - def depart_attention(self, node): - self.depart_admonition() - - def visit_caution(self, node): - self.visit_admonition(node, 'caution') - def depart_caution(self, node): - self.depart_admonition() - - def visit_danger(self, node): - self.visit_admonition(node, 'danger') - def depart_danger(self, node): - self.depart_admonition() - - def visit_error(self, node): - self.visit_admonition(node, 'error') - def depart_error(self, node): - self.depart_admonition() - - def visit_hint(self, node): - self.visit_admonition(node, 'hint') - def depart_hint(self, node): - self.depart_admonition() - - def visit_important(self, node): - self.visit_admonition(node, 'important') - def depart_important(self, node): - self.depart_admonition() - - def visit_tip(self, node): - self.visit_admonition(node, 'tip') - def depart_tip(self, node): - self.depart_admonition() - - # these are only handled specially in the SmartyPantsHTMLTranslator - def visit_literal_emphasis(self, node): - return self.visit_emphasis(node) - def depart_literal_emphasis(self, node): - return self.depart_emphasis(node) - - def depart_title(self, node): - close_tag = self.context[-1] - if self.builder.add_header_links and \ - 
(close_tag.startswith('\u00B6
    ' % - _('Permalink to this headline')) - BaseTranslator.depart_title(self, node) - - def unknown_visit(self, node): - raise NotImplementedError('Unknown node: ' + node.__class__.__name__) - - -class SmartyPantsHTMLTranslator(HTMLTranslator): - """ - Handle ordinary text via smartypants, converting quotes and dashes - to the correct entities. - """ - - def __init__(self, *args, **kwds): - self.no_smarty = 0 - HTMLTranslator.__init__(self, *args, **kwds) - - def visit_literal(self, node): - self.no_smarty += 1 - try: - # this raises SkipNode - HTMLTranslator.visit_literal(self, node) - finally: - self.no_smarty -= 1 - - def visit_literal_emphasis(self, node): - self.no_smarty += 1 - self.visit_emphasis(node) - - def depart_literal_emphasis(self, node): - self.depart_emphasis(node) - self.no_smarty -= 1 - - def visit_desc_signature(self, node): - self.no_smarty += 1 - HTMLTranslator.visit_desc_signature(self, node) - - def depart_desc_signature(self, node): - self.no_smarty -= 1 - HTMLTranslator.depart_desc_signature(self, node) - - def visit_productionlist(self, node): - self.no_smarty += 1 - try: - HTMLTranslator.visit_productionlist(self, node) - finally: - self.no_smarty -= 1 - - def visit_option(self, node): - self.no_smarty += 1 - HTMLTranslator.visit_option(self, node) - def depart_option(self, node): - self.no_smarty -= 1 - HTMLTranslator.depart_option(self, node) - - def bulk_text_processor(self, text): - if self.no_smarty <= 0: - return sphinx_smarty_pants(text) - return text diff --git a/sphinx/latexwriter.py b/sphinx/latexwriter.py deleted file mode 100644 index 94cb23db..00000000 --- a/sphinx/latexwriter.py +++ /dev/null @@ -1,1185 +0,0 @@ -# -*- coding: utf-8 -*- -""" - sphinx.latexwriter - ~~~~~~~~~~~~~~~~~~ - - Custom docutils writer for LaTeX. - - Much of this code is adapted from Dave Kuhlman's "docpy" writer from his - docutils sandbox. - - :copyright: 2007-2008 by Georg Brandl, Dave Kuhlman. - :license: BSD. 
-""" - -import re -import sys -from os import path - -from docutils import nodes, writers -from docutils.writers.latex2e import Babel - -from sphinx import addnodes -from sphinx import highlighting -from sphinx.locale import admonitionlabels, versionlabels -from sphinx.util import ustrftime -from sphinx.util.texescape import tex_escape_map -from sphinx.util.smartypants import educateQuotesLatex - -HEADER = r'''%% Generated by Sphinx. -\documentclass[%(papersize)s,%(pointsize)s%(classoptions)s]{%(docclass)s} -%(inputenc)s -%(fontenc)s -%(babel)s -%(fontpkg)s -%(fncychap)s -\usepackage{sphinx} -%(preamble)s - -\title{%(title)s} -\date{%(date)s} -\release{%(release)s} -\author{%(author)s} -\newcommand{\sphinxlogo}{%(logo)s} -\renewcommand{\releasename}{%(releasename)s} -%(makeindex)s -%(makemodindex)s -''' - -BEGIN_DOC = r''' -\begin{document} -%(shorthandoff)s -%(maketitle)s -%(tableofcontents)s -''' - -FOOTER = r''' -%(footer)s -\renewcommand{\indexname}{%(modindexname)s} -%(printmodindex)s -\renewcommand{\indexname}{%(indexname)s} -%(printindex)s -\end{document} -''' - - -class LaTeXWriter(writers.Writer): - - supported = ('sphinxlatex',) - - settings_spec = ('LaTeX writer options', '', ( - ('Document name', ['--docname'], {'default': ''}), - ('Document class', ['--docclass'], {'default': 'manual'}), - ('Author', ['--author'], {'default': ''}), - )) - settings_defaults = {} - - output = None - - def __init__(self, builder): - writers.Writer.__init__(self) - self.builder = builder - - def translate(self): - visitor = LaTeXTranslator(self.document, self.builder) - self.document.walkabout(visitor) - self.output = visitor.astext() - - -# Helper classes - -class ExtBabel(Babel): - def get_shorthandoff(self): - shortlang = self.language.split('_')[0] - if shortlang in ('de', 'sl', 'pt', 'es', 'nl', 'pl'): - return '\\shorthandoff{"}' - return '' - - _ISO639_TO_BABEL = Babel._ISO639_TO_BABEL.copy() - _ISO639_TO_BABEL['sl'] = 'slovene' - - -class Table(object): - def 
__init__(self): - self.col = 0 - self.colcount = 0 - self.colspec = None - self.had_head = False - self.has_verbatim = False - self.caption = None - - -class Desc(object): - def __init__(self, node): - self.env = LaTeXTranslator.desc_map.get(node['desctype'], 'describe') - self.type = self.cls = self.name = self.params = self.annotation = '' - self.count = 0 - - -class LaTeXTranslator(nodes.NodeVisitor): - sectionnames = ["part", "chapter", "section", "subsection", - "subsubsection", "paragraph", "subparagraph"] - - ignore_missing_images = False - - default_elements = { - 'docclass': 'manual', - 'papersize': 'letterpaper', - 'pointsize': '10pt', - 'classoptions': '', - 'inputenc': '\\usepackage[utf8]{inputenc}', - 'fontenc': '\\usepackage[T1]{fontenc}', - 'babel': '\\usepackage{babel}', - 'fontpkg': '\\usepackage{times}', - 'fncychap': '\\usepackage[Bjarne]{fncychap}', - 'preamble': '', - 'title': '', - 'date': '', - 'release': '', - 'author': '', - 'logo': '', - 'releasename': 'Release', - 'makeindex': '\\makeindex', - 'makemodindex': '\\makemodindex', - 'shorthandoff': '', - 'maketitle': '\\maketitle', - 'tableofcontents': '\\tableofcontents', - 'footer': '', - 'printmodindex': '\\printmodindex', - 'printindex': '\\printindex', - } - - def __init__(self, document, builder): - nodes.NodeVisitor.__init__(self, document) - self.builder = builder - self.body = [] - - # sort out some elements - papersize = builder.config.latex_paper_size + 'paper' - if papersize == 'paper': # e.g. 
command line "-D latex_paper_size=" - papersize = 'letterpaper' - - self.elements = self.default_elements.copy() - self.elements.update({ - 'docclass': document.settings.docclass, - 'papersize': papersize, - 'pointsize': builder.config.latex_font_size, - # if empty, the title is set to the first section title - 'title': document.settings.title, - 'date': ustrftime(builder.config.today_fmt or _('%B %d, %Y')), - 'release': builder.config.release, - 'author': document.settings.author, - 'releasename': _('Release'), - 'preamble': builder.config.latex_preamble, - 'modindexname': _('Module Index'), - 'indexname': _('Index'), - }) - if builder.config.latex_logo: - self.elements['logo'] = '\\includegraphics{%s}\\par' % \ - path.basename(builder.config.latex_logo) - if builder.config.language: - babel = ExtBabel(builder.config.language) - lang = babel.get_language() - if lang: - self.elements['classoptions'] += ',' + babel.get_language() - else: - self.builder.warn('no Babel option known for language %r' % - builder.config.language) - self.elements['shorthandoff'] = babel.get_shorthandoff() - self.elements['fncychap'] = '\\usepackage[Sonny]{fncychap}' - else: - self.elements['classoptions'] += ',english' - if not builder.config.latex_use_modindex: - self.elements['makemodindex'] = '' - self.elements['printmodindex'] = '' - # allow the user to override them all - self.elements.update(builder.config.latex_elements) - - self.highlighter = highlighting.PygmentsBridge( - 'latex', builder.config.pygments_style) - self.context = [] - self.descstack = [] - self.bibitems = [] - self.table = None - self.next_table_colspec = None - self.highlightlang = builder.config.highlight_language - self.highlightlinenothreshold = sys.maxint - self.written_ids = set() - self.footnotestack = [] - if self.elements['docclass'] == 'manual': - if builder.config.latex_use_parts: - self.top_sectionlevel = 0 - else: - self.top_sectionlevel = 1 - else: - self.top_sectionlevel = 2 - 
self.next_section_target = None - # flags - self.verbatim = None - self.in_title = 0 - self.in_production_list = 0 - self.first_document = 1 - self.this_is_the_title = 1 - self.literal_whitespace = 0 - self.no_contractions = 0 - - def astext(self): - return (HEADER % self.elements + self.highlighter.get_stylesheet() + - u''.join(self.body) + FOOTER % self.elements) - - def visit_document(self, node): - self.footnotestack.append(self.collect_footnotes(node)) - if self.first_document == 1: - # the first document is all the regular content ... - self.body.append(BEGIN_DOC % self.elements) - self.first_document = 0 - elif self.first_document == 0: - # ... and all others are the appendices - self.body.append('\n\\appendix\n') - self.first_document = -1 - # "- 1" because the level is increased before the title is visited - self.sectionlevel = self.top_sectionlevel - 1 - def depart_document(self, node): - if self.bibitems: - widest_label = "" - for bi in self.bibitems: - if len(widest_label) < len(bi[0]): - widest_label = bi[0] - self.body.append('\n\\begin{thebibliography}{%s}\n' % widest_label) - for bi in self.bibitems: - # cite_key: underscores must not be escaped - cite_key = bi[0].replace(r"\_", "_") - self.body.append('\\bibitem[%s]{%s}{%s}\n' % (bi[0], cite_key, bi[1])) - self.body.append('\\end{thebibliography}\n') - self.bibitems = [] - - def visit_start_of_file(self, node): - # This marks the begin of a new file; therefore the current module and - # class must be reset - self.body.append('\n\\resetcurrentobjects\n') - # and also, new footnotes - self.footnotestack.append(self.collect_footnotes(node)) - - def collect_footnotes(self, node): - fnotes = {} - def footnotes_under(n): - if isinstance(n, nodes.footnote): - yield n - else: - for c in n.children: - if isinstance(c, addnodes.start_of_file): - continue - for k in footnotes_under(c): - yield k - for fn in footnotes_under(node): - num = fn.children[0].astext().strip() - fnotes[num] = fn - 
fn.parent.remove(fn) - return fnotes - - def depart_start_of_file(self, node): - self.footnotestack.pop() - - def visit_highlightlang(self, node): - self.highlightlang = node['lang'] - self.highlightlinenothreshold = node['linenothreshold'] - raise nodes.SkipNode - - def visit_section(self, node): - if not self.this_is_the_title: - self.sectionlevel += 1 - self.body.append('\n\n') - if self.next_section_target: - self.body.append(r'\hypertarget{%s}{}' % self.next_section_target) - self.next_section_target = None - #if node.get('ids'): - # for id in node['ids']: - # if id not in self.written_ids: - # self.body.append(r'\hypertarget{%s}{}' % id) - # self.written_ids.add(id) - def depart_section(self, node): - self.sectionlevel = max(self.sectionlevel - 1, self.top_sectionlevel - 1) - - def visit_problematic(self, node): - self.body.append(r'{\color{red}\bfseries{}') - def depart_problematic(self, node): - self.body.append('}') - - def visit_topic(self, node): - self.body.append('\\setbox0\\vbox{\n' - '\\begin{minipage}{0.95\\textwidth}\n') - def depart_topic(self, node): - self.body.append('\\end{minipage}}\n' - '\\begin{center}\\setlength{\\fboxsep}{5pt}' - '\\shadowbox{\\box0}\\end{center}\n') - visit_sidebar = visit_topic - depart_sidebar = depart_topic - - def visit_glossary(self, node): - pass - def depart_glossary(self, node): - pass - - def visit_productionlist(self, node): - self.body.append('\n\n\\begin{productionlist}\n') - self.in_production_list = 1 - def depart_productionlist(self, node): - self.body.append('\\end{productionlist}\n\n') - self.in_production_list = 0 - - def visit_production(self, node): - if node['tokenname']: - self.body.append('\\production{%s}{' % self.encode(node['tokenname'])) - else: - self.body.append('\\productioncont{') - def depart_production(self, node): - self.body.append('}\n') - - def visit_transition(self, node): - self.body.append('\n\n\\bigskip\\hrule{}\\bigskip\n\n') - def depart_transition(self, node): - pass - - def 
visit_title(self, node): - parent = node.parent - if isinstance(parent, addnodes.seealso): - # the environment already handles this - raise nodes.SkipNode - elif self.this_is_the_title: - if len(node.children) != 1 and not isinstance(node.children[0], nodes.Text): - self.builder.warn('document title is not a single Text node') - if not self.elements['title']: - # text needs to be escaped since it is inserted into - # the output literally - self.elements['title'] = node.astext().translate(tex_escape_map) - self.this_is_the_title = 0 - raise nodes.SkipNode - elif isinstance(parent, nodes.section): - try: - self.body.append(r'\%s{' % self.sectionnames[self.sectionlevel]) - except IndexError: - from sphinx.application import SphinxError - raise SphinxError('too many nesting section levels for LaTeX, ' - 'at heading: %s' % node.astext()) - self.context.append('}\n') - elif isinstance(parent, (nodes.topic, nodes.sidebar)): - self.body.append(r'\textbf{') - self.context.append('}\n\n\medskip\n\n') - elif isinstance(parent, nodes.Admonition): - self.body.append('{') - self.context.append('}\n') - elif isinstance(parent, nodes.table): - self.table.caption = self.encode(node.astext()) - raise nodes.SkipNode - else: - self.builder.warn('encountered title node not in section, topic, ' - 'table, admonition or sidebar') - self.body.append('\\textbf{') - self.context.append('}\n') - self.in_title = 1 - def depart_title(self, node): - self.in_title = 0 - self.body.append(self.context.pop()) - - def visit_subtitle(self, node): - if isinstance(node.parent, nodes.sidebar): - self.body.append('~\\\\\n\\textbf{') - self.context.append('}\n\\smallskip\n') - else: - self.context.append('') - def depart_subtitle(self, node): - self.body.append(self.context.pop()) - - desc_map = { - 'function' : 'funcdesc', - 'class': 'classdesc', - 'method': 'methoddesc', - 'staticmethod': 'staticmethoddesc', - 'exception': 'excdesc', - 'data': 'datadesc', - 'attribute': 'memberdesc', - 'opcode': 
'opcodedesc', - - 'cfunction': 'cfuncdesc', - 'cmember': 'cmemberdesc', - 'cmacro': 'csimplemacrodesc', - 'ctype': 'ctypedesc', - 'cvar': 'cvardesc', - - 'describe': 'describe', - # and all others are 'describe' too - } - - def visit_desc(self, node): - self.descstack.append(Desc(node)) - def depart_desc(self, node): - d = self.descstack.pop() - self.body.append("\\end{%s}\n" % d.env) - - def visit_desc_signature(self, node): - d = self.descstack[-1] - # reset these for every signature - d.type = d.cls = d.name = d.params = '' - def depart_desc_signature(self, node): - d = self.descstack[-1] - d.cls = d.cls.rstrip('.') - if node.parent['desctype'] != 'describe' and node['ids']: - hyper = '\\hypertarget{%s}{}' % node['ids'][0] - else: - hyper = '' - if d.count == 0: - t1 = "\n\n%s\\begin{%s}" % (hyper, d.env) - else: - t1 = "\n%s\\%sline" % (hyper, d.env[:-4]) - d.count += 1 - if d.env in ('funcdesc', 'classdesc', 'excclassdesc'): - t2 = "{%s}{%s}" % (d.name, d.params) - elif d.env in ('datadesc', 'excdesc', 'csimplemacrodesc'): - t2 = "{%s}" % (d.name) - elif d.env in ('methoddesc', 'staticmethoddesc'): - if d.cls: - t2 = "[%s]{%s}{%s}" % (d.cls, d.name, d.params) - else: - t2 = "{%s}{%s}" % (d.name, d.params) - elif d.env == 'memberdesc': - if d.cls: - t2 = "[%s]{%s}" % (d.cls, d.name) - else: - t2 = "{%s}" % d.name - elif d.env == 'cfuncdesc': - if d.cls: - # C++ class names - d.name = '%s::%s' % (d.cls, d.name) - t2 = "{%s}{%s}{%s}" % (d.type, d.name, d.params) - elif d.env == 'cmemberdesc': - try: - type, container = d.type.rsplit(' ', 1) - container = container.rstrip('.') - except ValueError: - container = '' - type = d.type - t2 = "{%s}{%s}{%s}" % (container, type, d.name) - elif d.env == 'cvardesc': - t2 = "{%s}{%s}" % (d.type, d.name) - elif d.env == 'ctypedesc': - t2 = "{%s}" % (d.name) - elif d.env == 'opcodedesc': - t2 = "{%s}{%s}" % (d.name, d.params) - elif d.env == 'describe': - t2 = "{%s}" % d.name - self.body.append(t1 + t2) - - def 
visit_desc_type(self, node): - d = self.descstack[-1] - if d.env == 'describe': - d.name += self.encode(node.astext()) - else: - self.descstack[-1].type = self.encode(node.astext().strip()) - raise nodes.SkipNode - - def visit_desc_name(self, node): - d = self.descstack[-1] - if d.env == 'describe': - d.name += self.encode(node.astext()) - else: - self.descstack[-1].name = self.encode(node.astext().strip()) - raise nodes.SkipNode - - def visit_desc_addname(self, node): - d = self.descstack[-1] - if d.env == 'describe': - d.name += self.encode(node.astext()) - else: - self.descstack[-1].cls = self.encode(node.astext().strip()) - raise nodes.SkipNode - - def visit_desc_parameterlist(self, node): - d = self.descstack[-1] - if d.env == 'describe': - d.name += self.encode(node.astext()) - else: - self.descstack[-1].params = self.encode(node.astext().strip()) - raise nodes.SkipNode - - def visit_desc_annotation(self, node): - d = self.descstack[-1] - if d.env == 'describe': - d.name += self.encode(node.astext()) - else: - self.descstack[-1].annotation = self.encode(node.astext().strip()) - raise nodes.SkipNode - - def visit_refcount(self, node): - self.body.append("\\emph{") - def depart_refcount(self, node): - self.body.append("}\\\\") - - def visit_desc_content(self, node): - if node.children and not isinstance(node.children[0], nodes.paragraph): - # avoid empty desc environment which causes a formatting bug - self.body.append('~') - def depart_desc_content(self, node): - pass - - def visit_seealso(self, node): - self.body.append("\n\n\\strong{%s:}\n\n" % admonitionlabels['seealso']) - def depart_seealso(self, node): - self.body.append("\n\n") - - def visit_rubric(self, node): - if len(node.children) == 1 and node.children[0].astext() == 'Footnotes': - raise nodes.SkipNode - self.body.append('\\paragraph{') - self.context.append('}\n') - def depart_rubric(self, node): - self.body.append(self.context.pop()) - - def visit_footnote(self, node): - pass - def 
depart_footnote(self, node): - pass - - def visit_label(self, node): - if isinstance(node.parent, nodes.citation): - self.bibitems[-1][0] = node.astext() - raise nodes.SkipNode - - def visit_tabular_col_spec(self, node): - self.next_table_colspec = node['spec'] - raise nodes.SkipNode - - def visit_table(self, node): - if self.table: - raise NotImplementedError('Nested tables are not supported.') - self.table = Table() - self.tablebody = [] - # Redirect body output until table is finished. - self._body = self.body - self.body = self.tablebody - def depart_table(self, node): - self.body = self._body - if self.table.caption is not None: - self.body.append('\n\\begin{threeparttable}\n' - '\\caption{%s}\n' % self.table.caption) - if self.table.has_verbatim: - self.body.append('\n\\begin{tabular}') - else: - self.body.append('\n\\begin{tabulary}{\\textwidth}') - if self.table.colspec: - self.body.append(self.table.colspec) - else: - if self.table.has_verbatim: - colwidth = 0.95 / self.table.colcount - colspec = ('p{%.3f\\textwidth}|' % colwidth) * self.table.colcount - self.body.append('{|' + colspec + '}\n') - else: - self.body.append('{|' + ('L|' * self.table.colcount) + '}\n') - self.body.extend(self.tablebody) - if self.table.has_verbatim: - self.body.append('\\end{tabular}\n\n') - else: - self.body.append('\\end{tabulary}\n\n') - if self.table.caption is not None: - self.body.append('\\end{threeparttable}\n\n') - self.table = None - self.tablebody = None - - def visit_colspec(self, node): - self.table.colcount += 1 - def depart_colspec(self, node): - pass - - def visit_tgroup(self, node): - pass - def depart_tgroup(self, node): - pass - - def visit_thead(self, node): - if self.next_table_colspec: - self.table.colspec = '{%s}\n' % self.next_table_colspec - self.next_table_colspec = None - self.body.append('\\hline\n') - self.table.had_head = True - def depart_thead(self, node): - self.body.append('\\hline\n') - - def visit_tbody(self, node): - if not 
self.table.had_head: - self.visit_thead(node) - def depart_tbody(self, node): - self.body.append('\\hline\n') - - def visit_row(self, node): - self.table.col = 0 - def depart_row(self, node): - self.body.append('\\\\\n') - - def visit_entry(self, node): - if node.has_key('morerows') or node.has_key('morecols'): - raise NotImplementedError('Column or row spanning cells are ' - 'not implemented.') - if self.table.col > 0: - self.body.append(' & ') - self.table.col += 1 - if isinstance(node.parent.parent, nodes.thead): - self.body.append('\\textbf{') - self.context.append('}') - else: - self.context.append('') - def depart_entry(self, node): - self.body.append(self.context.pop()) # header - - def visit_acks(self, node): - # this is a list in the source, but should be rendered as a - # comma-separated list here - self.body.append('\n\n') - self.body.append(', '.join(n.astext() for n in node.children[0].children) + '.') - self.body.append('\n\n') - raise nodes.SkipNode - - def visit_bullet_list(self, node): - self.body.append('\\begin{itemize}\n' ) - def depart_bullet_list(self, node): - self.body.append('\\end{itemize}\n' ) - - def visit_enumerated_list(self, node): - self.body.append('\\begin{enumerate}\n' ) - def depart_enumerated_list(self, node): - self.body.append('\\end{enumerate}\n' ) - - def visit_list_item(self, node): - # Append "{}" in case the next character is "[", which would break - # LaTeX's list environment (no numbering and the "[" is not printed). 
- self.body.append(r'\item {} ') - def depart_list_item(self, node): - self.body.append('\n') - - def visit_definition_list(self, node): - self.body.append('\\begin{description}\n') - def depart_definition_list(self, node): - self.body.append('\\end{description}\n') - - def visit_definition_list_item(self, node): - pass - def depart_definition_list_item(self, node): - pass - - def visit_term(self, node): - ctx = ']' - if node.has_key('ids') and node['ids']: - ctx += '\\hypertarget{%s}{}' % node['ids'][0] - self.body.append('\\item[') - self.context.append(ctx) - def depart_term(self, node): - self.body.append(self.context.pop()) - - def visit_classifier(self, node): - self.body.append('{[}') - def depart_classifier(self, node): - self.body.append('{]}') - - def visit_definition(self, node): - pass - def depart_definition(self, node): - self.body.append('\n') - - def visit_field_list(self, node): - self.body.append('\\begin{quote}\\begin{description}\n') - def depart_field_list(self, node): - self.body.append('\\end{description}\\end{quote}\n') - - def visit_field(self, node): - pass - def depart_field(self, node): - pass - - visit_field_name = visit_term - depart_field_name = depart_term - - visit_field_body = visit_definition - depart_field_body = depart_definition - - def visit_paragraph(self, node): - self.body.append('\n') - def depart_paragraph(self, node): - self.body.append('\n') - - def visit_centered(self, node): - self.body.append('\n\\begin{centering}') - def depart_centered(self, node): - self.body.append('\n\\end{centering}') - - def visit_module(self, node): - modname = node['modname'] - self.body.append('\n\\declaremodule[%s]{}{%s}' % (modname.replace('_', ''), - self.encode(modname))) - self.body.append('\n\\modulesynopsis{%s}' % self.encode(node['synopsis'])) - if node.has_key('platform'): - self.body.append('\\platform{%s}' % self.encode(node['platform'])) - def depart_module(self, node): - pass - - def latex_image_length(self, width_str): - match 
= re.match('(\d*\.?\d*)\s*(\S*)', width_str) - if not match: - # fallback - return width_str - res = width_str - amount, unit = match.groups()[:2] - if not unit or unit == "px": - # pixels: let LaTeX alone - return None - elif unit == "%": - res = "%.3f\\linewidth" % (float(amount) / 100.0) - return res - - def visit_image(self, node): - attrs = node.attributes - pre = [] # in reverse order - post = [] - include_graphics_options = [] - inline = isinstance(node.parent, nodes.TextElement) - if attrs.has_key('scale'): - # Could also be done with ``scale`` option to - # ``\includegraphics``; doing it this way for consistency. - pre.append('\\scalebox{%f}{' % (attrs['scale'] / 100.0,)) - post.append('}') - if attrs.has_key('width'): - w = self.latex_image_length(attrs['width']) - if w: - include_graphics_options.append('width=%s' % w) - if attrs.has_key('height'): - h = self.latex_image_length(attrs['height']) - include_graphics_options.append('height=%s' % h) - if attrs.has_key('align'): - align_prepost = { - # By default latex aligns the top of an image. - (1, 'top'): ('', ''), - (1, 'middle'): ('\\raisebox{-0.5\\height}{', '}'), - (1, 'bottom'): ('\\raisebox{-\\height}{', '}'), - (0, 'center'): ('{\\hfill', '\\hfill}'), - # These 2 don't exactly do the right thing. The image should - # be floated alongside the paragraph. See - # http://www.w3.org/TR/html4/struct/objects.html#adef-align-IMG - (0, 'left'): ('{', '\\hfill}'), - (0, 'right'): ('{\\hfill', '}'),} - try: - pre.append(align_prepost[inline, attrs['align']][0]) - post.append(align_prepost[inline, attrs['align']][1]) - except KeyError: - pass # XXX complain here? - if not inline: - pre.append('\n') - post.append('\n') - pre.reverse() - if node['uri'] in self.builder.images: - uri = self.builder.images[node['uri']] - else: - # missing image! 
- if self.ignore_missing_images: - return - uri = node['uri'] - if uri.find('://') != -1: - # ignore remote images - return - self.body.extend(pre) - options = '' - if include_graphics_options: - options = '[%s]' % ','.join(include_graphics_options) - self.body.append('\\includegraphics%s{%s}' % (options, uri)) - self.body.extend(post) - def depart_image(self, node): - pass - - def visit_figure(self, node): - if (not node.attributes.has_key('align') or - node.attributes['align'] == 'center'): - # centering does not add vertical space like center. - align = '\n\\centering' - align_end = '' - else: - # TODO non vertical space for other alignments. - align = '\\begin{flush%s}' % node.attributes['align'] - align_end = '\\end{flush%s}' % node.attributes['align'] - self.body.append('\\begin{figure}[htbp]%s\n' % align) - self.context.append('%s\\end{figure}\n' % align_end) - def depart_figure(self, node): - self.body.append(self.context.pop()) - - def visit_caption(self, node): - self.body.append('\\caption{') - def depart_caption(self, node): - self.body.append('}') - - def visit_legend(self, node): - self.body.append('{\\small ') - def depart_legend(self, node): - self.body.append('}') - - def visit_admonition(self, node): - self.body.append('\n\\begin{notice}{note}') - def depart_admonition(self, node): - self.body.append('\\end{notice}\n') - - def _make_visit_admonition(name): - def visit_admonition(self, node): - self.body.append('\n\\begin{notice}{%s}{%s:}' % - (name, admonitionlabels[name])) - return visit_admonition - def _depart_named_admonition(self, node): - self.body.append('\\end{notice}\n') - - visit_attention = _make_visit_admonition('attention') - depart_attention = _depart_named_admonition - visit_caution = _make_visit_admonition('caution') - depart_caution = _depart_named_admonition - visit_danger = _make_visit_admonition('danger') - depart_danger = _depart_named_admonition - visit_error = _make_visit_admonition('error') - depart_error = 
_depart_named_admonition - visit_hint = _make_visit_admonition('hint') - depart_hint = _depart_named_admonition - visit_important = _make_visit_admonition('important') - depart_important = _depart_named_admonition - visit_note = _make_visit_admonition('note') - depart_note = _depart_named_admonition - visit_tip = _make_visit_admonition('tip') - depart_tip = _depart_named_admonition - visit_warning = _make_visit_admonition('warning') - depart_warning = _depart_named_admonition - - def visit_versionmodified(self, node): - intro = versionlabels[node['type']] % node['version'] - if node.children: - intro += ': ' - else: - intro += '.' - self.body.append(intro) - def depart_versionmodified(self, node): - pass - - def visit_target(self, node): - def add_target(id): - # indexing uses standard LaTeX index markup, so the targets - # will be generated differently - if not id.startswith('index-'): - self.body.append(r'\hypertarget{%s}{}' % id) - - if node.has_key('refid') and node['refid'] not in self.written_ids: - parindex = node.parent.index(node) - try: - next = node.parent[parindex+1] - if isinstance(next, nodes.section): - self.next_section_target = node['refid'] - return - except IndexError: - pass - add_target(node['refid']) - self.written_ids.add(node['refid']) - def depart_target(self, node): - pass - - def visit_attribution(self, node): - self.body.append('\n\\begin{flushright}\n') - self.body.append('---') - def depart_attribution(self, node): - self.body.append('\n\\end{flushright}\n') - - def visit_index(self, node, scre=re.compile(r';\s*')): - entries = node['entries'] - for type, string, tid, _ in entries: - if type == 'single': - self.body.append(r'\index{%s}' % scre.sub('!', self.encode(string))) - elif type == 'pair': - parts = tuple(self.encode(x.strip()) for x in string.split(';', 1)) - self.body.append(r'\indexii{%s}{%s}' % parts) - elif type == 'triple': - parts = tuple(self.encode(x.strip()) for x in string.split(';', 2)) - 
self.body.append(r'\indexiii{%s}{%s}{%s}' % parts) - else: - self.builder.warn('unknown index entry type %s found' % type) - raise nodes.SkipNode - - def visit_raw(self, node): - if 'latex' in node.get('format', '').split(): - self.body.append(node.astext()) - raise nodes.SkipNode - - def visit_reference(self, node): - uri = node.get('refuri', '') - if self.in_title or not uri: - self.context.append('') - elif uri.startswith('mailto:') or uri.startswith('http:') or \ - uri.startswith('https:') or uri.startswith('ftp:'): - self.body.append('\\href{%s}{' % self.encode(uri)) - self.context.append('}') - elif uri.startswith('#'): - self.body.append('\\hyperlink{%s}{' % uri[1:]) - self.context.append('}') - elif uri.startswith('@token'): - if self.in_production_list: - self.body.append('\\token{') - else: - self.body.append('\\grammartoken{') - self.context.append('}') - else: - self.builder.warn('unusable reference target found: %s' % uri) - self.context.append('') - def depart_reference(self, node): - self.body.append(self.context.pop()) - - def visit_pending_xref(self, node): - pass - def depart_pending_xref(self, node): - pass - - def visit_emphasis(self, node): - self.body.append(r'\emph{') - def depart_emphasis(self, node): - self.body.append('}') - - def visit_literal_emphasis(self, node): - self.body.append(r'\emph{\texttt{') - self.no_contractions += 1 - def depart_literal_emphasis(self, node): - self.body.append('}}') - self.no_contractions -= 1 - - def visit_strong(self, node): - self.body.append(r'\textbf{') - def depart_strong(self, node): - self.body.append('}') - - def visit_title_reference(self, node): - self.body.append(r'\emph{') - def depart_title_reference(self, node): - self.body.append('}') - - def visit_citation(self, node): - # TODO maybe use cite bibitems - self.bibitems.append(['', '']) - self.context.append(len(self.body)) - def depart_citation(self, node): - size = self.context.pop() - text = ''.join(self.body[size:]) - del self.body[size:] - 
self.bibitems[-1][1] = text - - def visit_citation_reference(self, node): - citeid = node.astext() - self.body.append('\\cite{%s}' % citeid) - raise nodes.SkipNode - - def visit_literal(self, node): - content = self.encode(node.astext().strip()) - if self.in_title: - self.body.append(r'\texttt{%s}' % content) - elif node.has_key('role') and node['role'] == 'samp': - self.body.append(r'\samp{%s}' % content) - else: - self.body.append(r'\code{%s}' % content) - raise nodes.SkipNode - - def visit_footnote_reference(self, node): - num = node.astext().strip() - try: - fn = self.footnotestack[-1][num] - except (KeyError, IndexError): - raise nodes.SkipNode - self.body.append('\\footnote{') - fn.walkabout(self) - raise nodes.SkipChildren - def depart_footnote_reference(self, node): - self.body.append('}') - - def visit_literal_block(self, node): - self.verbatim = '' - def depart_literal_block(self, node): - code = self.verbatim.rstrip('\n') - lang = self.highlightlang - linenos = code.count('\n') >= self.highlightlinenothreshold - 1 - if node.has_key('language'): - # code-block directives - lang = node['language'] - if node.has_key('linenos'): - linenos = node['linenos'] - hlcode = self.highlighter.highlight_block(code, lang, linenos) - # workaround for Unicode issue - hlcode = hlcode.replace(u'€', u'@texteuro[]') - # must use original Verbatim environment and "tabular" environment - if self.table: - hlcode = hlcode.replace('\\begin{Verbatim}', - '\\begin{OriginalVerbatim}') - self.table.has_verbatim = True - # get consistent trailer - hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim} - hlcode = hlcode.rstrip() + '\n' - self.body.append('\n' + hlcode + '\\end{%sVerbatim}\n' % - (self.table and 'Original' or '')) - self.verbatim = None - visit_doctest_block = visit_literal_block - depart_doctest_block = depart_literal_block - - def visit_line_block(self, node): - """line-block: - * whitespace (including linebreaks) is significant - * inline markup is supported. 
- * serif typeface - """ - self.body.append('{\\raggedright{}') - self.literal_whitespace = 1 - def depart_line_block(self, node): - self.literal_whitespace = 0 - # remove the last \\ - del self.body[-1] - self.body.append('}\n') - - def visit_line(self, node): - self._line_start = len(self.body) - def depart_line(self, node): - if self._line_start == len(self.body): - # no output in this line -- add a nonbreaking space, else the - # \\ command will give an error - self.body.append('~') - if self.table is not None: - self.body.append('\\newline\n') - else: - self.body.append('\\\\\n') - - def visit_block_quote(self, node): - # If the block quote contains a single object and that object - # is a list, then generate a list not a block quote. - # This lets us indent lists. - done = 0 - if len(node.children) == 1: - child = node.children[0] - if isinstance(child, nodes.bullet_list) or \ - isinstance(child, nodes.enumerated_list): - done = 1 - if not done: - self.body.append('\\begin{quote}\n') - def depart_block_quote(self, node): - done = 0 - if len(node.children) == 1: - child = node.children[0] - if isinstance(child, nodes.bullet_list) or \ - isinstance(child, nodes.enumerated_list): - done = 1 - if not done: - self.body.append('\\end{quote}\n') - - # option node handling copied from docutils' latex writer - - def visit_option(self, node): - if self.context[-1]: - # this is not the first option - self.body.append(', ') - def depart_option(self, node): - # flag that the first option is done. 
- self.context[-1] += 1 - - def visit_option_argument(self, node): - """The delimiter betweeen an option and its argument.""" - self.body.append(node.get('delimiter', ' ')) - def depart_option_argument(self, node): - pass - - def visit_option_group(self, node): - self.body.append('\\item [') - # flag for first option - self.context.append(0) - def depart_option_group(self, node): - self.context.pop() # the flag - self.body.append('] ') - - def visit_option_list(self, node): - self.body.append('\\begin{optionlist}{3cm}\n') - def depart_option_list(self, node): - self.body.append('\\end{optionlist}\n') - - def visit_option_list_item(self, node): - pass - def depart_option_list_item(self, node): - pass - - def visit_option_string(self, node): - ostring = node.astext() - self.body.append(self.encode(ostring.replace('--', u'-{-}'))) - raise nodes.SkipNode - - def visit_description(self, node): - self.body.append( ' ' ) - def depart_description(self, node): - pass - - def visit_superscript(self, node): - self.body.append('$^{\\text{') - def depart_superscript(self, node): - self.body.append('}}$') - - def visit_subscript(self, node): - self.body.append('$_{\\text{') - def depart_subscript(self, node): - self.body.append('}}$') - - def visit_substitution_definition(self, node): - raise nodes.SkipNode - - def visit_substitution_reference(self, node): - raise nodes.SkipNode - - def visit_generated(self, node): - pass - def depart_generated(self, node): - pass - - def visit_compound(self, node): - pass - def depart_compound(self, node): - pass - - def visit_container(self, node): - pass - def depart_container(self, node): - pass - - def visit_decoration(self, node): - pass - def depart_decoration(self, node): - pass - - # text handling - - def encode(self, text): - text = unicode(text).translate(tex_escape_map) - if self.literal_whitespace: - # Insert a blank before the newline, to avoid - # ! LaTeX Error: There's no line here to end. 
- text = text.replace(u'\n', u'~\\\\\n').replace(u' ', u'~') - if self.no_contractions: - text = text.replace('--', u'-{-}') - return text - - def visit_Text(self, node): - if self.verbatim is not None: - self.verbatim += node.astext() - else: - text = self.encode(node.astext()) - self.body.append(educateQuotesLatex(text)) - def depart_Text(self, node): - pass - - def visit_comment(self, node): - raise nodes.SkipNode - - def visit_meta(self, node): - # only valid for HTML - raise nodes.SkipNode - - def visit_system_message(self, node): - pass - def depart_system_message(self, node): - self.body.append('\n') - - def unknown_visit(self, node): - raise NotImplementedError('Unknown node: ' + node.__class__.__name__) diff --git a/sphinx/linkcheck.py b/sphinx/linkcheck.py deleted file mode 100644 index 37aeb7a7..00000000 --- a/sphinx/linkcheck.py +++ /dev/null @@ -1,130 +0,0 @@ -# -*- coding: utf-8 -*- -""" - sphinx.linkcheck - ~~~~~~~~~~~~~~~~ - - The CheckExternalLinksBuilder class. - - :copyright: 2008 by Georg Brandl, Thomas Lamb. - :license: BSD. -""" - -import socket -from os import path -from urllib2 import build_opener, HTTPError - -from docutils import nodes - -from sphinx.builder import Builder -from sphinx.util.console import purple, red, darkgreen - -# create an opener that will simulate a browser user-agent -opener = build_opener() -opener.addheaders = [('User-agent', 'Mozilla/5.0')] - - -class CheckExternalLinksBuilder(Builder): - """ - Checks for broken external links. 
- """ - name = 'linkcheck' - - def init(self): - self.good = set() - self.broken = {} - self.redirected = {} - # set a timeout for non-responding servers - socket.setdefaulttimeout(5.0) - # create output file - open(path.join(self.outdir, 'output.txt'), 'w').close() - - def get_target_uri(self, docname, typ=None): - return '' - - def get_outdated_docs(self): - return self.env.found_docs - - def prepare_writing(self, docnames): - return - - def write_doc(self, docname, doctree): - self.info() - for node in doctree.traverse(nodes.reference): - try: - self.check(node, docname) - except KeyError: - continue - - def check(self, node, docname): - uri = node['refuri'] - - if '#' in uri: - uri = uri.split('#')[0] - - if uri in self.good: - return - - lineno = None - while lineno is None and node: - node = node.parent - lineno = node.line - - if uri[0:5] == 'http:' or uri[0:6] == 'https:': - self.info(uri, nonl=1) - - if uri in self.broken: - (r, s) = self.broken[uri] - elif uri in self.redirected: - (r, s) = self.redirected[uri] - else: - (r, s) = self.resolve(uri) - - if r == 0: - self.info(' - ' + darkgreen('working')) - self.good.add(uri) - elif r == 2: - self.info(' - ' + red('broken: ') + s) - self.write_entry('broken', docname, lineno, uri + ': ' + s) - self.broken[uri] = (r, s) - if self.app.quiet: - self.warn('%s:%s: broken link: %s' % (docname, lineno, uri)) - else: - self.info(' - ' + purple('redirected') + ' to ' + s) - self.write_entry('redirected', docname, lineno, uri + ' to ' + s) - self.redirected[uri] = (r, s) - elif len(uri) == 0 or uri[0:7] == 'mailto:' or uri[0:4] == 'ftp:': - return - else: - self.warn(uri + ' - ' + red('malformed!')) - self.write_entry('malformed', docname, lineno, uri) - if self.app.quiet: - self.warn('%s:%s: malformed link: %s' % (docname, lineno, uri)) - self.app.statuscode = 1 - - if self.broken: - self.app.statuscode = 1 - - def write_entry(self, what, docname, line, uri): - output = open(path.join(self.outdir, 'output.txt'), 
'a') - output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None), - line, what, uri)) - output.close() - - def resolve(self, uri): - try: - f = opener.open(uri) - f.close() - except HTTPError, err: - #if err.code == 403 and uri.startswith('http://en.wikipedia.org/'): - # # Wikipedia blocks requests from urllib User-Agent - # return (0, 0) - return (2, str(err)) - except Exception, err: - return (2, str(err)) - if f.url.rstrip('/') == uri.rstrip('/'): - return (0, 0) - else: - return (1, f.url) - - def finish(self): - return diff --git a/sphinx/textwriter.py b/sphinx/textwriter.py deleted file mode 100644 index 4aa18039..00000000 --- a/sphinx/textwriter.py +++ /dev/null @@ -1,679 +0,0 @@ -# -*- coding: utf-8 -*- -""" - sphinx.textwriter - ~~~~~~~~~~~~~~~~~ - - Custom docutils writer for plain text. - - :copyright: 2008 by Georg Brandl. - :license: BSD. -""" - -import re -import textwrap - -from docutils import nodes, writers - -from sphinx import addnodes -from sphinx.locale import admonitionlabels, versionlabels - - -class TextWriter(writers.Writer): - supported = ('text',) - settings_spec = ('No options here.', '', ()) - settings_defaults = {} - - output = None - - def __init__(self, builder): - writers.Writer.__init__(self) - self.builder = builder - - def translate(self): - visitor = TextTranslator(self.document, self.builder) - self.document.walkabout(visitor) - self.output = visitor.body - -# monkey-patch... 
-new_wordsep_re = re.compile( - r'(\s+|' # any whitespace - r'(?<=\s)(?::[a-z-]+:)?`\S+|' # interpreted text start - r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words - r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash -textwrap.TextWrapper.wordsep_re = new_wordsep_re - -MAXWIDTH = 70 -STDINDENT = 3 - - -class TextTranslator(nodes.NodeVisitor): - sectionchars = '*=-~"+' - - def __init__(self, document, builder): - nodes.NodeVisitor.__init__(self, document) - - self.states = [[]] - self.stateindent = [0] - self.sectionlevel = 0 - self.table = None - - def add_text(self, text): - self.states[-1].append((-1, text)) - def new_state(self, indent=STDINDENT): - self.states.append([]) - self.stateindent.append(indent) - def end_state(self, wrap=True, end=[''], first=None): - content = self.states.pop() - maxindent = sum(self.stateindent) - indent = self.stateindent.pop() - result = [] - toformat = [] - def do_format(): - if not toformat: - return - if wrap: - res = textwrap.wrap(''.join(toformat), width=MAXWIDTH-maxindent) - else: - res = ''.join(toformat).splitlines() - if end: - res += end - result.append((indent, res)) - for itemindent, item in content: - if itemindent == -1: - toformat.append(item) - else: - do_format() - result.append((indent + itemindent, item)) - toformat = [] - do_format() - if first is not None and result: - itemindent, item = result[0] - if item: - result.insert(0, (itemindent - indent, [first + item[0]])) - result[1] = (itemindent, item[1:]) - self.states[-1].extend(result) - - def visit_document(self, node): - self.new_state(0) - def depart_document(self, node): - self.end_state() - self.body = '\n'.join(line and (' '*indent + line) - for indent, lines in self.states[0] - for line in lines) - # XXX header/footer? 
- - def visit_highlightlang(self, node): - raise nodes.SkipNode - - def visit_section(self, node): - self._title_char = self.sectionchars[self.sectionlevel] - self.sectionlevel += 1 - def depart_section(self, node): - self.sectionlevel -= 1 - - def visit_topic(self, node): - self.new_state(0) - def depart_topic(self, node): - self.end_state() - - visit_sidebar = visit_topic - depart_sidebar = depart_topic - - def visit_rubric(self, node): - self.new_state(0) - self.add_text('-[ ') - def depart_rubric(self, node): - self.add_text(' ]-') - self.end_state() - - def visit_compound(self, node): - pass - def depart_compound(self, node): - pass - - def visit_glossary(self, node): - pass - def depart_glossary(self, node): - pass - - def visit_title(self, node): - if isinstance(node.parent, nodes.Admonition): - self.add_text(node.astext()+': ') - raise nodes.SkipNode - self.new_state(0) - def depart_title(self, node): - if isinstance(node.parent, nodes.section): - char = self._title_char - else: - char = '^' - text = ''.join(x[1] for x in self.states.pop() if x[0] == -1) - self.stateindent.pop() - self.states[-1].append((0, ['', text, '%s' % (char * len(text)), ''])) - - def visit_subtitle(self, node): - pass - def depart_subtitle(self, node): - pass - - def visit_attribution(self, node): - self.add_text('-- ') - def depart_attribution(self, node): - pass - - def visit_module(self, node): - if node.has_key('platform'): - self.new_state(0) - self.add_text(_('Platform: %s') % node['platform']) - self.end_state() - raise nodes.SkipNode - - def visit_desc(self, node): - pass - def depart_desc(self, node): - pass - - def visit_desc_signature(self, node): - self.new_state(0) - if node.parent['desctype'] in ('class', 'exception'): - self.add_text('%s ' % node.parent['desctype']) - def depart_desc_signature(self, node): - # XXX: wrap signatures in a way that makes sense - self.end_state(wrap=False, end=None) - - def visit_desc_name(self, node): - pass - def depart_desc_name(self, 
node): - pass - - def visit_desc_addname(self, node): - pass - def depart_desc_addname(self, node): - pass - - def visit_desc_type(self, node): - pass - def depart_desc_type(self, node): - pass - - def visit_desc_parameterlist(self, node): - self.add_text('(') - self.first_param = 1 - def depart_desc_parameterlist(self, node): - self.add_text(')') - - def visit_desc_parameter(self, node): - if not self.first_param: - self.add_text(', ') - else: - self.first_param = 0 - self.add_text(node.astext()) - raise nodes.SkipNode - - def visit_desc_optional(self, node): - self.add_text('[') - def depart_desc_optional(self, node): - self.add_text(']') - - def visit_desc_annotation(self, node): - pass - def depart_desc_annotation(self, node): - pass - - def visit_refcount(self, node): - pass - def depart_refcount(self, node): - pass - - def visit_desc_content(self, node): - self.new_state() - self.add_text('\n') - def depart_desc_content(self, node): - self.end_state() - - def visit_figure(self, node): - self.new_state() - def depart_figure(self, node): - self.end_state() - - def visit_caption(self, node): - pass - def depart_caption(self, node): - pass - - def visit_productionlist(self, node): - self.new_state() - names = [] - for production in node: - names.append(production['tokenname']) - maxlen = max(len(name) for name in names) - for production in node: - if production['tokenname']: - self.add_text(production['tokenname'].ljust(maxlen) + ' ::=') - lastname = production['tokenname'] - else: - self.add_text('%s ' % (' '*len(lastname))) - self.add_text(production.astext() + '\n') - self.end_state(wrap=False) - raise nodes.SkipNode - - def visit_seealso(self, node): - self.new_state() - def depart_seealso(self, node): - self.end_state(first='') - - def visit_footnote(self, node): - self._footnote = node.children[0].astext().strip() - self.new_state(len(self._footnote) + 3) - def depart_footnote(self, node): - self.end_state(first='[%s] ' % self._footnote) - - def 
visit_citation(self, node): - if len(node) and isinstance(node[0], nodes.label): - self._citlabel = node[0].astext() - else: - self._citlabel = '' - self.new_state(len(self._citlabel) + 3) - def depart_citation(self, node): - self.end_state(first='[%s] ' % self._citlabel) - - def visit_label(self, node): - raise nodes.SkipNode - - # XXX: option list could use some better styling - - def visit_option_list(self, node): - pass - def depart_option_list(self, node): - pass - - def visit_option_list_item(self, node): - self.new_state(0) - def depart_option_list_item(self, node): - self.end_state() - - def visit_option_group(self, node): - self._firstoption = True - def depart_option_group(self, node): - self.add_text(' ') - - def visit_option(self, node): - if self._firstoption: - self._firstoption = False - else: - self.add_text(', ') - def depart_option(self, node): - pass - - def visit_option_string(self, node): - pass - def depart_option_string(self, node): - pass - - def visit_option_argument(self, node): - self.add_text(node['delimiter']) - def depart_option_argument(self, node): - pass - - def visit_description(self, node): - pass - def depart_description(self, node): - pass - - def visit_tabular_col_spec(self, node): - raise nodes.SkipNode - - def visit_colspec(self, node): - self.table[0].append(node['colwidth']) - raise nodes.SkipNode - - def visit_tgroup(self, node): - pass - def depart_tgroup(self, node): - pass - - def visit_thead(self, node): - pass - def depart_thead(self, node): - pass - - def visit_tbody(self, node): - self.table.append('sep') - def depart_tbody(self, node): - pass - - def visit_row(self, node): - self.table.append([]) - def depart_row(self, node): - pass - - def visit_entry(self, node): - if node.has_key('morerows') or node.has_key('morecols'): - raise NotImplementedError('Column or row spanning cells are ' - 'not implemented.') - self.new_state(0) - def depart_entry(self, node): - text = '\n'.join('\n'.join(x[1]) for x in 
self.states.pop()) - self.stateindent.pop() - self.table[-1].append(text) - - def visit_table(self, node): - if self.table: - raise NotImplementedError('Nested tables are not supported.') - self.new_state(0) - self.table = [[]] - def depart_table(self, node): - lines = self.table[1:] - fmted_rows = [] - colwidths = self.table[0] - realwidths = colwidths[:] - separator = 0 - # don't allow paragraphs in table cells for now - for line in lines: - if line == 'sep': - separator = len(fmted_rows) - else: - cells = [] - for i, cell in enumerate(line): - par = textwrap.wrap(cell, width=colwidths[i]) - if par: - maxwidth = max(map(len, par)) - else: - maxwidth = 0 - realwidths[i] = max(realwidths[i], maxwidth) - cells.append(par) - fmted_rows.append(cells) - - def writesep(char='-'): - out = ['+'] - for width in realwidths: - out.append(char * (width+2)) - out.append('+') - self.add_text(''.join(out) + '\n') - - def writerow(row): - lines = map(None, *row) - for line in lines: - out = ['|'] - for i, cell in enumerate(line): - if cell: - out.append(' ' + cell.ljust(realwidths[i]+1)) - else: - out.append(' ' * (realwidths[i] + 2)) - out.append('|') - self.add_text(''.join(out) + '\n') - - for i, row in enumerate(fmted_rows): - if separator and i == separator: - writesep('=') - else: - writesep('-') - writerow(row) - writesep('-') - self.table = None - self.end_state(wrap=False) - - def visit_acks(self, node): - self.new_state(0) - self.add_text(', '.join(n.astext() for n in node.children[0].children) + '.') - self.end_state() - raise nodes.SkipNode - - def visit_image(self, node): - self.add_text(_('[image]')) - raise nodes.SkipNode - - def visit_transition(self, node): - indent = sum(self.stateindent) - self.new_state(0) - self.add_text('=' * (MAXWIDTH - indent)) - self.end_state() - raise nodes.SkipNode - - def visit_bullet_list(self, node): - self._list_counter = -1 - def depart_bullet_list(self, node): - pass - - def visit_enumerated_list(self, node): - self._list_counter 
= 0 - def depart_enumerated_list(self, node): - pass - - def visit_definition_list(self, node): - self._list_counter = -2 - def depart_definition_list(self, node): - pass - - def visit_list_item(self, node): - if self._list_counter == -1: - # bullet list - self.new_state(2) - elif self._list_counter == -2: - # definition list - pass - else: - # enumerated list - self._list_counter += 1 - self.new_state(len(str(self._list_counter)) + 2) - def depart_list_item(self, node): - if self._list_counter == -1: - self.end_state(first='* ', end=None) - elif self._list_counter == -2: - pass - else: - self.end_state(first='%s. ' % self._list_counter, end=None) - - def visit_definition_list_item(self, node): - self._li_has_classifier = len(node) >= 2 and \ - isinstance(node[1], nodes.classifier) - def depart_definition_list_item(self, node): - pass - - def visit_term(self, node): - self.new_state(0) - def depart_term(self, node): - if not self._li_has_classifier: - self.end_state(end=None) - - def visit_classifier(self, node): - self.add_text(' : ') - def depart_classifier(self, node): - self.end_state(end=None) - - def visit_definition(self, node): - self.new_state() - def depart_definition(self, node): - self.end_state() - - def visit_field_list(self, node): - pass - def depart_field_list(self, node): - pass - - def visit_field(self, node): - pass - def depart_field(self, node): - pass - - def visit_field_name(self, node): - self.new_state(0) - def depart_field_name(self, node): - self.add_text(':') - self.end_state(end=None) - - def visit_field_body(self, node): - self.new_state() - def depart_field_body(self, node): - self.end_state() - - def visit_centered(self, node): - pass - def depart_centered(self, node): - pass - - def visit_admonition(self, node): - self.new_state(0) - def depart_admonition(self, node): - self.end_state() - - def _visit_admonition(self, node): - self.new_state(2) - def _make_depart_admonition(name): - def depart_admonition(self, node): - 
self.end_state(first=admonitionlabels[name] + ': ') - return depart_admonition - - visit_attention = _visit_admonition - depart_attention = _make_depart_admonition('attention') - visit_caution = _visit_admonition - depart_caution = _make_depart_admonition('caution') - visit_danger = _visit_admonition - depart_danger = _make_depart_admonition('danger') - visit_error = _visit_admonition - depart_error = _make_depart_admonition('error') - visit_hint = _visit_admonition - depart_hint = _make_depart_admonition('hint') - visit_important = _visit_admonition - depart_important = _make_depart_admonition('important') - visit_note = _visit_admonition - depart_note = _make_depart_admonition('note') - visit_tip = _visit_admonition - depart_tip = _make_depart_admonition('tip') - visit_warning = _visit_admonition - depart_warning = _make_depart_admonition('warning') - - def visit_versionmodified(self, node): - self.new_state(0) - if node.children: - self.add_text(versionlabels[node['type']] % node['version'] + ': ') - else: - self.add_text(versionlabels[node['type']] % node['version'] + '.') - def depart_versionmodified(self, node): - self.end_state() - - def visit_literal_block(self, node): - self.new_state() - def depart_literal_block(self, node): - self.end_state(wrap=False) - - def visit_doctest_block(self, node): - self.new_state(0) - def depart_doctest_block(self, node): - self.end_state(wrap=False) - - def visit_line_block(self, node): - self.new_state(0) - def depart_line_block(self, node): - self.end_state(wrap=False) - - def visit_line(self, node): - pass - def depart_line(self, node): - pass - - def visit_block_quote(self, node): - self.new_state() - def depart_block_quote(self, node): - self.end_state() - - def visit_compact_paragraph(self, node): - pass - def depart_compact_paragraph(self, node): - pass - - def visit_paragraph(self, node): - if not isinstance(node.parent, nodes.Admonition) or \ - isinstance(node.parent, addnodes.seealso): - self.new_state(0) - def 
depart_paragraph(self, node): - if not isinstance(node.parent, nodes.Admonition) or \ - isinstance(node.parent, addnodes.seealso): - self.end_state() - - def visit_target(self, node): - raise nodes.SkipNode - - def visit_index(self, node): - raise nodes.SkipNode - - def visit_substitution_definition(self, node): - raise nodes.SkipNode - - def visit_pending_xref(self, node): - pass - def depart_pending_xref(self, node): - pass - - def visit_reference(self, node): - pass - def depart_reference(self, node): - pass - - def visit_emphasis(self, node): - self.add_text('*') - def depart_emphasis(self, node): - self.add_text('*') - - def visit_literal_emphasis(self, node): - self.add_text('*') - def depart_literal_emphasis(self, node): - self.add_text('*') - - def visit_strong(self, node): - self.add_text('**') - def depart_strong(self, node): - self.add_text('**') - - def visit_title_reference(self, node): - self.add_text('*') - def depart_title_reference(self, node): - self.add_text('*') - - def visit_literal(self, node): - self.add_text('``') - def depart_literal(self, node): - self.add_text('``') - - def visit_subscript(self, node): - self.add_text('_') - def depart_subscript(self, node): - pass - - def visit_superscript(self, node): - self.add_text('^') - def depart_superscript(self, node): - pass - - def visit_footnote_reference(self, node): - self.add_text('[%s]' % node.astext()) - raise nodes.SkipNode - - def visit_citation_reference(self, node): - self.add_text('[%s]' % node.astext()) - raise nodes.SkipNode - - def visit_Text(self, node): - self.add_text(node.astext()) - def depart_Text(self, node): - pass - - def visit_problematic(self, node): - self.add_text('>>') - def depart_problematic(self, node): - self.add_text('<<') - - def visit_system_message(self, node): - self.new_state(0) - self.add_text('' % node.astext()) - self.end_state() - raise nodes.SkipNode - - def visit_comment(self, node): - raise nodes.SkipNode - - def visit_meta(self, node): - # only 
valid for HTML - raise nodes.SkipNode - - def unknown_visit(self, node): - raise NotImplementedError('Unknown node: ' + node.__class__.__name__) diff --git a/sphinx/writers/__init__.py b/sphinx/writers/__init__.py new file mode 100644 index 00000000..902f04d3 --- /dev/null +++ b/sphinx/writers/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +""" + sphinx.writers + ~~~~~~~~~~~~~~ + + Custom docutils writers. + + :copyright: 2008 by Georg Brandl. + :license: BSD. +""" diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py new file mode 100644 index 00000000..b82d7ccc --- /dev/null +++ b/sphinx/writers/html.py @@ -0,0 +1,457 @@ +# -*- coding: utf-8 -*- +""" + sphinx.writers.html + ~~~~~~~~~~~~~~~~~~~ + + docutils writers handling Sphinx' custom nodes. + + :copyright: 2007-2008 by Georg Brandl. + :license: BSD. +""" + +import sys +import posixpath +import os + +from docutils import nodes +from docutils.writers.html4css1 import Writer, HTMLTranslator as BaseTranslator + +from sphinx.locale import admonitionlabels, versionlabels +from sphinx.highlighting import PygmentsBridge +from sphinx.util.smartypants import sphinx_smarty_pants + +try: + import Image # check for the Python Imaging Library +except ImportError: + Image = None + +class HTMLWriter(Writer): + def __init__(self, builder): + Writer.__init__(self) + self.builder = builder + + def translate(self): + # sadly, this is mostly copied from parent class + self.visitor = visitor = self.builder.translator_class(self.builder, + self.document) + self.document.walkabout(visitor) + self.output = visitor.astext() + for attr in ('head_prefix', 'stylesheet', 'head', 'body_prefix', + 'body_pre_docinfo', 'docinfo', 'body', 'fragment', + 'body_suffix', 'meta', 'title', 'subtitle', 'header', + 'footer', 'html_prolog', 'html_head', 'html_title', + 'html_subtitle', 'html_body', ): + setattr(self, attr, getattr(visitor, attr, None)) + self.clean_meta = ''.join(visitor.meta[2:]) + + +class 
HTMLTranslator(BaseTranslator): + """ + Our custom HTML translator. + """ + + def __init__(self, builder, *args, **kwds): + BaseTranslator.__init__(self, *args, **kwds) + self.highlighter = PygmentsBridge('html', builder.config.pygments_style) + self.no_smarty = 0 + self.builder = builder + self.highlightlang = builder.config.highlight_language + self.highlightlinenothreshold = sys.maxint + self.protect_literal_text = 0 + + def visit_desc(self, node): + self.body.append(self.starttag(node, 'dl', CLASS=node['desctype'])) + def depart_desc(self, node): + self.body.append('\n\n') + + def visit_desc_signature(self, node): + # the id is set automatically + self.body.append(self.starttag(node, 'dt')) + # anchor for per-desc interactive data + if node.parent['desctype'] != 'describe' and node['ids'] and node['first']: + self.body.append('' % node['ids'][0]) + if node.parent['desctype'] in ('class', 'exception'): + self.body.append('%s ' % node.parent['desctype']) + def depart_desc_signature(self, node): + if node['ids'] and self.builder.add_definition_links: + self.body.append(u'\u00B6' % + _('Permalink to this definition')) + self.body.append('\n') + + def visit_desc_addname(self, node): + self.body.append(self.starttag(node, 'tt', '', CLASS='descclassname')) + def depart_desc_addname(self, node): + self.body.append('') + + def visit_desc_type(self, node): + pass + def depart_desc_type(self, node): + pass + + def visit_desc_name(self, node): + self.body.append(self.starttag(node, 'tt', '', CLASS='descname')) + def depart_desc_name(self, node): + self.body.append('') + + def visit_desc_parameterlist(self, node): + self.body.append('(') + self.first_param = 1 + def depart_desc_parameterlist(self, node): + self.body.append(')') + + def visit_desc_parameter(self, node): + if not self.first_param: + self.body.append(', ') + else: + self.first_param = 0 + if not node.hasattr('noemph'): + self.body.append('') + def depart_desc_parameter(self, node): + if not 
node.hasattr('noemph'): + self.body.append('') + + def visit_desc_optional(self, node): + self.body.append('[') + def depart_desc_optional(self, node): + self.body.append(']') + + def visit_desc_annotation(self, node): + self.body.append(self.starttag(node, 'em', CLASS='property')) + def depart_desc_annotation(self, node): + self.body.append('') + + def visit_desc_content(self, node): + self.body.append(self.starttag(node, 'dd', '')) + def depart_desc_content(self, node): + self.body.append('') + + def visit_refcount(self, node): + self.body.append(self.starttag(node, 'em', '', CLASS='refcount')) + def depart_refcount(self, node): + self.body.append('') + + def visit_versionmodified(self, node): + self.body.append(self.starttag(node, 'p')) + text = versionlabels[node['type']] % node['version'] + if len(node): + text += ': ' + else: + text += '.' + self.body.append('%s' % text) + def depart_versionmodified(self, node): + self.body.append('

    \n') + + # overwritten + def visit_reference(self, node): + BaseTranslator.visit_reference(self, node) + if node.hasattr('reftitle'): + # ugly hack to add a title attribute + starttag = self.body[-1] + if not starttag.startswith(' tag + self.section_level += 1 + self.body.append(self.starttag(node, 'div', CLASS='section')) + + def visit_title(self, node): + # don't move the id attribute inside the tag + BaseTranslator.visit_title(self, node, move_ids=0) + + # overwritten + def visit_literal_block(self, node): + if node.rawsource != node.astext(): + # most probably a parsed-literal block -- don't highlight + return BaseTranslator.visit_literal_block(self, node) + lang = self.highlightlang + linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1 + if node.has_key('language'): + # code-block directives + lang = node['language'] + if node.has_key('linenos'): + linenos = node['linenos'] + highlighted = self.highlighter.highlight_block(node.rawsource, lang, linenos) + starttag = self.starttag(node, 'div', suffix='', CLASS='highlight-%s' % lang) + self.body.append(starttag + highlighted + '
    \n') + raise nodes.SkipNode + + def visit_doctest_block(self, node): + self.visit_literal_block(node) + + # overwritten + def visit_literal(self, node): + if len(node.children) == 1 and \ + node.children[0] in ('None', 'True', 'False'): + node['classes'].append('xref') + self.body.append(self.starttag(node, 'tt', '', CLASS='docutils literal')) + self.protect_literal_text += 1 + def depart_literal(self, node): + self.protect_literal_text -= 1 + self.body.append('') + + def visit_productionlist(self, node): + self.body.append(self.starttag(node, 'pre')) + names = [] + for production in node: + names.append(production['tokenname']) + maxlen = max(len(name) for name in names) + for production in node: + if production['tokenname']: + lastname = production['tokenname'].ljust(maxlen) + self.body.append(self.starttag(production, 'strong', '')) + self.body.append(lastname + ' ::= ') + else: + self.body.append('%s ' % (' '*len(lastname))) + production.walkabout(self) + self.body.append('\n') + self.body.append('\n') + raise nodes.SkipNode + def depart_productionlist(self, node): + pass + + def visit_production(self, node): + pass + def depart_production(self, node): + pass + + def visit_centered(self, node): + self.body.append(self.starttag(node, 'p', CLASS="centered") + '') + def depart_centered(self, node): + self.body.append('

    ') + + def visit_compact_paragraph(self, node): + pass + def depart_compact_paragraph(self, node): + pass + + def visit_highlightlang(self, node): + self.highlightlang = node['lang'] + self.highlightlinenothreshold = node['linenothreshold'] + def depart_highlightlang(self, node): + pass + + # overwritten + def visit_image(self, node): + olduri = node['uri'] + # rewrite the URI if the environment knows about it + if olduri in self.builder.images: + node['uri'] = posixpath.join(self.builder.imgpath, + self.builder.images[olduri]) + + if node.has_key('scale'): + if Image and not (node.has_key('width') + and node.has_key('height')): + try: + im = Image.open(os.path.join(self.builder.srcdir, + olduri)) + except (IOError, # Source image can't be found or opened + UnicodeError): # PIL doesn't like Unicode paths. + print olduri + pass + else: + if not node.has_key('width'): + node['width'] = str(im.size[0]) + if not node.has_key('height'): + node['height'] = str(im.size[1]) + del im + BaseTranslator.visit_image(self, node) + + def visit_toctree(self, node): + # this only happens when formatting a toc from env.tocs -- in this + # case we don't want to include the subtree + raise nodes.SkipNode + + def visit_index(self, node): + raise nodes.SkipNode + + def visit_tabular_col_spec(self, node): + raise nodes.SkipNode + + def visit_glossary(self, node): + pass + def depart_glossary(self, node): + pass + + def visit_acks(self, node): + pass + def depart_acks(self, node): + pass + + def visit_module(self, node): + pass + def depart_module(self, node): + pass + + def bulk_text_processor(self, text): + return text + + # overwritten + def visit_Text(self, node): + text = node.astext() + encoded = self.encode(text) + if self.protect_literal_text: + # moved here from base class's visit_literal to support + # more formatting in literal nodes + for token in self.words_and_spaces.findall(encoded): + if token.strip(): + # protect literal text from line wrapping + self.body.append('%s' 
% token) + elif token in ' \n': + # allow breaks at whitespace + self.body.append(token) + else: + # protect runs of multiple spaces; the last one can wrap + self.body.append(' ' * (len(token)-1) + ' ') + else: + if self.in_mailto and self.settings.cloak_email_addresses: + encoded = self.cloak_email(encoded) + else: + encoded = self.bulk_text_processor(encoded) + self.body.append(encoded) + + # these are all for docutils 0.5 compatibility + + def visit_note(self, node): + self.visit_admonition(node, 'note') + def depart_note(self, node): + self.depart_admonition(node) + + def visit_warning(self, node): + self.visit_admonition(node, 'warning') + def depart_warning(self, node): + self.depart_admonition(node) + + def visit_attention(self, node): + self.visit_admonition(node, 'attention') + + def depart_attention(self, node): + self.depart_admonition() + + def visit_caution(self, node): + self.visit_admonition(node, 'caution') + def depart_caution(self, node): + self.depart_admonition() + + def visit_danger(self, node): + self.visit_admonition(node, 'danger') + def depart_danger(self, node): + self.depart_admonition() + + def visit_error(self, node): + self.visit_admonition(node, 'error') + def depart_error(self, node): + self.depart_admonition() + + def visit_hint(self, node): + self.visit_admonition(node, 'hint') + def depart_hint(self, node): + self.depart_admonition() + + def visit_important(self, node): + self.visit_admonition(node, 'important') + def depart_important(self, node): + self.depart_admonition() + + def visit_tip(self, node): + self.visit_admonition(node, 'tip') + def depart_tip(self, node): + self.depart_admonition() + + # these are only handled specially in the SmartyPantsHTMLTranslator + def visit_literal_emphasis(self, node): + return self.visit_emphasis(node) + def depart_literal_emphasis(self, node): + return self.depart_emphasis(node) + + def depart_title(self, node): + close_tag = self.context[-1] + if self.builder.add_header_links and \ + 
(close_tag.startswith('\u00B6
class SmartyPantsHTMLTranslator(HTMLTranslator):
    """
    Handle ordinary text via smartypants, converting quotes and dashes
    to the correct entities.
    """

    def __init__(self, *args, **kwds):
        # no_smarty is a nesting *counter*, not a boolean: regions whose
        # text must stay verbatim (literals, signatures, grammar
        # productions) may nest inside each other.
        self.no_smarty = 0
        HTMLTranslator.__init__(self, *args, **kwds)

    def visit_literal(self, node):
        # literal text must not be smart-quoted; the base method raises
        # SkipNode, hence the try/finally to restore the counter
        self.no_smarty += 1
        try:
            # this raises SkipNode
            HTMLTranslator.visit_literal(self, node)
        finally:
            self.no_smarty -= 1

    def visit_literal_emphasis(self, node):
        self.no_smarty += 1
        self.visit_emphasis(node)

    def depart_literal_emphasis(self, node):
        self.depart_emphasis(node)
        self.no_smarty -= 1

    def visit_desc_signature(self, node):
        # signatures are code: keep quotes/dashes verbatim
        self.no_smarty += 1
        HTMLTranslator.visit_desc_signature(self, node)

    def depart_desc_signature(self, node):
        self.no_smarty -= 1
        HTMLTranslator.depart_desc_signature(self, node)

    def visit_productionlist(self, node):
        # grammar productions are code too; the base method raises
        # SkipNode, hence the try/finally
        self.no_smarty += 1
        try:
            HTMLTranslator.visit_productionlist(self, node)
        finally:
            self.no_smarty -= 1

    def visit_option(self, node):
        self.no_smarty += 1
        HTMLTranslator.visit_option(self, node)
    def depart_option(self, node):
        self.no_smarty -= 1
        HTMLTranslator.depart_option(self, node)

    def bulk_text_processor(self, text):
        # educate text only when not inside any protected region
        if self.no_smarty <= 0:
            return sphinx_smarty_pants(text)
        return text
+""" + +import re +import sys +from os import path + +from docutils import nodes, writers +from docutils.writers.latex2e import Babel + +from sphinx import addnodes +from sphinx import highlighting +from sphinx.locale import admonitionlabels, versionlabels +from sphinx.util import ustrftime +from sphinx.util.texescape import tex_escape_map +from sphinx.util.smartypants import educateQuotesLatex + +HEADER = r'''%% Generated by Sphinx. +\documentclass[%(papersize)s,%(pointsize)s%(classoptions)s]{%(docclass)s} +%(inputenc)s +%(fontenc)s +%(babel)s +%(fontpkg)s +%(fncychap)s +\usepackage{sphinx} +%(preamble)s + +\title{%(title)s} +\date{%(date)s} +\release{%(release)s} +\author{%(author)s} +\newcommand{\sphinxlogo}{%(logo)s} +\renewcommand{\releasename}{%(releasename)s} +%(makeindex)s +%(makemodindex)s +''' + +BEGIN_DOC = r''' +\begin{document} +%(shorthandoff)s +%(maketitle)s +%(tableofcontents)s +''' + +FOOTER = r''' +%(footer)s +\renewcommand{\indexname}{%(modindexname)s} +%(printmodindex)s +\renewcommand{\indexname}{%(indexname)s} +%(printindex)s +\end{document} +''' + + +class LaTeXWriter(writers.Writer): + + supported = ('sphinxlatex',) + + settings_spec = ('LaTeX writer options', '', ( + ('Document name', ['--docname'], {'default': ''}), + ('Document class', ['--docclass'], {'default': 'manual'}), + ('Author', ['--author'], {'default': ''}), + )) + settings_defaults = {} + + output = None + + def __init__(self, builder): + writers.Writer.__init__(self) + self.builder = builder + + def translate(self): + visitor = LaTeXTranslator(self.document, self.builder) + self.document.walkabout(visitor) + self.output = visitor.astext() + + +# Helper classes + +class ExtBabel(Babel): + def get_shorthandoff(self): + shortlang = self.language.split('_')[0] + if shortlang in ('de', 'sl', 'pt', 'es', 'nl', 'pl'): + return '\\shorthandoff{"}' + return '' + + _ISO639_TO_BABEL = Babel._ISO639_TO_BABEL.copy() + _ISO639_TO_BABEL['sl'] = 'slovene' + + +class Table(object): + def 
class Table(object):
    """Mutable bookkeeping state for the table currently being rendered."""

    def __init__(self):
        # current column cursor and total number of columns
        self.col = self.colcount = 0
        # explicit column spec (from a tabular_col_spec node), if any
        self.colspec = None
        # caption text, when the table carries a title
        self.caption = None
        # whether a header row was seen / verbatim content is present
        self.had_head = self.has_verbatim = False


class Desc(object):
    """Per-directive state while rendering a desc (object description)."""

    def __init__(self, node):
        # map the directive's desctype to its LaTeX environment name,
        # falling back to the generic 'describe' environment
        self.env = LaTeXTranslator.desc_map.get(node['desctype'], 'describe')
        # number of signatures already emitted for this desc node
        self.count = 0
        # signature fragments, filled in by the desc_* child visitors
        self.type = self.cls = self.name = self.params = self.annotation = ''
command line "-D latex_paper_size=" + papersize = 'letterpaper' + + self.elements = self.default_elements.copy() + self.elements.update({ + 'docclass': document.settings.docclass, + 'papersize': papersize, + 'pointsize': builder.config.latex_font_size, + # if empty, the title is set to the first section title + 'title': document.settings.title, + 'date': ustrftime(builder.config.today_fmt or _('%B %d, %Y')), + 'release': builder.config.release, + 'author': document.settings.author, + 'releasename': _('Release'), + 'preamble': builder.config.latex_preamble, + 'modindexname': _('Module Index'), + 'indexname': _('Index'), + }) + if builder.config.latex_logo: + self.elements['logo'] = '\\includegraphics{%s}\\par' % \ + path.basename(builder.config.latex_logo) + if builder.config.language: + babel = ExtBabel(builder.config.language) + lang = babel.get_language() + if lang: + self.elements['classoptions'] += ',' + babel.get_language() + else: + self.builder.warn('no Babel option known for language %r' % + builder.config.language) + self.elements['shorthandoff'] = babel.get_shorthandoff() + self.elements['fncychap'] = '\\usepackage[Sonny]{fncychap}' + else: + self.elements['classoptions'] += ',english' + if not builder.config.latex_use_modindex: + self.elements['makemodindex'] = '' + self.elements['printmodindex'] = '' + # allow the user to override them all + self.elements.update(builder.config.latex_elements) + + self.highlighter = highlighting.PygmentsBridge( + 'latex', builder.config.pygments_style) + self.context = [] + self.descstack = [] + self.bibitems = [] + self.table = None + self.next_table_colspec = None + self.highlightlang = builder.config.highlight_language + self.highlightlinenothreshold = sys.maxint + self.written_ids = set() + self.footnotestack = [] + if self.elements['docclass'] == 'manual': + if builder.config.latex_use_parts: + self.top_sectionlevel = 0 + else: + self.top_sectionlevel = 1 + else: + self.top_sectionlevel = 2 + 
self.next_section_target = None + # flags + self.verbatim = None + self.in_title = 0 + self.in_production_list = 0 + self.first_document = 1 + self.this_is_the_title = 1 + self.literal_whitespace = 0 + self.no_contractions = 0 + + def astext(self): + return (HEADER % self.elements + self.highlighter.get_stylesheet() + + u''.join(self.body) + FOOTER % self.elements) + + def visit_document(self, node): + self.footnotestack.append(self.collect_footnotes(node)) + if self.first_document == 1: + # the first document is all the regular content ... + self.body.append(BEGIN_DOC % self.elements) + self.first_document = 0 + elif self.first_document == 0: + # ... and all others are the appendices + self.body.append('\n\\appendix\n') + self.first_document = -1 + # "- 1" because the level is increased before the title is visited + self.sectionlevel = self.top_sectionlevel - 1 + def depart_document(self, node): + if self.bibitems: + widest_label = "" + for bi in self.bibitems: + if len(widest_label) < len(bi[0]): + widest_label = bi[0] + self.body.append('\n\\begin{thebibliography}{%s}\n' % widest_label) + for bi in self.bibitems: + # cite_key: underscores must not be escaped + cite_key = bi[0].replace(r"\_", "_") + self.body.append('\\bibitem[%s]{%s}{%s}\n' % (bi[0], cite_key, bi[1])) + self.body.append('\\end{thebibliography}\n') + self.bibitems = [] + + def visit_start_of_file(self, node): + # This marks the begin of a new file; therefore the current module and + # class must be reset + self.body.append('\n\\resetcurrentobjects\n') + # and also, new footnotes + self.footnotestack.append(self.collect_footnotes(node)) + + def collect_footnotes(self, node): + fnotes = {} + def footnotes_under(n): + if isinstance(n, nodes.footnote): + yield n + else: + for c in n.children: + if isinstance(c, addnodes.start_of_file): + continue + for k in footnotes_under(c): + yield k + for fn in footnotes_under(node): + num = fn.children[0].astext().strip() + fnotes[num] = fn + 
fn.parent.remove(fn) + return fnotes + + def depart_start_of_file(self, node): + self.footnotestack.pop() + + def visit_highlightlang(self, node): + self.highlightlang = node['lang'] + self.highlightlinenothreshold = node['linenothreshold'] + raise nodes.SkipNode + + def visit_section(self, node): + if not self.this_is_the_title: + self.sectionlevel += 1 + self.body.append('\n\n') + if self.next_section_target: + self.body.append(r'\hypertarget{%s}{}' % self.next_section_target) + self.next_section_target = None + #if node.get('ids'): + # for id in node['ids']: + # if id not in self.written_ids: + # self.body.append(r'\hypertarget{%s}{}' % id) + # self.written_ids.add(id) + def depart_section(self, node): + self.sectionlevel = max(self.sectionlevel - 1, self.top_sectionlevel - 1) + + def visit_problematic(self, node): + self.body.append(r'{\color{red}\bfseries{}') + def depart_problematic(self, node): + self.body.append('}') + + def visit_topic(self, node): + self.body.append('\\setbox0\\vbox{\n' + '\\begin{minipage}{0.95\\textwidth}\n') + def depart_topic(self, node): + self.body.append('\\end{minipage}}\n' + '\\begin{center}\\setlength{\\fboxsep}{5pt}' + '\\shadowbox{\\box0}\\end{center}\n') + visit_sidebar = visit_topic + depart_sidebar = depart_topic + + def visit_glossary(self, node): + pass + def depart_glossary(self, node): + pass + + def visit_productionlist(self, node): + self.body.append('\n\n\\begin{productionlist}\n') + self.in_production_list = 1 + def depart_productionlist(self, node): + self.body.append('\\end{productionlist}\n\n') + self.in_production_list = 0 + + def visit_production(self, node): + if node['tokenname']: + self.body.append('\\production{%s}{' % self.encode(node['tokenname'])) + else: + self.body.append('\\productioncont{') + def depart_production(self, node): + self.body.append('}\n') + + def visit_transition(self, node): + self.body.append('\n\n\\bigskip\\hrule{}\\bigskip\n\n') + def depart_transition(self, node): + pass + + def 
visit_title(self, node): + parent = node.parent + if isinstance(parent, addnodes.seealso): + # the environment already handles this + raise nodes.SkipNode + elif self.this_is_the_title: + if len(node.children) != 1 and not isinstance(node.children[0], nodes.Text): + self.builder.warn('document title is not a single Text node') + if not self.elements['title']: + # text needs to be escaped since it is inserted into + # the output literally + self.elements['title'] = node.astext().translate(tex_escape_map) + self.this_is_the_title = 0 + raise nodes.SkipNode + elif isinstance(parent, nodes.section): + try: + self.body.append(r'\%s{' % self.sectionnames[self.sectionlevel]) + except IndexError: + from sphinx.application import SphinxError + raise SphinxError('too many nesting section levels for LaTeX, ' + 'at heading: %s' % node.astext()) + self.context.append('}\n') + elif isinstance(parent, (nodes.topic, nodes.sidebar)): + self.body.append(r'\textbf{') + self.context.append('}\n\n\medskip\n\n') + elif isinstance(parent, nodes.Admonition): + self.body.append('{') + self.context.append('}\n') + elif isinstance(parent, nodes.table): + self.table.caption = self.encode(node.astext()) + raise nodes.SkipNode + else: + self.builder.warn('encountered title node not in section, topic, ' + 'table, admonition or sidebar') + self.body.append('\\textbf{') + self.context.append('}\n') + self.in_title = 1 + def depart_title(self, node): + self.in_title = 0 + self.body.append(self.context.pop()) + + def visit_subtitle(self, node): + if isinstance(node.parent, nodes.sidebar): + self.body.append('~\\\\\n\\textbf{') + self.context.append('}\n\\smallskip\n') + else: + self.context.append('') + def depart_subtitle(self, node): + self.body.append(self.context.pop()) + + desc_map = { + 'function' : 'funcdesc', + 'class': 'classdesc', + 'method': 'methoddesc', + 'staticmethod': 'staticmethoddesc', + 'exception': 'excdesc', + 'data': 'datadesc', + 'attribute': 'memberdesc', + 'opcode': 
    def visit_desc_signature(self, node):
        # one desc node may carry several signatures; the desc_* child
        # visitors refill these fields for each of them
        d = self.descstack[-1]
        # reset these for every signature
        d.type = d.cls = d.name = d.params = ''
    def depart_desc_signature(self, node):
        # Emit the opening of the LaTeX desc environment (first signature)
        # or a \...line continuation (subsequent signatures), with the
        # argument layout that each environment expects.
        d = self.descstack[-1]
        d.cls = d.cls.rstrip('.')
        if node.parent['desctype'] != 'describe' and node['ids']:
            # anchor for cross-references to this object
            hyper = '\\hypertarget{%s}{}' % node['ids'][0]
        else:
            hyper = ''
        if d.count == 0:
            t1 = "\n\n%s\\begin{%s}" % (hyper, d.env)
        else:
            # additional signature: "\funcdescline" etc. -- strips the
            # trailing "desc" from the environment name
            t1 = "\n%s\\%sline" % (hyper, d.env[:-4])
        d.count += 1
        # NOTE(review): 'excclassdesc' is tested below but never produced
        # by desc_map ('exception' maps to 'excdesc') -- confirm upstream
        if d.env in ('funcdesc', 'classdesc', 'excclassdesc'):
            t2 = "{%s}{%s}" % (d.name, d.params)
        elif d.env in ('datadesc', 'excdesc', 'csimplemacrodesc'):
            t2 = "{%s}" % (d.name)
        elif d.env in ('methoddesc', 'staticmethoddesc'):
            if d.cls:
                t2 = "[%s]{%s}{%s}" % (d.cls, d.name, d.params)
            else:
                t2 = "{%s}{%s}" % (d.name, d.params)
        elif d.env == 'memberdesc':
            if d.cls:
                t2 = "[%s]{%s}" % (d.cls, d.name)
            else:
                t2 = "{%s}" % d.name
        elif d.env == 'cfuncdesc':
            if d.cls:
                # C++ class names
                d.name = '%s::%s' % (d.cls, d.name)
            t2 = "{%s}{%s}{%s}" % (d.type, d.name, d.params)
        elif d.env == 'cmemberdesc':
            try:
                # split "struct foo." into type and containing struct;
                # 'type' intentionally shadows the builtin here
                type, container = d.type.rsplit(' ', 1)
                container = container.rstrip('.')
            except ValueError:
                container = ''
                type = d.type
            t2 = "{%s}{%s}{%s}" % (container, type, d.name)
        elif d.env == 'cvardesc':
            t2 = "{%s}{%s}" % (d.type, d.name)
        elif d.env == 'ctypedesc':
            t2 = "{%s}" % (d.name)
        elif d.env == 'opcodedesc':
            t2 = "{%s}{%s}" % (d.name, d.params)
        elif d.env == 'describe':
            # fallback environment for all unknown desctypes
            t2 = "{%s}" % d.name
        self.body.append(t1 + t2)
visit_desc_type(self, node): + d = self.descstack[-1] + if d.env == 'describe': + d.name += self.encode(node.astext()) + else: + self.descstack[-1].type = self.encode(node.astext().strip()) + raise nodes.SkipNode + + def visit_desc_name(self, node): + d = self.descstack[-1] + if d.env == 'describe': + d.name += self.encode(node.astext()) + else: + self.descstack[-1].name = self.encode(node.astext().strip()) + raise nodes.SkipNode + + def visit_desc_addname(self, node): + d = self.descstack[-1] + if d.env == 'describe': + d.name += self.encode(node.astext()) + else: + self.descstack[-1].cls = self.encode(node.astext().strip()) + raise nodes.SkipNode + + def visit_desc_parameterlist(self, node): + d = self.descstack[-1] + if d.env == 'describe': + d.name += self.encode(node.astext()) + else: + self.descstack[-1].params = self.encode(node.astext().strip()) + raise nodes.SkipNode + + def visit_desc_annotation(self, node): + d = self.descstack[-1] + if d.env == 'describe': + d.name += self.encode(node.astext()) + else: + self.descstack[-1].annotation = self.encode(node.astext().strip()) + raise nodes.SkipNode + + def visit_refcount(self, node): + self.body.append("\\emph{") + def depart_refcount(self, node): + self.body.append("}\\\\") + + def visit_desc_content(self, node): + if node.children and not isinstance(node.children[0], nodes.paragraph): + # avoid empty desc environment which causes a formatting bug + self.body.append('~') + def depart_desc_content(self, node): + pass + + def visit_seealso(self, node): + self.body.append("\n\n\\strong{%s:}\n\n" % admonitionlabels['seealso']) + def depart_seealso(self, node): + self.body.append("\n\n") + + def visit_rubric(self, node): + if len(node.children) == 1 and node.children[0].astext() == 'Footnotes': + raise nodes.SkipNode + self.body.append('\\paragraph{') + self.context.append('}\n') + def depart_rubric(self, node): + self.body.append(self.context.pop()) + + def visit_footnote(self, node): + pass + def 
    def visit_table(self, node):
        # Rendering needs the column count (and verbatim flag) before the
        # \begin{tabular...} line can be written, so all output generated
        # while inside the table is buffered and emitted in depart_table.
        if self.table:
            raise NotImplementedError('Nested tables are not supported.')
        self.table = Table()
        self.tablebody = []
        # Redirect body output until table is finished.
        self._body = self.body
        self.body = self.tablebody
    def depart_table(self, node):
        # restore the real output list, then emit the complete table
        self.body = self._body
        if self.table.caption is not None:
            # threeparttable allows a caption outside a float environment
            self.body.append('\n\\begin{threeparttable}\n'
                             '\\caption{%s}\n' % self.table.caption)
        if self.table.has_verbatim:
            # tabulary cannot contain verbatim material; use plain tabular
            self.body.append('\n\\begin{tabular}')
        else:
            self.body.append('\n\\begin{tabulary}{\\textwidth}')
        if self.table.colspec:
            # explicit spec from a preceding tabular_col_spec node
            self.body.append(self.table.colspec)
        else:
            if self.table.has_verbatim:
                # equal-width p{} columns filling ~95% of the text width
                colwidth = 0.95 / self.table.colcount
                colspec = ('p{%.3f\\textwidth}|' % colwidth) * self.table.colcount
                self.body.append('{|' + colspec + '}\n')
            else:
                # 'L': tabulary's auto-balanced, left-aligned column type
                self.body.append('{|' + ('L|' * self.table.colcount) + '}\n')
        self.body.extend(self.tablebody)
        if self.table.has_verbatim:
            self.body.append('\\end{tabular}\n\n')
        else:
            self.body.append('\\end{tabulary}\n\n')
        if self.table.caption is not None:
            self.body.append('\\end{threeparttable}\n\n')
        self.table = None
        self.tablebody = None
self.table.had_head: + self.visit_thead(node) + def depart_tbody(self, node): + self.body.append('\\hline\n') + + def visit_row(self, node): + self.table.col = 0 + def depart_row(self, node): + self.body.append('\\\\\n') + + def visit_entry(self, node): + if node.has_key('morerows') or node.has_key('morecols'): + raise NotImplementedError('Column or row spanning cells are ' + 'not implemented.') + if self.table.col > 0: + self.body.append(' & ') + self.table.col += 1 + if isinstance(node.parent.parent, nodes.thead): + self.body.append('\\textbf{') + self.context.append('}') + else: + self.context.append('') + def depart_entry(self, node): + self.body.append(self.context.pop()) # header + + def visit_acks(self, node): + # this is a list in the source, but should be rendered as a + # comma-separated list here + self.body.append('\n\n') + self.body.append(', '.join(n.astext() for n in node.children[0].children) + '.') + self.body.append('\n\n') + raise nodes.SkipNode + + def visit_bullet_list(self, node): + self.body.append('\\begin{itemize}\n' ) + def depart_bullet_list(self, node): + self.body.append('\\end{itemize}\n' ) + + def visit_enumerated_list(self, node): + self.body.append('\\begin{enumerate}\n' ) + def depart_enumerated_list(self, node): + self.body.append('\\end{enumerate}\n' ) + + def visit_list_item(self, node): + # Append "{}" in case the next character is "[", which would break + # LaTeX's list environment (no numbering and the "[" is not printed). 
+ self.body.append(r'\item {} ') + def depart_list_item(self, node): + self.body.append('\n') + + def visit_definition_list(self, node): + self.body.append('\\begin{description}\n') + def depart_definition_list(self, node): + self.body.append('\\end{description}\n') + + def visit_definition_list_item(self, node): + pass + def depart_definition_list_item(self, node): + pass + + def visit_term(self, node): + ctx = ']' + if node.has_key('ids') and node['ids']: + ctx += '\\hypertarget{%s}{}' % node['ids'][0] + self.body.append('\\item[') + self.context.append(ctx) + def depart_term(self, node): + self.body.append(self.context.pop()) + + def visit_classifier(self, node): + self.body.append('{[}') + def depart_classifier(self, node): + self.body.append('{]}') + + def visit_definition(self, node): + pass + def depart_definition(self, node): + self.body.append('\n') + + def visit_field_list(self, node): + self.body.append('\\begin{quote}\\begin{description}\n') + def depart_field_list(self, node): + self.body.append('\\end{description}\\end{quote}\n') + + def visit_field(self, node): + pass + def depart_field(self, node): + pass + + visit_field_name = visit_term + depart_field_name = depart_term + + visit_field_body = visit_definition + depart_field_body = depart_definition + + def visit_paragraph(self, node): + self.body.append('\n') + def depart_paragraph(self, node): + self.body.append('\n') + + def visit_centered(self, node): + self.body.append('\n\\begin{centering}') + def depart_centered(self, node): + self.body.append('\n\\end{centering}') + + def visit_module(self, node): + modname = node['modname'] + self.body.append('\n\\declaremodule[%s]{}{%s}' % (modname.replace('_', ''), + self.encode(modname))) + self.body.append('\n\\modulesynopsis{%s}' % self.encode(node['synopsis'])) + if node.has_key('platform'): + self.body.append('\\platform{%s}' % self.encode(node['platform'])) + def depart_module(self, node): + pass + + def latex_image_length(self, width_str): + match 
= re.match('(\d*\.?\d*)\s*(\S*)', width_str) + if not match: + # fallback + return width_str + res = width_str + amount, unit = match.groups()[:2] + if not unit or unit == "px": + # pixels: let LaTeX alone + return None + elif unit == "%": + res = "%.3f\\linewidth" % (float(amount) / 100.0) + return res + + def visit_image(self, node): + attrs = node.attributes + pre = [] # in reverse order + post = [] + include_graphics_options = [] + inline = isinstance(node.parent, nodes.TextElement) + if attrs.has_key('scale'): + # Could also be done with ``scale`` option to + # ``\includegraphics``; doing it this way for consistency. + pre.append('\\scalebox{%f}{' % (attrs['scale'] / 100.0,)) + post.append('}') + if attrs.has_key('width'): + w = self.latex_image_length(attrs['width']) + if w: + include_graphics_options.append('width=%s' % w) + if attrs.has_key('height'): + h = self.latex_image_length(attrs['height']) + include_graphics_options.append('height=%s' % h) + if attrs.has_key('align'): + align_prepost = { + # By default latex aligns the top of an image. + (1, 'top'): ('', ''), + (1, 'middle'): ('\\raisebox{-0.5\\height}{', '}'), + (1, 'bottom'): ('\\raisebox{-\\height}{', '}'), + (0, 'center'): ('{\\hfill', '\\hfill}'), + # These 2 don't exactly do the right thing. The image should + # be floated alongside the paragraph. See + # http://www.w3.org/TR/html4/struct/objects.html#adef-align-IMG + (0, 'left'): ('{', '\\hfill}'), + (0, 'right'): ('{\\hfill', '}'),} + try: + pre.append(align_prepost[inline, attrs['align']][0]) + post.append(align_prepost[inline, attrs['align']][1]) + except KeyError: + pass # XXX complain here? + if not inline: + pre.append('\n') + post.append('\n') + pre.reverse() + if node['uri'] in self.builder.images: + uri = self.builder.images[node['uri']] + else: + # missing image! 
+ if self.ignore_missing_images: + return + uri = node['uri'] + if uri.find('://') != -1: + # ignore remote images + return + self.body.extend(pre) + options = '' + if include_graphics_options: + options = '[%s]' % ','.join(include_graphics_options) + self.body.append('\\includegraphics%s{%s}' % (options, uri)) + self.body.extend(post) + def depart_image(self, node): + pass + + def visit_figure(self, node): + if (not node.attributes.has_key('align') or + node.attributes['align'] == 'center'): + # centering does not add vertical space like center. + align = '\n\\centering' + align_end = '' + else: + # TODO non vertical space for other alignments. + align = '\\begin{flush%s}' % node.attributes['align'] + align_end = '\\end{flush%s}' % node.attributes['align'] + self.body.append('\\begin{figure}[htbp]%s\n' % align) + self.context.append('%s\\end{figure}\n' % align_end) + def depart_figure(self, node): + self.body.append(self.context.pop()) + + def visit_caption(self, node): + self.body.append('\\caption{') + def depart_caption(self, node): + self.body.append('}') + + def visit_legend(self, node): + self.body.append('{\\small ') + def depart_legend(self, node): + self.body.append('}') + + def visit_admonition(self, node): + self.body.append('\n\\begin{notice}{note}') + def depart_admonition(self, node): + self.body.append('\\end{notice}\n') + + def _make_visit_admonition(name): + def visit_admonition(self, node): + self.body.append('\n\\begin{notice}{%s}{%s:}' % + (name, admonitionlabels[name])) + return visit_admonition + def _depart_named_admonition(self, node): + self.body.append('\\end{notice}\n') + + visit_attention = _make_visit_admonition('attention') + depart_attention = _depart_named_admonition + visit_caution = _make_visit_admonition('caution') + depart_caution = _depart_named_admonition + visit_danger = _make_visit_admonition('danger') + depart_danger = _depart_named_admonition + visit_error = _make_visit_admonition('error') + depart_error = 
_depart_named_admonition + visit_hint = _make_visit_admonition('hint') + depart_hint = _depart_named_admonition + visit_important = _make_visit_admonition('important') + depart_important = _depart_named_admonition + visit_note = _make_visit_admonition('note') + depart_note = _depart_named_admonition + visit_tip = _make_visit_admonition('tip') + depart_tip = _depart_named_admonition + visit_warning = _make_visit_admonition('warning') + depart_warning = _depart_named_admonition + + def visit_versionmodified(self, node): + intro = versionlabels[node['type']] % node['version'] + if node.children: + intro += ': ' + else: + intro += '.' + self.body.append(intro) + def depart_versionmodified(self, node): + pass + + def visit_target(self, node): + def add_target(id): + # indexing uses standard LaTeX index markup, so the targets + # will be generated differently + if not id.startswith('index-'): + self.body.append(r'\hypertarget{%s}{}' % id) + + if node.has_key('refid') and node['refid'] not in self.written_ids: + parindex = node.parent.index(node) + try: + next = node.parent[parindex+1] + if isinstance(next, nodes.section): + self.next_section_target = node['refid'] + return + except IndexError: + pass + add_target(node['refid']) + self.written_ids.add(node['refid']) + def depart_target(self, node): + pass + + def visit_attribution(self, node): + self.body.append('\n\\begin{flushright}\n') + self.body.append('---') + def depart_attribution(self, node): + self.body.append('\n\\end{flushright}\n') + + def visit_index(self, node, scre=re.compile(r';\s*')): + entries = node['entries'] + for type, string, tid, _ in entries: + if type == 'single': + self.body.append(r'\index{%s}' % scre.sub('!', self.encode(string))) + elif type == 'pair': + parts = tuple(self.encode(x.strip()) for x in string.split(';', 1)) + self.body.append(r'\indexii{%s}{%s}' % parts) + elif type == 'triple': + parts = tuple(self.encode(x.strip()) for x in string.split(';', 2)) + 
self.body.append(r'\indexiii{%s}{%s}{%s}' % parts) + else: + self.builder.warn('unknown index entry type %s found' % type) + raise nodes.SkipNode + + def visit_raw(self, node): + if 'latex' in node.get('format', '').split(): + self.body.append(node.astext()) + raise nodes.SkipNode + + def visit_reference(self, node): + uri = node.get('refuri', '') + if self.in_title or not uri: + self.context.append('') + elif uri.startswith('mailto:') or uri.startswith('http:') or \ + uri.startswith('https:') or uri.startswith('ftp:'): + self.body.append('\\href{%s}{' % self.encode(uri)) + self.context.append('}') + elif uri.startswith('#'): + self.body.append('\\hyperlink{%s}{' % uri[1:]) + self.context.append('}') + elif uri.startswith('@token'): + if self.in_production_list: + self.body.append('\\token{') + else: + self.body.append('\\grammartoken{') + self.context.append('}') + else: + self.builder.warn('unusable reference target found: %s' % uri) + self.context.append('') + def depart_reference(self, node): + self.body.append(self.context.pop()) + + def visit_pending_xref(self, node): + pass + def depart_pending_xref(self, node): + pass + + def visit_emphasis(self, node): + self.body.append(r'\emph{') + def depart_emphasis(self, node): + self.body.append('}') + + def visit_literal_emphasis(self, node): + self.body.append(r'\emph{\texttt{') + self.no_contractions += 1 + def depart_literal_emphasis(self, node): + self.body.append('}}') + self.no_contractions -= 1 + + def visit_strong(self, node): + self.body.append(r'\textbf{') + def depart_strong(self, node): + self.body.append('}') + + def visit_title_reference(self, node): + self.body.append(r'\emph{') + def depart_title_reference(self, node): + self.body.append('}') + + def visit_citation(self, node): + # TODO maybe use cite bibitems + self.bibitems.append(['', '']) + self.context.append(len(self.body)) + def depart_citation(self, node): + size = self.context.pop() + text = ''.join(self.body[size:]) + del self.body[size:] + 
self.bibitems[-1][1] = text + + def visit_citation_reference(self, node): + citeid = node.astext() + self.body.append('\\cite{%s}' % citeid) + raise nodes.SkipNode + + def visit_literal(self, node): + content = self.encode(node.astext().strip()) + if self.in_title: + self.body.append(r'\texttt{%s}' % content) + elif node.has_key('role') and node['role'] == 'samp': + self.body.append(r'\samp{%s}' % content) + else: + self.body.append(r'\code{%s}' % content) + raise nodes.SkipNode + + def visit_footnote_reference(self, node): + num = node.astext().strip() + try: + fn = self.footnotestack[-1][num] + except (KeyError, IndexError): + raise nodes.SkipNode + self.body.append('\\footnote{') + fn.walkabout(self) + raise nodes.SkipChildren + def depart_footnote_reference(self, node): + self.body.append('}') + + def visit_literal_block(self, node): + self.verbatim = '' + def depart_literal_block(self, node): + code = self.verbatim.rstrip('\n') + lang = self.highlightlang + linenos = code.count('\n') >= self.highlightlinenothreshold - 1 + if node.has_key('language'): + # code-block directives + lang = node['language'] + if node.has_key('linenos'): + linenos = node['linenos'] + hlcode = self.highlighter.highlight_block(code, lang, linenos) + # workaround for Unicode issue + hlcode = hlcode.replace(u'€', u'@texteuro[]') + # must use original Verbatim environment and "tabular" environment + if self.table: + hlcode = hlcode.replace('\\begin{Verbatim}', + '\\begin{OriginalVerbatim}') + self.table.has_verbatim = True + # get consistent trailer + hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim} + hlcode = hlcode.rstrip() + '\n' + self.body.append('\n' + hlcode + '\\end{%sVerbatim}\n' % + (self.table and 'Original' or '')) + self.verbatim = None + visit_doctest_block = visit_literal_block + depart_doctest_block = depart_literal_block + + def visit_line_block(self, node): + """line-block: + * whitespace (including linebreaks) is significant + * inline markup is supported. 
+ * serif typeface + """ + self.body.append('{\\raggedright{}') + self.literal_whitespace = 1 + def depart_line_block(self, node): + self.literal_whitespace = 0 + # remove the last \\ + del self.body[-1] + self.body.append('}\n') + + def visit_line(self, node): + self._line_start = len(self.body) + def depart_line(self, node): + if self._line_start == len(self.body): + # no output in this line -- add a nonbreaking space, else the + # \\ command will give an error + self.body.append('~') + if self.table is not None: + self.body.append('\\newline\n') + else: + self.body.append('\\\\\n') + + def visit_block_quote(self, node): + # If the block quote contains a single object and that object + # is a list, then generate a list not a block quote. + # This lets us indent lists. + done = 0 + if len(node.children) == 1: + child = node.children[0] + if isinstance(child, nodes.bullet_list) or \ + isinstance(child, nodes.enumerated_list): + done = 1 + if not done: + self.body.append('\\begin{quote}\n') + def depart_block_quote(self, node): + done = 0 + if len(node.children) == 1: + child = node.children[0] + if isinstance(child, nodes.bullet_list) or \ + isinstance(child, nodes.enumerated_list): + done = 1 + if not done: + self.body.append('\\end{quote}\n') + + # option node handling copied from docutils' latex writer + + def visit_option(self, node): + if self.context[-1]: + # this is not the first option + self.body.append(', ') + def depart_option(self, node): + # flag that the first option is done. 
+ self.context[-1] += 1 + + def visit_option_argument(self, node): + """The delimiter betweeen an option and its argument.""" + self.body.append(node.get('delimiter', ' ')) + def depart_option_argument(self, node): + pass + + def visit_option_group(self, node): + self.body.append('\\item [') + # flag for first option + self.context.append(0) + def depart_option_group(self, node): + self.context.pop() # the flag + self.body.append('] ') + + def visit_option_list(self, node): + self.body.append('\\begin{optionlist}{3cm}\n') + def depart_option_list(self, node): + self.body.append('\\end{optionlist}\n') + + def visit_option_list_item(self, node): + pass + def depart_option_list_item(self, node): + pass + + def visit_option_string(self, node): + ostring = node.astext() + self.body.append(self.encode(ostring.replace('--', u'-{-}'))) + raise nodes.SkipNode + + def visit_description(self, node): + self.body.append( ' ' ) + def depart_description(self, node): + pass + + def visit_superscript(self, node): + self.body.append('$^{\\text{') + def depart_superscript(self, node): + self.body.append('}}$') + + def visit_subscript(self, node): + self.body.append('$_{\\text{') + def depart_subscript(self, node): + self.body.append('}}$') + + def visit_substitution_definition(self, node): + raise nodes.SkipNode + + def visit_substitution_reference(self, node): + raise nodes.SkipNode + + def visit_generated(self, node): + pass + def depart_generated(self, node): + pass + + def visit_compound(self, node): + pass + def depart_compound(self, node): + pass + + def visit_container(self, node): + pass + def depart_container(self, node): + pass + + def visit_decoration(self, node): + pass + def depart_decoration(self, node): + pass + + # text handling + + def encode(self, text): + text = unicode(text).translate(tex_escape_map) + if self.literal_whitespace: + # Insert a blank before the newline, to avoid + # ! LaTeX Error: There's no line here to end. 
+ text = text.replace(u'\n', u'~\\\\\n').replace(u' ', u'~') + if self.no_contractions: + text = text.replace('--', u'-{-}') + return text + + def visit_Text(self, node): + if self.verbatim is not None: + self.verbatim += node.astext() + else: + text = self.encode(node.astext()) + self.body.append(educateQuotesLatex(text)) + def depart_Text(self, node): + pass + + def visit_comment(self, node): + raise nodes.SkipNode + + def visit_meta(self, node): + # only valid for HTML + raise nodes.SkipNode + + def visit_system_message(self, node): + pass + def depart_system_message(self, node): + self.body.append('\n') + + def unknown_visit(self, node): + raise NotImplementedError('Unknown node: ' + node.__class__.__name__) diff --git a/sphinx/writers/text.py b/sphinx/writers/text.py new file mode 100644 index 00000000..74c637ca --- /dev/null +++ b/sphinx/writers/text.py @@ -0,0 +1,679 @@ +# -*- coding: utf-8 -*- +""" + sphinx.writers.text + ~~~~~~~~~~~~~~~~~~~ + + Custom docutils writer for plain text. + + :copyright: 2008 by Georg Brandl. + :license: BSD. +""" + +import re +import textwrap + +from docutils import nodes, writers + +from sphinx import addnodes +from sphinx.locale import admonitionlabels, versionlabels + + +class TextWriter(writers.Writer): + supported = ('text',) + settings_spec = ('No options here.', '', ()) + settings_defaults = {} + + output = None + + def __init__(self, builder): + writers.Writer.__init__(self) + self.builder = builder + + def translate(self): + visitor = TextTranslator(self.document, self.builder) + self.document.walkabout(visitor) + self.output = visitor.body + +# monkey-patch... 
+new_wordsep_re = re.compile( + r'(\s+|' # any whitespace + r'(?<=\s)(?::[a-z-]+:)?`\S+|' # interpreted text start + r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words + r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash +textwrap.TextWrapper.wordsep_re = new_wordsep_re + +MAXWIDTH = 70 +STDINDENT = 3 + + +class TextTranslator(nodes.NodeVisitor): + sectionchars = '*=-~"+' + + def __init__(self, document, builder): + nodes.NodeVisitor.__init__(self, document) + + self.states = [[]] + self.stateindent = [0] + self.sectionlevel = 0 + self.table = None + + def add_text(self, text): + self.states[-1].append((-1, text)) + def new_state(self, indent=STDINDENT): + self.states.append([]) + self.stateindent.append(indent) + def end_state(self, wrap=True, end=[''], first=None): + content = self.states.pop() + maxindent = sum(self.stateindent) + indent = self.stateindent.pop() + result = [] + toformat = [] + def do_format(): + if not toformat: + return + if wrap: + res = textwrap.wrap(''.join(toformat), width=MAXWIDTH-maxindent) + else: + res = ''.join(toformat).splitlines() + if end: + res += end + result.append((indent, res)) + for itemindent, item in content: + if itemindent == -1: + toformat.append(item) + else: + do_format() + result.append((indent + itemindent, item)) + toformat = [] + do_format() + if first is not None and result: + itemindent, item = result[0] + if item: + result.insert(0, (itemindent - indent, [first + item[0]])) + result[1] = (itemindent, item[1:]) + self.states[-1].extend(result) + + def visit_document(self, node): + self.new_state(0) + def depart_document(self, node): + self.end_state() + self.body = '\n'.join(line and (' '*indent + line) + for indent, lines in self.states[0] + for line in lines) + # XXX header/footer? 
+ + def visit_highlightlang(self, node): + raise nodes.SkipNode + + def visit_section(self, node): + self._title_char = self.sectionchars[self.sectionlevel] + self.sectionlevel += 1 + def depart_section(self, node): + self.sectionlevel -= 1 + + def visit_topic(self, node): + self.new_state(0) + def depart_topic(self, node): + self.end_state() + + visit_sidebar = visit_topic + depart_sidebar = depart_topic + + def visit_rubric(self, node): + self.new_state(0) + self.add_text('-[ ') + def depart_rubric(self, node): + self.add_text(' ]-') + self.end_state() + + def visit_compound(self, node): + pass + def depart_compound(self, node): + pass + + def visit_glossary(self, node): + pass + def depart_glossary(self, node): + pass + + def visit_title(self, node): + if isinstance(node.parent, nodes.Admonition): + self.add_text(node.astext()+': ') + raise nodes.SkipNode + self.new_state(0) + def depart_title(self, node): + if isinstance(node.parent, nodes.section): + char = self._title_char + else: + char = '^' + text = ''.join(x[1] for x in self.states.pop() if x[0] == -1) + self.stateindent.pop() + self.states[-1].append((0, ['', text, '%s' % (char * len(text)), ''])) + + def visit_subtitle(self, node): + pass + def depart_subtitle(self, node): + pass + + def visit_attribution(self, node): + self.add_text('-- ') + def depart_attribution(self, node): + pass + + def visit_module(self, node): + if node.has_key('platform'): + self.new_state(0) + self.add_text(_('Platform: %s') % node['platform']) + self.end_state() + raise nodes.SkipNode + + def visit_desc(self, node): + pass + def depart_desc(self, node): + pass + + def visit_desc_signature(self, node): + self.new_state(0) + if node.parent['desctype'] in ('class', 'exception'): + self.add_text('%s ' % node.parent['desctype']) + def depart_desc_signature(self, node): + # XXX: wrap signatures in a way that makes sense + self.end_state(wrap=False, end=None) + + def visit_desc_name(self, node): + pass + def depart_desc_name(self, 
node): + pass + + def visit_desc_addname(self, node): + pass + def depart_desc_addname(self, node): + pass + + def visit_desc_type(self, node): + pass + def depart_desc_type(self, node): + pass + + def visit_desc_parameterlist(self, node): + self.add_text('(') + self.first_param = 1 + def depart_desc_parameterlist(self, node): + self.add_text(')') + + def visit_desc_parameter(self, node): + if not self.first_param: + self.add_text(', ') + else: + self.first_param = 0 + self.add_text(node.astext()) + raise nodes.SkipNode + + def visit_desc_optional(self, node): + self.add_text('[') + def depart_desc_optional(self, node): + self.add_text(']') + + def visit_desc_annotation(self, node): + pass + def depart_desc_annotation(self, node): + pass + + def visit_refcount(self, node): + pass + def depart_refcount(self, node): + pass + + def visit_desc_content(self, node): + self.new_state() + self.add_text('\n') + def depart_desc_content(self, node): + self.end_state() + + def visit_figure(self, node): + self.new_state() + def depart_figure(self, node): + self.end_state() + + def visit_caption(self, node): + pass + def depart_caption(self, node): + pass + + def visit_productionlist(self, node): + self.new_state() + names = [] + for production in node: + names.append(production['tokenname']) + maxlen = max(len(name) for name in names) + for production in node: + if production['tokenname']: + self.add_text(production['tokenname'].ljust(maxlen) + ' ::=') + lastname = production['tokenname'] + else: + self.add_text('%s ' % (' '*len(lastname))) + self.add_text(production.astext() + '\n') + self.end_state(wrap=False) + raise nodes.SkipNode + + def visit_seealso(self, node): + self.new_state() + def depart_seealso(self, node): + self.end_state(first='') + + def visit_footnote(self, node): + self._footnote = node.children[0].astext().strip() + self.new_state(len(self._footnote) + 3) + def depart_footnote(self, node): + self.end_state(first='[%s] ' % self._footnote) + + def 
visit_citation(self, node): + if len(node) and isinstance(node[0], nodes.label): + self._citlabel = node[0].astext() + else: + self._citlabel = '' + self.new_state(len(self._citlabel) + 3) + def depart_citation(self, node): + self.end_state(first='[%s] ' % self._citlabel) + + def visit_label(self, node): + raise nodes.SkipNode + + # XXX: option list could use some better styling + + def visit_option_list(self, node): + pass + def depart_option_list(self, node): + pass + + def visit_option_list_item(self, node): + self.new_state(0) + def depart_option_list_item(self, node): + self.end_state() + + def visit_option_group(self, node): + self._firstoption = True + def depart_option_group(self, node): + self.add_text(' ') + + def visit_option(self, node): + if self._firstoption: + self._firstoption = False + else: + self.add_text(', ') + def depart_option(self, node): + pass + + def visit_option_string(self, node): + pass + def depart_option_string(self, node): + pass + + def visit_option_argument(self, node): + self.add_text(node['delimiter']) + def depart_option_argument(self, node): + pass + + def visit_description(self, node): + pass + def depart_description(self, node): + pass + + def visit_tabular_col_spec(self, node): + raise nodes.SkipNode + + def visit_colspec(self, node): + self.table[0].append(node['colwidth']) + raise nodes.SkipNode + + def visit_tgroup(self, node): + pass + def depart_tgroup(self, node): + pass + + def visit_thead(self, node): + pass + def depart_thead(self, node): + pass + + def visit_tbody(self, node): + self.table.append('sep') + def depart_tbody(self, node): + pass + + def visit_row(self, node): + self.table.append([]) + def depart_row(self, node): + pass + + def visit_entry(self, node): + if node.has_key('morerows') or node.has_key('morecols'): + raise NotImplementedError('Column or row spanning cells are ' + 'not implemented.') + self.new_state(0) + def depart_entry(self, node): + text = '\n'.join('\n'.join(x[1]) for x in 
self.states.pop()) + self.stateindent.pop() + self.table[-1].append(text) + + def visit_table(self, node): + if self.table: + raise NotImplementedError('Nested tables are not supported.') + self.new_state(0) + self.table = [[]] + def depart_table(self, node): + lines = self.table[1:] + fmted_rows = [] + colwidths = self.table[0] + realwidths = colwidths[:] + separator = 0 + # don't allow paragraphs in table cells for now + for line in lines: + if line == 'sep': + separator = len(fmted_rows) + else: + cells = [] + for i, cell in enumerate(line): + par = textwrap.wrap(cell, width=colwidths[i]) + if par: + maxwidth = max(map(len, par)) + else: + maxwidth = 0 + realwidths[i] = max(realwidths[i], maxwidth) + cells.append(par) + fmted_rows.append(cells) + + def writesep(char='-'): + out = ['+'] + for width in realwidths: + out.append(char * (width+2)) + out.append('+') + self.add_text(''.join(out) + '\n') + + def writerow(row): + lines = map(None, *row) + for line in lines: + out = ['|'] + for i, cell in enumerate(line): + if cell: + out.append(' ' + cell.ljust(realwidths[i]+1)) + else: + out.append(' ' * (realwidths[i] + 2)) + out.append('|') + self.add_text(''.join(out) + '\n') + + for i, row in enumerate(fmted_rows): + if separator and i == separator: + writesep('=') + else: + writesep('-') + writerow(row) + writesep('-') + self.table = None + self.end_state(wrap=False) + + def visit_acks(self, node): + self.new_state(0) + self.add_text(', '.join(n.astext() for n in node.children[0].children) + '.') + self.end_state() + raise nodes.SkipNode + + def visit_image(self, node): + self.add_text(_('[image]')) + raise nodes.SkipNode + + def visit_transition(self, node): + indent = sum(self.stateindent) + self.new_state(0) + self.add_text('=' * (MAXWIDTH - indent)) + self.end_state() + raise nodes.SkipNode + + def visit_bullet_list(self, node): + self._list_counter = -1 + def depart_bullet_list(self, node): + pass + + def visit_enumerated_list(self, node): + self._list_counter 
= 0 + def depart_enumerated_list(self, node): + pass + + def visit_definition_list(self, node): + self._list_counter = -2 + def depart_definition_list(self, node): + pass + + def visit_list_item(self, node): + if self._list_counter == -1: + # bullet list + self.new_state(2) + elif self._list_counter == -2: + # definition list + pass + else: + # enumerated list + self._list_counter += 1 + self.new_state(len(str(self._list_counter)) + 2) + def depart_list_item(self, node): + if self._list_counter == -1: + self.end_state(first='* ', end=None) + elif self._list_counter == -2: + pass + else: + self.end_state(first='%s. ' % self._list_counter, end=None) + + def visit_definition_list_item(self, node): + self._li_has_classifier = len(node) >= 2 and \ + isinstance(node[1], nodes.classifier) + def depart_definition_list_item(self, node): + pass + + def visit_term(self, node): + self.new_state(0) + def depart_term(self, node): + if not self._li_has_classifier: + self.end_state(end=None) + + def visit_classifier(self, node): + self.add_text(' : ') + def depart_classifier(self, node): + self.end_state(end=None) + + def visit_definition(self, node): + self.new_state() + def depart_definition(self, node): + self.end_state() + + def visit_field_list(self, node): + pass + def depart_field_list(self, node): + pass + + def visit_field(self, node): + pass + def depart_field(self, node): + pass + + def visit_field_name(self, node): + self.new_state(0) + def depart_field_name(self, node): + self.add_text(':') + self.end_state(end=None) + + def visit_field_body(self, node): + self.new_state() + def depart_field_body(self, node): + self.end_state() + + def visit_centered(self, node): + pass + def depart_centered(self, node): + pass + + def visit_admonition(self, node): + self.new_state(0) + def depart_admonition(self, node): + self.end_state() + + def _visit_admonition(self, node): + self.new_state(2) + def _make_depart_admonition(name): + def depart_admonition(self, node): + 
self.end_state(first=admonitionlabels[name] + ': ') + return depart_admonition + + visit_attention = _visit_admonition + depart_attention = _make_depart_admonition('attention') + visit_caution = _visit_admonition + depart_caution = _make_depart_admonition('caution') + visit_danger = _visit_admonition + depart_danger = _make_depart_admonition('danger') + visit_error = _visit_admonition + depart_error = _make_depart_admonition('error') + visit_hint = _visit_admonition + depart_hint = _make_depart_admonition('hint') + visit_important = _visit_admonition + depart_important = _make_depart_admonition('important') + visit_note = _visit_admonition + depart_note = _make_depart_admonition('note') + visit_tip = _visit_admonition + depart_tip = _make_depart_admonition('tip') + visit_warning = _visit_admonition + depart_warning = _make_depart_admonition('warning') + + def visit_versionmodified(self, node): + self.new_state(0) + if node.children: + self.add_text(versionlabels[node['type']] % node['version'] + ': ') + else: + self.add_text(versionlabels[node['type']] % node['version'] + '.') + def depart_versionmodified(self, node): + self.end_state() + + def visit_literal_block(self, node): + self.new_state() + def depart_literal_block(self, node): + self.end_state(wrap=False) + + def visit_doctest_block(self, node): + self.new_state(0) + def depart_doctest_block(self, node): + self.end_state(wrap=False) + + def visit_line_block(self, node): + self.new_state(0) + def depart_line_block(self, node): + self.end_state(wrap=False) + + def visit_line(self, node): + pass + def depart_line(self, node): + pass + + def visit_block_quote(self, node): + self.new_state() + def depart_block_quote(self, node): + self.end_state() + + def visit_compact_paragraph(self, node): + pass + def depart_compact_paragraph(self, node): + pass + + def visit_paragraph(self, node): + if not isinstance(node.parent, nodes.Admonition) or \ + isinstance(node.parent, addnodes.seealso): + self.new_state(0) + def 
depart_paragraph(self, node): + if not isinstance(node.parent, nodes.Admonition) or \ + isinstance(node.parent, addnodes.seealso): + self.end_state() + + def visit_target(self, node): + raise nodes.SkipNode + + def visit_index(self, node): + raise nodes.SkipNode + + def visit_substitution_definition(self, node): + raise nodes.SkipNode + + def visit_pending_xref(self, node): + pass + def depart_pending_xref(self, node): + pass + + def visit_reference(self, node): + pass + def depart_reference(self, node): + pass + + def visit_emphasis(self, node): + self.add_text('*') + def depart_emphasis(self, node): + self.add_text('*') + + def visit_literal_emphasis(self, node): + self.add_text('*') + def depart_literal_emphasis(self, node): + self.add_text('*') + + def visit_strong(self, node): + self.add_text('**') + def depart_strong(self, node): + self.add_text('**') + + def visit_title_reference(self, node): + self.add_text('*') + def depart_title_reference(self, node): + self.add_text('*') + + def visit_literal(self, node): + self.add_text('``') + def depart_literal(self, node): + self.add_text('``') + + def visit_subscript(self, node): + self.add_text('_') + def depart_subscript(self, node): + pass + + def visit_superscript(self, node): + self.add_text('^') + def depart_superscript(self, node): + pass + + def visit_footnote_reference(self, node): + self.add_text('[%s]' % node.astext()) + raise nodes.SkipNode + + def visit_citation_reference(self, node): + self.add_text('[%s]' % node.astext()) + raise nodes.SkipNode + + def visit_Text(self, node): + self.add_text(node.astext()) + def depart_Text(self, node): + pass + + def visit_problematic(self, node): + self.add_text('>>') + def depart_problematic(self, node): + self.add_text('<<') + + def visit_system_message(self, node): + self.new_state(0) + self.add_text('' % node.astext()) + self.end_state() + raise nodes.SkipNode + + def visit_comment(self, node): + raise nodes.SkipNode + + def visit_meta(self, node): + # only 
valid for HTML + raise nodes.SkipNode + + def unknown_visit(self, node): + raise NotImplementedError('Unknown node: ' + node.__class__.__name__) diff --git a/tests/test_build.py b/tests/test_build.py index 8ca17938..0443aada 100644 --- a/tests/test_build.py +++ b/tests/test_build.py @@ -20,7 +20,7 @@ from util import * from etree13 import ElementTree as ET from sphinx.builder import StandaloneHTMLBuilder, LaTeXBuilder -from sphinx.latexwriter import LaTeXTranslator +from sphinx.writers.latex import LaTeXTranslator html_warnfile = StringIO() diff --git a/tests/test_markup.py b/tests/test_markup.py index f1125103..a4b7cf77 100644 --- a/tests/test_markup.py +++ b/tests/test_markup.py @@ -17,8 +17,8 @@ from docutils import frontend, utils, nodes from docutils.parsers import rst from sphinx import addnodes -from sphinx.htmlwriter import HTMLWriter, SmartyPantsHTMLTranslator -from sphinx.latexwriter import LaTeXWriter, LaTeXTranslator +from sphinx.writers.html import HTMLWriter, SmartyPantsHTMLTranslator +from sphinx.writers.latex import LaTeXWriter, LaTeXTranslator def setup_module(): global app, settings, parser -- cgit v1.2.1 From f8f41e1341f4aeb4458f74e4e4e47833210abdcb Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sat, 29 Nov 2008 20:03:56 +0100 Subject: This is version 0.6. --- sphinx/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sphinx/__init__.py b/sphinx/__init__.py index 2df41707..9a41d4a0 100644 --- a/sphinx/__init__.py +++ b/sphinx/__init__.py @@ -13,8 +13,8 @@ import sys from os import path __revision__ = '$Revision$' -__version__ = '0.5' -__released__ = '0.5' +__version__ = '0.6' +__released__ = '0.6 (hg)' package_dir = path.abspath(path.dirname(__file__)) -- cgit v1.2.1 From e9c79382e51d6aca7600addf51c53c4bb0344196 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sat, 29 Nov 2008 20:04:11 +0100 Subject: Some more fixes after the great renaming. 
--- doc/builders.rst | 8 +++++++- doc/config.rst | 6 +++--- doc/ext/appapi.rst | 2 +- doc/ext/builderapi.rst | 2 +- doc/glossary.rst | 2 +- doc/templating.rst | 4 ++-- sphinx/ext/coverage.py | 2 +- sphinx/ext/doctest.py | 2 +- 8 files changed, 17 insertions(+), 11 deletions(-) diff --git a/doc/builders.rst b/doc/builders.rst index 508ab3c5..dd07a96a 100644 --- a/doc/builders.rst +++ b/doc/builders.rst @@ -3,7 +3,7 @@ Available builders ================== -.. module:: sphinx.builder +.. module:: sphinx.builders :synopsis: Available built-in builder classes. These are the built-in Sphinx builders. More builders can be added by @@ -13,6 +13,7 @@ The builder's "name" must be given to the **-b** command-line option of :program:`sphinx-build` to select a builder. +.. module:: sphinx.builders.html .. class:: StandaloneHTMLBuilder This is the standard HTML builder. Its output is a directory with HTML @@ -30,6 +31,7 @@ The builder's "name" must be given to the **-b** command-line option of Its name is ``htmlhelp``. +.. module:: sphinx.builders.latex .. class:: LaTeXBuilder This builder produces a bunch of LaTeX files in the output directory. You @@ -50,6 +52,7 @@ The builder's "name" must be given to the **-b** command-line option of Its name is ``latex``. +.. module:: sphinx.builders.text .. class:: TextBuilder This builder produces a text file for each reST file -- this is almost the @@ -60,6 +63,7 @@ The builder's "name" must be given to the **-b** command-line option of .. versionadded:: 0.4 +.. currentmodule:: sphinx.builders.html .. class:: SerializingHTMLBuilder This builder uses a module that implements the Python serialization API @@ -135,6 +139,7 @@ The builder's "name" must be given to the **-b** command-line option of .. versionadded:: 0.5 +.. module:: sphinx.builders.changes .. 
class:: ChangesBuilder This builder produces an HTML overview of all :dir:`versionadded`, @@ -144,6 +149,7 @@ The builder's "name" must be given to the **-b** command-line option of Its name is ``changes``. +.. module:: sphinx.builders.linkcheck .. class:: CheckExternalLinksBuilder This builder scans all documents for external links, tries to open them with diff --git a/doc/config.rst b/doc/config.rst index 819bf468..78671b68 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -22,8 +22,8 @@ Important points to note: * The term "fully-qualified name" refers to a string that names an importable Python object inside a module; for example, the FQN - ``"sphinx.builder.Builder"`` means the ``Builder`` class in the - ``sphinx.builder`` module. + ``"sphinx.builders.Builder"`` means the ``Builder`` class in the + ``sphinx.builders`` module. * Remember that document names use ``/`` as the path separator and don't contain the file name extension. @@ -412,7 +412,7 @@ that use Sphinx' HTMLWriter class. .. confval:: html_translator_class A string with the fully-qualified name of a HTML Translator class, that is, a - subclass of Sphinx' :class:`~sphinx.htmlwriter.HTMLTranslator`, that is used + subclass of Sphinx' :class:`~sphinx.writers.html.HTMLTranslator`, that is used to translate document trees to HTML. Default is ``None`` (use the builtin translator). diff --git a/doc/ext/appapi.rst b/doc/ext/appapi.rst index fcc29e38..355e42bd 100644 --- a/doc/ext/appapi.rst +++ b/doc/ext/appapi.rst @@ -13,7 +13,7 @@ the following public API: .. method:: Sphinx.add_builder(builder) Register a new builder. *builder* must be a class that inherits from - :class:`~sphinx.builder.Builder`. + :class:`~sphinx.builders.Builder`. .. method:: Sphinx.add_config_value(name, default, rebuild_env) diff --git a/doc/ext/builderapi.rst b/doc/ext/builderapi.rst index adc41016..72c388fb 100644 --- a/doc/ext/builderapi.rst +++ b/doc/ext/builderapi.rst @@ -5,7 +5,7 @@ Writing new builders .. 
todo:: Expand this. -.. currentmodule:: sphinx.builder +.. currentmodule:: sphinx.builders .. class:: Builder diff --git a/doc/glossary.rst b/doc/glossary.rst index 6a80ad36..7ec787ff 100644 --- a/doc/glossary.rst +++ b/doc/glossary.rst @@ -6,7 +6,7 @@ Glossary .. glossary:: builder - A class (inheriting from :class:`~sphinx.builder.Builder`) that takes + A class (inheriting from :class:`~sphinx.builders.Builder`) that takes parsed documents and performs an action on them. Normally, builders translate the documents to an output format, but it is also possible to use the builder builders that e.g. check for broken links in the diff --git a/doc/templating.rst b/doc/templating.rst index 61a8a72b..fa6f2682 100644 --- a/doc/templating.rst +++ b/doc/templating.rst @@ -19,10 +19,10 @@ No. You have several other options: configuration value accordingly. * You can :ref:`write a custom builder ` that derives from - :class:`~sphinx.builder.StandaloneHTMLBuilder` and calls your template engine + :class:`~sphinx.builders.StandaloneHTMLBuilder` and calls your template engine of choice. -* You can use the :class:`~sphinx.builder.PickleHTMLBuilder` that produces +* You can use the :class:`~sphinx.builders.PickleHTMLBuilder` that produces pickle files with the page contents, and postprocess them using a custom tool, or use them in your Web application. 
diff --git a/sphinx/ext/coverage.py b/sphinx/ext/coverage.py index f03dbc1e..dfe01419 100644 --- a/sphinx/ext/coverage.py +++ b/sphinx/ext/coverage.py @@ -16,7 +16,7 @@ import inspect import cPickle as pickle from os import path -from sphinx.builder import Builder +from sphinx.builders import Builder # utility diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index badd50ff..aa38bc71 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -21,7 +21,7 @@ doctest = __import__('doctest') from docutils import nodes from docutils.parsers.rst import directives -from sphinx.builder import Builder +from sphinx.builders import Builder from sphinx.util.console import bold blankline_re = re.compile(r'^\s*', re.MULTILINE) -- cgit v1.2.1 From 229d3eb16b0c949ec87c21555345a83087317fe7 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 30 Nov 2008 16:33:56 +0100 Subject: Allow giving values for dict type config values in the overrides. --- CHANGES | 12 ++++++++++++ doc/intro.rst | 8 ++++++-- sphinx/config.py | 6 ++++++ tests/test_config.py | 4 +++- 4 files changed, 27 insertions(+), 3 deletions(-) diff --git a/CHANGES b/CHANGES index 63321df1..5d9bc20c 100644 --- a/CHANGES +++ b/CHANGES @@ -1,3 +1,15 @@ +Release 0.6 (in development) +============================ + +New features added +------------------ + +* Other changes: + + - Allow giving config overrides for single dict keys on the command + line. + + Release 0.5 (Nov 23, 2008) -- Birthday release! =============================================== diff --git a/doc/intro.rst b/doc/intro.rst index 47e016b3..7ce9d1fa 100644 --- a/doc/intro.rst +++ b/doc/intro.rst @@ -111,8 +111,12 @@ The :program:`sphinx-build` script has several more options: .. versionadded:: 0.5 **-D** *setting=value* - Override a configuration value set in the :file:`conf.py` file. (The value - must be a string value.) + Override a configuration value set in the :file:`conf.py` file. The value + must be a string or dictionary value. 
For the latter, supply the setting + name and key like this: ``-D latex_elements.docclass=scrartcl``. + + .. versionchanged:: 0.6 + The value can now be a dictionary value. **-A** *name=value* Make the *name* assigned to *value* in the HTML templates. diff --git a/sphinx/config.py b/sphinx/config.py index fa04ac2a..a9cae4cd 100644 --- a/sphinx/config.py +++ b/sphinx/config.py @@ -111,6 +111,12 @@ class Config(object): def init_values(self): config = self._raw_config + for valname, value in self.overrides.iteritems(): + if '.' in valname: + realvalname, key = valname.split('.', 1) + config.setdefault(realvalname, {})[key] = value + else: + config[valname] = value config.update(self.overrides) for name in config: if name in self.values: diff --git a/tests/test_config.py b/tests/test_config.py index 57d1936b..53cba59c 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -15,7 +15,8 @@ from util import * from sphinx.application import ExtensionError -@with_app(confoverrides={'master_doc': 'master', 'nonexisting_value': 'True'}) +@with_app(confoverrides={'master_doc': 'master', 'nonexisting_value': 'True', + 'latex_elements.docclass': 'scrartcl'}) def test_core_config(app): cfg = app.config @@ -26,6 +27,7 @@ def test_core_config(app): # overrides assert cfg.master_doc == 'master' + assert cfg.latex_elements['docclass'] == 'scrartcl' # simple default values assert 'exclude_dirs' not in cfg.__dict__ -- cgit v1.2.1 From 4b201975a442d45a3ae5118a38c3baa07940f9b3 Mon Sep 17 00:00:00 2001 From: mitsuhiko Date: Sun, 30 Nov 2008 19:55:59 +0100 Subject: Changed an `__import__` call in the highlighter module because that trigger a bug in the python importer. --- sphinx/highlighting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/highlighting.py b/sphinx/highlighting.py index 1637b2c3..3017133a 100644 --- a/sphinx/highlighting.py +++ b/sphinx/highlighting.py @@ -89,7 +89,7 @@ class PygmentsBridge(object): style = SphinxStyle elif '.' 
in stylename: module, stylename = stylename.rsplit('.', 1) - style = getattr(__import__(module, None, None, ['']), stylename) + style = getattr(__import__(module, None, None, ['__name__']), stylename) else: style = get_style_by_name(stylename) self.hfmter = {False: HtmlFormatter(style=style), -- cgit v1.2.1 From 0647cdecb439fa4dfd9bbdf094409e262cf33483 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 30 Nov 2008 19:58:29 +0100 Subject: Add Sphinx.add_lexer(). --- CHANGES | 4 ++++ doc/ext/appapi.rst | 7 +++++++ sphinx/application.py | 6 ++++++ sphinx/highlighting.py | 1 + tests/test_highlighting.py | 37 +++++++++++++++++++++++++++++++++++++ 5 files changed, 55 insertions(+) create mode 100644 tests/test_highlighting.py diff --git a/CHANGES b/CHANGES index 5d9bc20c..b45ce66c 100644 --- a/CHANGES +++ b/CHANGES @@ -4,6 +4,10 @@ Release 0.6 (in development) New features added ------------------ +* Extension API: + + - Add Sphinx.add_lexer() to add custom Pygments lexers. + * Other changes: - Allow giving config overrides for single dict keys on the command diff --git a/doc/ext/appapi.rst b/doc/ext/appapi.rst index 355e42bd..3dd5282b 100644 --- a/doc/ext/appapi.rst +++ b/doc/ext/appapi.rst @@ -167,6 +167,13 @@ the following public API: :confval:`the docs for the config value `. .. versionadded:: 0.5 + +.. method:: Sphinx.add_lexer(alias, lexer) + + Use *lexer*, which must be an instance of a Pygments lexer class, to + highlight code blocks with the given language *alias*. + + .. versionadded:: 0.6 .. 
method:: Sphinx.connect(event, callback) diff --git a/sphinx/application.py b/sphinx/application.py index 6c644bbe..f7c57592 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -297,6 +297,12 @@ class Sphinx(object): StandaloneHTMLBuilder.script_files.append( posixpath.join('_static', filename)) + def add_lexer(self, alias, lexer): + from sphinx.highlighting import lexers + if lexers is None: + return + lexers[alias] = lexer + class TemplateBridge(object): """ diff --git a/sphinx/highlighting.py b/sphinx/highlighting.py index 3017133a..d85aac23 100644 --- a/sphinx/highlighting.py +++ b/sphinx/highlighting.py @@ -30,6 +30,7 @@ try: from pygments.token import Generic, Comment, Number except ImportError: pygments = None + lexers = None else: class SphinxStyle(Style): """ diff --git a/tests/test_highlighting.py b/tests/test_highlighting.py new file mode 100644 index 00000000..5c353946 --- /dev/null +++ b/tests/test_highlighting.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +""" + test_highlighting + ~~~~~~~~~~~~~~~~~ + + Test the Pygments highlighting bridge. + + :copyright: 2008 by Georg Brandl. + :license: BSD. +""" + +from util import * + +from pygments.lexer import RegexLexer +from pygments.token import Text, Name + +from sphinx.highlighting import PygmentsBridge + + +class MyLexer(RegexLexer): + name = 'testlexer' + + tokens = { + 'root': [ + ('a', Name), + ('b', Text), + ], + } + + +@with_app() +def test_add_lexer(app): + app.add_lexer('test', MyLexer()) + + bridge = PygmentsBridge('html') + ret = bridge.highlight_block('ab', 'test') + assert 'ab' in ret -- cgit v1.2.1 From c001d82adcbb31d9c392ed5dd77a821f96a0c8db Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 30 Nov 2008 20:29:34 +0100 Subject: Allow using different Pygments formatters. 
--- sphinx/highlighting.py | 28 ++++++++++++++++++---------- tests/test_highlighting.py | 14 ++++++++++++++ 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/sphinx/highlighting.py b/sphinx/highlighting.py index d85aac23..927b42d9 100644 --- a/sphinx/highlighting.py +++ b/sphinx/highlighting.py @@ -82,7 +82,12 @@ if sys.version_info < (2, 5): class PygmentsBridge(object): - def __init__(self, dest='html', stylename='sphinx'): + # Set these attributes if you want to have different Pygments formatters + # than the default ones. + html_formatter = HtmlFormatter + latex_formatter = LatexFormatter + + def __init__(self, dest='html', stylename='sphinx', ): self.dest = dest if not pygments: return @@ -93,11 +98,14 @@ class PygmentsBridge(object): style = getattr(__import__(module, None, None, ['__name__']), stylename) else: style = get_style_by_name(stylename) - self.hfmter = {False: HtmlFormatter(style=style), - True: HtmlFormatter(style=style, linenos=True)} - self.lfmter = {False: LatexFormatter(style=style, commandprefix='PYG'), - True: LatexFormatter(style=style, linenos=True, - commandprefix='PYG')} + if dest == 'html': + self.fmter = {False: self.html_formatter(style=style), + True: self.html_formatter(style=style, linenos=True)} + else: + self.fmter = {False: self.latex_formatter(style=style, + commandprefix='PYG'), + True: self.latex_formatter(style=style, linenos=True, + commandprefix='PYG')} def unhighlighted(self, source): if self.dest == 'html': @@ -171,9 +179,9 @@ class PygmentsBridge(object): lexer.add_filter('raiseonerror') try: if self.dest == 'html': - return highlight(source, lexer, self.hfmter[bool(linenos)]) + return highlight(source, lexer, self.fmter[bool(linenos)]) else: - hlsource = highlight(source, lexer, self.lfmter[bool(linenos)]) + hlsource = highlight(source, lexer, self.fmter[bool(linenos)]) return hlsource.translate(tex_hl_escape_map) except ErrorToken: # this is most probably not the selected language, @@ -187,9 +195,9 @@ 
class PygmentsBridge(object): # no HTML styles needed return '' if self.dest == 'html': - return self.hfmter[0].get_style_defs() + return self.fmter[0].get_style_defs() else: - styledefs = self.lfmter[0].get_style_defs() + styledefs = self.fmter[0].get_style_defs() # workaround for Pygments < 0.12 if styledefs.startswith('\\newcommand\\at{@}'): styledefs += _LATEX_STYLES diff --git a/tests/test_highlighting.py b/tests/test_highlighting.py index 5c353946..067c37cb 100644 --- a/tests/test_highlighting.py +++ b/tests/test_highlighting.py @@ -13,6 +13,7 @@ from util import * from pygments.lexer import RegexLexer from pygments.token import Text, Name +from pygments.formatters.html import HtmlFormatter from sphinx.highlighting import PygmentsBridge @@ -27,6 +28,10 @@ class MyLexer(RegexLexer): ], } +class MyFormatter(HtmlFormatter): + def format(self, tokensource, outfile): + outfile.write('test') + @with_app() def test_add_lexer(app): @@ -35,3 +40,12 @@ def test_add_lexer(app): bridge = PygmentsBridge('html') ret = bridge.highlight_block('ab', 'test') assert 'ab' in ret + +def test_set_formatter(): + PygmentsBridge.html_formatter = MyFormatter + try: + bridge = PygmentsBridge('html') + ret = bridge.highlight_block('foo', 'python') + assert ret == 'test' + finally: + PygmentsBridge.html_formatter = HtmlFormatter -- cgit v1.2.1 From f1bd7914bd3fb656b58e7bf72d64bec06f3034d7 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 30 Nov 2008 20:38:59 +0100 Subject: Add html_add_permalinks config value. 
--- CHANGES | 13 ++++++++++--- doc/config.rst | 9 +++++++++ sphinx/builders/html.py | 3 +-- sphinx/builders/htmlhelp.py | 3 +-- sphinx/config.py | 1 + sphinx/writers/html.py | 5 +++-- 6 files changed, 25 insertions(+), 9 deletions(-) diff --git a/CHANGES b/CHANGES index b45ce66c..bf865f60 100644 --- a/CHANGES +++ b/CHANGES @@ -4,14 +4,21 @@ Release 0.6 (in development) New features added ------------------ +* Configuration: + + - The new ``html_add_permalinks`` config value can be used to + switch off the generated "paragraph sign" permalinks for each + heading and definition environment. + * Extension API: - - Add Sphinx.add_lexer() to add custom Pygments lexers. + - There is now a Sphinx.add_lexer() method to add custom Pygments + lexers. * Other changes: - - Allow giving config overrides for single dict keys on the command - line. + - Config overrides for single dict keys can now be given on the + command line. Release 0.5 (Nov 23, 2008) -- Birthday release! diff --git a/doc/config.rst b/doc/config.rst index 78671b68..b6c33460 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -332,6 +332,15 @@ that use Sphinx' HTMLWriter class. If true, *SmartyPants* will be used to convert quotes and dashes to typographically correct entities. Default: ``True``. +.. confval:: html_add_permalinks + + If true, Sphinx will add "permalinks" for each heading and description + environment as paragraph signs that become visible when the mouse hovers over + them. Default: ``True``. + + .. versionadded:: 0.6 + Previously, this was always activated. + .. 
confval:: html_sidebars Custom sidebar templates, must be a dictionary that maps document names to diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index adf58be1..7592dced 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -53,8 +53,7 @@ class StandaloneHTMLBuilder(Builder): supported_image_types = ['image/svg+xml', 'image/png', 'image/gif', 'image/jpeg'] searchindex_filename = 'searchindex.js' - add_header_links = True - add_definition_links = True + add_permalinks = True # This is a class attribute because it is mutated by Sphinx.add_javascript. script_files = ['_static/jquery.js', '_static/doctools.js'] diff --git a/sphinx/builders/htmlhelp.py b/sphinx/builders/htmlhelp.py index 23900f36..20bb0d5c 100644 --- a/sphinx/builders/htmlhelp.py +++ b/sphinx/builders/htmlhelp.py @@ -132,8 +132,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder): supported_image_types = ['image/png', 'image/gif', 'image/jpeg'] # don't add links - add_header_links = False - add_definition_links = False + add_permalinks = False def init(self): StandaloneHTMLBuilder.init(self) diff --git a/sphinx/config.py b/sphinx/config.py index a9cae4cd..1ea5f66f 100644 --- a/sphinx/config.py +++ b/sphinx/config.py @@ -65,6 +65,7 @@ class Config(object): html_sidebars = ({}, False), html_additional_pages = ({}, False), html_use_modindex = (True, False), + html_add_permalinks = (True, False), html_use_index = (True, False), html_split_index = (False, False), html_copy_source = (True, False), diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py index b82d7ccc..1b9205ce 100644 --- a/sphinx/writers/html.py +++ b/sphinx/writers/html.py @@ -58,6 +58,7 @@ class HTMLTranslator(BaseTranslator): self.highlightlang = builder.config.highlight_language self.highlightlinenothreshold = sys.maxint self.protect_literal_text = 0 + self.add_permalinks = builder.config.html_add_permalinks def visit_desc(self, node): self.body.append(self.starttag(node, 'dl', CLASS=node['desctype'])) 
@@ -73,7 +74,7 @@ class HTMLTranslator(BaseTranslator): if node.parent['desctype'] in ('class', 'exception'): self.body.append('%s ' % node.parent['desctype']) def depart_desc_signature(self, node): - if node['ids'] and self.builder.add_definition_links: + if node['ids'] and self.add_permalinks and self.builder.add_permalinks: self.body.append(u'\u00B6' % _('Permalink to this definition')) @@ -388,7 +389,7 @@ class HTMLTranslator(BaseTranslator): def depart_title(self, node): close_tag = self.context[-1] - if self.builder.add_header_links and \ + if self.add_permalinks and self.builder.add_permalinks and \ (close_tag.startswith(' Date: Fri, 5 Dec 2008 12:26:40 +0100 Subject: Fix #69: a "self" too much. --- sphinx/builders/htmlhelp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/builders/htmlhelp.py b/sphinx/builders/htmlhelp.py index 20bb0d5c..b1c7fbc5 100644 --- a/sphinx/builders/htmlhelp.py +++ b/sphinx/builders/htmlhelp.py @@ -140,7 +140,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder): self.out_suffix = '.html' def handle_finish(self): - self.build_hhx(self, self.outdir, self.config.htmlhelp_basename) + self.build_hhx(self.outdir, self.config.htmlhelp_basename) def build_hhx(self, outdir, outname): self.info('dumping stopword list...') -- cgit v1.2.1 From 41cdb9e79dd86a337e8978c80d2af0938758ac0c Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Fri, 5 Dec 2008 12:27:08 +0100 Subject: Fix #64: import error in intersphinx. 
--- sphinx/ext/intersphinx.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py index 5d9607c9..85733ac4 100644 --- a/sphinx/ext/intersphinx.py +++ b/sphinx/ext/intersphinx.py @@ -31,7 +31,7 @@ from os import path from docutils import nodes -from sphinx.builders import INVENTORY_FILENAME +from sphinx.builders.html import INVENTORY_FILENAME def fetch_inventory(app, uri, inv): -- cgit v1.2.1 From 2353c3dbea3b132c00fdfabbcc4c06d0decc35d6 Mon Sep 17 00:00:00 2001 From: mitsuhiko Date: Sun, 7 Dec 2008 23:17:29 +0100 Subject: Added better error message for missing roman.py --- sphinx/__init__.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sphinx/__init__.py b/sphinx/__init__.py index 9a41d4a0..43343ff2 100644 --- a/sphinx/__init__.py +++ b/sphinx/__init__.py @@ -31,17 +31,23 @@ def main(argv=sys.argv): errstr = str(err) if errstr.lower().startswith('no module named'): whichmod = errstr[16:] + hint = '' if whichmod.startswith('docutils'): whichmod = 'Docutils library' elif whichmod.startswith('jinja'): whichmod = 'Jinja library' elif whichmod == 'roman': whichmod = 'roman module (which is distributed with Docutils)' + hint = ('This can happen if you upgraded docutils using\n' + 'easy_install without uninstalling the old version' + 'first.') else: whichmod += ' module' print >>sys.stderr, \ 'Error: The %s cannot be found. Did you install Sphinx '\ 'and its dependencies correctly?' % whichmod + if hint: + print >> sys.stderr, hint return 1 raise return cmdline.main(argv) -- cgit v1.2.1 From 03d6c8b26c27a480f9c8fe3079fa8fa2c2e7ed23 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 8 Dec 2008 08:59:51 +0100 Subject: Move again. 
--- sphinx/builders/htmlhelp.py | 220 ++++++++ sphinx/builders/linkcheck.py | 130 +++++ sphinx/htmlhelp.py | 220 -------- sphinx/htmlwriter.py | 457 ---------------- sphinx/latexwriter.py | 1185 ------------------------------------------ sphinx/linkcheck.py | 130 ----- sphinx/textwriter.py | 679 ------------------------ sphinx/writers/html.py | 457 ++++++++++++++++ sphinx/writers/latex.py | 1185 ++++++++++++++++++++++++++++++++++++++++++ sphinx/writers/text.py | 679 ++++++++++++++++++++++++ 10 files changed, 2671 insertions(+), 2671 deletions(-) create mode 100644 sphinx/builders/htmlhelp.py create mode 100644 sphinx/builders/linkcheck.py delete mode 100644 sphinx/htmlhelp.py delete mode 100644 sphinx/htmlwriter.py delete mode 100644 sphinx/latexwriter.py delete mode 100644 sphinx/linkcheck.py delete mode 100644 sphinx/textwriter.py create mode 100644 sphinx/writers/html.py create mode 100644 sphinx/writers/latex.py create mode 100644 sphinx/writers/text.py diff --git a/sphinx/builders/htmlhelp.py b/sphinx/builders/htmlhelp.py new file mode 100644 index 00000000..4cc68bc9 --- /dev/null +++ b/sphinx/builders/htmlhelp.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- +""" + sphinx.htmlhelp + ~~~~~~~~~~~~~~~ + + Build HTML help support files. + Adapted from the original Doc/tools/prechm.py. + + :copyright: 2007-2008 by Georg Brandl. + :license: BSD. +""" + +import os +import cgi +from os import path + +from docutils import nodes + +from sphinx import addnodes + +# Project file (*.hhp) template. 'outname' is the file basename (like +# the pythlp in pythlp.hhp); 'version' is the doc version number (like +# the 2.2 in Python 2.2). +# The magical numbers in the long line under [WINDOWS] set most of the +# user-visible features (visible buttons, tabs, etc). +# About 0x10384e: This defines the buttons in the help viewer. The +# following defns are taken from htmlhelp.h. 
Not all possibilities +# actually work, and not all those that work are available from the Help +# Workshop GUI. In particular, the Zoom/Font button works and is not +# available from the GUI. The ones we're using are marked with 'x': +# +# 0x000002 Hide/Show x +# 0x000004 Back x +# 0x000008 Forward x +# 0x000010 Stop +# 0x000020 Refresh +# 0x000040 Home x +# 0x000080 Forward +# 0x000100 Back +# 0x000200 Notes +# 0x000400 Contents +# 0x000800 Locate x +# 0x001000 Options x +# 0x002000 Print x +# 0x004000 Index +# 0x008000 Search +# 0x010000 History +# 0x020000 Favorites +# 0x040000 Jump 1 +# 0x080000 Jump 2 +# 0x100000 Zoom/Font x +# 0x200000 TOC Next +# 0x400000 TOC Prev + +project_template = '''\ +[OPTIONS] +Binary TOC=Yes +Binary Index=No +Compiled file=%(outname)s.chm +Contents file=%(outname)s.hhc +Default Window=%(outname)s +Default topic=index.html +Display compile progress=No +Full text search stop list file=%(outname)s.stp +Full-text search=Yes +Index file=%(outname)s.hhk +Language=0x409 +Title=%(title)s + +[WINDOWS] +%(outname)s="%(title)s","%(outname)s.hhc","%(outname)s.hhk",\ +"index.html","index.html",,,,,0x63520,220,0x10384e,[0,0,1024,768],,,,,,,0 + +[FILES] +''' + +contents_header = '''\ + + + + + + + + + + +
      +''' + +contents_footer = '''\ +
    +''' + +object_sitemap = '''\ + + + + +''' + +# List of words the full text search facility shouldn't index. This +# becomes file outname.stp. Note that this list must be pretty small! +# Different versions of the MS docs claim the file has a maximum size of +# 256 or 512 bytes (including \r\n at the end of each line). +# Note that "and", "or", "not" and "near" are operators in the search +# language, so no point indexing them even if we wanted to. +stopwords = """ +a and are as at +be but by +for +if in into is it +near no not +of on or +such +that the their then there these they this to +was will with +""".split() + + +def build_hhx(builder, outdir, outname): + builder.info('dumping stopword list...') + f = open(path.join(outdir, outname+'.stp'), 'w') + try: + for word in sorted(stopwords): + print >>f, word + finally: + f.close() + + builder.info('writing project file...') + f = open(path.join(outdir, outname+'.hhp'), 'w') + try: + f.write(project_template % {'outname': outname, + 'title': builder.config.html_title, + 'version': builder.config.version, + 'project': builder.config.project}) + if not outdir.endswith(os.sep): + outdir += os.sep + olen = len(outdir) + for root, dirs, files in os.walk(outdir): + staticdir = (root == path.join(outdir, '_static')) + for fn in files: + if (staticdir and not fn.endswith('.js')) or fn.endswith('.html'): + print >>f, path.join(root, fn)[olen:].replace(os.sep, '\\') + finally: + f.close() + + builder.info('writing TOC file...') + f = open(path.join(outdir, outname+'.hhc'), 'w') + try: + f.write(contents_header) + # special books + f.write('
  • ' + object_sitemap % (builder.config.html_short_title, + 'index.html')) + if builder.config.html_use_modindex: + f.write('
  • ' + object_sitemap % (_('Global Module Index'), + 'modindex.html')) + # the TOC + tocdoc = builder.env.get_and_resolve_doctree(builder.config.master_doc, builder, + prune_toctrees=False) + def write_toc(node, ullevel=0): + if isinstance(node, nodes.list_item): + f.write('
  • ') + for subnode in node: + write_toc(subnode, ullevel) + elif isinstance(node, nodes.reference): + link = node['refuri'] + title = cgi.escape(node.astext()).replace('"','"') + item = object_sitemap % (title, link) + f.write(item.encode('ascii', 'xmlcharrefreplace')) + elif isinstance(node, nodes.bullet_list): + if ullevel != 0: + f.write('
      \n') + for subnode in node: + write_toc(subnode, ullevel+1) + if ullevel != 0: + f.write('
    \n') + elif isinstance(node, addnodes.compact_paragraph): + for subnode in node: + write_toc(subnode, ullevel) + istoctree = lambda node: isinstance(node, addnodes.compact_paragraph) and \ + node.has_key('toctree') + for node in tocdoc.traverse(istoctree): + write_toc(node) + f.write(contents_footer) + finally: + f.close() + + builder.info('writing index file...') + index = builder.env.create_index(builder) + f = open(path.join(outdir, outname+'.hhk'), 'w') + try: + f.write('
      \n') + def write_index(title, refs, subitems): + def write_param(name, value): + item = ' \n' % (name, value) + f.write(item.encode('ascii', 'xmlcharrefreplace')) + title = cgi.escape(title) + f.write('
    • \n') + write_param('Keyword', title) + if len(refs) == 0: + write_param('See Also', title) + elif len(refs) == 1: + write_param('Local', refs[0]) + else: + for i, ref in enumerate(refs): + write_param('Name', '[%d] %s' % (i, ref)) # XXX: better title? + write_param('Local', ref) + f.write('\n') + if subitems: + f.write('
        ') + for subitem in subitems: + write_index(subitem[0], subitem[1], []) + f.write('
      ') + for (key, group) in index: + for title, (refs, subitems) in group: + write_index(title, refs, subitems) + f.write('
    \n') + finally: + f.close() diff --git a/sphinx/builders/linkcheck.py b/sphinx/builders/linkcheck.py new file mode 100644 index 00000000..37aeb7a7 --- /dev/null +++ b/sphinx/builders/linkcheck.py @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- +""" + sphinx.linkcheck + ~~~~~~~~~~~~~~~~ + + The CheckExternalLinksBuilder class. + + :copyright: 2008 by Georg Brandl, Thomas Lamb. + :license: BSD. +""" + +import socket +from os import path +from urllib2 import build_opener, HTTPError + +from docutils import nodes + +from sphinx.builder import Builder +from sphinx.util.console import purple, red, darkgreen + +# create an opener that will simulate a browser user-agent +opener = build_opener() +opener.addheaders = [('User-agent', 'Mozilla/5.0')] + + +class CheckExternalLinksBuilder(Builder): + """ + Checks for broken external links. + """ + name = 'linkcheck' + + def init(self): + self.good = set() + self.broken = {} + self.redirected = {} + # set a timeout for non-responding servers + socket.setdefaulttimeout(5.0) + # create output file + open(path.join(self.outdir, 'output.txt'), 'w').close() + + def get_target_uri(self, docname, typ=None): + return '' + + def get_outdated_docs(self): + return self.env.found_docs + + def prepare_writing(self, docnames): + return + + def write_doc(self, docname, doctree): + self.info() + for node in doctree.traverse(nodes.reference): + try: + self.check(node, docname) + except KeyError: + continue + + def check(self, node, docname): + uri = node['refuri'] + + if '#' in uri: + uri = uri.split('#')[0] + + if uri in self.good: + return + + lineno = None + while lineno is None and node: + node = node.parent + lineno = node.line + + if uri[0:5] == 'http:' or uri[0:6] == 'https:': + self.info(uri, nonl=1) + + if uri in self.broken: + (r, s) = self.broken[uri] + elif uri in self.redirected: + (r, s) = self.redirected[uri] + else: + (r, s) = self.resolve(uri) + + if r == 0: + self.info(' - ' + darkgreen('working')) + self.good.add(uri) + elif r == 
2: + self.info(' - ' + red('broken: ') + s) + self.write_entry('broken', docname, lineno, uri + ': ' + s) + self.broken[uri] = (r, s) + if self.app.quiet: + self.warn('%s:%s: broken link: %s' % (docname, lineno, uri)) + else: + self.info(' - ' + purple('redirected') + ' to ' + s) + self.write_entry('redirected', docname, lineno, uri + ' to ' + s) + self.redirected[uri] = (r, s) + elif len(uri) == 0 or uri[0:7] == 'mailto:' or uri[0:4] == 'ftp:': + return + else: + self.warn(uri + ' - ' + red('malformed!')) + self.write_entry('malformed', docname, lineno, uri) + if self.app.quiet: + self.warn('%s:%s: malformed link: %s' % (docname, lineno, uri)) + self.app.statuscode = 1 + + if self.broken: + self.app.statuscode = 1 + + def write_entry(self, what, docname, line, uri): + output = open(path.join(self.outdir, 'output.txt'), 'a') + output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None), + line, what, uri)) + output.close() + + def resolve(self, uri): + try: + f = opener.open(uri) + f.close() + except HTTPError, err: + #if err.code == 403 and uri.startswith('http://en.wikipedia.org/'): + # # Wikipedia blocks requests from urllib User-Agent + # return (0, 0) + return (2, str(err)) + except Exception, err: + return (2, str(err)) + if f.url.rstrip('/') == uri.rstrip('/'): + return (0, 0) + else: + return (1, f.url) + + def finish(self): + return diff --git a/sphinx/htmlhelp.py b/sphinx/htmlhelp.py deleted file mode 100644 index 4cc68bc9..00000000 --- a/sphinx/htmlhelp.py +++ /dev/null @@ -1,220 +0,0 @@ -# -*- coding: utf-8 -*- -""" - sphinx.htmlhelp - ~~~~~~~~~~~~~~~ - - Build HTML help support files. - Adapted from the original Doc/tools/prechm.py. - - :copyright: 2007-2008 by Georg Brandl. - :license: BSD. -""" - -import os -import cgi -from os import path - -from docutils import nodes - -from sphinx import addnodes - -# Project file (*.hhp) template. 
'outname' is the file basename (like -# the pythlp in pythlp.hhp); 'version' is the doc version number (like -# the 2.2 in Python 2.2). -# The magical numbers in the long line under [WINDOWS] set most of the -# user-visible features (visible buttons, tabs, etc). -# About 0x10384e: This defines the buttons in the help viewer. The -# following defns are taken from htmlhelp.h. Not all possibilities -# actually work, and not all those that work are available from the Help -# Workshop GUI. In particular, the Zoom/Font button works and is not -# available from the GUI. The ones we're using are marked with 'x': -# -# 0x000002 Hide/Show x -# 0x000004 Back x -# 0x000008 Forward x -# 0x000010 Stop -# 0x000020 Refresh -# 0x000040 Home x -# 0x000080 Forward -# 0x000100 Back -# 0x000200 Notes -# 0x000400 Contents -# 0x000800 Locate x -# 0x001000 Options x -# 0x002000 Print x -# 0x004000 Index -# 0x008000 Search -# 0x010000 History -# 0x020000 Favorites -# 0x040000 Jump 1 -# 0x080000 Jump 2 -# 0x100000 Zoom/Font x -# 0x200000 TOC Next -# 0x400000 TOC Prev - -project_template = '''\ -[OPTIONS] -Binary TOC=Yes -Binary Index=No -Compiled file=%(outname)s.chm -Contents file=%(outname)s.hhc -Default Window=%(outname)s -Default topic=index.html -Display compile progress=No -Full text search stop list file=%(outname)s.stp -Full-text search=Yes -Index file=%(outname)s.hhk -Language=0x409 -Title=%(title)s - -[WINDOWS] -%(outname)s="%(title)s","%(outname)s.hhc","%(outname)s.hhk",\ -"index.html","index.html",,,,,0x63520,220,0x10384e,[0,0,1024,768],,,,,,,0 - -[FILES] -''' - -contents_header = '''\ - - - - - - - - - - -
      -''' - -contents_footer = '''\ -
    -''' - -object_sitemap = '''\ - - - - -''' - -# List of words the full text search facility shouldn't index. This -# becomes file outname.stp. Note that this list must be pretty small! -# Different versions of the MS docs claim the file has a maximum size of -# 256 or 512 bytes (including \r\n at the end of each line). -# Note that "and", "or", "not" and "near" are operators in the search -# language, so no point indexing them even if we wanted to. -stopwords = """ -a and are as at -be but by -for -if in into is it -near no not -of on or -such -that the their then there these they this to -was will with -""".split() - - -def build_hhx(builder, outdir, outname): - builder.info('dumping stopword list...') - f = open(path.join(outdir, outname+'.stp'), 'w') - try: - for word in sorted(stopwords): - print >>f, word - finally: - f.close() - - builder.info('writing project file...') - f = open(path.join(outdir, outname+'.hhp'), 'w') - try: - f.write(project_template % {'outname': outname, - 'title': builder.config.html_title, - 'version': builder.config.version, - 'project': builder.config.project}) - if not outdir.endswith(os.sep): - outdir += os.sep - olen = len(outdir) - for root, dirs, files in os.walk(outdir): - staticdir = (root == path.join(outdir, '_static')) - for fn in files: - if (staticdir and not fn.endswith('.js')) or fn.endswith('.html'): - print >>f, path.join(root, fn)[olen:].replace(os.sep, '\\') - finally: - f.close() - - builder.info('writing TOC file...') - f = open(path.join(outdir, outname+'.hhc'), 'w') - try: - f.write(contents_header) - # special books - f.write('
  • ' + object_sitemap % (builder.config.html_short_title, - 'index.html')) - if builder.config.html_use_modindex: - f.write('
  • ' + object_sitemap % (_('Global Module Index'), - 'modindex.html')) - # the TOC - tocdoc = builder.env.get_and_resolve_doctree(builder.config.master_doc, builder, - prune_toctrees=False) - def write_toc(node, ullevel=0): - if isinstance(node, nodes.list_item): - f.write('
  • ') - for subnode in node: - write_toc(subnode, ullevel) - elif isinstance(node, nodes.reference): - link = node['refuri'] - title = cgi.escape(node.astext()).replace('"','"') - item = object_sitemap % (title, link) - f.write(item.encode('ascii', 'xmlcharrefreplace')) - elif isinstance(node, nodes.bullet_list): - if ullevel != 0: - f.write('
      \n') - for subnode in node: - write_toc(subnode, ullevel+1) - if ullevel != 0: - f.write('
    \n') - elif isinstance(node, addnodes.compact_paragraph): - for subnode in node: - write_toc(subnode, ullevel) - istoctree = lambda node: isinstance(node, addnodes.compact_paragraph) and \ - node.has_key('toctree') - for node in tocdoc.traverse(istoctree): - write_toc(node) - f.write(contents_footer) - finally: - f.close() - - builder.info('writing index file...') - index = builder.env.create_index(builder) - f = open(path.join(outdir, outname+'.hhk'), 'w') - try: - f.write('
      \n') - def write_index(title, refs, subitems): - def write_param(name, value): - item = ' \n' % (name, value) - f.write(item.encode('ascii', 'xmlcharrefreplace')) - title = cgi.escape(title) - f.write('
    • \n') - write_param('Keyword', title) - if len(refs) == 0: - write_param('See Also', title) - elif len(refs) == 1: - write_param('Local', refs[0]) - else: - for i, ref in enumerate(refs): - write_param('Name', '[%d] %s' % (i, ref)) # XXX: better title? - write_param('Local', ref) - f.write('\n') - if subitems: - f.write('
        ') - for subitem in subitems: - write_index(subitem[0], subitem[1], []) - f.write('
      ') - for (key, group) in index: - for title, (refs, subitems) in group: - write_index(title, refs, subitems) - f.write('
    \n') - finally: - f.close() diff --git a/sphinx/htmlwriter.py b/sphinx/htmlwriter.py deleted file mode 100644 index 0505fd08..00000000 --- a/sphinx/htmlwriter.py +++ /dev/null @@ -1,457 +0,0 @@ -# -*- coding: utf-8 -*- -""" - sphinx.htmlwriter - ~~~~~~~~~~~~~~~~~ - - docutils writers handling Sphinx' custom nodes. - - :copyright: 2007-2008 by Georg Brandl. - :license: BSD. -""" - -import sys -import posixpath -import os - -from docutils import nodes -from docutils.writers.html4css1 import Writer, HTMLTranslator as BaseTranslator - -from sphinx.locale import admonitionlabels, versionlabels -from sphinx.highlighting import PygmentsBridge -from sphinx.util.smartypants import sphinx_smarty_pants - -try: - import Image # check for the Python Imaging Library -except ImportError: - Image = None - -class HTMLWriter(Writer): - def __init__(self, builder): - Writer.__init__(self) - self.builder = builder - - def translate(self): - # sadly, this is mostly copied from parent class - self.visitor = visitor = self.builder.translator_class(self.builder, - self.document) - self.document.walkabout(visitor) - self.output = visitor.astext() - for attr in ('head_prefix', 'stylesheet', 'head', 'body_prefix', - 'body_pre_docinfo', 'docinfo', 'body', 'fragment', - 'body_suffix', 'meta', 'title', 'subtitle', 'header', - 'footer', 'html_prolog', 'html_head', 'html_title', - 'html_subtitle', 'html_body', ): - setattr(self, attr, getattr(visitor, attr, None)) - self.clean_meta = ''.join(visitor.meta[2:]) - - -class HTMLTranslator(BaseTranslator): - """ - Our custom HTML translator. 
- """ - - def __init__(self, builder, *args, **kwds): - BaseTranslator.__init__(self, *args, **kwds) - self.highlighter = PygmentsBridge('html', builder.config.pygments_style) - self.no_smarty = 0 - self.builder = builder - self.highlightlang = builder.config.highlight_language - self.highlightlinenothreshold = sys.maxint - self.protect_literal_text = 0 - - def visit_desc(self, node): - self.body.append(self.starttag(node, 'dl', CLASS=node['desctype'])) - def depart_desc(self, node): - self.body.append('\n\n') - - def visit_desc_signature(self, node): - # the id is set automatically - self.body.append(self.starttag(node, 'dt')) - # anchor for per-desc interactive data - if node.parent['desctype'] != 'describe' and node['ids'] and node['first']: - self.body.append('' % node['ids'][0]) - if node.parent['desctype'] in ('class', 'exception'): - self.body.append('%s ' % node.parent['desctype']) - def depart_desc_signature(self, node): - if node['ids'] and self.builder.add_definition_links: - self.body.append(u'\u00B6' % - _('Permalink to this definition')) - self.body.append('\n') - - def visit_desc_addname(self, node): - self.body.append(self.starttag(node, 'tt', '', CLASS='descclassname')) - def depart_desc_addname(self, node): - self.body.append('') - - def visit_desc_type(self, node): - pass - def depart_desc_type(self, node): - pass - - def visit_desc_name(self, node): - self.body.append(self.starttag(node, 'tt', '', CLASS='descname')) - def depart_desc_name(self, node): - self.body.append('') - - def visit_desc_parameterlist(self, node): - self.body.append('(') - self.first_param = 1 - def depart_desc_parameterlist(self, node): - self.body.append(')') - - def visit_desc_parameter(self, node): - if not self.first_param: - self.body.append(', ') - else: - self.first_param = 0 - if not node.hasattr('noemph'): - self.body.append('') - def depart_desc_parameter(self, node): - if not node.hasattr('noemph'): - self.body.append('') - - def visit_desc_optional(self, node): 
- self.body.append('[') - def depart_desc_optional(self, node): - self.body.append(']') - - def visit_desc_annotation(self, node): - self.body.append(self.starttag(node, 'em', CLASS='property')) - def depart_desc_annotation(self, node): - self.body.append('') - - def visit_desc_content(self, node): - self.body.append(self.starttag(node, 'dd', '')) - def depart_desc_content(self, node): - self.body.append('') - - def visit_refcount(self, node): - self.body.append(self.starttag(node, 'em', '', CLASS='refcount')) - def depart_refcount(self, node): - self.body.append('') - - def visit_versionmodified(self, node): - self.body.append(self.starttag(node, 'p')) - text = versionlabels[node['type']] % node['version'] - if len(node): - text += ': ' - else: - text += '.' - self.body.append('%s' % text) - def depart_versionmodified(self, node): - self.body.append('

    \n') - - # overwritten - def visit_reference(self, node): - BaseTranslator.visit_reference(self, node) - if node.hasattr('reftitle'): - # ugly hack to add a title attribute - starttag = self.body[-1] - if not starttag.startswith(' tag - self.section_level += 1 - self.body.append(self.starttag(node, 'div', CLASS='section')) - - def visit_title(self, node): - # don't move the id attribute inside the tag - BaseTranslator.visit_title(self, node, move_ids=0) - - # overwritten - def visit_literal_block(self, node): - if node.rawsource != node.astext(): - # most probably a parsed-literal block -- don't highlight - return BaseTranslator.visit_literal_block(self, node) - lang = self.highlightlang - linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1 - if node.has_key('language'): - # code-block directives - lang = node['language'] - if node.has_key('linenos'): - linenos = node['linenos'] - highlighted = self.highlighter.highlight_block(node.rawsource, lang, linenos) - starttag = self.starttag(node, 'div', suffix='', CLASS='highlight-%s' % lang) - self.body.append(starttag + highlighted + '\n') - raise nodes.SkipNode - - def visit_doctest_block(self, node): - self.visit_literal_block(node) - - # overwritten - def visit_literal(self, node): - if len(node.children) == 1 and \ - node.children[0] in ('None', 'True', 'False'): - node['classes'].append('xref') - self.body.append(self.starttag(node, 'tt', '', CLASS='docutils literal')) - self.protect_literal_text += 1 - def depart_literal(self, node): - self.protect_literal_text -= 1 - self.body.append('') - - def visit_productionlist(self, node): - self.body.append(self.starttag(node, 'pre')) - names = [] - for production in node: - names.append(production['tokenname']) - maxlen = max(len(name) for name in names) - for production in node: - if production['tokenname']: - lastname = production['tokenname'].ljust(maxlen) - self.body.append(self.starttag(production, 'strong', '')) - self.body.append(lastname 
+ ' ::= ') - else: - self.body.append('%s ' % (' '*len(lastname))) - production.walkabout(self) - self.body.append('\n') - self.body.append('\n') - raise nodes.SkipNode - def depart_productionlist(self, node): - pass - - def visit_production(self, node): - pass - def depart_production(self, node): - pass - - def visit_centered(self, node): - self.body.append(self.starttag(node, 'p', CLASS="centered") + '') - def depart_centered(self, node): - self.body.append('

    ') - - def visit_compact_paragraph(self, node): - pass - def depart_compact_paragraph(self, node): - pass - - def visit_highlightlang(self, node): - self.highlightlang = node['lang'] - self.highlightlinenothreshold = node['linenothreshold'] - def depart_highlightlang(self, node): - pass - - # overwritten - def visit_image(self, node): - olduri = node['uri'] - # rewrite the URI if the environment knows about it - if olduri in self.builder.images: - node['uri'] = posixpath.join(self.builder.imgpath, - self.builder.images[olduri]) - - if node.has_key('scale'): - if Image and not (node.has_key('width') - and node.has_key('height')): - try: - im = Image.open(os.path.join(self.builder.srcdir, - olduri)) - except (IOError, # Source image can't be found or opened - UnicodeError): # PIL doesn't like Unicode paths. - print olduri - pass - else: - if not node.has_key('width'): - node['width'] = str(im.size[0]) - if not node.has_key('height'): - node['height'] = str(im.size[1]) - del im - BaseTranslator.visit_image(self, node) - - def visit_toctree(self, node): - # this only happens when formatting a toc from env.tocs -- in this - # case we don't want to include the subtree - raise nodes.SkipNode - - def visit_index(self, node): - raise nodes.SkipNode - - def visit_tabular_col_spec(self, node): - raise nodes.SkipNode - - def visit_glossary(self, node): - pass - def depart_glossary(self, node): - pass - - def visit_acks(self, node): - pass - def depart_acks(self, node): - pass - - def visit_module(self, node): - pass - def depart_module(self, node): - pass - - def bulk_text_processor(self, text): - return text - - # overwritten - def visit_Text(self, node): - text = node.astext() - encoded = self.encode(text) - if self.protect_literal_text: - # moved here from base class's visit_literal to support - # more formatting in literal nodes - for token in self.words_and_spaces.findall(encoded): - if token.strip(): - # protect literal text from line wrapping - self.body.append('%s' 
% token) - elif token in ' \n': - # allow breaks at whitespace - self.body.append(token) - else: - # protect runs of multiple spaces; the last one can wrap - self.body.append(' ' * (len(token)-1) + ' ') - else: - if self.in_mailto and self.settings.cloak_email_addresses: - encoded = self.cloak_email(encoded) - else: - encoded = self.bulk_text_processor(encoded) - self.body.append(encoded) - - # these are all for docutils 0.5 compatibility - - def visit_note(self, node): - self.visit_admonition(node, 'note') - def depart_note(self, node): - self.depart_admonition(node) - - def visit_warning(self, node): - self.visit_admonition(node, 'warning') - def depart_warning(self, node): - self.depart_admonition(node) - - def visit_attention(self, node): - self.visit_admonition(node, 'attention') - - def depart_attention(self, node): - self.depart_admonition() - - def visit_caution(self, node): - self.visit_admonition(node, 'caution') - def depart_caution(self, node): - self.depart_admonition() - - def visit_danger(self, node): - self.visit_admonition(node, 'danger') - def depart_danger(self, node): - self.depart_admonition() - - def visit_error(self, node): - self.visit_admonition(node, 'error') - def depart_error(self, node): - self.depart_admonition() - - def visit_hint(self, node): - self.visit_admonition(node, 'hint') - def depart_hint(self, node): - self.depart_admonition() - - def visit_important(self, node): - self.visit_admonition(node, 'important') - def depart_important(self, node): - self.depart_admonition() - - def visit_tip(self, node): - self.visit_admonition(node, 'tip') - def depart_tip(self, node): - self.depart_admonition() - - # these are only handled specially in the SmartyPantsHTMLTranslator - def visit_literal_emphasis(self, node): - return self.visit_emphasis(node) - def depart_literal_emphasis(self, node): - return self.depart_emphasis(node) - - def depart_title(self, node): - close_tag = self.context[-1] - if self.builder.add_header_links and \ - 
(close_tag.startswith('\u00B6
    ' % - _('Permalink to this headline')) - BaseTranslator.depart_title(self, node) - - def unknown_visit(self, node): - raise NotImplementedError('Unknown node: ' + node.__class__.__name__) - - -class SmartyPantsHTMLTranslator(HTMLTranslator): - """ - Handle ordinary text via smartypants, converting quotes and dashes - to the correct entities. - """ - - def __init__(self, *args, **kwds): - self.no_smarty = 0 - HTMLTranslator.__init__(self, *args, **kwds) - - def visit_literal(self, node): - self.no_smarty += 1 - try: - # this raises SkipNode - HTMLTranslator.visit_literal(self, node) - finally: - self.no_smarty -= 1 - - def visit_literal_emphasis(self, node): - self.no_smarty += 1 - self.visit_emphasis(node) - - def depart_literal_emphasis(self, node): - self.depart_emphasis(node) - self.no_smarty -= 1 - - def visit_desc_signature(self, node): - self.no_smarty += 1 - HTMLTranslator.visit_desc_signature(self, node) - - def depart_desc_signature(self, node): - self.no_smarty -= 1 - HTMLTranslator.depart_desc_signature(self, node) - - def visit_productionlist(self, node): - self.no_smarty += 1 - try: - HTMLTranslator.visit_productionlist(self, node) - finally: - self.no_smarty -= 1 - - def visit_option(self, node): - self.no_smarty += 1 - HTMLTranslator.visit_option(self, node) - def depart_option(self, node): - self.no_smarty -= 1 - HTMLTranslator.depart_option(self, node) - - def bulk_text_processor(self, text): - if self.no_smarty <= 0: - return sphinx_smarty_pants(text) - return text diff --git a/sphinx/latexwriter.py b/sphinx/latexwriter.py deleted file mode 100644 index 94cb23db..00000000 --- a/sphinx/latexwriter.py +++ /dev/null @@ -1,1185 +0,0 @@ -# -*- coding: utf-8 -*- -""" - sphinx.latexwriter - ~~~~~~~~~~~~~~~~~~ - - Custom docutils writer for LaTeX. - - Much of this code is adapted from Dave Kuhlman's "docpy" writer from his - docutils sandbox. - - :copyright: 2007-2008 by Georg Brandl, Dave Kuhlman. - :license: BSD. 
-""" - -import re -import sys -from os import path - -from docutils import nodes, writers -from docutils.writers.latex2e import Babel - -from sphinx import addnodes -from sphinx import highlighting -from sphinx.locale import admonitionlabels, versionlabels -from sphinx.util import ustrftime -from sphinx.util.texescape import tex_escape_map -from sphinx.util.smartypants import educateQuotesLatex - -HEADER = r'''%% Generated by Sphinx. -\documentclass[%(papersize)s,%(pointsize)s%(classoptions)s]{%(docclass)s} -%(inputenc)s -%(fontenc)s -%(babel)s -%(fontpkg)s -%(fncychap)s -\usepackage{sphinx} -%(preamble)s - -\title{%(title)s} -\date{%(date)s} -\release{%(release)s} -\author{%(author)s} -\newcommand{\sphinxlogo}{%(logo)s} -\renewcommand{\releasename}{%(releasename)s} -%(makeindex)s -%(makemodindex)s -''' - -BEGIN_DOC = r''' -\begin{document} -%(shorthandoff)s -%(maketitle)s -%(tableofcontents)s -''' - -FOOTER = r''' -%(footer)s -\renewcommand{\indexname}{%(modindexname)s} -%(printmodindex)s -\renewcommand{\indexname}{%(indexname)s} -%(printindex)s -\end{document} -''' - - -class LaTeXWriter(writers.Writer): - - supported = ('sphinxlatex',) - - settings_spec = ('LaTeX writer options', '', ( - ('Document name', ['--docname'], {'default': ''}), - ('Document class', ['--docclass'], {'default': 'manual'}), - ('Author', ['--author'], {'default': ''}), - )) - settings_defaults = {} - - output = None - - def __init__(self, builder): - writers.Writer.__init__(self) - self.builder = builder - - def translate(self): - visitor = LaTeXTranslator(self.document, self.builder) - self.document.walkabout(visitor) - self.output = visitor.astext() - - -# Helper classes - -class ExtBabel(Babel): - def get_shorthandoff(self): - shortlang = self.language.split('_')[0] - if shortlang in ('de', 'sl', 'pt', 'es', 'nl', 'pl'): - return '\\shorthandoff{"}' - return '' - - _ISO639_TO_BABEL = Babel._ISO639_TO_BABEL.copy() - _ISO639_TO_BABEL['sl'] = 'slovene' - - -class Table(object): - def 
__init__(self): - self.col = 0 - self.colcount = 0 - self.colspec = None - self.had_head = False - self.has_verbatim = False - self.caption = None - - -class Desc(object): - def __init__(self, node): - self.env = LaTeXTranslator.desc_map.get(node['desctype'], 'describe') - self.type = self.cls = self.name = self.params = self.annotation = '' - self.count = 0 - - -class LaTeXTranslator(nodes.NodeVisitor): - sectionnames = ["part", "chapter", "section", "subsection", - "subsubsection", "paragraph", "subparagraph"] - - ignore_missing_images = False - - default_elements = { - 'docclass': 'manual', - 'papersize': 'letterpaper', - 'pointsize': '10pt', - 'classoptions': '', - 'inputenc': '\\usepackage[utf8]{inputenc}', - 'fontenc': '\\usepackage[T1]{fontenc}', - 'babel': '\\usepackage{babel}', - 'fontpkg': '\\usepackage{times}', - 'fncychap': '\\usepackage[Bjarne]{fncychap}', - 'preamble': '', - 'title': '', - 'date': '', - 'release': '', - 'author': '', - 'logo': '', - 'releasename': 'Release', - 'makeindex': '\\makeindex', - 'makemodindex': '\\makemodindex', - 'shorthandoff': '', - 'maketitle': '\\maketitle', - 'tableofcontents': '\\tableofcontents', - 'footer': '', - 'printmodindex': '\\printmodindex', - 'printindex': '\\printindex', - } - - def __init__(self, document, builder): - nodes.NodeVisitor.__init__(self, document) - self.builder = builder - self.body = [] - - # sort out some elements - papersize = builder.config.latex_paper_size + 'paper' - if papersize == 'paper': # e.g. 
command line "-D latex_paper_size=" - papersize = 'letterpaper' - - self.elements = self.default_elements.copy() - self.elements.update({ - 'docclass': document.settings.docclass, - 'papersize': papersize, - 'pointsize': builder.config.latex_font_size, - # if empty, the title is set to the first section title - 'title': document.settings.title, - 'date': ustrftime(builder.config.today_fmt or _('%B %d, %Y')), - 'release': builder.config.release, - 'author': document.settings.author, - 'releasename': _('Release'), - 'preamble': builder.config.latex_preamble, - 'modindexname': _('Module Index'), - 'indexname': _('Index'), - }) - if builder.config.latex_logo: - self.elements['logo'] = '\\includegraphics{%s}\\par' % \ - path.basename(builder.config.latex_logo) - if builder.config.language: - babel = ExtBabel(builder.config.language) - lang = babel.get_language() - if lang: - self.elements['classoptions'] += ',' + babel.get_language() - else: - self.builder.warn('no Babel option known for language %r' % - builder.config.language) - self.elements['shorthandoff'] = babel.get_shorthandoff() - self.elements['fncychap'] = '\\usepackage[Sonny]{fncychap}' - else: - self.elements['classoptions'] += ',english' - if not builder.config.latex_use_modindex: - self.elements['makemodindex'] = '' - self.elements['printmodindex'] = '' - # allow the user to override them all - self.elements.update(builder.config.latex_elements) - - self.highlighter = highlighting.PygmentsBridge( - 'latex', builder.config.pygments_style) - self.context = [] - self.descstack = [] - self.bibitems = [] - self.table = None - self.next_table_colspec = None - self.highlightlang = builder.config.highlight_language - self.highlightlinenothreshold = sys.maxint - self.written_ids = set() - self.footnotestack = [] - if self.elements['docclass'] == 'manual': - if builder.config.latex_use_parts: - self.top_sectionlevel = 0 - else: - self.top_sectionlevel = 1 - else: - self.top_sectionlevel = 2 - 
self.next_section_target = None - # flags - self.verbatim = None - self.in_title = 0 - self.in_production_list = 0 - self.first_document = 1 - self.this_is_the_title = 1 - self.literal_whitespace = 0 - self.no_contractions = 0 - - def astext(self): - return (HEADER % self.elements + self.highlighter.get_stylesheet() + - u''.join(self.body) + FOOTER % self.elements) - - def visit_document(self, node): - self.footnotestack.append(self.collect_footnotes(node)) - if self.first_document == 1: - # the first document is all the regular content ... - self.body.append(BEGIN_DOC % self.elements) - self.first_document = 0 - elif self.first_document == 0: - # ... and all others are the appendices - self.body.append('\n\\appendix\n') - self.first_document = -1 - # "- 1" because the level is increased before the title is visited - self.sectionlevel = self.top_sectionlevel - 1 - def depart_document(self, node): - if self.bibitems: - widest_label = "" - for bi in self.bibitems: - if len(widest_label) < len(bi[0]): - widest_label = bi[0] - self.body.append('\n\\begin{thebibliography}{%s}\n' % widest_label) - for bi in self.bibitems: - # cite_key: underscores must not be escaped - cite_key = bi[0].replace(r"\_", "_") - self.body.append('\\bibitem[%s]{%s}{%s}\n' % (bi[0], cite_key, bi[1])) - self.body.append('\\end{thebibliography}\n') - self.bibitems = [] - - def visit_start_of_file(self, node): - # This marks the begin of a new file; therefore the current module and - # class must be reset - self.body.append('\n\\resetcurrentobjects\n') - # and also, new footnotes - self.footnotestack.append(self.collect_footnotes(node)) - - def collect_footnotes(self, node): - fnotes = {} - def footnotes_under(n): - if isinstance(n, nodes.footnote): - yield n - else: - for c in n.children: - if isinstance(c, addnodes.start_of_file): - continue - for k in footnotes_under(c): - yield k - for fn in footnotes_under(node): - num = fn.children[0].astext().strip() - fnotes[num] = fn - 
fn.parent.remove(fn) - return fnotes - - def depart_start_of_file(self, node): - self.footnotestack.pop() - - def visit_highlightlang(self, node): - self.highlightlang = node['lang'] - self.highlightlinenothreshold = node['linenothreshold'] - raise nodes.SkipNode - - def visit_section(self, node): - if not self.this_is_the_title: - self.sectionlevel += 1 - self.body.append('\n\n') - if self.next_section_target: - self.body.append(r'\hypertarget{%s}{}' % self.next_section_target) - self.next_section_target = None - #if node.get('ids'): - # for id in node['ids']: - # if id not in self.written_ids: - # self.body.append(r'\hypertarget{%s}{}' % id) - # self.written_ids.add(id) - def depart_section(self, node): - self.sectionlevel = max(self.sectionlevel - 1, self.top_sectionlevel - 1) - - def visit_problematic(self, node): - self.body.append(r'{\color{red}\bfseries{}') - def depart_problematic(self, node): - self.body.append('}') - - def visit_topic(self, node): - self.body.append('\\setbox0\\vbox{\n' - '\\begin{minipage}{0.95\\textwidth}\n') - def depart_topic(self, node): - self.body.append('\\end{minipage}}\n' - '\\begin{center}\\setlength{\\fboxsep}{5pt}' - '\\shadowbox{\\box0}\\end{center}\n') - visit_sidebar = visit_topic - depart_sidebar = depart_topic - - def visit_glossary(self, node): - pass - def depart_glossary(self, node): - pass - - def visit_productionlist(self, node): - self.body.append('\n\n\\begin{productionlist}\n') - self.in_production_list = 1 - def depart_productionlist(self, node): - self.body.append('\\end{productionlist}\n\n') - self.in_production_list = 0 - - def visit_production(self, node): - if node['tokenname']: - self.body.append('\\production{%s}{' % self.encode(node['tokenname'])) - else: - self.body.append('\\productioncont{') - def depart_production(self, node): - self.body.append('}\n') - - def visit_transition(self, node): - self.body.append('\n\n\\bigskip\\hrule{}\\bigskip\n\n') - def depart_transition(self, node): - pass - - def 
visit_title(self, node): - parent = node.parent - if isinstance(parent, addnodes.seealso): - # the environment already handles this - raise nodes.SkipNode - elif self.this_is_the_title: - if len(node.children) != 1 and not isinstance(node.children[0], nodes.Text): - self.builder.warn('document title is not a single Text node') - if not self.elements['title']: - # text needs to be escaped since it is inserted into - # the output literally - self.elements['title'] = node.astext().translate(tex_escape_map) - self.this_is_the_title = 0 - raise nodes.SkipNode - elif isinstance(parent, nodes.section): - try: - self.body.append(r'\%s{' % self.sectionnames[self.sectionlevel]) - except IndexError: - from sphinx.application import SphinxError - raise SphinxError('too many nesting section levels for LaTeX, ' - 'at heading: %s' % node.astext()) - self.context.append('}\n') - elif isinstance(parent, (nodes.topic, nodes.sidebar)): - self.body.append(r'\textbf{') - self.context.append('}\n\n\medskip\n\n') - elif isinstance(parent, nodes.Admonition): - self.body.append('{') - self.context.append('}\n') - elif isinstance(parent, nodes.table): - self.table.caption = self.encode(node.astext()) - raise nodes.SkipNode - else: - self.builder.warn('encountered title node not in section, topic, ' - 'table, admonition or sidebar') - self.body.append('\\textbf{') - self.context.append('}\n') - self.in_title = 1 - def depart_title(self, node): - self.in_title = 0 - self.body.append(self.context.pop()) - - def visit_subtitle(self, node): - if isinstance(node.parent, nodes.sidebar): - self.body.append('~\\\\\n\\textbf{') - self.context.append('}\n\\smallskip\n') - else: - self.context.append('') - def depart_subtitle(self, node): - self.body.append(self.context.pop()) - - desc_map = { - 'function' : 'funcdesc', - 'class': 'classdesc', - 'method': 'methoddesc', - 'staticmethod': 'staticmethoddesc', - 'exception': 'excdesc', - 'data': 'datadesc', - 'attribute': 'memberdesc', - 'opcode': 
'opcodedesc', - - 'cfunction': 'cfuncdesc', - 'cmember': 'cmemberdesc', - 'cmacro': 'csimplemacrodesc', - 'ctype': 'ctypedesc', - 'cvar': 'cvardesc', - - 'describe': 'describe', - # and all others are 'describe' too - } - - def visit_desc(self, node): - self.descstack.append(Desc(node)) - def depart_desc(self, node): - d = self.descstack.pop() - self.body.append("\\end{%s}\n" % d.env) - - def visit_desc_signature(self, node): - d = self.descstack[-1] - # reset these for every signature - d.type = d.cls = d.name = d.params = '' - def depart_desc_signature(self, node): - d = self.descstack[-1] - d.cls = d.cls.rstrip('.') - if node.parent['desctype'] != 'describe' and node['ids']: - hyper = '\\hypertarget{%s}{}' % node['ids'][0] - else: - hyper = '' - if d.count == 0: - t1 = "\n\n%s\\begin{%s}" % (hyper, d.env) - else: - t1 = "\n%s\\%sline" % (hyper, d.env[:-4]) - d.count += 1 - if d.env in ('funcdesc', 'classdesc', 'excclassdesc'): - t2 = "{%s}{%s}" % (d.name, d.params) - elif d.env in ('datadesc', 'excdesc', 'csimplemacrodesc'): - t2 = "{%s}" % (d.name) - elif d.env in ('methoddesc', 'staticmethoddesc'): - if d.cls: - t2 = "[%s]{%s}{%s}" % (d.cls, d.name, d.params) - else: - t2 = "{%s}{%s}" % (d.name, d.params) - elif d.env == 'memberdesc': - if d.cls: - t2 = "[%s]{%s}" % (d.cls, d.name) - else: - t2 = "{%s}" % d.name - elif d.env == 'cfuncdesc': - if d.cls: - # C++ class names - d.name = '%s::%s' % (d.cls, d.name) - t2 = "{%s}{%s}{%s}" % (d.type, d.name, d.params) - elif d.env == 'cmemberdesc': - try: - type, container = d.type.rsplit(' ', 1) - container = container.rstrip('.') - except ValueError: - container = '' - type = d.type - t2 = "{%s}{%s}{%s}" % (container, type, d.name) - elif d.env == 'cvardesc': - t2 = "{%s}{%s}" % (d.type, d.name) - elif d.env == 'ctypedesc': - t2 = "{%s}" % (d.name) - elif d.env == 'opcodedesc': - t2 = "{%s}{%s}" % (d.name, d.params) - elif d.env == 'describe': - t2 = "{%s}" % d.name - self.body.append(t1 + t2) - - def 
visit_desc_type(self, node): - d = self.descstack[-1] - if d.env == 'describe': - d.name += self.encode(node.astext()) - else: - self.descstack[-1].type = self.encode(node.astext().strip()) - raise nodes.SkipNode - - def visit_desc_name(self, node): - d = self.descstack[-1] - if d.env == 'describe': - d.name += self.encode(node.astext()) - else: - self.descstack[-1].name = self.encode(node.astext().strip()) - raise nodes.SkipNode - - def visit_desc_addname(self, node): - d = self.descstack[-1] - if d.env == 'describe': - d.name += self.encode(node.astext()) - else: - self.descstack[-1].cls = self.encode(node.astext().strip()) - raise nodes.SkipNode - - def visit_desc_parameterlist(self, node): - d = self.descstack[-1] - if d.env == 'describe': - d.name += self.encode(node.astext()) - else: - self.descstack[-1].params = self.encode(node.astext().strip()) - raise nodes.SkipNode - - def visit_desc_annotation(self, node): - d = self.descstack[-1] - if d.env == 'describe': - d.name += self.encode(node.astext()) - else: - self.descstack[-1].annotation = self.encode(node.astext().strip()) - raise nodes.SkipNode - - def visit_refcount(self, node): - self.body.append("\\emph{") - def depart_refcount(self, node): - self.body.append("}\\\\") - - def visit_desc_content(self, node): - if node.children and not isinstance(node.children[0], nodes.paragraph): - # avoid empty desc environment which causes a formatting bug - self.body.append('~') - def depart_desc_content(self, node): - pass - - def visit_seealso(self, node): - self.body.append("\n\n\\strong{%s:}\n\n" % admonitionlabels['seealso']) - def depart_seealso(self, node): - self.body.append("\n\n") - - def visit_rubric(self, node): - if len(node.children) == 1 and node.children[0].astext() == 'Footnotes': - raise nodes.SkipNode - self.body.append('\\paragraph{') - self.context.append('}\n') - def depart_rubric(self, node): - self.body.append(self.context.pop()) - - def visit_footnote(self, node): - pass - def 
depart_footnote(self, node): - pass - - def visit_label(self, node): - if isinstance(node.parent, nodes.citation): - self.bibitems[-1][0] = node.astext() - raise nodes.SkipNode - - def visit_tabular_col_spec(self, node): - self.next_table_colspec = node['spec'] - raise nodes.SkipNode - - def visit_table(self, node): - if self.table: - raise NotImplementedError('Nested tables are not supported.') - self.table = Table() - self.tablebody = [] - # Redirect body output until table is finished. - self._body = self.body - self.body = self.tablebody - def depart_table(self, node): - self.body = self._body - if self.table.caption is not None: - self.body.append('\n\\begin{threeparttable}\n' - '\\caption{%s}\n' % self.table.caption) - if self.table.has_verbatim: - self.body.append('\n\\begin{tabular}') - else: - self.body.append('\n\\begin{tabulary}{\\textwidth}') - if self.table.colspec: - self.body.append(self.table.colspec) - else: - if self.table.has_verbatim: - colwidth = 0.95 / self.table.colcount - colspec = ('p{%.3f\\textwidth}|' % colwidth) * self.table.colcount - self.body.append('{|' + colspec + '}\n') - else: - self.body.append('{|' + ('L|' * self.table.colcount) + '}\n') - self.body.extend(self.tablebody) - if self.table.has_verbatim: - self.body.append('\\end{tabular}\n\n') - else: - self.body.append('\\end{tabulary}\n\n') - if self.table.caption is not None: - self.body.append('\\end{threeparttable}\n\n') - self.table = None - self.tablebody = None - - def visit_colspec(self, node): - self.table.colcount += 1 - def depart_colspec(self, node): - pass - - def visit_tgroup(self, node): - pass - def depart_tgroup(self, node): - pass - - def visit_thead(self, node): - if self.next_table_colspec: - self.table.colspec = '{%s}\n' % self.next_table_colspec - self.next_table_colspec = None - self.body.append('\\hline\n') - self.table.had_head = True - def depart_thead(self, node): - self.body.append('\\hline\n') - - def visit_tbody(self, node): - if not 
self.table.had_head: - self.visit_thead(node) - def depart_tbody(self, node): - self.body.append('\\hline\n') - - def visit_row(self, node): - self.table.col = 0 - def depart_row(self, node): - self.body.append('\\\\\n') - - def visit_entry(self, node): - if node.has_key('morerows') or node.has_key('morecols'): - raise NotImplementedError('Column or row spanning cells are ' - 'not implemented.') - if self.table.col > 0: - self.body.append(' & ') - self.table.col += 1 - if isinstance(node.parent.parent, nodes.thead): - self.body.append('\\textbf{') - self.context.append('}') - else: - self.context.append('') - def depart_entry(self, node): - self.body.append(self.context.pop()) # header - - def visit_acks(self, node): - # this is a list in the source, but should be rendered as a - # comma-separated list here - self.body.append('\n\n') - self.body.append(', '.join(n.astext() for n in node.children[0].children) + '.') - self.body.append('\n\n') - raise nodes.SkipNode - - def visit_bullet_list(self, node): - self.body.append('\\begin{itemize}\n' ) - def depart_bullet_list(self, node): - self.body.append('\\end{itemize}\n' ) - - def visit_enumerated_list(self, node): - self.body.append('\\begin{enumerate}\n' ) - def depart_enumerated_list(self, node): - self.body.append('\\end{enumerate}\n' ) - - def visit_list_item(self, node): - # Append "{}" in case the next character is "[", which would break - # LaTeX's list environment (no numbering and the "[" is not printed). 
- self.body.append(r'\item {} ') - def depart_list_item(self, node): - self.body.append('\n') - - def visit_definition_list(self, node): - self.body.append('\\begin{description}\n') - def depart_definition_list(self, node): - self.body.append('\\end{description}\n') - - def visit_definition_list_item(self, node): - pass - def depart_definition_list_item(self, node): - pass - - def visit_term(self, node): - ctx = ']' - if node.has_key('ids') and node['ids']: - ctx += '\\hypertarget{%s}{}' % node['ids'][0] - self.body.append('\\item[') - self.context.append(ctx) - def depart_term(self, node): - self.body.append(self.context.pop()) - - def visit_classifier(self, node): - self.body.append('{[}') - def depart_classifier(self, node): - self.body.append('{]}') - - def visit_definition(self, node): - pass - def depart_definition(self, node): - self.body.append('\n') - - def visit_field_list(self, node): - self.body.append('\\begin{quote}\\begin{description}\n') - def depart_field_list(self, node): - self.body.append('\\end{description}\\end{quote}\n') - - def visit_field(self, node): - pass - def depart_field(self, node): - pass - - visit_field_name = visit_term - depart_field_name = depart_term - - visit_field_body = visit_definition - depart_field_body = depart_definition - - def visit_paragraph(self, node): - self.body.append('\n') - def depart_paragraph(self, node): - self.body.append('\n') - - def visit_centered(self, node): - self.body.append('\n\\begin{centering}') - def depart_centered(self, node): - self.body.append('\n\\end{centering}') - - def visit_module(self, node): - modname = node['modname'] - self.body.append('\n\\declaremodule[%s]{}{%s}' % (modname.replace('_', ''), - self.encode(modname))) - self.body.append('\n\\modulesynopsis{%s}' % self.encode(node['synopsis'])) - if node.has_key('platform'): - self.body.append('\\platform{%s}' % self.encode(node['platform'])) - def depart_module(self, node): - pass - - def latex_image_length(self, width_str): - match 
= re.match('(\d*\.?\d*)\s*(\S*)', width_str) - if not match: - # fallback - return width_str - res = width_str - amount, unit = match.groups()[:2] - if not unit or unit == "px": - # pixels: let LaTeX alone - return None - elif unit == "%": - res = "%.3f\\linewidth" % (float(amount) / 100.0) - return res - - def visit_image(self, node): - attrs = node.attributes - pre = [] # in reverse order - post = [] - include_graphics_options = [] - inline = isinstance(node.parent, nodes.TextElement) - if attrs.has_key('scale'): - # Could also be done with ``scale`` option to - # ``\includegraphics``; doing it this way for consistency. - pre.append('\\scalebox{%f}{' % (attrs['scale'] / 100.0,)) - post.append('}') - if attrs.has_key('width'): - w = self.latex_image_length(attrs['width']) - if w: - include_graphics_options.append('width=%s' % w) - if attrs.has_key('height'): - h = self.latex_image_length(attrs['height']) - include_graphics_options.append('height=%s' % h) - if attrs.has_key('align'): - align_prepost = { - # By default latex aligns the top of an image. - (1, 'top'): ('', ''), - (1, 'middle'): ('\\raisebox{-0.5\\height}{', '}'), - (1, 'bottom'): ('\\raisebox{-\\height}{', '}'), - (0, 'center'): ('{\\hfill', '\\hfill}'), - # These 2 don't exactly do the right thing. The image should - # be floated alongside the paragraph. See - # http://www.w3.org/TR/html4/struct/objects.html#adef-align-IMG - (0, 'left'): ('{', '\\hfill}'), - (0, 'right'): ('{\\hfill', '}'),} - try: - pre.append(align_prepost[inline, attrs['align']][0]) - post.append(align_prepost[inline, attrs['align']][1]) - except KeyError: - pass # XXX complain here? - if not inline: - pre.append('\n') - post.append('\n') - pre.reverse() - if node['uri'] in self.builder.images: - uri = self.builder.images[node['uri']] - else: - # missing image! 
- if self.ignore_missing_images: - return - uri = node['uri'] - if uri.find('://') != -1: - # ignore remote images - return - self.body.extend(pre) - options = '' - if include_graphics_options: - options = '[%s]' % ','.join(include_graphics_options) - self.body.append('\\includegraphics%s{%s}' % (options, uri)) - self.body.extend(post) - def depart_image(self, node): - pass - - def visit_figure(self, node): - if (not node.attributes.has_key('align') or - node.attributes['align'] == 'center'): - # centering does not add vertical space like center. - align = '\n\\centering' - align_end = '' - else: - # TODO non vertical space for other alignments. - align = '\\begin{flush%s}' % node.attributes['align'] - align_end = '\\end{flush%s}' % node.attributes['align'] - self.body.append('\\begin{figure}[htbp]%s\n' % align) - self.context.append('%s\\end{figure}\n' % align_end) - def depart_figure(self, node): - self.body.append(self.context.pop()) - - def visit_caption(self, node): - self.body.append('\\caption{') - def depart_caption(self, node): - self.body.append('}') - - def visit_legend(self, node): - self.body.append('{\\small ') - def depart_legend(self, node): - self.body.append('}') - - def visit_admonition(self, node): - self.body.append('\n\\begin{notice}{note}') - def depart_admonition(self, node): - self.body.append('\\end{notice}\n') - - def _make_visit_admonition(name): - def visit_admonition(self, node): - self.body.append('\n\\begin{notice}{%s}{%s:}' % - (name, admonitionlabels[name])) - return visit_admonition - def _depart_named_admonition(self, node): - self.body.append('\\end{notice}\n') - - visit_attention = _make_visit_admonition('attention') - depart_attention = _depart_named_admonition - visit_caution = _make_visit_admonition('caution') - depart_caution = _depart_named_admonition - visit_danger = _make_visit_admonition('danger') - depart_danger = _depart_named_admonition - visit_error = _make_visit_admonition('error') - depart_error = 
_depart_named_admonition - visit_hint = _make_visit_admonition('hint') - depart_hint = _depart_named_admonition - visit_important = _make_visit_admonition('important') - depart_important = _depart_named_admonition - visit_note = _make_visit_admonition('note') - depart_note = _depart_named_admonition - visit_tip = _make_visit_admonition('tip') - depart_tip = _depart_named_admonition - visit_warning = _make_visit_admonition('warning') - depart_warning = _depart_named_admonition - - def visit_versionmodified(self, node): - intro = versionlabels[node['type']] % node['version'] - if node.children: - intro += ': ' - else: - intro += '.' - self.body.append(intro) - def depart_versionmodified(self, node): - pass - - def visit_target(self, node): - def add_target(id): - # indexing uses standard LaTeX index markup, so the targets - # will be generated differently - if not id.startswith('index-'): - self.body.append(r'\hypertarget{%s}{}' % id) - - if node.has_key('refid') and node['refid'] not in self.written_ids: - parindex = node.parent.index(node) - try: - next = node.parent[parindex+1] - if isinstance(next, nodes.section): - self.next_section_target = node['refid'] - return - except IndexError: - pass - add_target(node['refid']) - self.written_ids.add(node['refid']) - def depart_target(self, node): - pass - - def visit_attribution(self, node): - self.body.append('\n\\begin{flushright}\n') - self.body.append('---') - def depart_attribution(self, node): - self.body.append('\n\\end{flushright}\n') - - def visit_index(self, node, scre=re.compile(r';\s*')): - entries = node['entries'] - for type, string, tid, _ in entries: - if type == 'single': - self.body.append(r'\index{%s}' % scre.sub('!', self.encode(string))) - elif type == 'pair': - parts = tuple(self.encode(x.strip()) for x in string.split(';', 1)) - self.body.append(r'\indexii{%s}{%s}' % parts) - elif type == 'triple': - parts = tuple(self.encode(x.strip()) for x in string.split(';', 2)) - 
self.body.append(r'\indexiii{%s}{%s}{%s}' % parts) - else: - self.builder.warn('unknown index entry type %s found' % type) - raise nodes.SkipNode - - def visit_raw(self, node): - if 'latex' in node.get('format', '').split(): - self.body.append(node.astext()) - raise nodes.SkipNode - - def visit_reference(self, node): - uri = node.get('refuri', '') - if self.in_title or not uri: - self.context.append('') - elif uri.startswith('mailto:') or uri.startswith('http:') or \ - uri.startswith('https:') or uri.startswith('ftp:'): - self.body.append('\\href{%s}{' % self.encode(uri)) - self.context.append('}') - elif uri.startswith('#'): - self.body.append('\\hyperlink{%s}{' % uri[1:]) - self.context.append('}') - elif uri.startswith('@token'): - if self.in_production_list: - self.body.append('\\token{') - else: - self.body.append('\\grammartoken{') - self.context.append('}') - else: - self.builder.warn('unusable reference target found: %s' % uri) - self.context.append('') - def depart_reference(self, node): - self.body.append(self.context.pop()) - - def visit_pending_xref(self, node): - pass - def depart_pending_xref(self, node): - pass - - def visit_emphasis(self, node): - self.body.append(r'\emph{') - def depart_emphasis(self, node): - self.body.append('}') - - def visit_literal_emphasis(self, node): - self.body.append(r'\emph{\texttt{') - self.no_contractions += 1 - def depart_literal_emphasis(self, node): - self.body.append('}}') - self.no_contractions -= 1 - - def visit_strong(self, node): - self.body.append(r'\textbf{') - def depart_strong(self, node): - self.body.append('}') - - def visit_title_reference(self, node): - self.body.append(r'\emph{') - def depart_title_reference(self, node): - self.body.append('}') - - def visit_citation(self, node): - # TODO maybe use cite bibitems - self.bibitems.append(['', '']) - self.context.append(len(self.body)) - def depart_citation(self, node): - size = self.context.pop() - text = ''.join(self.body[size:]) - del self.body[size:] - 
self.bibitems[-1][1] = text - - def visit_citation_reference(self, node): - citeid = node.astext() - self.body.append('\\cite{%s}' % citeid) - raise nodes.SkipNode - - def visit_literal(self, node): - content = self.encode(node.astext().strip()) - if self.in_title: - self.body.append(r'\texttt{%s}' % content) - elif node.has_key('role') and node['role'] == 'samp': - self.body.append(r'\samp{%s}' % content) - else: - self.body.append(r'\code{%s}' % content) - raise nodes.SkipNode - - def visit_footnote_reference(self, node): - num = node.astext().strip() - try: - fn = self.footnotestack[-1][num] - except (KeyError, IndexError): - raise nodes.SkipNode - self.body.append('\\footnote{') - fn.walkabout(self) - raise nodes.SkipChildren - def depart_footnote_reference(self, node): - self.body.append('}') - - def visit_literal_block(self, node): - self.verbatim = '' - def depart_literal_block(self, node): - code = self.verbatim.rstrip('\n') - lang = self.highlightlang - linenos = code.count('\n') >= self.highlightlinenothreshold - 1 - if node.has_key('language'): - # code-block directives - lang = node['language'] - if node.has_key('linenos'): - linenos = node['linenos'] - hlcode = self.highlighter.highlight_block(code, lang, linenos) - # workaround for Unicode issue - hlcode = hlcode.replace(u'€', u'@texteuro[]') - # must use original Verbatim environment and "tabular" environment - if self.table: - hlcode = hlcode.replace('\\begin{Verbatim}', - '\\begin{OriginalVerbatim}') - self.table.has_verbatim = True - # get consistent trailer - hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim} - hlcode = hlcode.rstrip() + '\n' - self.body.append('\n' + hlcode + '\\end{%sVerbatim}\n' % - (self.table and 'Original' or '')) - self.verbatim = None - visit_doctest_block = visit_literal_block - depart_doctest_block = depart_literal_block - - def visit_line_block(self, node): - """line-block: - * whitespace (including linebreaks) is significant - * inline markup is supported. 
- * serif typeface - """ - self.body.append('{\\raggedright{}') - self.literal_whitespace = 1 - def depart_line_block(self, node): - self.literal_whitespace = 0 - # remove the last \\ - del self.body[-1] - self.body.append('}\n') - - def visit_line(self, node): - self._line_start = len(self.body) - def depart_line(self, node): - if self._line_start == len(self.body): - # no output in this line -- add a nonbreaking space, else the - # \\ command will give an error - self.body.append('~') - if self.table is not None: - self.body.append('\\newline\n') - else: - self.body.append('\\\\\n') - - def visit_block_quote(self, node): - # If the block quote contains a single object and that object - # is a list, then generate a list not a block quote. - # This lets us indent lists. - done = 0 - if len(node.children) == 1: - child = node.children[0] - if isinstance(child, nodes.bullet_list) or \ - isinstance(child, nodes.enumerated_list): - done = 1 - if not done: - self.body.append('\\begin{quote}\n') - def depart_block_quote(self, node): - done = 0 - if len(node.children) == 1: - child = node.children[0] - if isinstance(child, nodes.bullet_list) or \ - isinstance(child, nodes.enumerated_list): - done = 1 - if not done: - self.body.append('\\end{quote}\n') - - # option node handling copied from docutils' latex writer - - def visit_option(self, node): - if self.context[-1]: - # this is not the first option - self.body.append(', ') - def depart_option(self, node): - # flag that the first option is done. 
- self.context[-1] += 1 - - def visit_option_argument(self, node): - """The delimiter betweeen an option and its argument.""" - self.body.append(node.get('delimiter', ' ')) - def depart_option_argument(self, node): - pass - - def visit_option_group(self, node): - self.body.append('\\item [') - # flag for first option - self.context.append(0) - def depart_option_group(self, node): - self.context.pop() # the flag - self.body.append('] ') - - def visit_option_list(self, node): - self.body.append('\\begin{optionlist}{3cm}\n') - def depart_option_list(self, node): - self.body.append('\\end{optionlist}\n') - - def visit_option_list_item(self, node): - pass - def depart_option_list_item(self, node): - pass - - def visit_option_string(self, node): - ostring = node.astext() - self.body.append(self.encode(ostring.replace('--', u'-{-}'))) - raise nodes.SkipNode - - def visit_description(self, node): - self.body.append( ' ' ) - def depart_description(self, node): - pass - - def visit_superscript(self, node): - self.body.append('$^{\\text{') - def depart_superscript(self, node): - self.body.append('}}$') - - def visit_subscript(self, node): - self.body.append('$_{\\text{') - def depart_subscript(self, node): - self.body.append('}}$') - - def visit_substitution_definition(self, node): - raise nodes.SkipNode - - def visit_substitution_reference(self, node): - raise nodes.SkipNode - - def visit_generated(self, node): - pass - def depart_generated(self, node): - pass - - def visit_compound(self, node): - pass - def depart_compound(self, node): - pass - - def visit_container(self, node): - pass - def depart_container(self, node): - pass - - def visit_decoration(self, node): - pass - def depart_decoration(self, node): - pass - - # text handling - - def encode(self, text): - text = unicode(text).translate(tex_escape_map) - if self.literal_whitespace: - # Insert a blank before the newline, to avoid - # ! LaTeX Error: There's no line here to end. 
- text = text.replace(u'\n', u'~\\\\\n').replace(u' ', u'~') - if self.no_contractions: - text = text.replace('--', u'-{-}') - return text - - def visit_Text(self, node): - if self.verbatim is not None: - self.verbatim += node.astext() - else: - text = self.encode(node.astext()) - self.body.append(educateQuotesLatex(text)) - def depart_Text(self, node): - pass - - def visit_comment(self, node): - raise nodes.SkipNode - - def visit_meta(self, node): - # only valid for HTML - raise nodes.SkipNode - - def visit_system_message(self, node): - pass - def depart_system_message(self, node): - self.body.append('\n') - - def unknown_visit(self, node): - raise NotImplementedError('Unknown node: ' + node.__class__.__name__) diff --git a/sphinx/linkcheck.py b/sphinx/linkcheck.py deleted file mode 100644 index 37aeb7a7..00000000 --- a/sphinx/linkcheck.py +++ /dev/null @@ -1,130 +0,0 @@ -# -*- coding: utf-8 -*- -""" - sphinx.linkcheck - ~~~~~~~~~~~~~~~~ - - The CheckExternalLinksBuilder class. - - :copyright: 2008 by Georg Brandl, Thomas Lamb. - :license: BSD. -""" - -import socket -from os import path -from urllib2 import build_opener, HTTPError - -from docutils import nodes - -from sphinx.builder import Builder -from sphinx.util.console import purple, red, darkgreen - -# create an opener that will simulate a browser user-agent -opener = build_opener() -opener.addheaders = [('User-agent', 'Mozilla/5.0')] - - -class CheckExternalLinksBuilder(Builder): - """ - Checks for broken external links. 
- """ - name = 'linkcheck' - - def init(self): - self.good = set() - self.broken = {} - self.redirected = {} - # set a timeout for non-responding servers - socket.setdefaulttimeout(5.0) - # create output file - open(path.join(self.outdir, 'output.txt'), 'w').close() - - def get_target_uri(self, docname, typ=None): - return '' - - def get_outdated_docs(self): - return self.env.found_docs - - def prepare_writing(self, docnames): - return - - def write_doc(self, docname, doctree): - self.info() - for node in doctree.traverse(nodes.reference): - try: - self.check(node, docname) - except KeyError: - continue - - def check(self, node, docname): - uri = node['refuri'] - - if '#' in uri: - uri = uri.split('#')[0] - - if uri in self.good: - return - - lineno = None - while lineno is None and node: - node = node.parent - lineno = node.line - - if uri[0:5] == 'http:' or uri[0:6] == 'https:': - self.info(uri, nonl=1) - - if uri in self.broken: - (r, s) = self.broken[uri] - elif uri in self.redirected: - (r, s) = self.redirected[uri] - else: - (r, s) = self.resolve(uri) - - if r == 0: - self.info(' - ' + darkgreen('working')) - self.good.add(uri) - elif r == 2: - self.info(' - ' + red('broken: ') + s) - self.write_entry('broken', docname, lineno, uri + ': ' + s) - self.broken[uri] = (r, s) - if self.app.quiet: - self.warn('%s:%s: broken link: %s' % (docname, lineno, uri)) - else: - self.info(' - ' + purple('redirected') + ' to ' + s) - self.write_entry('redirected', docname, lineno, uri + ' to ' + s) - self.redirected[uri] = (r, s) - elif len(uri) == 0 or uri[0:7] == 'mailto:' or uri[0:4] == 'ftp:': - return - else: - self.warn(uri + ' - ' + red('malformed!')) - self.write_entry('malformed', docname, lineno, uri) - if self.app.quiet: - self.warn('%s:%s: malformed link: %s' % (docname, lineno, uri)) - self.app.statuscode = 1 - - if self.broken: - self.app.statuscode = 1 - - def write_entry(self, what, docname, line, uri): - output = open(path.join(self.outdir, 'output.txt'), 
'a') - output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None), - line, what, uri)) - output.close() - - def resolve(self, uri): - try: - f = opener.open(uri) - f.close() - except HTTPError, err: - #if err.code == 403 and uri.startswith('http://en.wikipedia.org/'): - # # Wikipedia blocks requests from urllib User-Agent - # return (0, 0) - return (2, str(err)) - except Exception, err: - return (2, str(err)) - if f.url.rstrip('/') == uri.rstrip('/'): - return (0, 0) - else: - return (1, f.url) - - def finish(self): - return diff --git a/sphinx/textwriter.py b/sphinx/textwriter.py deleted file mode 100644 index 4aa18039..00000000 --- a/sphinx/textwriter.py +++ /dev/null @@ -1,679 +0,0 @@ -# -*- coding: utf-8 -*- -""" - sphinx.textwriter - ~~~~~~~~~~~~~~~~~ - - Custom docutils writer for plain text. - - :copyright: 2008 by Georg Brandl. - :license: BSD. -""" - -import re -import textwrap - -from docutils import nodes, writers - -from sphinx import addnodes -from sphinx.locale import admonitionlabels, versionlabels - - -class TextWriter(writers.Writer): - supported = ('text',) - settings_spec = ('No options here.', '', ()) - settings_defaults = {} - - output = None - - def __init__(self, builder): - writers.Writer.__init__(self) - self.builder = builder - - def translate(self): - visitor = TextTranslator(self.document, self.builder) - self.document.walkabout(visitor) - self.output = visitor.body - -# monkey-patch... 
-new_wordsep_re = re.compile( - r'(\s+|' # any whitespace - r'(?<=\s)(?::[a-z-]+:)?`\S+|' # interpreted text start - r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words - r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash -textwrap.TextWrapper.wordsep_re = new_wordsep_re - -MAXWIDTH = 70 -STDINDENT = 3 - - -class TextTranslator(nodes.NodeVisitor): - sectionchars = '*=-~"+' - - def __init__(self, document, builder): - nodes.NodeVisitor.__init__(self, document) - - self.states = [[]] - self.stateindent = [0] - self.sectionlevel = 0 - self.table = None - - def add_text(self, text): - self.states[-1].append((-1, text)) - def new_state(self, indent=STDINDENT): - self.states.append([]) - self.stateindent.append(indent) - def end_state(self, wrap=True, end=[''], first=None): - content = self.states.pop() - maxindent = sum(self.stateindent) - indent = self.stateindent.pop() - result = [] - toformat = [] - def do_format(): - if not toformat: - return - if wrap: - res = textwrap.wrap(''.join(toformat), width=MAXWIDTH-maxindent) - else: - res = ''.join(toformat).splitlines() - if end: - res += end - result.append((indent, res)) - for itemindent, item in content: - if itemindent == -1: - toformat.append(item) - else: - do_format() - result.append((indent + itemindent, item)) - toformat = [] - do_format() - if first is not None and result: - itemindent, item = result[0] - if item: - result.insert(0, (itemindent - indent, [first + item[0]])) - result[1] = (itemindent, item[1:]) - self.states[-1].extend(result) - - def visit_document(self, node): - self.new_state(0) - def depart_document(self, node): - self.end_state() - self.body = '\n'.join(line and (' '*indent + line) - for indent, lines in self.states[0] - for line in lines) - # XXX header/footer? 
- - def visit_highlightlang(self, node): - raise nodes.SkipNode - - def visit_section(self, node): - self._title_char = self.sectionchars[self.sectionlevel] - self.sectionlevel += 1 - def depart_section(self, node): - self.sectionlevel -= 1 - - def visit_topic(self, node): - self.new_state(0) - def depart_topic(self, node): - self.end_state() - - visit_sidebar = visit_topic - depart_sidebar = depart_topic - - def visit_rubric(self, node): - self.new_state(0) - self.add_text('-[ ') - def depart_rubric(self, node): - self.add_text(' ]-') - self.end_state() - - def visit_compound(self, node): - pass - def depart_compound(self, node): - pass - - def visit_glossary(self, node): - pass - def depart_glossary(self, node): - pass - - def visit_title(self, node): - if isinstance(node.parent, nodes.Admonition): - self.add_text(node.astext()+': ') - raise nodes.SkipNode - self.new_state(0) - def depart_title(self, node): - if isinstance(node.parent, nodes.section): - char = self._title_char - else: - char = '^' - text = ''.join(x[1] for x in self.states.pop() if x[0] == -1) - self.stateindent.pop() - self.states[-1].append((0, ['', text, '%s' % (char * len(text)), ''])) - - def visit_subtitle(self, node): - pass - def depart_subtitle(self, node): - pass - - def visit_attribution(self, node): - self.add_text('-- ') - def depart_attribution(self, node): - pass - - def visit_module(self, node): - if node.has_key('platform'): - self.new_state(0) - self.add_text(_('Platform: %s') % node['platform']) - self.end_state() - raise nodes.SkipNode - - def visit_desc(self, node): - pass - def depart_desc(self, node): - pass - - def visit_desc_signature(self, node): - self.new_state(0) - if node.parent['desctype'] in ('class', 'exception'): - self.add_text('%s ' % node.parent['desctype']) - def depart_desc_signature(self, node): - # XXX: wrap signatures in a way that makes sense - self.end_state(wrap=False, end=None) - - def visit_desc_name(self, node): - pass - def depart_desc_name(self, 
node): - pass - - def visit_desc_addname(self, node): - pass - def depart_desc_addname(self, node): - pass - - def visit_desc_type(self, node): - pass - def depart_desc_type(self, node): - pass - - def visit_desc_parameterlist(self, node): - self.add_text('(') - self.first_param = 1 - def depart_desc_parameterlist(self, node): - self.add_text(')') - - def visit_desc_parameter(self, node): - if not self.first_param: - self.add_text(', ') - else: - self.first_param = 0 - self.add_text(node.astext()) - raise nodes.SkipNode - - def visit_desc_optional(self, node): - self.add_text('[') - def depart_desc_optional(self, node): - self.add_text(']') - - def visit_desc_annotation(self, node): - pass - def depart_desc_annotation(self, node): - pass - - def visit_refcount(self, node): - pass - def depart_refcount(self, node): - pass - - def visit_desc_content(self, node): - self.new_state() - self.add_text('\n') - def depart_desc_content(self, node): - self.end_state() - - def visit_figure(self, node): - self.new_state() - def depart_figure(self, node): - self.end_state() - - def visit_caption(self, node): - pass - def depart_caption(self, node): - pass - - def visit_productionlist(self, node): - self.new_state() - names = [] - for production in node: - names.append(production['tokenname']) - maxlen = max(len(name) for name in names) - for production in node: - if production['tokenname']: - self.add_text(production['tokenname'].ljust(maxlen) + ' ::=') - lastname = production['tokenname'] - else: - self.add_text('%s ' % (' '*len(lastname))) - self.add_text(production.astext() + '\n') - self.end_state(wrap=False) - raise nodes.SkipNode - - def visit_seealso(self, node): - self.new_state() - def depart_seealso(self, node): - self.end_state(first='') - - def visit_footnote(self, node): - self._footnote = node.children[0].astext().strip() - self.new_state(len(self._footnote) + 3) - def depart_footnote(self, node): - self.end_state(first='[%s] ' % self._footnote) - - def 
visit_citation(self, node): - if len(node) and isinstance(node[0], nodes.label): - self._citlabel = node[0].astext() - else: - self._citlabel = '' - self.new_state(len(self._citlabel) + 3) - def depart_citation(self, node): - self.end_state(first='[%s] ' % self._citlabel) - - def visit_label(self, node): - raise nodes.SkipNode - - # XXX: option list could use some better styling - - def visit_option_list(self, node): - pass - def depart_option_list(self, node): - pass - - def visit_option_list_item(self, node): - self.new_state(0) - def depart_option_list_item(self, node): - self.end_state() - - def visit_option_group(self, node): - self._firstoption = True - def depart_option_group(self, node): - self.add_text(' ') - - def visit_option(self, node): - if self._firstoption: - self._firstoption = False - else: - self.add_text(', ') - def depart_option(self, node): - pass - - def visit_option_string(self, node): - pass - def depart_option_string(self, node): - pass - - def visit_option_argument(self, node): - self.add_text(node['delimiter']) - def depart_option_argument(self, node): - pass - - def visit_description(self, node): - pass - def depart_description(self, node): - pass - - def visit_tabular_col_spec(self, node): - raise nodes.SkipNode - - def visit_colspec(self, node): - self.table[0].append(node['colwidth']) - raise nodes.SkipNode - - def visit_tgroup(self, node): - pass - def depart_tgroup(self, node): - pass - - def visit_thead(self, node): - pass - def depart_thead(self, node): - pass - - def visit_tbody(self, node): - self.table.append('sep') - def depart_tbody(self, node): - pass - - def visit_row(self, node): - self.table.append([]) - def depart_row(self, node): - pass - - def visit_entry(self, node): - if node.has_key('morerows') or node.has_key('morecols'): - raise NotImplementedError('Column or row spanning cells are ' - 'not implemented.') - self.new_state(0) - def depart_entry(self, node): - text = '\n'.join('\n'.join(x[1]) for x in 
self.states.pop()) - self.stateindent.pop() - self.table[-1].append(text) - - def visit_table(self, node): - if self.table: - raise NotImplementedError('Nested tables are not supported.') - self.new_state(0) - self.table = [[]] - def depart_table(self, node): - lines = self.table[1:] - fmted_rows = [] - colwidths = self.table[0] - realwidths = colwidths[:] - separator = 0 - # don't allow paragraphs in table cells for now - for line in lines: - if line == 'sep': - separator = len(fmted_rows) - else: - cells = [] - for i, cell in enumerate(line): - par = textwrap.wrap(cell, width=colwidths[i]) - if par: - maxwidth = max(map(len, par)) - else: - maxwidth = 0 - realwidths[i] = max(realwidths[i], maxwidth) - cells.append(par) - fmted_rows.append(cells) - - def writesep(char='-'): - out = ['+'] - for width in realwidths: - out.append(char * (width+2)) - out.append('+') - self.add_text(''.join(out) + '\n') - - def writerow(row): - lines = map(None, *row) - for line in lines: - out = ['|'] - for i, cell in enumerate(line): - if cell: - out.append(' ' + cell.ljust(realwidths[i]+1)) - else: - out.append(' ' * (realwidths[i] + 2)) - out.append('|') - self.add_text(''.join(out) + '\n') - - for i, row in enumerate(fmted_rows): - if separator and i == separator: - writesep('=') - else: - writesep('-') - writerow(row) - writesep('-') - self.table = None - self.end_state(wrap=False) - - def visit_acks(self, node): - self.new_state(0) - self.add_text(', '.join(n.astext() for n in node.children[0].children) + '.') - self.end_state() - raise nodes.SkipNode - - def visit_image(self, node): - self.add_text(_('[image]')) - raise nodes.SkipNode - - def visit_transition(self, node): - indent = sum(self.stateindent) - self.new_state(0) - self.add_text('=' * (MAXWIDTH - indent)) - self.end_state() - raise nodes.SkipNode - - def visit_bullet_list(self, node): - self._list_counter = -1 - def depart_bullet_list(self, node): - pass - - def visit_enumerated_list(self, node): - self._list_counter 
= 0 - def depart_enumerated_list(self, node): - pass - - def visit_definition_list(self, node): - self._list_counter = -2 - def depart_definition_list(self, node): - pass - - def visit_list_item(self, node): - if self._list_counter == -1: - # bullet list - self.new_state(2) - elif self._list_counter == -2: - # definition list - pass - else: - # enumerated list - self._list_counter += 1 - self.new_state(len(str(self._list_counter)) + 2) - def depart_list_item(self, node): - if self._list_counter == -1: - self.end_state(first='* ', end=None) - elif self._list_counter == -2: - pass - else: - self.end_state(first='%s. ' % self._list_counter, end=None) - - def visit_definition_list_item(self, node): - self._li_has_classifier = len(node) >= 2 and \ - isinstance(node[1], nodes.classifier) - def depart_definition_list_item(self, node): - pass - - def visit_term(self, node): - self.new_state(0) - def depart_term(self, node): - if not self._li_has_classifier: - self.end_state(end=None) - - def visit_classifier(self, node): - self.add_text(' : ') - def depart_classifier(self, node): - self.end_state(end=None) - - def visit_definition(self, node): - self.new_state() - def depart_definition(self, node): - self.end_state() - - def visit_field_list(self, node): - pass - def depart_field_list(self, node): - pass - - def visit_field(self, node): - pass - def depart_field(self, node): - pass - - def visit_field_name(self, node): - self.new_state(0) - def depart_field_name(self, node): - self.add_text(':') - self.end_state(end=None) - - def visit_field_body(self, node): - self.new_state() - def depart_field_body(self, node): - self.end_state() - - def visit_centered(self, node): - pass - def depart_centered(self, node): - pass - - def visit_admonition(self, node): - self.new_state(0) - def depart_admonition(self, node): - self.end_state() - - def _visit_admonition(self, node): - self.new_state(2) - def _make_depart_admonition(name): - def depart_admonition(self, node): - 
self.end_state(first=admonitionlabels[name] + ': ') - return depart_admonition - - visit_attention = _visit_admonition - depart_attention = _make_depart_admonition('attention') - visit_caution = _visit_admonition - depart_caution = _make_depart_admonition('caution') - visit_danger = _visit_admonition - depart_danger = _make_depart_admonition('danger') - visit_error = _visit_admonition - depart_error = _make_depart_admonition('error') - visit_hint = _visit_admonition - depart_hint = _make_depart_admonition('hint') - visit_important = _visit_admonition - depart_important = _make_depart_admonition('important') - visit_note = _visit_admonition - depart_note = _make_depart_admonition('note') - visit_tip = _visit_admonition - depart_tip = _make_depart_admonition('tip') - visit_warning = _visit_admonition - depart_warning = _make_depart_admonition('warning') - - def visit_versionmodified(self, node): - self.new_state(0) - if node.children: - self.add_text(versionlabels[node['type']] % node['version'] + ': ') - else: - self.add_text(versionlabels[node['type']] % node['version'] + '.') - def depart_versionmodified(self, node): - self.end_state() - - def visit_literal_block(self, node): - self.new_state() - def depart_literal_block(self, node): - self.end_state(wrap=False) - - def visit_doctest_block(self, node): - self.new_state(0) - def depart_doctest_block(self, node): - self.end_state(wrap=False) - - def visit_line_block(self, node): - self.new_state(0) - def depart_line_block(self, node): - self.end_state(wrap=False) - - def visit_line(self, node): - pass - def depart_line(self, node): - pass - - def visit_block_quote(self, node): - self.new_state() - def depart_block_quote(self, node): - self.end_state() - - def visit_compact_paragraph(self, node): - pass - def depart_compact_paragraph(self, node): - pass - - def visit_paragraph(self, node): - if not isinstance(node.parent, nodes.Admonition) or \ - isinstance(node.parent, addnodes.seealso): - self.new_state(0) - def 
depart_paragraph(self, node): - if not isinstance(node.parent, nodes.Admonition) or \ - isinstance(node.parent, addnodes.seealso): - self.end_state() - - def visit_target(self, node): - raise nodes.SkipNode - - def visit_index(self, node): - raise nodes.SkipNode - - def visit_substitution_definition(self, node): - raise nodes.SkipNode - - def visit_pending_xref(self, node): - pass - def depart_pending_xref(self, node): - pass - - def visit_reference(self, node): - pass - def depart_reference(self, node): - pass - - def visit_emphasis(self, node): - self.add_text('*') - def depart_emphasis(self, node): - self.add_text('*') - - def visit_literal_emphasis(self, node): - self.add_text('*') - def depart_literal_emphasis(self, node): - self.add_text('*') - - def visit_strong(self, node): - self.add_text('**') - def depart_strong(self, node): - self.add_text('**') - - def visit_title_reference(self, node): - self.add_text('*') - def depart_title_reference(self, node): - self.add_text('*') - - def visit_literal(self, node): - self.add_text('``') - def depart_literal(self, node): - self.add_text('``') - - def visit_subscript(self, node): - self.add_text('_') - def depart_subscript(self, node): - pass - - def visit_superscript(self, node): - self.add_text('^') - def depart_superscript(self, node): - pass - - def visit_footnote_reference(self, node): - self.add_text('[%s]' % node.astext()) - raise nodes.SkipNode - - def visit_citation_reference(self, node): - self.add_text('[%s]' % node.astext()) - raise nodes.SkipNode - - def visit_Text(self, node): - self.add_text(node.astext()) - def depart_Text(self, node): - pass - - def visit_problematic(self, node): - self.add_text('>>') - def depart_problematic(self, node): - self.add_text('<<') - - def visit_system_message(self, node): - self.new_state(0) - self.add_text('' % node.astext()) - self.end_state() - raise nodes.SkipNode - - def visit_comment(self, node): - raise nodes.SkipNode - - def visit_meta(self, node): - # only 
valid for HTML - raise nodes.SkipNode - - def unknown_visit(self, node): - raise NotImplementedError('Unknown node: ' + node.__class__.__name__) diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py new file mode 100644 index 00000000..0505fd08 --- /dev/null +++ b/sphinx/writers/html.py @@ -0,0 +1,457 @@ +# -*- coding: utf-8 -*- +""" + sphinx.htmlwriter + ~~~~~~~~~~~~~~~~~ + + docutils writers handling Sphinx' custom nodes. + + :copyright: 2007-2008 by Georg Brandl. + :license: BSD. +""" + +import sys +import posixpath +import os + +from docutils import nodes +from docutils.writers.html4css1 import Writer, HTMLTranslator as BaseTranslator + +from sphinx.locale import admonitionlabels, versionlabels +from sphinx.highlighting import PygmentsBridge +from sphinx.util.smartypants import sphinx_smarty_pants + +try: + import Image # check for the Python Imaging Library +except ImportError: + Image = None + +class HTMLWriter(Writer): + def __init__(self, builder): + Writer.__init__(self) + self.builder = builder + + def translate(self): + # sadly, this is mostly copied from parent class + self.visitor = visitor = self.builder.translator_class(self.builder, + self.document) + self.document.walkabout(visitor) + self.output = visitor.astext() + for attr in ('head_prefix', 'stylesheet', 'head', 'body_prefix', + 'body_pre_docinfo', 'docinfo', 'body', 'fragment', + 'body_suffix', 'meta', 'title', 'subtitle', 'header', + 'footer', 'html_prolog', 'html_head', 'html_title', + 'html_subtitle', 'html_body', ): + setattr(self, attr, getattr(visitor, attr, None)) + self.clean_meta = ''.join(visitor.meta[2:]) + + +class HTMLTranslator(BaseTranslator): + """ + Our custom HTML translator. 
+ """ + + def __init__(self, builder, *args, **kwds): + BaseTranslator.__init__(self, *args, **kwds) + self.highlighter = PygmentsBridge('html', builder.config.pygments_style) + self.no_smarty = 0 + self.builder = builder + self.highlightlang = builder.config.highlight_language + self.highlightlinenothreshold = sys.maxint + self.protect_literal_text = 0 + + def visit_desc(self, node): + self.body.append(self.starttag(node, 'dl', CLASS=node['desctype'])) + def depart_desc(self, node): + self.body.append('\n\n') + + def visit_desc_signature(self, node): + # the id is set automatically + self.body.append(self.starttag(node, 'dt')) + # anchor for per-desc interactive data + if node.parent['desctype'] != 'describe' and node['ids'] and node['first']: + self.body.append('' % node['ids'][0]) + if node.parent['desctype'] in ('class', 'exception'): + self.body.append('%s ' % node.parent['desctype']) + def depart_desc_signature(self, node): + if node['ids'] and self.builder.add_definition_links: + self.body.append(u'\u00B6' % + _('Permalink to this definition')) + self.body.append('\n') + + def visit_desc_addname(self, node): + self.body.append(self.starttag(node, 'tt', '', CLASS='descclassname')) + def depart_desc_addname(self, node): + self.body.append('') + + def visit_desc_type(self, node): + pass + def depart_desc_type(self, node): + pass + + def visit_desc_name(self, node): + self.body.append(self.starttag(node, 'tt', '', CLASS='descname')) + def depart_desc_name(self, node): + self.body.append('') + + def visit_desc_parameterlist(self, node): + self.body.append('(') + self.first_param = 1 + def depart_desc_parameterlist(self, node): + self.body.append(')') + + def visit_desc_parameter(self, node): + if not self.first_param: + self.body.append(', ') + else: + self.first_param = 0 + if not node.hasattr('noemph'): + self.body.append('') + def depart_desc_parameter(self, node): + if not node.hasattr('noemph'): + self.body.append('') + + def visit_desc_optional(self, node): 
+ self.body.append('[') + def depart_desc_optional(self, node): + self.body.append(']') + + def visit_desc_annotation(self, node): + self.body.append(self.starttag(node, 'em', CLASS='property')) + def depart_desc_annotation(self, node): + self.body.append('') + + def visit_desc_content(self, node): + self.body.append(self.starttag(node, 'dd', '')) + def depart_desc_content(self, node): + self.body.append('') + + def visit_refcount(self, node): + self.body.append(self.starttag(node, 'em', '', CLASS='refcount')) + def depart_refcount(self, node): + self.body.append('') + + def visit_versionmodified(self, node): + self.body.append(self.starttag(node, 'p')) + text = versionlabels[node['type']] % node['version'] + if len(node): + text += ': ' + else: + text += '.' + self.body.append('%s' % text) + def depart_versionmodified(self, node): + self.body.append('

    \n') + + # overwritten + def visit_reference(self, node): + BaseTranslator.visit_reference(self, node) + if node.hasattr('reftitle'): + # ugly hack to add a title attribute + starttag = self.body[-1] + if not starttag.startswith(' tag + self.section_level += 1 + self.body.append(self.starttag(node, 'div', CLASS='section')) + + def visit_title(self, node): + # don't move the id attribute inside the tag + BaseTranslator.visit_title(self, node, move_ids=0) + + # overwritten + def visit_literal_block(self, node): + if node.rawsource != node.astext(): + # most probably a parsed-literal block -- don't highlight + return BaseTranslator.visit_literal_block(self, node) + lang = self.highlightlang + linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1 + if node.has_key('language'): + # code-block directives + lang = node['language'] + if node.has_key('linenos'): + linenos = node['linenos'] + highlighted = self.highlighter.highlight_block(node.rawsource, lang, linenos) + starttag = self.starttag(node, 'div', suffix='', CLASS='highlight-%s' % lang) + self.body.append(starttag + highlighted + '\n') + raise nodes.SkipNode + + def visit_doctest_block(self, node): + self.visit_literal_block(node) + + # overwritten + def visit_literal(self, node): + if len(node.children) == 1 and \ + node.children[0] in ('None', 'True', 'False'): + node['classes'].append('xref') + self.body.append(self.starttag(node, 'tt', '', CLASS='docutils literal')) + self.protect_literal_text += 1 + def depart_literal(self, node): + self.protect_literal_text -= 1 + self.body.append('') + + def visit_productionlist(self, node): + self.body.append(self.starttag(node, 'pre')) + names = [] + for production in node: + names.append(production['tokenname']) + maxlen = max(len(name) for name in names) + for production in node: + if production['tokenname']: + lastname = production['tokenname'].ljust(maxlen) + self.body.append(self.starttag(production, 'strong', '')) + self.body.append(lastname 
+ ' ::= ') + else: + self.body.append('%s ' % (' '*len(lastname))) + production.walkabout(self) + self.body.append('\n') + self.body.append('\n') + raise nodes.SkipNode + def depart_productionlist(self, node): + pass + + def visit_production(self, node): + pass + def depart_production(self, node): + pass + + def visit_centered(self, node): + self.body.append(self.starttag(node, 'p', CLASS="centered") + '') + def depart_centered(self, node): + self.body.append('

    ') + + def visit_compact_paragraph(self, node): + pass + def depart_compact_paragraph(self, node): + pass + + def visit_highlightlang(self, node): + self.highlightlang = node['lang'] + self.highlightlinenothreshold = node['linenothreshold'] + def depart_highlightlang(self, node): + pass + + # overwritten + def visit_image(self, node): + olduri = node['uri'] + # rewrite the URI if the environment knows about it + if olduri in self.builder.images: + node['uri'] = posixpath.join(self.builder.imgpath, + self.builder.images[olduri]) + + if node.has_key('scale'): + if Image and not (node.has_key('width') + and node.has_key('height')): + try: + im = Image.open(os.path.join(self.builder.srcdir, + olduri)) + except (IOError, # Source image can't be found or opened + UnicodeError): # PIL doesn't like Unicode paths. + print olduri + pass + else: + if not node.has_key('width'): + node['width'] = str(im.size[0]) + if not node.has_key('height'): + node['height'] = str(im.size[1]) + del im + BaseTranslator.visit_image(self, node) + + def visit_toctree(self, node): + # this only happens when formatting a toc from env.tocs -- in this + # case we don't want to include the subtree + raise nodes.SkipNode + + def visit_index(self, node): + raise nodes.SkipNode + + def visit_tabular_col_spec(self, node): + raise nodes.SkipNode + + def visit_glossary(self, node): + pass + def depart_glossary(self, node): + pass + + def visit_acks(self, node): + pass + def depart_acks(self, node): + pass + + def visit_module(self, node): + pass + def depart_module(self, node): + pass + + def bulk_text_processor(self, text): + return text + + # overwritten + def visit_Text(self, node): + text = node.astext() + encoded = self.encode(text) + if self.protect_literal_text: + # moved here from base class's visit_literal to support + # more formatting in literal nodes + for token in self.words_and_spaces.findall(encoded): + if token.strip(): + # protect literal text from line wrapping + self.body.append('%s' 
% token) + elif token in ' \n': + # allow breaks at whitespace + self.body.append(token) + else: + # protect runs of multiple spaces; the last one can wrap + self.body.append(' ' * (len(token)-1) + ' ') + else: + if self.in_mailto and self.settings.cloak_email_addresses: + encoded = self.cloak_email(encoded) + else: + encoded = self.bulk_text_processor(encoded) + self.body.append(encoded) + + # these are all for docutils 0.5 compatibility + + def visit_note(self, node): + self.visit_admonition(node, 'note') + def depart_note(self, node): + self.depart_admonition(node) + + def visit_warning(self, node): + self.visit_admonition(node, 'warning') + def depart_warning(self, node): + self.depart_admonition(node) + + def visit_attention(self, node): + self.visit_admonition(node, 'attention') + + def depart_attention(self, node): + self.depart_admonition() + + def visit_caution(self, node): + self.visit_admonition(node, 'caution') + def depart_caution(self, node): + self.depart_admonition() + + def visit_danger(self, node): + self.visit_admonition(node, 'danger') + def depart_danger(self, node): + self.depart_admonition() + + def visit_error(self, node): + self.visit_admonition(node, 'error') + def depart_error(self, node): + self.depart_admonition() + + def visit_hint(self, node): + self.visit_admonition(node, 'hint') + def depart_hint(self, node): + self.depart_admonition() + + def visit_important(self, node): + self.visit_admonition(node, 'important') + def depart_important(self, node): + self.depart_admonition() + + def visit_tip(self, node): + self.visit_admonition(node, 'tip') + def depart_tip(self, node): + self.depart_admonition() + + # these are only handled specially in the SmartyPantsHTMLTranslator + def visit_literal_emphasis(self, node): + return self.visit_emphasis(node) + def depart_literal_emphasis(self, node): + return self.depart_emphasis(node) + + def depart_title(self, node): + close_tag = self.context[-1] + if self.builder.add_header_links and \ + 
(close_tag.startswith('\u00B6
    ' % + _('Permalink to this headline')) + BaseTranslator.depart_title(self, node) + + def unknown_visit(self, node): + raise NotImplementedError('Unknown node: ' + node.__class__.__name__) + + +class SmartyPantsHTMLTranslator(HTMLTranslator): + """ + Handle ordinary text via smartypants, converting quotes and dashes + to the correct entities. + """ + + def __init__(self, *args, **kwds): + self.no_smarty = 0 + HTMLTranslator.__init__(self, *args, **kwds) + + def visit_literal(self, node): + self.no_smarty += 1 + try: + # this raises SkipNode + HTMLTranslator.visit_literal(self, node) + finally: + self.no_smarty -= 1 + + def visit_literal_emphasis(self, node): + self.no_smarty += 1 + self.visit_emphasis(node) + + def depart_literal_emphasis(self, node): + self.depart_emphasis(node) + self.no_smarty -= 1 + + def visit_desc_signature(self, node): + self.no_smarty += 1 + HTMLTranslator.visit_desc_signature(self, node) + + def depart_desc_signature(self, node): + self.no_smarty -= 1 + HTMLTranslator.depart_desc_signature(self, node) + + def visit_productionlist(self, node): + self.no_smarty += 1 + try: + HTMLTranslator.visit_productionlist(self, node) + finally: + self.no_smarty -= 1 + + def visit_option(self, node): + self.no_smarty += 1 + HTMLTranslator.visit_option(self, node) + def depart_option(self, node): + self.no_smarty -= 1 + HTMLTranslator.depart_option(self, node) + + def bulk_text_processor(self, text): + if self.no_smarty <= 0: + return sphinx_smarty_pants(text) + return text diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py new file mode 100644 index 00000000..94cb23db --- /dev/null +++ b/sphinx/writers/latex.py @@ -0,0 +1,1185 @@ +# -*- coding: utf-8 -*- +""" + sphinx.latexwriter + ~~~~~~~~~~~~~~~~~~ + + Custom docutils writer for LaTeX. + + Much of this code is adapted from Dave Kuhlman's "docpy" writer from his + docutils sandbox. + + :copyright: 2007-2008 by Georg Brandl, Dave Kuhlman. + :license: BSD. 
+""" + +import re +import sys +from os import path + +from docutils import nodes, writers +from docutils.writers.latex2e import Babel + +from sphinx import addnodes +from sphinx import highlighting +from sphinx.locale import admonitionlabels, versionlabels +from sphinx.util import ustrftime +from sphinx.util.texescape import tex_escape_map +from sphinx.util.smartypants import educateQuotesLatex + +HEADER = r'''%% Generated by Sphinx. +\documentclass[%(papersize)s,%(pointsize)s%(classoptions)s]{%(docclass)s} +%(inputenc)s +%(fontenc)s +%(babel)s +%(fontpkg)s +%(fncychap)s +\usepackage{sphinx} +%(preamble)s + +\title{%(title)s} +\date{%(date)s} +\release{%(release)s} +\author{%(author)s} +\newcommand{\sphinxlogo}{%(logo)s} +\renewcommand{\releasename}{%(releasename)s} +%(makeindex)s +%(makemodindex)s +''' + +BEGIN_DOC = r''' +\begin{document} +%(shorthandoff)s +%(maketitle)s +%(tableofcontents)s +''' + +FOOTER = r''' +%(footer)s +\renewcommand{\indexname}{%(modindexname)s} +%(printmodindex)s +\renewcommand{\indexname}{%(indexname)s} +%(printindex)s +\end{document} +''' + + +class LaTeXWriter(writers.Writer): + + supported = ('sphinxlatex',) + + settings_spec = ('LaTeX writer options', '', ( + ('Document name', ['--docname'], {'default': ''}), + ('Document class', ['--docclass'], {'default': 'manual'}), + ('Author', ['--author'], {'default': ''}), + )) + settings_defaults = {} + + output = None + + def __init__(self, builder): + writers.Writer.__init__(self) + self.builder = builder + + def translate(self): + visitor = LaTeXTranslator(self.document, self.builder) + self.document.walkabout(visitor) + self.output = visitor.astext() + + +# Helper classes + +class ExtBabel(Babel): + def get_shorthandoff(self): + shortlang = self.language.split('_')[0] + if shortlang in ('de', 'sl', 'pt', 'es', 'nl', 'pl'): + return '\\shorthandoff{"}' + return '' + + _ISO639_TO_BABEL = Babel._ISO639_TO_BABEL.copy() + _ISO639_TO_BABEL['sl'] = 'slovene' + + +class Table(object): + def 
__init__(self): + self.col = 0 + self.colcount = 0 + self.colspec = None + self.had_head = False + self.has_verbatim = False + self.caption = None + + +class Desc(object): + def __init__(self, node): + self.env = LaTeXTranslator.desc_map.get(node['desctype'], 'describe') + self.type = self.cls = self.name = self.params = self.annotation = '' + self.count = 0 + + +class LaTeXTranslator(nodes.NodeVisitor): + sectionnames = ["part", "chapter", "section", "subsection", + "subsubsection", "paragraph", "subparagraph"] + + ignore_missing_images = False + + default_elements = { + 'docclass': 'manual', + 'papersize': 'letterpaper', + 'pointsize': '10pt', + 'classoptions': '', + 'inputenc': '\\usepackage[utf8]{inputenc}', + 'fontenc': '\\usepackage[T1]{fontenc}', + 'babel': '\\usepackage{babel}', + 'fontpkg': '\\usepackage{times}', + 'fncychap': '\\usepackage[Bjarne]{fncychap}', + 'preamble': '', + 'title': '', + 'date': '', + 'release': '', + 'author': '', + 'logo': '', + 'releasename': 'Release', + 'makeindex': '\\makeindex', + 'makemodindex': '\\makemodindex', + 'shorthandoff': '', + 'maketitle': '\\maketitle', + 'tableofcontents': '\\tableofcontents', + 'footer': '', + 'printmodindex': '\\printmodindex', + 'printindex': '\\printindex', + } + + def __init__(self, document, builder): + nodes.NodeVisitor.__init__(self, document) + self.builder = builder + self.body = [] + + # sort out some elements + papersize = builder.config.latex_paper_size + 'paper' + if papersize == 'paper': # e.g. 
command line "-D latex_paper_size=" + papersize = 'letterpaper' + + self.elements = self.default_elements.copy() + self.elements.update({ + 'docclass': document.settings.docclass, + 'papersize': papersize, + 'pointsize': builder.config.latex_font_size, + # if empty, the title is set to the first section title + 'title': document.settings.title, + 'date': ustrftime(builder.config.today_fmt or _('%B %d, %Y')), + 'release': builder.config.release, + 'author': document.settings.author, + 'releasename': _('Release'), + 'preamble': builder.config.latex_preamble, + 'modindexname': _('Module Index'), + 'indexname': _('Index'), + }) + if builder.config.latex_logo: + self.elements['logo'] = '\\includegraphics{%s}\\par' % \ + path.basename(builder.config.latex_logo) + if builder.config.language: + babel = ExtBabel(builder.config.language) + lang = babel.get_language() + if lang: + self.elements['classoptions'] += ',' + babel.get_language() + else: + self.builder.warn('no Babel option known for language %r' % + builder.config.language) + self.elements['shorthandoff'] = babel.get_shorthandoff() + self.elements['fncychap'] = '\\usepackage[Sonny]{fncychap}' + else: + self.elements['classoptions'] += ',english' + if not builder.config.latex_use_modindex: + self.elements['makemodindex'] = '' + self.elements['printmodindex'] = '' + # allow the user to override them all + self.elements.update(builder.config.latex_elements) + + self.highlighter = highlighting.PygmentsBridge( + 'latex', builder.config.pygments_style) + self.context = [] + self.descstack = [] + self.bibitems = [] + self.table = None + self.next_table_colspec = None + self.highlightlang = builder.config.highlight_language + self.highlightlinenothreshold = sys.maxint + self.written_ids = set() + self.footnotestack = [] + if self.elements['docclass'] == 'manual': + if builder.config.latex_use_parts: + self.top_sectionlevel = 0 + else: + self.top_sectionlevel = 1 + else: + self.top_sectionlevel = 2 + 
self.next_section_target = None + # flags + self.verbatim = None + self.in_title = 0 + self.in_production_list = 0 + self.first_document = 1 + self.this_is_the_title = 1 + self.literal_whitespace = 0 + self.no_contractions = 0 + + def astext(self): + return (HEADER % self.elements + self.highlighter.get_stylesheet() + + u''.join(self.body) + FOOTER % self.elements) + + def visit_document(self, node): + self.footnotestack.append(self.collect_footnotes(node)) + if self.first_document == 1: + # the first document is all the regular content ... + self.body.append(BEGIN_DOC % self.elements) + self.first_document = 0 + elif self.first_document == 0: + # ... and all others are the appendices + self.body.append('\n\\appendix\n') + self.first_document = -1 + # "- 1" because the level is increased before the title is visited + self.sectionlevel = self.top_sectionlevel - 1 + def depart_document(self, node): + if self.bibitems: + widest_label = "" + for bi in self.bibitems: + if len(widest_label) < len(bi[0]): + widest_label = bi[0] + self.body.append('\n\\begin{thebibliography}{%s}\n' % widest_label) + for bi in self.bibitems: + # cite_key: underscores must not be escaped + cite_key = bi[0].replace(r"\_", "_") + self.body.append('\\bibitem[%s]{%s}{%s}\n' % (bi[0], cite_key, bi[1])) + self.body.append('\\end{thebibliography}\n') + self.bibitems = [] + + def visit_start_of_file(self, node): + # This marks the begin of a new file; therefore the current module and + # class must be reset + self.body.append('\n\\resetcurrentobjects\n') + # and also, new footnotes + self.footnotestack.append(self.collect_footnotes(node)) + + def collect_footnotes(self, node): + fnotes = {} + def footnotes_under(n): + if isinstance(n, nodes.footnote): + yield n + else: + for c in n.children: + if isinstance(c, addnodes.start_of_file): + continue + for k in footnotes_under(c): + yield k + for fn in footnotes_under(node): + num = fn.children[0].astext().strip() + fnotes[num] = fn + 
fn.parent.remove(fn) + return fnotes + + def depart_start_of_file(self, node): + self.footnotestack.pop() + + def visit_highlightlang(self, node): + self.highlightlang = node['lang'] + self.highlightlinenothreshold = node['linenothreshold'] + raise nodes.SkipNode + + def visit_section(self, node): + if not self.this_is_the_title: + self.sectionlevel += 1 + self.body.append('\n\n') + if self.next_section_target: + self.body.append(r'\hypertarget{%s}{}' % self.next_section_target) + self.next_section_target = None + #if node.get('ids'): + # for id in node['ids']: + # if id not in self.written_ids: + # self.body.append(r'\hypertarget{%s}{}' % id) + # self.written_ids.add(id) + def depart_section(self, node): + self.sectionlevel = max(self.sectionlevel - 1, self.top_sectionlevel - 1) + + def visit_problematic(self, node): + self.body.append(r'{\color{red}\bfseries{}') + def depart_problematic(self, node): + self.body.append('}') + + def visit_topic(self, node): + self.body.append('\\setbox0\\vbox{\n' + '\\begin{minipage}{0.95\\textwidth}\n') + def depart_topic(self, node): + self.body.append('\\end{minipage}}\n' + '\\begin{center}\\setlength{\\fboxsep}{5pt}' + '\\shadowbox{\\box0}\\end{center}\n') + visit_sidebar = visit_topic + depart_sidebar = depart_topic + + def visit_glossary(self, node): + pass + def depart_glossary(self, node): + pass + + def visit_productionlist(self, node): + self.body.append('\n\n\\begin{productionlist}\n') + self.in_production_list = 1 + def depart_productionlist(self, node): + self.body.append('\\end{productionlist}\n\n') + self.in_production_list = 0 + + def visit_production(self, node): + if node['tokenname']: + self.body.append('\\production{%s}{' % self.encode(node['tokenname'])) + else: + self.body.append('\\productioncont{') + def depart_production(self, node): + self.body.append('}\n') + + def visit_transition(self, node): + self.body.append('\n\n\\bigskip\\hrule{}\\bigskip\n\n') + def depart_transition(self, node): + pass + + def 
visit_title(self, node): + parent = node.parent + if isinstance(parent, addnodes.seealso): + # the environment already handles this + raise nodes.SkipNode + elif self.this_is_the_title: + if len(node.children) != 1 and not isinstance(node.children[0], nodes.Text): + self.builder.warn('document title is not a single Text node') + if not self.elements['title']: + # text needs to be escaped since it is inserted into + # the output literally + self.elements['title'] = node.astext().translate(tex_escape_map) + self.this_is_the_title = 0 + raise nodes.SkipNode + elif isinstance(parent, nodes.section): + try: + self.body.append(r'\%s{' % self.sectionnames[self.sectionlevel]) + except IndexError: + from sphinx.application import SphinxError + raise SphinxError('too many nesting section levels for LaTeX, ' + 'at heading: %s' % node.astext()) + self.context.append('}\n') + elif isinstance(parent, (nodes.topic, nodes.sidebar)): + self.body.append(r'\textbf{') + self.context.append('}\n\n\medskip\n\n') + elif isinstance(parent, nodes.Admonition): + self.body.append('{') + self.context.append('}\n') + elif isinstance(parent, nodes.table): + self.table.caption = self.encode(node.astext()) + raise nodes.SkipNode + else: + self.builder.warn('encountered title node not in section, topic, ' + 'table, admonition or sidebar') + self.body.append('\\textbf{') + self.context.append('}\n') + self.in_title = 1 + def depart_title(self, node): + self.in_title = 0 + self.body.append(self.context.pop()) + + def visit_subtitle(self, node): + if isinstance(node.parent, nodes.sidebar): + self.body.append('~\\\\\n\\textbf{') + self.context.append('}\n\\smallskip\n') + else: + self.context.append('') + def depart_subtitle(self, node): + self.body.append(self.context.pop()) + + desc_map = { + 'function' : 'funcdesc', + 'class': 'classdesc', + 'method': 'methoddesc', + 'staticmethod': 'staticmethoddesc', + 'exception': 'excdesc', + 'data': 'datadesc', + 'attribute': 'memberdesc', + 'opcode': 
'opcodedesc', + + 'cfunction': 'cfuncdesc', + 'cmember': 'cmemberdesc', + 'cmacro': 'csimplemacrodesc', + 'ctype': 'ctypedesc', + 'cvar': 'cvardesc', + + 'describe': 'describe', + # and all others are 'describe' too + } + + def visit_desc(self, node): + self.descstack.append(Desc(node)) + def depart_desc(self, node): + d = self.descstack.pop() + self.body.append("\\end{%s}\n" % d.env) + + def visit_desc_signature(self, node): + d = self.descstack[-1] + # reset these for every signature + d.type = d.cls = d.name = d.params = '' + def depart_desc_signature(self, node): + d = self.descstack[-1] + d.cls = d.cls.rstrip('.') + if node.parent['desctype'] != 'describe' and node['ids']: + hyper = '\\hypertarget{%s}{}' % node['ids'][0] + else: + hyper = '' + if d.count == 0: + t1 = "\n\n%s\\begin{%s}" % (hyper, d.env) + else: + t1 = "\n%s\\%sline" % (hyper, d.env[:-4]) + d.count += 1 + if d.env in ('funcdesc', 'classdesc', 'excclassdesc'): + t2 = "{%s}{%s}" % (d.name, d.params) + elif d.env in ('datadesc', 'excdesc', 'csimplemacrodesc'): + t2 = "{%s}" % (d.name) + elif d.env in ('methoddesc', 'staticmethoddesc'): + if d.cls: + t2 = "[%s]{%s}{%s}" % (d.cls, d.name, d.params) + else: + t2 = "{%s}{%s}" % (d.name, d.params) + elif d.env == 'memberdesc': + if d.cls: + t2 = "[%s]{%s}" % (d.cls, d.name) + else: + t2 = "{%s}" % d.name + elif d.env == 'cfuncdesc': + if d.cls: + # C++ class names + d.name = '%s::%s' % (d.cls, d.name) + t2 = "{%s}{%s}{%s}" % (d.type, d.name, d.params) + elif d.env == 'cmemberdesc': + try: + type, container = d.type.rsplit(' ', 1) + container = container.rstrip('.') + except ValueError: + container = '' + type = d.type + t2 = "{%s}{%s}{%s}" % (container, type, d.name) + elif d.env == 'cvardesc': + t2 = "{%s}{%s}" % (d.type, d.name) + elif d.env == 'ctypedesc': + t2 = "{%s}" % (d.name) + elif d.env == 'opcodedesc': + t2 = "{%s}{%s}" % (d.name, d.params) + elif d.env == 'describe': + t2 = "{%s}" % d.name + self.body.append(t1 + t2) + + def 
visit_desc_type(self, node): + d = self.descstack[-1] + if d.env == 'describe': + d.name += self.encode(node.astext()) + else: + self.descstack[-1].type = self.encode(node.astext().strip()) + raise nodes.SkipNode + + def visit_desc_name(self, node): + d = self.descstack[-1] + if d.env == 'describe': + d.name += self.encode(node.astext()) + else: + self.descstack[-1].name = self.encode(node.astext().strip()) + raise nodes.SkipNode + + def visit_desc_addname(self, node): + d = self.descstack[-1] + if d.env == 'describe': + d.name += self.encode(node.astext()) + else: + self.descstack[-1].cls = self.encode(node.astext().strip()) + raise nodes.SkipNode + + def visit_desc_parameterlist(self, node): + d = self.descstack[-1] + if d.env == 'describe': + d.name += self.encode(node.astext()) + else: + self.descstack[-1].params = self.encode(node.astext().strip()) + raise nodes.SkipNode + + def visit_desc_annotation(self, node): + d = self.descstack[-1] + if d.env == 'describe': + d.name += self.encode(node.astext()) + else: + self.descstack[-1].annotation = self.encode(node.astext().strip()) + raise nodes.SkipNode + + def visit_refcount(self, node): + self.body.append("\\emph{") + def depart_refcount(self, node): + self.body.append("}\\\\") + + def visit_desc_content(self, node): + if node.children and not isinstance(node.children[0], nodes.paragraph): + # avoid empty desc environment which causes a formatting bug + self.body.append('~') + def depart_desc_content(self, node): + pass + + def visit_seealso(self, node): + self.body.append("\n\n\\strong{%s:}\n\n" % admonitionlabels['seealso']) + def depart_seealso(self, node): + self.body.append("\n\n") + + def visit_rubric(self, node): + if len(node.children) == 1 and node.children[0].astext() == 'Footnotes': + raise nodes.SkipNode + self.body.append('\\paragraph{') + self.context.append('}\n') + def depart_rubric(self, node): + self.body.append(self.context.pop()) + + def visit_footnote(self, node): + pass + def 
depart_footnote(self, node): + pass + + def visit_label(self, node): + if isinstance(node.parent, nodes.citation): + self.bibitems[-1][0] = node.astext() + raise nodes.SkipNode + + def visit_tabular_col_spec(self, node): + self.next_table_colspec = node['spec'] + raise nodes.SkipNode + + def visit_table(self, node): + if self.table: + raise NotImplementedError('Nested tables are not supported.') + self.table = Table() + self.tablebody = [] + # Redirect body output until table is finished. + self._body = self.body + self.body = self.tablebody + def depart_table(self, node): + self.body = self._body + if self.table.caption is not None: + self.body.append('\n\\begin{threeparttable}\n' + '\\caption{%s}\n' % self.table.caption) + if self.table.has_verbatim: + self.body.append('\n\\begin{tabular}') + else: + self.body.append('\n\\begin{tabulary}{\\textwidth}') + if self.table.colspec: + self.body.append(self.table.colspec) + else: + if self.table.has_verbatim: + colwidth = 0.95 / self.table.colcount + colspec = ('p{%.3f\\textwidth}|' % colwidth) * self.table.colcount + self.body.append('{|' + colspec + '}\n') + else: + self.body.append('{|' + ('L|' * self.table.colcount) + '}\n') + self.body.extend(self.tablebody) + if self.table.has_verbatim: + self.body.append('\\end{tabular}\n\n') + else: + self.body.append('\\end{tabulary}\n\n') + if self.table.caption is not None: + self.body.append('\\end{threeparttable}\n\n') + self.table = None + self.tablebody = None + + def visit_colspec(self, node): + self.table.colcount += 1 + def depart_colspec(self, node): + pass + + def visit_tgroup(self, node): + pass + def depart_tgroup(self, node): + pass + + def visit_thead(self, node): + if self.next_table_colspec: + self.table.colspec = '{%s}\n' % self.next_table_colspec + self.next_table_colspec = None + self.body.append('\\hline\n') + self.table.had_head = True + def depart_thead(self, node): + self.body.append('\\hline\n') + + def visit_tbody(self, node): + if not 
self.table.had_head: + self.visit_thead(node) + def depart_tbody(self, node): + self.body.append('\\hline\n') + + def visit_row(self, node): + self.table.col = 0 + def depart_row(self, node): + self.body.append('\\\\\n') + + def visit_entry(self, node): + if node.has_key('morerows') or node.has_key('morecols'): + raise NotImplementedError('Column or row spanning cells are ' + 'not implemented.') + if self.table.col > 0: + self.body.append(' & ') + self.table.col += 1 + if isinstance(node.parent.parent, nodes.thead): + self.body.append('\\textbf{') + self.context.append('}') + else: + self.context.append('') + def depart_entry(self, node): + self.body.append(self.context.pop()) # header + + def visit_acks(self, node): + # this is a list in the source, but should be rendered as a + # comma-separated list here + self.body.append('\n\n') + self.body.append(', '.join(n.astext() for n in node.children[0].children) + '.') + self.body.append('\n\n') + raise nodes.SkipNode + + def visit_bullet_list(self, node): + self.body.append('\\begin{itemize}\n' ) + def depart_bullet_list(self, node): + self.body.append('\\end{itemize}\n' ) + + def visit_enumerated_list(self, node): + self.body.append('\\begin{enumerate}\n' ) + def depart_enumerated_list(self, node): + self.body.append('\\end{enumerate}\n' ) + + def visit_list_item(self, node): + # Append "{}" in case the next character is "[", which would break + # LaTeX's list environment (no numbering and the "[" is not printed). 
+ self.body.append(r'\item {} ') + def depart_list_item(self, node): + self.body.append('\n') + + def visit_definition_list(self, node): + self.body.append('\\begin{description}\n') + def depart_definition_list(self, node): + self.body.append('\\end{description}\n') + + def visit_definition_list_item(self, node): + pass + def depart_definition_list_item(self, node): + pass + + def visit_term(self, node): + ctx = ']' + if node.has_key('ids') and node['ids']: + ctx += '\\hypertarget{%s}{}' % node['ids'][0] + self.body.append('\\item[') + self.context.append(ctx) + def depart_term(self, node): + self.body.append(self.context.pop()) + + def visit_classifier(self, node): + self.body.append('{[}') + def depart_classifier(self, node): + self.body.append('{]}') + + def visit_definition(self, node): + pass + def depart_definition(self, node): + self.body.append('\n') + + def visit_field_list(self, node): + self.body.append('\\begin{quote}\\begin{description}\n') + def depart_field_list(self, node): + self.body.append('\\end{description}\\end{quote}\n') + + def visit_field(self, node): + pass + def depart_field(self, node): + pass + + visit_field_name = visit_term + depart_field_name = depart_term + + visit_field_body = visit_definition + depart_field_body = depart_definition + + def visit_paragraph(self, node): + self.body.append('\n') + def depart_paragraph(self, node): + self.body.append('\n') + + def visit_centered(self, node): + self.body.append('\n\\begin{centering}') + def depart_centered(self, node): + self.body.append('\n\\end{centering}') + + def visit_module(self, node): + modname = node['modname'] + self.body.append('\n\\declaremodule[%s]{}{%s}' % (modname.replace('_', ''), + self.encode(modname))) + self.body.append('\n\\modulesynopsis{%s}' % self.encode(node['synopsis'])) + if node.has_key('platform'): + self.body.append('\\platform{%s}' % self.encode(node['platform'])) + def depart_module(self, node): + pass + + def latex_image_length(self, width_str): + match 
= re.match('(\d*\.?\d*)\s*(\S*)', width_str) + if not match: + # fallback + return width_str + res = width_str + amount, unit = match.groups()[:2] + if not unit or unit == "px": + # pixels: let LaTeX alone + return None + elif unit == "%": + res = "%.3f\\linewidth" % (float(amount) / 100.0) + return res + + def visit_image(self, node): + attrs = node.attributes + pre = [] # in reverse order + post = [] + include_graphics_options = [] + inline = isinstance(node.parent, nodes.TextElement) + if attrs.has_key('scale'): + # Could also be done with ``scale`` option to + # ``\includegraphics``; doing it this way for consistency. + pre.append('\\scalebox{%f}{' % (attrs['scale'] / 100.0,)) + post.append('}') + if attrs.has_key('width'): + w = self.latex_image_length(attrs['width']) + if w: + include_graphics_options.append('width=%s' % w) + if attrs.has_key('height'): + h = self.latex_image_length(attrs['height']) + include_graphics_options.append('height=%s' % h) + if attrs.has_key('align'): + align_prepost = { + # By default latex aligns the top of an image. + (1, 'top'): ('', ''), + (1, 'middle'): ('\\raisebox{-0.5\\height}{', '}'), + (1, 'bottom'): ('\\raisebox{-\\height}{', '}'), + (0, 'center'): ('{\\hfill', '\\hfill}'), + # These 2 don't exactly do the right thing. The image should + # be floated alongside the paragraph. See + # http://www.w3.org/TR/html4/struct/objects.html#adef-align-IMG + (0, 'left'): ('{', '\\hfill}'), + (0, 'right'): ('{\\hfill', '}'),} + try: + pre.append(align_prepost[inline, attrs['align']][0]) + post.append(align_prepost[inline, attrs['align']][1]) + except KeyError: + pass # XXX complain here? + if not inline: + pre.append('\n') + post.append('\n') + pre.reverse() + if node['uri'] in self.builder.images: + uri = self.builder.images[node['uri']] + else: + # missing image! 
+ if self.ignore_missing_images: + return + uri = node['uri'] + if uri.find('://') != -1: + # ignore remote images + return + self.body.extend(pre) + options = '' + if include_graphics_options: + options = '[%s]' % ','.join(include_graphics_options) + self.body.append('\\includegraphics%s{%s}' % (options, uri)) + self.body.extend(post) + def depart_image(self, node): + pass + + def visit_figure(self, node): + if (not node.attributes.has_key('align') or + node.attributes['align'] == 'center'): + # centering does not add vertical space like center. + align = '\n\\centering' + align_end = '' + else: + # TODO non vertical space for other alignments. + align = '\\begin{flush%s}' % node.attributes['align'] + align_end = '\\end{flush%s}' % node.attributes['align'] + self.body.append('\\begin{figure}[htbp]%s\n' % align) + self.context.append('%s\\end{figure}\n' % align_end) + def depart_figure(self, node): + self.body.append(self.context.pop()) + + def visit_caption(self, node): + self.body.append('\\caption{') + def depart_caption(self, node): + self.body.append('}') + + def visit_legend(self, node): + self.body.append('{\\small ') + def depart_legend(self, node): + self.body.append('}') + + def visit_admonition(self, node): + self.body.append('\n\\begin{notice}{note}') + def depart_admonition(self, node): + self.body.append('\\end{notice}\n') + + def _make_visit_admonition(name): + def visit_admonition(self, node): + self.body.append('\n\\begin{notice}{%s}{%s:}' % + (name, admonitionlabels[name])) + return visit_admonition + def _depart_named_admonition(self, node): + self.body.append('\\end{notice}\n') + + visit_attention = _make_visit_admonition('attention') + depart_attention = _depart_named_admonition + visit_caution = _make_visit_admonition('caution') + depart_caution = _depart_named_admonition + visit_danger = _make_visit_admonition('danger') + depart_danger = _depart_named_admonition + visit_error = _make_visit_admonition('error') + depart_error = 
_depart_named_admonition + visit_hint = _make_visit_admonition('hint') + depart_hint = _depart_named_admonition + visit_important = _make_visit_admonition('important') + depart_important = _depart_named_admonition + visit_note = _make_visit_admonition('note') + depart_note = _depart_named_admonition + visit_tip = _make_visit_admonition('tip') + depart_tip = _depart_named_admonition + visit_warning = _make_visit_admonition('warning') + depart_warning = _depart_named_admonition + + def visit_versionmodified(self, node): + intro = versionlabels[node['type']] % node['version'] + if node.children: + intro += ': ' + else: + intro += '.' + self.body.append(intro) + def depart_versionmodified(self, node): + pass + + def visit_target(self, node): + def add_target(id): + # indexing uses standard LaTeX index markup, so the targets + # will be generated differently + if not id.startswith('index-'): + self.body.append(r'\hypertarget{%s}{}' % id) + + if node.has_key('refid') and node['refid'] not in self.written_ids: + parindex = node.parent.index(node) + try: + next = node.parent[parindex+1] + if isinstance(next, nodes.section): + self.next_section_target = node['refid'] + return + except IndexError: + pass + add_target(node['refid']) + self.written_ids.add(node['refid']) + def depart_target(self, node): + pass + + def visit_attribution(self, node): + self.body.append('\n\\begin{flushright}\n') + self.body.append('---') + def depart_attribution(self, node): + self.body.append('\n\\end{flushright}\n') + + def visit_index(self, node, scre=re.compile(r';\s*')): + entries = node['entries'] + for type, string, tid, _ in entries: + if type == 'single': + self.body.append(r'\index{%s}' % scre.sub('!', self.encode(string))) + elif type == 'pair': + parts = tuple(self.encode(x.strip()) for x in string.split(';', 1)) + self.body.append(r'\indexii{%s}{%s}' % parts) + elif type == 'triple': + parts = tuple(self.encode(x.strip()) for x in string.split(';', 2)) + 
self.body.append(r'\indexiii{%s}{%s}{%s}' % parts) + else: + self.builder.warn('unknown index entry type %s found' % type) + raise nodes.SkipNode + + def visit_raw(self, node): + if 'latex' in node.get('format', '').split(): + self.body.append(node.astext()) + raise nodes.SkipNode + + def visit_reference(self, node): + uri = node.get('refuri', '') + if self.in_title or not uri: + self.context.append('') + elif uri.startswith('mailto:') or uri.startswith('http:') or \ + uri.startswith('https:') or uri.startswith('ftp:'): + self.body.append('\\href{%s}{' % self.encode(uri)) + self.context.append('}') + elif uri.startswith('#'): + self.body.append('\\hyperlink{%s}{' % uri[1:]) + self.context.append('}') + elif uri.startswith('@token'): + if self.in_production_list: + self.body.append('\\token{') + else: + self.body.append('\\grammartoken{') + self.context.append('}') + else: + self.builder.warn('unusable reference target found: %s' % uri) + self.context.append('') + def depart_reference(self, node): + self.body.append(self.context.pop()) + + def visit_pending_xref(self, node): + pass + def depart_pending_xref(self, node): + pass + + def visit_emphasis(self, node): + self.body.append(r'\emph{') + def depart_emphasis(self, node): + self.body.append('}') + + def visit_literal_emphasis(self, node): + self.body.append(r'\emph{\texttt{') + self.no_contractions += 1 + def depart_literal_emphasis(self, node): + self.body.append('}}') + self.no_contractions -= 1 + + def visit_strong(self, node): + self.body.append(r'\textbf{') + def depart_strong(self, node): + self.body.append('}') + + def visit_title_reference(self, node): + self.body.append(r'\emph{') + def depart_title_reference(self, node): + self.body.append('}') + + def visit_citation(self, node): + # TODO maybe use cite bibitems + self.bibitems.append(['', '']) + self.context.append(len(self.body)) + def depart_citation(self, node): + size = self.context.pop() + text = ''.join(self.body[size:]) + del self.body[size:] + 
self.bibitems[-1][1] = text + + def visit_citation_reference(self, node): + citeid = node.astext() + self.body.append('\\cite{%s}' % citeid) + raise nodes.SkipNode + + def visit_literal(self, node): + content = self.encode(node.astext().strip()) + if self.in_title: + self.body.append(r'\texttt{%s}' % content) + elif node.has_key('role') and node['role'] == 'samp': + self.body.append(r'\samp{%s}' % content) + else: + self.body.append(r'\code{%s}' % content) + raise nodes.SkipNode + + def visit_footnote_reference(self, node): + num = node.astext().strip() + try: + fn = self.footnotestack[-1][num] + except (KeyError, IndexError): + raise nodes.SkipNode + self.body.append('\\footnote{') + fn.walkabout(self) + raise nodes.SkipChildren + def depart_footnote_reference(self, node): + self.body.append('}') + + def visit_literal_block(self, node): + self.verbatim = '' + def depart_literal_block(self, node): + code = self.verbatim.rstrip('\n') + lang = self.highlightlang + linenos = code.count('\n') >= self.highlightlinenothreshold - 1 + if node.has_key('language'): + # code-block directives + lang = node['language'] + if node.has_key('linenos'): + linenos = node['linenos'] + hlcode = self.highlighter.highlight_block(code, lang, linenos) + # workaround for Unicode issue + hlcode = hlcode.replace(u'€', u'@texteuro[]') + # must use original Verbatim environment and "tabular" environment + if self.table: + hlcode = hlcode.replace('\\begin{Verbatim}', + '\\begin{OriginalVerbatim}') + self.table.has_verbatim = True + # get consistent trailer + hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim} + hlcode = hlcode.rstrip() + '\n' + self.body.append('\n' + hlcode + '\\end{%sVerbatim}\n' % + (self.table and 'Original' or '')) + self.verbatim = None + visit_doctest_block = visit_literal_block + depart_doctest_block = depart_literal_block + + def visit_line_block(self, node): + """line-block: + * whitespace (including linebreaks) is significant + * inline markup is supported. 
+ * serif typeface + """ + self.body.append('{\\raggedright{}') + self.literal_whitespace = 1 + def depart_line_block(self, node): + self.literal_whitespace = 0 + # remove the last \\ + del self.body[-1] + self.body.append('}\n') + + def visit_line(self, node): + self._line_start = len(self.body) + def depart_line(self, node): + if self._line_start == len(self.body): + # no output in this line -- add a nonbreaking space, else the + # \\ command will give an error + self.body.append('~') + if self.table is not None: + self.body.append('\\newline\n') + else: + self.body.append('\\\\\n') + + def visit_block_quote(self, node): + # If the block quote contains a single object and that object + # is a list, then generate a list not a block quote. + # This lets us indent lists. + done = 0 + if len(node.children) == 1: + child = node.children[0] + if isinstance(child, nodes.bullet_list) or \ + isinstance(child, nodes.enumerated_list): + done = 1 + if not done: + self.body.append('\\begin{quote}\n') + def depart_block_quote(self, node): + done = 0 + if len(node.children) == 1: + child = node.children[0] + if isinstance(child, nodes.bullet_list) or \ + isinstance(child, nodes.enumerated_list): + done = 1 + if not done: + self.body.append('\\end{quote}\n') + + # option node handling copied from docutils' latex writer + + def visit_option(self, node): + if self.context[-1]: + # this is not the first option + self.body.append(', ') + def depart_option(self, node): + # flag that the first option is done. 
self.context[-1] += 1 + + def visit_option_argument(self, node): + """The delimiter between an option and its argument.""" + self.body.append(node.get('delimiter', ' ')) + def depart_option_argument(self, node): + pass + + def visit_option_group(self, node): + self.body.append('\\item [') + # flag for first option + self.context.append(0) + def depart_option_group(self, node): + self.context.pop() # the flag + self.body.append('] ') + + def visit_option_list(self, node): + self.body.append('\\begin{optionlist}{3cm}\n') + def depart_option_list(self, node): + self.body.append('\\end{optionlist}\n') + + def visit_option_list_item(self, node): + pass + def depart_option_list_item(self, node): + pass + + def visit_option_string(self, node): + ostring = node.astext() + self.body.append(self.encode(ostring.replace('--', u'-{-}'))) + raise nodes.SkipNode + + def visit_description(self, node): + self.body.append( ' ' ) + def depart_description(self, node): + pass + + def visit_superscript(self, node): + self.body.append('$^{\\text{') + def depart_superscript(self, node): + self.body.append('}}$') + + def visit_subscript(self, node): + self.body.append('$_{\\text{') + def depart_subscript(self, node): + self.body.append('}}$') + + def visit_substitution_definition(self, node): + raise nodes.SkipNode + + def visit_substitution_reference(self, node): + raise nodes.SkipNode + + def visit_generated(self, node): + pass + def depart_generated(self, node): + pass + + def visit_compound(self, node): + pass + def depart_compound(self, node): + pass + + def visit_container(self, node): + pass + def depart_container(self, node): + pass + + def visit_decoration(self, node): + pass + def depart_decoration(self, node): + pass + + # text handling + + def encode(self, text): + text = unicode(text).translate(tex_escape_map) + if self.literal_whitespace: + # Insert a blank before the newline, to avoid + # ! LaTeX Error: There's no line here to end. 
+ text = text.replace(u'\n', u'~\\\\\n').replace(u' ', u'~') + if self.no_contractions: + text = text.replace('--', u'-{-}') + return text + + def visit_Text(self, node): + if self.verbatim is not None: + self.verbatim += node.astext() + else: + text = self.encode(node.astext()) + self.body.append(educateQuotesLatex(text)) + def depart_Text(self, node): + pass + + def visit_comment(self, node): + raise nodes.SkipNode + + def visit_meta(self, node): + # only valid for HTML + raise nodes.SkipNode + + def visit_system_message(self, node): + pass + def depart_system_message(self, node): + self.body.append('\n') + + def unknown_visit(self, node): + raise NotImplementedError('Unknown node: ' + node.__class__.__name__) diff --git a/sphinx/writers/text.py b/sphinx/writers/text.py new file mode 100644 index 00000000..4aa18039 --- /dev/null +++ b/sphinx/writers/text.py @@ -0,0 +1,679 @@ +# -*- coding: utf-8 -*- +""" + sphinx.textwriter + ~~~~~~~~~~~~~~~~~ + + Custom docutils writer for plain text. + + :copyright: 2008 by Georg Brandl. + :license: BSD. +""" + +import re +import textwrap + +from docutils import nodes, writers + +from sphinx import addnodes +from sphinx.locale import admonitionlabels, versionlabels + + +class TextWriter(writers.Writer): + supported = ('text',) + settings_spec = ('No options here.', '', ()) + settings_defaults = {} + + output = None + + def __init__(self, builder): + writers.Writer.__init__(self) + self.builder = builder + + def translate(self): + visitor = TextTranslator(self.document, self.builder) + self.document.walkabout(visitor) + self.output = visitor.body + +# monkey-patch... 
+new_wordsep_re = re.compile( + r'(\s+|' # any whitespace + r'(?<=\s)(?::[a-z-]+:)?`\S+|' # interpreted text start + r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words + r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash +textwrap.TextWrapper.wordsep_re = new_wordsep_re + +MAXWIDTH = 70 +STDINDENT = 3 + + +class TextTranslator(nodes.NodeVisitor): + sectionchars = '*=-~"+' + + def __init__(self, document, builder): + nodes.NodeVisitor.__init__(self, document) + + self.states = [[]] + self.stateindent = [0] + self.sectionlevel = 0 + self.table = None + + def add_text(self, text): + self.states[-1].append((-1, text)) + def new_state(self, indent=STDINDENT): + self.states.append([]) + self.stateindent.append(indent) + def end_state(self, wrap=True, end=[''], first=None): + content = self.states.pop() + maxindent = sum(self.stateindent) + indent = self.stateindent.pop() + result = [] + toformat = [] + def do_format(): + if not toformat: + return + if wrap: + res = textwrap.wrap(''.join(toformat), width=MAXWIDTH-maxindent) + else: + res = ''.join(toformat).splitlines() + if end: + res += end + result.append((indent, res)) + for itemindent, item in content: + if itemindent == -1: + toformat.append(item) + else: + do_format() + result.append((indent + itemindent, item)) + toformat = [] + do_format() + if first is not None and result: + itemindent, item = result[0] + if item: + result.insert(0, (itemindent - indent, [first + item[0]])) + result[1] = (itemindent, item[1:]) + self.states[-1].extend(result) + + def visit_document(self, node): + self.new_state(0) + def depart_document(self, node): + self.end_state() + self.body = '\n'.join(line and (' '*indent + line) + for indent, lines in self.states[0] + for line in lines) + # XXX header/footer? 
+ + def visit_highlightlang(self, node): + raise nodes.SkipNode + + def visit_section(self, node): + self._title_char = self.sectionchars[self.sectionlevel] + self.sectionlevel += 1 + def depart_section(self, node): + self.sectionlevel -= 1 + + def visit_topic(self, node): + self.new_state(0) + def depart_topic(self, node): + self.end_state() + + visit_sidebar = visit_topic + depart_sidebar = depart_topic + + def visit_rubric(self, node): + self.new_state(0) + self.add_text('-[ ') + def depart_rubric(self, node): + self.add_text(' ]-') + self.end_state() + + def visit_compound(self, node): + pass + def depart_compound(self, node): + pass + + def visit_glossary(self, node): + pass + def depart_glossary(self, node): + pass + + def visit_title(self, node): + if isinstance(node.parent, nodes.Admonition): + self.add_text(node.astext()+': ') + raise nodes.SkipNode + self.new_state(0) + def depart_title(self, node): + if isinstance(node.parent, nodes.section): + char = self._title_char + else: + char = '^' + text = ''.join(x[1] for x in self.states.pop() if x[0] == -1) + self.stateindent.pop() + self.states[-1].append((0, ['', text, '%s' % (char * len(text)), ''])) + + def visit_subtitle(self, node): + pass + def depart_subtitle(self, node): + pass + + def visit_attribution(self, node): + self.add_text('-- ') + def depart_attribution(self, node): + pass + + def visit_module(self, node): + if node.has_key('platform'): + self.new_state(0) + self.add_text(_('Platform: %s') % node['platform']) + self.end_state() + raise nodes.SkipNode + + def visit_desc(self, node): + pass + def depart_desc(self, node): + pass + + def visit_desc_signature(self, node): + self.new_state(0) + if node.parent['desctype'] in ('class', 'exception'): + self.add_text('%s ' % node.parent['desctype']) + def depart_desc_signature(self, node): + # XXX: wrap signatures in a way that makes sense + self.end_state(wrap=False, end=None) + + def visit_desc_name(self, node): + pass + def depart_desc_name(self, 
node): + pass + + def visit_desc_addname(self, node): + pass + def depart_desc_addname(self, node): + pass + + def visit_desc_type(self, node): + pass + def depart_desc_type(self, node): + pass + + def visit_desc_parameterlist(self, node): + self.add_text('(') + self.first_param = 1 + def depart_desc_parameterlist(self, node): + self.add_text(')') + + def visit_desc_parameter(self, node): + if not self.first_param: + self.add_text(', ') + else: + self.first_param = 0 + self.add_text(node.astext()) + raise nodes.SkipNode + + def visit_desc_optional(self, node): + self.add_text('[') + def depart_desc_optional(self, node): + self.add_text(']') + + def visit_desc_annotation(self, node): + pass + def depart_desc_annotation(self, node): + pass + + def visit_refcount(self, node): + pass + def depart_refcount(self, node): + pass + + def visit_desc_content(self, node): + self.new_state() + self.add_text('\n') + def depart_desc_content(self, node): + self.end_state() + + def visit_figure(self, node): + self.new_state() + def depart_figure(self, node): + self.end_state() + + def visit_caption(self, node): + pass + def depart_caption(self, node): + pass + + def visit_productionlist(self, node): + self.new_state() + names = [] + for production in node: + names.append(production['tokenname']) + maxlen = max(len(name) for name in names) + for production in node: + if production['tokenname']: + self.add_text(production['tokenname'].ljust(maxlen) + ' ::=') + lastname = production['tokenname'] + else: + self.add_text('%s ' % (' '*len(lastname))) + self.add_text(production.astext() + '\n') + self.end_state(wrap=False) + raise nodes.SkipNode + + def visit_seealso(self, node): + self.new_state() + def depart_seealso(self, node): + self.end_state(first='') + + def visit_footnote(self, node): + self._footnote = node.children[0].astext().strip() + self.new_state(len(self._footnote) + 3) + def depart_footnote(self, node): + self.end_state(first='[%s] ' % self._footnote) + + def 
visit_citation(self, node): + if len(node) and isinstance(node[0], nodes.label): + self._citlabel = node[0].astext() + else: + self._citlabel = '' + self.new_state(len(self._citlabel) + 3) + def depart_citation(self, node): + self.end_state(first='[%s] ' % self._citlabel) + + def visit_label(self, node): + raise nodes.SkipNode + + # XXX: option list could use some better styling + + def visit_option_list(self, node): + pass + def depart_option_list(self, node): + pass + + def visit_option_list_item(self, node): + self.new_state(0) + def depart_option_list_item(self, node): + self.end_state() + + def visit_option_group(self, node): + self._firstoption = True + def depart_option_group(self, node): + self.add_text(' ') + + def visit_option(self, node): + if self._firstoption: + self._firstoption = False + else: + self.add_text(', ') + def depart_option(self, node): + pass + + def visit_option_string(self, node): + pass + def depart_option_string(self, node): + pass + + def visit_option_argument(self, node): + self.add_text(node['delimiter']) + def depart_option_argument(self, node): + pass + + def visit_description(self, node): + pass + def depart_description(self, node): + pass + + def visit_tabular_col_spec(self, node): + raise nodes.SkipNode + + def visit_colspec(self, node): + self.table[0].append(node['colwidth']) + raise nodes.SkipNode + + def visit_tgroup(self, node): + pass + def depart_tgroup(self, node): + pass + + def visit_thead(self, node): + pass + def depart_thead(self, node): + pass + + def visit_tbody(self, node): + self.table.append('sep') + def depart_tbody(self, node): + pass + + def visit_row(self, node): + self.table.append([]) + def depart_row(self, node): + pass + + def visit_entry(self, node): + if node.has_key('morerows') or node.has_key('morecols'): + raise NotImplementedError('Column or row spanning cells are ' + 'not implemented.') + self.new_state(0) + def depart_entry(self, node): + text = '\n'.join('\n'.join(x[1]) for x in 
self.states.pop()) + self.stateindent.pop() + self.table[-1].append(text) + + def visit_table(self, node): + if self.table: + raise NotImplementedError('Nested tables are not supported.') + self.new_state(0) + self.table = [[]] + def depart_table(self, node): + lines = self.table[1:] + fmted_rows = [] + colwidths = self.table[0] + realwidths = colwidths[:] + separator = 0 + # don't allow paragraphs in table cells for now + for line in lines: + if line == 'sep': + separator = len(fmted_rows) + else: + cells = [] + for i, cell in enumerate(line): + par = textwrap.wrap(cell, width=colwidths[i]) + if par: + maxwidth = max(map(len, par)) + else: + maxwidth = 0 + realwidths[i] = max(realwidths[i], maxwidth) + cells.append(par) + fmted_rows.append(cells) + + def writesep(char='-'): + out = ['+'] + for width in realwidths: + out.append(char * (width+2)) + out.append('+') + self.add_text(''.join(out) + '\n') + + def writerow(row): + lines = map(None, *row) + for line in lines: + out = ['|'] + for i, cell in enumerate(line): + if cell: + out.append(' ' + cell.ljust(realwidths[i]+1)) + else: + out.append(' ' * (realwidths[i] + 2)) + out.append('|') + self.add_text(''.join(out) + '\n') + + for i, row in enumerate(fmted_rows): + if separator and i == separator: + writesep('=') + else: + writesep('-') + writerow(row) + writesep('-') + self.table = None + self.end_state(wrap=False) + + def visit_acks(self, node): + self.new_state(0) + self.add_text(', '.join(n.astext() for n in node.children[0].children) + '.') + self.end_state() + raise nodes.SkipNode + + def visit_image(self, node): + self.add_text(_('[image]')) + raise nodes.SkipNode + + def visit_transition(self, node): + indent = sum(self.stateindent) + self.new_state(0) + self.add_text('=' * (MAXWIDTH - indent)) + self.end_state() + raise nodes.SkipNode + + def visit_bullet_list(self, node): + self._list_counter = -1 + def depart_bullet_list(self, node): + pass + + def visit_enumerated_list(self, node): + self._list_counter 
= 0 + def depart_enumerated_list(self, node): + pass + + def visit_definition_list(self, node): + self._list_counter = -2 + def depart_definition_list(self, node): + pass + + def visit_list_item(self, node): + if self._list_counter == -1: + # bullet list + self.new_state(2) + elif self._list_counter == -2: + # definition list + pass + else: + # enumerated list + self._list_counter += 1 + self.new_state(len(str(self._list_counter)) + 2) + def depart_list_item(self, node): + if self._list_counter == -1: + self.end_state(first='* ', end=None) + elif self._list_counter == -2: + pass + else: + self.end_state(first='%s. ' % self._list_counter, end=None) + + def visit_definition_list_item(self, node): + self._li_has_classifier = len(node) >= 2 and \ + isinstance(node[1], nodes.classifier) + def depart_definition_list_item(self, node): + pass + + def visit_term(self, node): + self.new_state(0) + def depart_term(self, node): + if not self._li_has_classifier: + self.end_state(end=None) + + def visit_classifier(self, node): + self.add_text(' : ') + def depart_classifier(self, node): + self.end_state(end=None) + + def visit_definition(self, node): + self.new_state() + def depart_definition(self, node): + self.end_state() + + def visit_field_list(self, node): + pass + def depart_field_list(self, node): + pass + + def visit_field(self, node): + pass + def depart_field(self, node): + pass + + def visit_field_name(self, node): + self.new_state(0) + def depart_field_name(self, node): + self.add_text(':') + self.end_state(end=None) + + def visit_field_body(self, node): + self.new_state() + def depart_field_body(self, node): + self.end_state() + + def visit_centered(self, node): + pass + def depart_centered(self, node): + pass + + def visit_admonition(self, node): + self.new_state(0) + def depart_admonition(self, node): + self.end_state() + + def _visit_admonition(self, node): + self.new_state(2) + def _make_depart_admonition(name): + def depart_admonition(self, node): + 
self.end_state(first=admonitionlabels[name] + ': ') + return depart_admonition + + visit_attention = _visit_admonition + depart_attention = _make_depart_admonition('attention') + visit_caution = _visit_admonition + depart_caution = _make_depart_admonition('caution') + visit_danger = _visit_admonition + depart_danger = _make_depart_admonition('danger') + visit_error = _visit_admonition + depart_error = _make_depart_admonition('error') + visit_hint = _visit_admonition + depart_hint = _make_depart_admonition('hint') + visit_important = _visit_admonition + depart_important = _make_depart_admonition('important') + visit_note = _visit_admonition + depart_note = _make_depart_admonition('note') + visit_tip = _visit_admonition + depart_tip = _make_depart_admonition('tip') + visit_warning = _visit_admonition + depart_warning = _make_depart_admonition('warning') + + def visit_versionmodified(self, node): + self.new_state(0) + if node.children: + self.add_text(versionlabels[node['type']] % node['version'] + ': ') + else: + self.add_text(versionlabels[node['type']] % node['version'] + '.') + def depart_versionmodified(self, node): + self.end_state() + + def visit_literal_block(self, node): + self.new_state() + def depart_literal_block(self, node): + self.end_state(wrap=False) + + def visit_doctest_block(self, node): + self.new_state(0) + def depart_doctest_block(self, node): + self.end_state(wrap=False) + + def visit_line_block(self, node): + self.new_state(0) + def depart_line_block(self, node): + self.end_state(wrap=False) + + def visit_line(self, node): + pass + def depart_line(self, node): + pass + + def visit_block_quote(self, node): + self.new_state() + def depart_block_quote(self, node): + self.end_state() + + def visit_compact_paragraph(self, node): + pass + def depart_compact_paragraph(self, node): + pass + + def visit_paragraph(self, node): + if not isinstance(node.parent, nodes.Admonition) or \ + isinstance(node.parent, addnodes.seealso): + self.new_state(0) + def 
depart_paragraph(self, node): + if not isinstance(node.parent, nodes.Admonition) or \ + isinstance(node.parent, addnodes.seealso): + self.end_state() + + def visit_target(self, node): + raise nodes.SkipNode + + def visit_index(self, node): + raise nodes.SkipNode + + def visit_substitution_definition(self, node): + raise nodes.SkipNode + + def visit_pending_xref(self, node): + pass + def depart_pending_xref(self, node): + pass + + def visit_reference(self, node): + pass + def depart_reference(self, node): + pass + + def visit_emphasis(self, node): + self.add_text('*') + def depart_emphasis(self, node): + self.add_text('*') + + def visit_literal_emphasis(self, node): + self.add_text('*') + def depart_literal_emphasis(self, node): + self.add_text('*') + + def visit_strong(self, node): + self.add_text('**') + def depart_strong(self, node): + self.add_text('**') + + def visit_title_reference(self, node): + self.add_text('*') + def depart_title_reference(self, node): + self.add_text('*') + + def visit_literal(self, node): + self.add_text('``') + def depart_literal(self, node): + self.add_text('``') + + def visit_subscript(self, node): + self.add_text('_') + def depart_subscript(self, node): + pass + + def visit_superscript(self, node): + self.add_text('^') + def depart_superscript(self, node): + pass + + def visit_footnote_reference(self, node): + self.add_text('[%s]' % node.astext()) + raise nodes.SkipNode + + def visit_citation_reference(self, node): + self.add_text('[%s]' % node.astext()) + raise nodes.SkipNode + + def visit_Text(self, node): + self.add_text(node.astext()) + def depart_Text(self, node): + pass + + def visit_problematic(self, node): + self.add_text('>>') + def depart_problematic(self, node): + self.add_text('<<') + + def visit_system_message(self, node): + self.new_state(0) + self.add_text('' % node.astext()) + self.end_state() + raise nodes.SkipNode + + def visit_comment(self, node): + raise nodes.SkipNode + + def visit_meta(self, node): + # only 
valid for HTML + raise nodes.SkipNode + + def unknown_visit(self, node): + raise NotImplementedError('Unknown node: ' + node.__class__.__name__) -- cgit v1.2.1 From bb0febdac28daeed9e1fe265dbbe40405a68ca3b Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 15 Dec 2008 10:21:01 +0100 Subject: Add Italian locale, translation by Sandro Dentella. --- CHANGES | 4 + doc/config.rst | 1 + sphinx/locale/it/LC_MESSAGES/sphinx.js | 1 + sphinx/locale/it/LC_MESSAGES/sphinx.mo | Bin 0 -> 8415 bytes sphinx/locale/it/LC_MESSAGES/sphinx.po | 599 +++++++++++++++++++++++++++++++++ sphinx/writers/latex.py | 2 +- 6 files changed, 606 insertions(+), 1 deletion(-) create mode 100644 sphinx/locale/it/LC_MESSAGES/sphinx.js create mode 100644 sphinx/locale/it/LC_MESSAGES/sphinx.mo create mode 100644 sphinx/locale/it/LC_MESSAGES/sphinx.po diff --git a/CHANGES b/CHANGES index aaf635f5..c54577ea 100644 --- a/CHANGES +++ b/CHANGES @@ -10,6 +10,10 @@ New features added switch off the generated "paragraph sign" permalinks for each heading and definition environment. +* New translations: + + - Italian by Sandro Dentella. + * Extension API: - There is now a Sphinx.add_lexer() method to add custom Pygments diff --git a/doc/config.rst b/doc/config.rst index 53816cc7..91cc7c3f 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -205,6 +205,7 @@ Project information * ``en`` -- English * ``es`` -- Spanish * ``fr`` -- French + * ``it`` -- Italian * ``nl`` -- Dutch * ``pl`` -- Polish * ``pt_BR`` -- Brazilian Portuguese diff --git a/sphinx/locale/it/LC_MESSAGES/sphinx.js b/sphinx/locale/it/LC_MESSAGES/sphinx.js new file mode 100644 index 00000000..c2c8be7b --- /dev/null +++ b/sphinx/locale/it/LC_MESSAGES/sphinx.js @@ -0,0 +1 @@ +Documentation.addTranslations({"locale": "it", "plural_expr": "(n != 1)", "messages": {"module, in ": "modulo, in", "Preparing search...": "Preparazione della ricerca", "Your search did not match any documents. 
Please make sure that all words are spelled correctly and that you've selected enough categories.": "La tua ricerca non ha trovato alcun risultato. Controlla la corettezzadei termini di ricerca e di avere selezionato un numero sufficiente di categorie", "Search finished, found %s page(s) matching the search query.": "Ricera terminata, trovate %s pagine corrispondenti alla ricerca.", ", in ": ", in ", "Permalink to this headline": "link permanente per questa inestazione", "Searching": "Ricerca in corso", "Permalink to this definition": "link permanente per questa definizione", "Hide Search Matches": "Nascondi i risultati della ricerca", "Search Results": "Risultati della ricerca"}}); \ No newline at end of file diff --git a/sphinx/locale/it/LC_MESSAGES/sphinx.mo b/sphinx/locale/it/LC_MESSAGES/sphinx.mo new file mode 100644 index 00000000..7818e876 Binary files /dev/null and b/sphinx/locale/it/LC_MESSAGES/sphinx.mo differ diff --git a/sphinx/locale/it/LC_MESSAGES/sphinx.po b/sphinx/locale/it/LC_MESSAGES/sphinx.po new file mode 100644 index 00000000..e7c796b0 --- /dev/null +++ b/sphinx/locale/it/LC_MESSAGES/sphinx.po @@ -0,0 +1,599 @@ +# Translations template for Sphinx. +# Copyright (C) 2008 ORGANIZATION +# This file is distributed under the same license as the Sphinx project. +# Sandro Dentella , 2008. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Sphinx 0.5\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2008-11-27 18:39+0100\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.4\n" + +#: sphinx/builder.py:408 +#, python-format +msgid "%b %d, %Y" +msgstr "%d/%b/%Y" + +#: sphinx/builder.py:427 sphinx/templates/defindex.html:21 +msgid "General Index" +msgstr "Indice generale" + +#: sphinx/builder.py:427 +msgid "index" +msgstr "indice" + +#: sphinx/builder.py:429 sphinx/htmlhelp.py:156 +#: sphinx/templates/defindex.html:19 sphinx/templates/modindex.html:2 +#: sphinx/templates/modindex.html:13 +msgid "Global Module Index" +msgstr "Indice dei moduli" + +#: sphinx/builder.py:429 +msgid "modules" +msgstr "moduli" + +#: sphinx/builder.py:466 +msgid "next" +msgstr "successivo" + +#: sphinx/builder.py:473 +msgid "previous" +msgstr "precedente" + +#: sphinx/builder.py:1054 +msgid " (in " +msgstr " (in " + +#: sphinx/builder.py:1129 +msgid "Builtins" +msgstr "Builtin" + +#: sphinx/builder.py:1131 +msgid "Module level" +msgstr "Modulo" + +#: sphinx/environment.py:102 sphinx/latexwriter.py:169 +#, python-format +msgid "%B %d, %Y" +msgstr "%d %B %Y" + +#: sphinx/environment.py:291 sphinx/latexwriter.py:175 +#: sphinx/templates/genindex-single.html:2 +#: sphinx/templates/genindex-split.html:2 +#: sphinx/templates/genindex-split.html:5 sphinx/templates/genindex.html:2 +#: sphinx/templates/genindex.html:5 sphinx/templates/genindex.html:48 +#: sphinx/templates/layout.html:130 +msgid "Index" +msgstr "Indice" + +#: sphinx/environment.py:292 sphinx/latexwriter.py:174 +msgid "Module Index" +msgstr "Indice dei Moduli" + +#: sphinx/environment.py:293 sphinx/templates/defindex.html:16 +msgid "Search Page" +msgstr "Cerca" + +#: sphinx/htmlwriter.py:79 sphinx/static/doctools.js:145 
+msgid "Permalink to this definition" +msgstr "link permanente per questa definizione" + +#: sphinx/htmlwriter.py:399 sphinx/static/doctools.js:139 +msgid "Permalink to this headline" +msgstr "link permanente per questa inestazione" + +#: sphinx/latexwriter.py:172 +msgid "Relelase" +msgstr "Release" + +#: sphinx/roles.py:53 sphinx/directives/desc.py:537 +#, python-format +msgid "environment variable; %s" +msgstr "variabile dámbiente, %s" + +#: sphinx/roles.py:60 +#, python-format +msgid "Python Enhancement Proposals!PEP %s" +msgstr "Python Enhancement Proposals!PEP %s" + +#: sphinx/textwriter.py:166 +#, python-format +msgid "Platform: %s" +msgstr "Piattaforma: %s" + +#: sphinx/textwriter.py:422 +msgid "[image]" +msgstr "[immagine]" + +#: sphinx/directives/desc.py:25 +#, python-format +msgid "%s() (built-in function)" +msgstr "%s() (funzione built-in)" + +#: sphinx/directives/desc.py:26 sphinx/directives/desc.py:42 +#: sphinx/directives/desc.py:54 +#, python-format +msgid "%s() (in module %s)" +msgstr "%s() (nel modulo %s)" + +#: sphinx/directives/desc.py:29 +#, python-format +msgid "%s (built-in variable)" +msgstr "%s (variabile built-in)" + +#: sphinx/directives/desc.py:30 sphinx/directives/desc.py:66 +#, python-format +msgid "%s (in module %s)" +msgstr "%s (nel modulo %s)" + +#: sphinx/directives/desc.py:33 +#, python-format +msgid "%s (built-in class)" +msgstr "%s (classe built-in)" + +#: sphinx/directives/desc.py:34 +#, python-format +msgid "%s (class in %s)" +msgstr "%s (classe in %s)" + +#: sphinx/directives/desc.py:46 +#, python-format +msgid "%s() (%s.%s method)" +msgstr "%s() (%s.%s metodo)" + +#: sphinx/directives/desc.py:48 +#, python-format +msgid "%s() (%s method)" +msgstr "%s() (%s metodo)" + +#: sphinx/directives/desc.py:58 +#, python-format +msgid "%s() (%s.%s static method)" +msgstr "%s() (%s.%s metodo statico)" + +#: sphinx/directives/desc.py:60 +#, python-format +msgid "%s() (%s static method)" +msgstr "%s() (%s metodo statico)" + +#: 
sphinx/directives/desc.py:70 +#, python-format +msgid "%s (%s.%s attribute)" +msgstr "%s (%s.%s attributo)" + +#: sphinx/directives/desc.py:72 +#, python-format +msgid "%s (%s attribute)" +msgstr "%s (%s attributo)" + +#: sphinx/directives/desc.py:74 +#, python-format +msgid "%s (C function)" +msgstr "%s (functione C)" + +#: sphinx/directives/desc.py:76 +#, python-format +msgid "%s (C member)" +msgstr "%s (membro C )" + +#: sphinx/directives/desc.py:78 +#, python-format +msgid "%s (C macro)" +msgstr "%s (macro C)" + +#: sphinx/directives/desc.py:80 +#, python-format +msgid "%s (C type)" +msgstr "%s (tipo C)" + +#: sphinx/directives/desc.py:82 +#, python-format +msgid "%s (C variable)" +msgstr "%s (variabile C)" + +#: sphinx/directives/desc.py:100 +msgid "Raises" +msgstr "Solleva" + +#: sphinx/directives/desc.py:104 +msgid "Variable" +msgstr "Variabile" + +#: sphinx/directives/desc.py:107 +msgid "Returns" +msgstr "Ritorna" + +#: sphinx/directives/desc.py:116 +msgid "Return type" +msgstr "Tipo di ritorno" + +#: sphinx/directives/desc.py:143 +msgid "Parameters" +msgstr "Parametri" + +#: sphinx/directives/desc.py:423 +#, python-format +msgid "%scommand line option; %s" +msgstr "%sopzione di linea di comando; %s" + +#: sphinx/directives/other.py:101 +msgid "Platforms: " +msgstr "Piattaforme:" + +#: sphinx/directives/other.py:106 +#, python-format +msgid "%s (module)" +msgstr "%s (modulo)" + +#: sphinx/directives/other.py:146 +msgid "Section author: " +msgstr "Autore della sezione" + +#: sphinx/directives/other.py:148 +msgid "Module author: " +msgstr "Autore del modulo" + +#: sphinx/directives/other.py:150 +msgid "Author: " +msgstr "Autore: " + +#: sphinx/directives/other.py:246 +msgid "See also" +msgstr "Vedi anche" + +#: sphinx/ext/todo.py:31 +msgid "Todo" +msgstr "Da fare" + +#: sphinx/ext/todo.py:75 +#, python-format +msgid "(The original entry is located in %s, line %d and can be found " +msgstr "(La riga originale si trova in %s, linea %d e può essere trovata " + 
+#: sphinx/ext/todo.py:81 +msgid "here" +msgstr "qui" + +#: sphinx/locale/__init__.py:15 +msgid "Attention" +msgstr "Attenzione" + +#: sphinx/locale/__init__.py:16 +msgid "Caution" +msgstr "Attenzione" + +#: sphinx/locale/__init__.py:17 +msgid "Danger" +msgstr "Pericolo" + +#: sphinx/locale/__init__.py:18 +msgid "Error" +msgstr "Errore" + +#: sphinx/locale/__init__.py:19 +msgid "Hint" +msgstr "Consiglio" + +#: sphinx/locale/__init__.py:20 +msgid "Important" +msgstr "Importante" + +#: sphinx/locale/__init__.py:21 +msgid "Note" +msgstr "Nota" + +#: sphinx/locale/__init__.py:22 +msgid "See Also" +msgstr "Vedi anche" + +#: sphinx/locale/__init__.py:23 +msgid "Tip" +msgstr "Suggerimento" + +#: sphinx/locale/__init__.py:24 +msgid "Warning" +msgstr "Avvertimento" + +#: sphinx/locale/__init__.py:28 +#, python-format +msgid "New in version %s" +msgstr "Nuovo nella versione %s" + +#: sphinx/locale/__init__.py:29 +#, python-format +msgid "Changed in version %s" +msgstr "Cambiato nella versione %s" + +#: sphinx/locale/__init__.py:30 +#, python-format +msgid "Deprecated since version %s" +msgstr "Deprecato dalla versione %s" + +#: sphinx/locale/__init__.py:34 +msgid "module" +msgstr "modulo" + +#: sphinx/locale/__init__.py:35 +msgid "keyword" +msgstr "keyword" + +#: sphinx/locale/__init__.py:36 +msgid "operator" +msgstr "operatore" + +#: sphinx/locale/__init__.py:37 +msgid "object" +msgstr "oggetto" + +#: sphinx/locale/__init__.py:38 +msgid "exception" +msgstr "eccezione" + +#: sphinx/locale/__init__.py:39 +msgid "statement" +msgstr "statement" + +#: sphinx/locale/__init__.py:40 +msgid "built-in function" +msgstr "funzione built-in" + +#: sphinx/static/doctools.js:174 +msgid "Hide Search Matches" +msgstr "Nascondi i risultati della ricerca" + +#: sphinx/static/searchtools.js:274 +msgid "Searching" +msgstr "Ricerca in corso" + +#: sphinx/static/searchtools.js:279 +msgid "Preparing search..." 
+msgstr "Preparazione della ricerca" + +#: sphinx/static/searchtools.js:338 +msgid "module, in " +msgstr "modulo, in" + +#: sphinx/static/searchtools.js:347 +msgid ", in " +msgstr ", in " + +#: sphinx/static/searchtools.js:447 sphinx/templates/search.html:18 +msgid "Search Results" +msgstr "Risultati della ricerca" + +#: sphinx/static/searchtools.js:449 +msgid "" +"Your search did not match any documents. Please make sure that all words " +"are spelled correctly and that you've selected enough categories." +msgstr "" +"La tua ricerca non ha trovato alcun risultato. Controlla la corettezza" +"dei termini di ricerca e di avere selezionato un numero sufficiente di categorie" + +#: sphinx/static/searchtools.js:451 +#, python-format +msgid "Search finished, found %s page(s) matching the search query." +msgstr "Ricera terminata, trovate %s pagine corrispondenti alla ricerca." + +#: sphinx/templates/defindex.html:2 +msgid "Overview" +msgstr "Sintesi" + +#: sphinx/templates/defindex.html:11 +msgid "Indices and tables:" +msgstr "Indici e tabelle:" + +#: sphinx/templates/defindex.html:14 +msgid "Complete Table of Contents" +msgstr "Tabella dei contenuti completa" + +#: sphinx/templates/defindex.html:15 +msgid "lists all sections and subsections" +msgstr "elenca l'insieme delle sezioni e sottosezioni" + +#: sphinx/templates/defindex.html:17 +msgid "search this documentation" +msgstr "cerca in questa documentazione" + +#: sphinx/templates/defindex.html:20 +msgid "quick access to all modules" +msgstr "accesso veloce ai moduli" + +#: sphinx/templates/defindex.html:22 +msgid "all functions, classes, terms" +msgstr "tutte le funzioni, classi e moduli" + +#: sphinx/templates/genindex-single.html:5 +#, python-format +msgid "Index – %(key)s" +msgstr "Indice – %(key)s" + +#: sphinx/templates/genindex-single.html:44 +#: sphinx/templates/genindex-split.html:14 +#: sphinx/templates/genindex-split.html:27 sphinx/templates/genindex.html:54 +msgid "Full index on one page" +msgstr "Indice 
completo in una pagina" + +#: sphinx/templates/genindex-split.html:7 +msgid "Index pages by letter" +msgstr "Indice delle pagine per lettera" + +#: sphinx/templates/genindex-split.html:15 +msgid "can be huge" +msgstr "può essere enorme" + +#: sphinx/templates/layout.html:9 +msgid "Navigation" +msgstr "Navigazione" + +#: sphinx/templates/layout.html:40 +msgid "Table Of Contents" +msgstr "Tablella dei contenuti" + +#: sphinx/templates/layout.html:46 +msgid "Previous topic" +msgstr "Argomento precedente" + +#: sphinx/templates/layout.html:47 +msgid "previous chapter" +msgstr "capitolo precedente" + +#: sphinx/templates/layout.html:50 +msgid "Next topic" +msgstr "Argomento successivo" + +#: sphinx/templates/layout.html:51 +msgid "next chapter" +msgstr "capitolo successivo" + +#: sphinx/templates/layout.html:55 +msgid "This Page" +msgstr "Questa pagina" + +#: sphinx/templates/layout.html:59 +msgid "Suggest Change" +msgstr "Suggerisci una modifica" + +#: sphinx/templates/layout.html:60 sphinx/templates/layout.html:62 +msgid "Show Source" +msgstr "Mostra sorgente" + +#: sphinx/templates/layout.html:71 +msgid "Quick search" +msgstr "Ricerca veloce" + +#: sphinx/templates/layout.html:71 +msgid "Keyword search" +msgstr "Ricerca per parola chiave" + +#: sphinx/templates/layout.html:73 +msgid "Go" +msgstr "Vai" + +#: sphinx/templates/layout.html:78 +msgid "Enter a module, class or function name." 
+msgstr "Inserisci un modulo, classe o nome di funzione" + +#: sphinx/templates/layout.html:119 +#, python-format +msgid "Search within %(docstitle)s" +msgstr "Cerca in %(docstitle)s" + +#: sphinx/templates/layout.html:128 +msgid "About these documents" +msgstr "A proposito di questi documenti" + +#: sphinx/templates/layout.html:131 sphinx/templates/search.html:2 +#: sphinx/templates/search.html:5 +msgid "Search" +msgstr "Cerca" + +#: sphinx/templates/layout.html:133 +msgid "Copyright" +msgstr "Copyright" + +#: sphinx/templates/layout.html:178 +#, python-format +msgid "© Copyright %(copyright)s." +msgstr "© Copyright %(copyright)s." + +#: sphinx/templates/layout.html:180 +#, python-format +msgid "© Copyright %(copyright)s." +msgstr "© Copyright %(copyright)s." + +#: sphinx/templates/layout.html:183 +#, python-format +msgid "Last updated on %(last_updated)s." +msgstr "Ultimo Aggiornamento on %(last_updated)s." + +#: sphinx/templates/layout.html:186 +#, python-format +msgid "" +"Created using Sphinx " +"%(sphinx_version)s." +msgstr "" +"Creato con Sphinx " +"%(sphinx_version)s." + +#: sphinx/templates/modindex.html:15 +msgid "Most popular modules:" +msgstr "Moduli più utilizzati" + +#: sphinx/templates/modindex.html:24 +msgid "Show modules only available on these platforms" +msgstr "Mostra solo i moduli disponibili su questa piattaforma" + +#: sphinx/templates/modindex.html:56 +msgid "Deprecated" +msgstr "Deprecato" + +#: sphinx/templates/opensearch.xml:4 +#, python-format +msgid "Search %(docstitle)s" +msgstr "Cerca %(docstitle)s" + +#: sphinx/templates/page.html:8 +msgid "" +"Note: You requested an out-of-date URL from this server." +" We've tried to redirect you to the new location of this page, but it may" +" not be the right one." +msgstr "" +"Nota: Hai chiesto un URL non più valido." 
+" Abbiamo provato a ridirigerti verso il nuovo indirizzo, ma potrebbe " +" non essere quello giusto" + +#: sphinx/templates/search.html:7 +msgid "" +"From here you can search these documents. Enter your search\n" +" words into the box below and click \"search\". Note that the search\n" +" function will automatically search for all of the words. Pages\n" +" containing fewer words won't appear in the result list." +msgstr "" +"Puoi effettuare una ricerca in questi documenti. Immetti le parole chiave \n" +" della tua ricerca nel riquadro sottostante \"search\". Nota che la funzione\n" +" di ricerca cerca automaticamente per tutte le parole. Le pagine\n" +" che contendono meno parole non compariranno nei risultati di ricerca." + +#: sphinx/templates/search.html:14 +msgid "search" +msgstr "cerca" + +#: sphinx/templates/search.html:20 +msgid "Your search did not match any results." +msgstr "La tua ricerca non ha ottenuto risultati" + +#: sphinx/templates/changes/frameset.html:5 +#: sphinx/templates/changes/versionchanges.html:12 +#, python-format +msgid "Changes in Version %(version)s — %(docstitle)s" +msgstr "Modifiche nella Versione %(version)s — %(docstitle)s" + +#: sphinx/templates/changes/rstsource.html:5 +#, python-format +msgid "%(filename)s — %(docstitle)s" +msgstr "%(filename)s — %(docstitle)s" + +#: sphinx/templates/changes/versionchanges.html:17 +#, python-format +msgid "Automatically generated list of changes in version %(version)s" +msgstr "Lista delle modifiche generata automaticamente nella versione %(version)s" + +#: sphinx/templates/changes/versionchanges.html:18 +msgid "Library changes" +msgstr "Modifiche nela libreria" + +#: sphinx/templates/changes/versionchanges.html:23 +msgid "C API changes" +msgstr "Modifche nelle API C" + +#: sphinx/templates/changes/versionchanges.html:25 +msgid "Other changes" +msgstr "Altre modifiche" + diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 8191cb91..952042d8 100644 --- a/sphinx/writers/latex.py 
+++ b/sphinx/writers/latex.py @@ -91,7 +91,7 @@ class LaTeXWriter(writers.Writer): class ExtBabel(Babel): def get_shorthandoff(self): shortlang = self.language.split('_')[0] - if shortlang in ('de', 'sl', 'pt', 'es', 'nl', 'pl'): + if shortlang in ('de', 'sl', 'pt', 'es', 'nl', 'pl', 'it'): return '\\shorthandoff{"}' return '' -- cgit v1.2.1 From c0ee529e88d68c1564b96b525c8ec225a1525282 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 15 Dec 2008 12:33:13 +0100 Subject: Add "doctest_global_setup" conf val. --- CHANGES | 7 +++++-- doc/ext/doctest.rst | 8 ++++++++ sphinx/ext/doctest.py | 12 ++++++++++-- 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/CHANGES b/CHANGES index 4892c7e2..b2c07ad5 100644 --- a/CHANGES +++ b/CHANGES @@ -16,14 +16,17 @@ New features added * Extension API: - - There is now a Sphinx.add_lexer() method to add custom Pygments - lexers. + - There is now a ``Sphinx.add_lexer()`` method to be able to use + custom Pygments lexers easily. * Other changes: - Config overrides for single dict keys can now be given on the command line. + - There is now a ``doctest_global_setup`` config value that can + be used to give setup code for all doctests in the documentation. + Release 0.5.1 (in development) ============================== diff --git a/doc/ext/doctest.rst b/doc/ext/doctest.rst index 9de6ba9e..7117f6a9 100644 --- a/doc/ext/doctest.rst +++ b/doc/ext/doctest.rst @@ -149,6 +149,14 @@ There are also these config values for customizing the doctest extension: A list of directories that will be added to :data:`sys.path` when the doctest builder is used. (Make sure it contains absolute paths.) +.. confval:: doctest_global_setup + + Python code that is treated like it were put in a ``testsetup`` directive for + *every* file that is tested, and for every group. You can use this to + e.g. import modules you will always need in your doctests. + + .. versionadded:: 0.6 + .. 
confval:: doctest_test_doctest_blocks If this is a nonempty string (the default is ``'default'``), standard reST diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py index aa38bc71..b03f42fb 100644 --- a/sphinx/ext/doctest.py +++ b/sphinx/ext/doctest.py @@ -98,9 +98,12 @@ class TestGroup(object): self.setup = [] self.tests = [] - def add_code(self, code): + def add_code(self, code, prepend=False): if code.type == 'testsetup': - self.setup.append(code) + if prepend: + self.setup.insert(0, code) + else: + self.setup.append(code) elif code.type == 'doctest': self.tests.append([code]) elif code.type == 'testcode': @@ -243,6 +246,10 @@ Doctest summary for code in add_to_all_groups: for group in groups.itervalues(): group.add_code(code) + if self.config.doctest_global_setup: + code = TestCode(self.config.doctest_global_setup, 'testsetup', lineno=0) + for group in groups.itervalues(): + group.add_code(code, prepend=True) if not groups: return @@ -322,3 +329,4 @@ def setup(app): # this config value adds to sys.path app.add_config_value('doctest_path', [], False) app.add_config_value('doctest_test_doctest_blocks', 'default', False) + app.add_config_value('doctest_global_setup', '', False) -- cgit v1.2.1 From bf841f36c1265a31ad82c1f8c4bceac238d3081f Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 15 Dec 2008 12:49:40 +0100 Subject: The ``autodoc_skip_member`` event now also gets to decide whether to skip members whose name starts with underscores. Previously, these members were always automatically skipped. 
Therefore, if you handle this event, add something like this to your event handler to restore the old behavior:: if name.startswith('_'): return True --- CHANGES | 11 +++++++++++ sphinx/ext/autodoc.py | 13 +++++++------ tests/test_autodoc.py | 12 ++++++++++++ 3 files changed, 30 insertions(+), 6 deletions(-) diff --git a/CHANGES b/CHANGES index 2e8a9743..7834e94a 100644 --- a/CHANGES +++ b/CHANGES @@ -4,6 +4,17 @@ Release 0.6 (in development) New features added ------------------ +* Incompatible changes: + + - The ``autodoc_skip_member`` event now also gets to decide + whether to skip members whose name starts with underscores. + Previously, these members were always automatically skipped. + Therefore, if you handle this event, add something like this + to your event handler to restore the old behavior:: + + if name.startswith('_'): + return True + * Configuration: - The new ``html_add_permalinks`` config value can be used to diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index ddaba04c..6b8da386 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -525,14 +525,15 @@ class RstGenerator(object): all_members = sorted(todoc.__dict__.iteritems()) else: all_members = [(mname, getattr(todoc, mname)) for mname in members] + for (membername, member) in all_members: - # ignore members whose name starts with _ by default if _all and membername.startswith('_'): - continue - - # ignore undocumented members if :undoc-members: is not given - doc = getattr(member, '__doc__', None) - skip = not self.options.undoc_members and not doc + # ignore members whose name starts with _ by default + skip = True + else: + # ignore undocumented members if :undoc-members: is not given + doc = getattr(member, '__doc__', None) + skip = not self.options.undoc_members and not doc # give the user a chance to decide whether this member should be skipped if self.env.app: # let extensions preprocess docstrings diff --git a/tests/test_autodoc.py b/tests/test_autodoc.py index 
ee3fdf1d..746c9b42 100644 --- a/tests/test_autodoc.py +++ b/tests/test_autodoc.py @@ -24,6 +24,7 @@ def setup_module(): app.builder.env.app = app app.connect('autodoc-process-docstring', process_docstring) app.connect('autodoc-process-signature', process_signature) + app.connect('autodoc-skip-member', skip_member) options = Struct( inherited_members = False, @@ -71,6 +72,13 @@ def process_signature(app, what, name, obj, options, args, retann): return '42', None +def skip_member(app, what, name, obj, skip, options): + if name.startswith('_'): + return True + if name == 'skipmeth': + return True + + def test_resolve_name(): # for modules assert gen.resolve_name('module', 'test_autodoc') == \ @@ -380,6 +388,10 @@ class Class(Base): def undocmeth(self): pass + def skipmeth(self): + """Method that should be skipped.""" + pass + @property def prop(self): """Property.""" -- cgit v1.2.1 From ccf4f7068170e90a4f8618c4dcf8cf54b424b1ca Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 15 Dec 2008 13:38:39 +0100 Subject: The new ``html_show_sourcelink`` config value can be used to switch off the links to the reST sources in the sidebar. --- CHANGES | 3 +++ doc/config.rst | 7 +++++++ sphinx/builders/html.py | 1 + sphinx/config.py | 1 + sphinx/quickstart.py | 4 ++-- sphinx/templates/layout.html | 14 +++----------- 6 files changed, 17 insertions(+), 13 deletions(-) diff --git a/CHANGES b/CHANGES index 7834e94a..63fb2db8 100644 --- a/CHANGES +++ b/CHANGES @@ -21,6 +21,9 @@ New features added switch off the generated "paragraph sign" permalinks for each heading and definition environment. + - The new ``html_show_sourcelink`` config value can be used to + switch off the links to the reST sources in the sidebar. + * New translations: - Italian by Sandro Dentella. diff --git a/doc/config.rst b/doc/config.rst index 91cc7c3f..a090da1c 100644 --- a/doc/config.rst +++ b/doc/config.rst @@ -409,6 +409,13 @@ that use Sphinx' HTMLWriter class. 
will only display the titles of matching documents, and no excerpt from the matching contents. +.. confval:: html_show_sourcelink + + If true (and :confval:`html_copy_source` is true as well), links to the + reST sources will be added to the sidebar. The default is ``True``. + + .. versionadded:: 0.6 + .. confval:: html_use_opensearch If nonempty, an `OpenSearch ` description file will be diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index 67b3557d..fbe61717 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -142,6 +142,7 @@ class StandaloneHTMLBuilder(Builder): shorttitle = self.config.html_short_title, show_sphinx = self.config.html_show_sphinx, has_source = self.config.html_copy_source, + show_source = self.config.html_show_sourcelink, file_suffix = self.out_suffix, script_files = self.script_files, sphinx_version = __version__, diff --git a/sphinx/config.py b/sphinx/config.py index 1ea5f66f..d12b8e90 100644 --- a/sphinx/config.py +++ b/sphinx/config.py @@ -69,6 +69,7 @@ class Config(object): html_use_index = (True, False), html_split_index = (False, False), html_copy_source = (True, False), + html_show_sourcelink = (True, False), html_use_opensearch = ('', False), html_file_suffix = (None, False), html_show_sphinx = (True, False), diff --git a/sphinx/quickstart.py b/sphinx/quickstart.py index 1f076575..91213f32 100644 --- a/sphinx/quickstart.py +++ b/sphinx/quickstart.py @@ -165,8 +165,8 @@ html_static_path = ['%(dot)sstatic'] # If true, the index is split into individual pages for each letter. #html_split_index = False -# If true, the reST sources are included in the HTML build as _sources/. -#html_copy_source = True +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. 
The value of this option must be the diff --git a/sphinx/templates/layout.html b/sphinx/templates/layout.html index d9c9045d..0ef14e45 100644 --- a/sphinx/templates/layout.html +++ b/sphinx/templates/layout.html @@ -51,16 +51,10 @@

    {{ next.title }}

    {%- endif %} {%- endblock %} - {%- if sourcename %} + {%- if show_source and has_source %}

    {{ _('This Page') }}

    {%- endif %} {%- if customsidebar %} @@ -68,15 +62,13 @@ {%- endif %} {%- block sidebarsearch %} {%- if pagename != "search" %} -

    {% if builder == 'web' %}{{ _('Keyword search')}}{% else %}{{ _('Quick search') }}{% endif %}

    +

    {{ _('Quick search') }}

    - {%- if builder == 'web' %} -

    {{ _('Enter a module, class or function name.') }}

    - {%- endif %} +

    {{ _('Enter search terms or a module, class or function name.') }}

    {%- endif %} {%- endblock %} -- cgit v1.2.1 From be7d070b44f2ceea19d42cda4c021b6dad874914 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 15 Dec 2008 13:50:54 +0100 Subject: Add a DeprecationWarning for sphinx.builder. --- sphinx/builder.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sphinx/builder.py b/sphinx/builder.py index 2a19b751..a8f8497d 100644 --- a/sphinx/builder.py +++ b/sphinx/builder.py @@ -12,6 +12,8 @@ :license: BSD. """ +import warnings + from sphinx.builders import Builder from sphinx.builders.text import TextBuilder from sphinx.builders.html import StandaloneHTMLBuilder, WebHTMLBuilder, \ @@ -20,3 +22,7 @@ from sphinx.builders.latex import LaTeXBuilder from sphinx.builders.changes import ChangesBuilder from sphinx.builders.htmlhelp import HTMLHelpBuilder from sphinx.builders.linkcheck import CheckExternalLinksBuilder + +warnings.warn('The sphinx.builder module is deprecated; please import ' + 'builders from the respective sphinx.builders submodules.', + DeprecationWarning) -- cgit v1.2.1 From cab6cf69065f313e1748422d1255bb873354cb86 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 15 Dec 2008 14:06:05 +0100 Subject: Remove last web leftovers. 
--- sphinx/templates/layout.html | 8 -------- sphinx/templates/modindex.html | 20 -------------------- sphinx/templates/page.html | 8 -------- 3 files changed, 36 deletions(-) diff --git a/sphinx/templates/layout.html b/sphinx/templates/layout.html index 0ef14e45..bd1b2853 100644 --- a/sphinx/templates/layout.html +++ b/sphinx/templates/layout.html @@ -84,16 +84,8 @@ {%- set titlesuffix = " — " + docstitle|e %} {%- endif %} {{ title|striptags }}{{ titlesuffix }} - {%- if builder == 'web' %} - - {%- for link, type, title in page_links %} - - {%- endfor %} - {%- else %} - {%- endif %} {%- if builder != 'htmlhelp' %} -- cgit v1.2.1 From 8b649f402b93faee0e811347eaca237e8b61bd58 Mon Sep 17 00:00:00 2001 From: gbrandl Date: Mon, 29 Dec 2008 20:20:17 +0100 Subject: fix whitespace glitch --- sphinx/templates/layout.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/templates/layout.html b/sphinx/templates/layout.html index bf203a60..e011f643 100644 --- a/sphinx/templates/layout.html +++ b/sphinx/templates/layout.html @@ -76,7 +76,7 @@ {%- endif %} -{%- endmacro -%} +{%- endmacro %} -- cgit v1.2.1 From 22996b2579717271d02302787cd614ce7ee730c7 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 29 Dec 2008 20:22:18 +0100 Subject: Add pgen2 and custom utilities. 
--- sphinx/pycode/Grammar.txt | 155 +++++++++++++++ sphinx/pycode/__init__.py | 142 ++++++++++++++ sphinx/pycode/pgen2/__init__.py | 4 + sphinx/pycode/pgen2/driver.py | 145 ++++++++++++++ sphinx/pycode/pgen2/grammar.py | 171 +++++++++++++++++ sphinx/pycode/pgen2/literals.py | 60 ++++++ sphinx/pycode/pgen2/parse.py | 201 ++++++++++++++++++++ sphinx/pycode/pgen2/pgen.py | 384 +++++++++++++++++++++++++++++++++++++ sphinx/pycode/pgen2/token.py | 82 ++++++++ sphinx/pycode/pgen2/tokenize.py | 405 ++++++++++++++++++++++++++++++++++++++++ sphinx/pycode/pytree.py | 295 +++++++++++++++++++++++++++++ 11 files changed, 2044 insertions(+) create mode 100644 sphinx/pycode/Grammar.txt create mode 100644 sphinx/pycode/__init__.py create mode 100644 sphinx/pycode/pgen2/__init__.py create mode 100644 sphinx/pycode/pgen2/driver.py create mode 100644 sphinx/pycode/pgen2/grammar.py create mode 100644 sphinx/pycode/pgen2/literals.py create mode 100644 sphinx/pycode/pgen2/parse.py create mode 100644 sphinx/pycode/pgen2/pgen.py create mode 100755 sphinx/pycode/pgen2/token.py create mode 100644 sphinx/pycode/pgen2/tokenize.py create mode 100644 sphinx/pycode/pytree.py diff --git a/sphinx/pycode/Grammar.txt b/sphinx/pycode/Grammar.txt new file mode 100644 index 00000000..1f4a50ff --- /dev/null +++ b/sphinx/pycode/Grammar.txt @@ -0,0 +1,155 @@ +# Grammar for Python + +# Note: Changing the grammar specified in this file will most likely +# require corresponding changes in the parser module +# (../Modules/parsermodule.c). If you can't make the changes to +# that module yourself, please co-ordinate the required changes +# with someone who can; ask around on python-dev for help. Fred +# Drake will probably be listening there. 
+ +# NOTE WELL: You should also follow all the steps listed in PEP 306, +# "How to Change Python's Grammar" + +# Commands for Kees Blom's railroad program +#diagram:token NAME +#diagram:token NUMBER +#diagram:token STRING +#diagram:token NEWLINE +#diagram:token ENDMARKER +#diagram:token INDENT +#diagram:output\input python.bla +#diagram:token DEDENT +#diagram:output\textwidth 20.04cm\oddsidemargin 0.0cm\evensidemargin 0.0cm +#diagram:rules + +# Start symbols for the grammar: +# file_input is a module or sequence of commands read from an input file; +# single_input is a single interactive statement; +# eval_input is the input for the eval() and input() functions. +# NB: compound_stmt in single_input is followed by extra NEWLINE! +file_input: (NEWLINE | stmt)* ENDMARKER +single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +eval_input: testlist NEWLINE* ENDMARKER + +decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef) +funcdef: 'def' NAME parameters ['->' test] ':' suite +parameters: '(' [typedargslist] ')' +typedargslist: ((tfpdef ['=' test] ',')* + ('*' [tname] (',' tname ['=' test])* [',' '**' tname] | '**' tname) + | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) +tname: NAME [':' test] +tfpdef: tname | '(' tfplist ')' +tfplist: tfpdef (',' tfpdef)* [','] +varargslist: ((vfpdef ['=' test] ',')* + ('*' [vname] (',' vname ['=' test])* [',' '**' vname] | '**' vname) + | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) +vname: NAME +vfpdef: vname | '(' vfplist ')' +vfplist: vfpdef (',' vfpdef)* [','] + +stmt: simple_stmt | compound_stmt +simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE +small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt | + import_stmt | global_stmt | exec_stmt | assert_stmt) +expr_stmt: testlist (augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist))*) +augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | 
'>>=' | '**=' | '//=') +# For normal assignments, additional restrictions enforced by the interpreter +print_stmt: 'print' ( [ test (',' test)* [','] ] | + '>>' test [ (',' test)+ [','] ] ) +del_stmt: 'del' exprlist +pass_stmt: 'pass' +flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: 'break' +continue_stmt: 'continue' +return_stmt: 'return' [testlist] +yield_stmt: yield_expr +raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]] +import_stmt: import_name | import_from +import_name: 'import' dotted_as_names +import_from: ('from' ('.'* dotted_name | '.'+) + 'import' ('*' | '(' import_as_names ')' | import_as_names)) +import_as_name: NAME ['as' NAME] +dotted_as_name: dotted_name ['as' NAME] +import_as_names: import_as_name (',' import_as_name)* [','] +dotted_as_names: dotted_as_name (',' dotted_as_name)* +dotted_name: NAME ('.' NAME)* +global_stmt: ('global' | 'nonlocal') NAME (',' NAME)* +exec_stmt: 'exec' expr ['in' test [',' test]] +assert_stmt: 'assert' test [',' test] + +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated +if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] +while_stmt: 'while' test ':' suite ['else' ':' suite] +for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] +try_stmt: ('try' ':' suite + ((except_clause ':' suite)+ + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' test [ with_var ] ':' suite +with_var: 'as' expr +# NB compile.c makes sure that the default except clause is last +except_clause: 'except' [test [(',' | 'as') test]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT + +# Backward compatibility cruft to support: +# [ x for x in lambda: True, lambda: False if x() ] +# even while also allowing: +# lambda x: 5 if x else 2 +# (But not a mix of the two) +testlist_safe: old_test [(',' old_test)+ [',']] +old_test: or_test | old_lambdef +old_lambdef: 'lambda' 
[varargslist] ':' old_test + +test: or_test ['if' or_test 'else' test] | lambdef +or_test: and_test ('or' and_test)* +and_test: not_test ('and' not_test)* +not_test: 'not' not_test | comparison +comparison: expr (comp_op expr)* +comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' +expr: xor_expr ('|' xor_expr)* +xor_expr: and_expr ('^' and_expr)* +and_expr: shift_expr ('&' shift_expr)* +shift_expr: arith_expr (('<<'|'>>') arith_expr)* +arith_expr: term (('+'|'-') term)* +term: factor (('*'|'/'|'%'|'//') factor)* +factor: ('+'|'-'|'~') factor | power +power: atom trailer* ['**' factor] +atom: ('(' [yield_expr|testlist_gexp] ')' | + '[' [listmaker] ']' | + '{' [dictsetmaker] '}' | + '`' testlist1 '`' | + NAME | NUMBER | STRING+ | '.' '.' '.') +listmaker: test ( comp_for | (',' test)* [','] ) +testlist_gexp: test ( comp_for | (',' test)* [','] ) +lambdef: 'lambda' [varargslist] ':' test +trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME +subscriptlist: subscript (',' subscript)* [','] +subscript: test | [test] ':' [test] [sliceop] +sliceop: ':' [test] +exprlist: expr (',' expr)* [','] +testlist: test (',' test)* [','] +dictsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) | + (test (comp_for | (',' test)* [','])) ) + +classdef: 'class' NAME ['(' [arglist] ')'] ':' suite + +arglist: (argument ',')* (argument [','] + |'*' test (',' argument)* [',' '**' test] + |'**' test) +argument: test [comp_for] | test '=' test # Really [keyword '='] test + +comp_iter: comp_for | comp_if +comp_for: 'for' exprlist 'in' testlist_safe [comp_iter] +comp_if: 'if' old_test [comp_iter] + +testlist1: test (',' test)* + +# not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: NAME + +yield_expr: 'yield' [testlist] diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py new file mode 100644 index 00000000..b5d83e67 --- /dev/null +++ b/sphinx/pycode/__init__.py @@ -0,0 +1,142 @@ +# -*- coding: 
utf-8 -*- +""" + sphinx.pycode + ~~~~~~~~~~~~~ + + Utilities parsing and analyzing Python code. + + :copyright: 2008 by Georg Brandl. + :license: BSD, see LICENSE for details. +""" + +import sys +from os import path + +from sphinx.pycode import pytree +from sphinx.pycode.pgen2 import driver, token + + +# load the Python grammar +_grammarfile = path.join(path.dirname(__file__), 'Grammar.txt') +pygrammar = driver.load_grammar(_grammarfile) +pydriver = driver.Driver(pygrammar, convert=pytree.convert) + +class sym: pass +for k, v in pygrammar.symbol2number.iteritems(): + setattr(sym, k, v) + +# a dict mapping terminal and nonterminal numbers to their names +number2name = pygrammar.number2symbol.copy() +number2name.update(token.tok_name) + + +def prepare_commentdoc(s): + result = [] + lines = [line.strip() for line in s.expandtabs().splitlines()] + for line in lines: + if line.startswith('#: '): + result.append(line[3:]) + if result and result[-1]: + result.append('') + return '\n'.join(result) + + +_eq = pytree.Leaf(token.EQUAL, '=') + + +class ClassAttrVisitor(pytree.NodeVisitor): + def init(self): + self.namespace = [] + + def visit_classdef(self, node): + self.namespace.append(node[1].value) + self.generic_visit(node) + self.namespace.pop() + + def visit_expr_stmt(self, node): + if _eq in node.children: + prefix = node[0].get_prefix() + if not prefix: + prev = node[0].get_prev_leaf() + if prev and prev.type == token.INDENT: + prefix = prev.prefix + doc = prepare_commentdoc(prefix) + if doc: + targ = '.'.join(self.namespace + [node[0].compact()]) + print targ + print doc + + def visit_funcdef(self, node): + return + + +class ModuleAnalyzer(object): + + def __init__(self, tree, modname, srcname): + self.tree = tree + self.modname = modname + self.srcname = srcname + + @classmethod + def for_string(cls, string, modname, srcname=''): + return cls(pydriver.parse_string(string), modname, srcname) + + @classmethod + def for_file(cls, filename, modname): + # XXX if raises + 
fileobj = open(filename, 'r') + try: + return cls(pydriver.parse_stream(fileobj), modname, filename) + finally: + fileobj.close() + + @classmethod + def for_module(cls, modname): + if modname not in sys.modules: + # XXX + __import__(modname) + mod = sys.modules[modname] + if hasattr(mod, '__loader__'): + # XXX raises + source = mod.__loader__.get_source(modname) + return cls.for_string(source, modname) + filename = getattr(mod, '__file__', None) + if filename is None: + # XXX + raise RuntimeError('no source found') + if filename.lower().endswith('.pyo') or \ + filename.lower().endswith('.pyc'): + filename = filename[:-1] + elif not filename.lower().endswith('.py'): + raise RuntimeError('not a .py file') + if not path.isfile(filename): + # XXX + raise RuntimeError('source not present') + return cls.for_file(filename, modname) + + def find_defs(self): + attr_visitor = ClassAttrVisitor(number2name) + attr_visitor.namespace = [self.modname] + attr_visitor.visit(self.tree) + +class Test: + """doc""" + + #: testing... + x = 1 + """doc""" + + #: testing more... + x = 2 + + +#ma = ModuleAnalyzer.for_file(__file__.rstrip('c')) +import time +x0=time.time() +ma = ModuleAnalyzer.for_module('sphinx.builders.latex') +x1=time.time() +ma.find_defs() +x2=time.time() +print "%.4f %.4f" % (x1-x0, x2-x1) + +#print pytree.nice_repr(ma.tree, number2name, True) diff --git a/sphinx/pycode/pgen2/__init__.py b/sphinx/pycode/pgen2/__init__.py new file mode 100644 index 00000000..af390484 --- /dev/null +++ b/sphinx/pycode/pgen2/__init__.py @@ -0,0 +1,4 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""The pgen2 package.""" diff --git a/sphinx/pycode/pgen2/driver.py b/sphinx/pycode/pgen2/driver.py new file mode 100644 index 00000000..3e9e1043 --- /dev/null +++ b/sphinx/pycode/pgen2/driver.py @@ -0,0 +1,145 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. 
+# Licensed to PSF under a Contributor Agreement. + +# Modifications: +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Parser driver. + +This provides a high-level interface to parse a file into a syntax tree. + +""" + +__author__ = "Guido van Rossum " + +__all__ = ["Driver", "load_grammar"] + +# Python imports +import os +import logging +import sys + +# Pgen imports +from sphinx.pycode.pgen2 import grammar, parse, token, tokenize, pgen + + +class Driver(object): + + def __init__(self, grammar, convert=None, logger=None): + self.grammar = grammar + if logger is None: + logger = logging.getLogger() + self.logger = logger + self.convert = convert + + def parse_tokens(self, tokens, debug=False): + """Parse a series of tokens and return the syntax tree.""" + # XXX Move the prefix computation into a wrapper around tokenize. + p = parse.Parser(self.grammar, self.convert) + p.setup() + lineno = 1 + column = 0 + type = value = start = end = line_text = None + prefix = "" + for quintuple in tokens: + type, value, start, end, line_text = quintuple + if start != (lineno, column): + assert (lineno, column) <= start, ((lineno, column), start) + s_lineno, s_column = start + if lineno < s_lineno: + prefix += "\n" * (s_lineno - lineno) + lineno = s_lineno + column = 0 + if column < s_column: + prefix += line_text[column:s_column] + column = s_column + if type in (tokenize.COMMENT, tokenize.NL): + prefix += value + lineno, column = end + if value.endswith("\n"): + lineno += 1 + column = 0 + continue + if type == token.OP: + type = grammar.opmap[value] + if debug: + self.logger.debug("%s %r (prefix=%r)", + token.tok_name[type], value, prefix) + if p.addtoken(type, value, (prefix, start)): + if debug: + self.logger.debug("Stop.") + break + prefix = "" + lineno, column = end + if value.endswith("\n"): + lineno += 1 + column = 0 + else: + # We never broke out -- EOF is too soon (how can this happen???) 
+ raise parse.ParseError("incomplete input", t, v, x) + return p.rootnode + + def parse_stream_raw(self, stream, debug=False): + """Parse a stream and return the syntax tree.""" + tokens = tokenize.generate_tokens(stream.readline) + return self.parse_tokens(tokens, debug) + + def parse_stream(self, stream, debug=False): + """Parse a stream and return the syntax tree.""" + return self.parse_stream_raw(stream, debug) + + def parse_file(self, filename, debug=False): + """Parse a file and return the syntax tree.""" + stream = open(filename) + try: + return self.parse_stream(stream, debug) + finally: + stream.close() + + def parse_string(self, text, debug=False): + """Parse a string and return the syntax tree.""" + tokens = tokenize.generate_tokens(generate_lines(text).next) + return self.parse_tokens(tokens, debug) + + +def generate_lines(text): + """Generator that behaves like readline without using StringIO.""" + for line in text.splitlines(True): + yield line + while True: + yield "" + + +def load_grammar(gt="Grammar.txt", gp=None, + save=True, force=False, logger=None): + """Load the grammar (maybe from a pickle).""" + if logger is None: + logger = logging.getLogger() + if gp is None: + head, tail = os.path.splitext(gt) + if tail == ".txt": + tail = "" + gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle" + if force or not _newer(gp, gt): + logger.info("Generating grammar tables from %s", gt) + g = pgen.generate_grammar(gt) + if save: + logger.info("Writing grammar tables to %s", gp) + try: + g.dump(gp) + except IOError, e: + logger.info("Writing failed:"+str(e)) + else: + g = grammar.Grammar() + g.load(gp) + return g + + +def _newer(a, b): + """Inquire whether file a was written since file b.""" + if not os.path.exists(a): + return False + if not os.path.exists(b): + return True + return os.path.getmtime(a) >= os.path.getmtime(b) diff --git a/sphinx/pycode/pgen2/grammar.py b/sphinx/pycode/pgen2/grammar.py new file mode 100644 index 
00000000..381d80e8 --- /dev/null +++ b/sphinx/pycode/pgen2/grammar.py @@ -0,0 +1,171 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""This module defines the data structures used to represent a grammar. + +These are a bit arcane because they are derived from the data +structures used by Python's 'pgen' parser generator. + +There's also a table here mapping operators to their names in the +token module; the Python tokenize module reports all operators as the +fallback token code OP, but the parser needs the actual token code. + +""" + +# Python imports +import pickle + +# Local imports +from sphinx.pycode.pgen2 import token, tokenize + + +class Grammar(object): + """Pgen parsing tables tables conversion class. + + Once initialized, this class supplies the grammar tables for the + parsing engine implemented by parse.py. The parsing engine + accesses the instance variables directly. The class here does not + provide initialization of the tables; several subclasses exist to + do this (see the conv and pgen modules). + + The load() method reads the tables from a pickle file, which is + much faster than the other ways offered by subclasses. The pickle + file is written by calling dump() (after loading the grammar + tables using a subclass). The report() method prints a readable + representation of the tables to stdout, for debugging. + + The instance variables are as follows: + + symbol2number -- a dict mapping symbol names to numbers. Symbol + numbers are always 256 or higher, to distinguish + them from token numbers, which are between 0 and + 255 (inclusive). + + number2symbol -- a dict mapping numbers to symbol names; + these two are each other's inverse. + + states -- a list of DFAs, where each DFA is a list of + states, each state is is a list of arcs, and each + arc is a (i, j) pair where i is a label and j is + a state number. The DFA number is the index into + this list. 
(This name is slightly confusing.) + Final states are represented by a special arc of + the form (0, j) where j is its own state number. + + dfas -- a dict mapping symbol numbers to (DFA, first) + pairs, where DFA is an item from the states list + above, and first is a set of tokens that can + begin this grammar rule (represented by a dict + whose values are always 1). + + labels -- a list of (x, y) pairs where x is either a token + number or a symbol number, and y is either None + or a string; the strings are keywords. The label + number is the index in this list; label numbers + are used to mark state transitions (arcs) in the + DFAs. + + start -- the number of the grammar's start symbol. + + keywords -- a dict mapping keyword strings to arc labels. + + tokens -- a dict mapping token numbers to arc labels. + + """ + + def __init__(self): + self.symbol2number = {} + self.number2symbol = {} + self.states = [] + self.dfas = {} + self.labels = [(0, "EMPTY")] + self.keywords = {} + self.tokens = {} + self.symbol2label = {} + self.start = 256 + + def dump(self, filename): + """Dump the grammar tables to a pickle file.""" + f = open(filename, "wb") + pickle.dump(self.__dict__, f, 2) + f.close() + + def load(self, filename): + """Load the grammar tables from a pickle file.""" + f = open(filename, "rb") + d = pickle.load(f) + f.close() + self.__dict__.update(d) + + def report(self): + """Dump the grammar tables to standard output, for debugging.""" + from pprint import pprint + print "s2n" + pprint(self.symbol2number) + print "n2s" + pprint(self.number2symbol) + print "states" + pprint(self.states) + print "dfas" + pprint(self.dfas) + print "labels" + pprint(self.labels) + print "start", self.start + + +# Map from operator to number (since tokenize doesn't do this) + +opmap_raw = """ +( LPAR +) RPAR +[ LSQB +] RSQB +: COLON +, COMMA +; SEMI ++ PLUS +- MINUS +* STAR +/ SLASH +| VBAR +& AMPER +< LESS +> GREATER += EQUAL +. 
DOT +% PERCENT +` BACKQUOTE +{ LBRACE +} RBRACE +@ AT +== EQEQUAL +!= NOTEQUAL +<> NOTEQUAL +<= LESSEQUAL +>= GREATEREQUAL +~ TILDE +^ CIRCUMFLEX +<< LEFTSHIFT +>> RIGHTSHIFT +** DOUBLESTAR ++= PLUSEQUAL +-= MINEQUAL +*= STAREQUAL +/= SLASHEQUAL +%= PERCENTEQUAL +&= AMPEREQUAL +|= VBAREQUAL +^= CIRCUMFLEXEQUAL +<<= LEFTSHIFTEQUAL +>>= RIGHTSHIFTEQUAL +**= DOUBLESTAREQUAL +// DOUBLESLASH +//= DOUBLESLASHEQUAL +-> RARROW +""" + +opmap = {} +for line in opmap_raw.splitlines(): + if line: + op, name = line.split() + opmap[op] = getattr(token, name) diff --git a/sphinx/pycode/pgen2/literals.py b/sphinx/pycode/pgen2/literals.py new file mode 100644 index 00000000..0b3948a5 --- /dev/null +++ b/sphinx/pycode/pgen2/literals.py @@ -0,0 +1,60 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Safely evaluate Python string literals without using eval().""" + +import re + +simple_escapes = {"a": "\a", + "b": "\b", + "f": "\f", + "n": "\n", + "r": "\r", + "t": "\t", + "v": "\v", + "'": "'", + '"': '"', + "\\": "\\"} + +def escape(m): + all, tail = m.group(0, 1) + assert all.startswith("\\") + esc = simple_escapes.get(tail) + if esc is not None: + return esc + if tail.startswith("x"): + hexes = tail[1:] + if len(hexes) < 2: + raise ValueError("invalid hex string escape ('\\%s')" % tail) + try: + i = int(hexes, 16) + except ValueError: + raise ValueError("invalid hex string escape ('\\%s')" % tail) + else: + try: + i = int(tail, 8) + except ValueError: + raise ValueError("invalid octal string escape ('\\%s')" % tail) + return chr(i) + +def evalString(s): + assert s.startswith("'") or s.startswith('"'), repr(s[:1]) + q = s[0] + if s[:3] == q*3: + q = q*3 + assert s.endswith(q), repr(s[-len(q):]) + assert len(s) >= 2*len(q) + s = s[len(q):-len(q)] + return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s) + +def test(): + for i in range(256): + c = chr(i) + s = repr(c) + e = evalString(s) + if 
e != c: + print i, c, s, e + + +if __name__ == "__main__": + test() diff --git a/sphinx/pycode/pgen2/parse.py b/sphinx/pycode/pgen2/parse.py new file mode 100644 index 00000000..60eec05e --- /dev/null +++ b/sphinx/pycode/pgen2/parse.py @@ -0,0 +1,201 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Parser engine for the grammar tables generated by pgen. + +The grammar table must be loaded first. + +See Parser/parser.c in the Python distribution for additional info on +how this parsing engine works. + +""" + +# Local imports +from sphinx.pycode.pgen2 import token + +class ParseError(Exception): + """Exception to signal the parser is stuck.""" + + def __init__(self, msg, type, value, context): + Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % + (msg, type, value, context)) + self.msg = msg + self.type = type + self.value = value + self.context = context + +class Parser(object): + """Parser engine. + + The proper usage sequence is: + + p = Parser(grammar, [converter]) # create instance + p.setup([start]) # prepare for parsing + : + if p.addtoken(...): # parse a token; may raise ParseError + break + root = p.rootnode # root of abstract syntax tree + + A Parser instance may be reused by calling setup() repeatedly. + + A Parser instance contains state pertaining to the current token + sequence, and should not be used concurrently by different threads + to parse separate token sequences. + + See driver.py for how to get input tokens by tokenizing a file or + string. + + Parsing is complete when addtoken() returns True; the root of the + abstract syntax tree can then be retrieved from the rootnode + instance variable. When a syntax error occurs, addtoken() raises + the ParseError exception. There is no error recovery; the parser + cannot be used after a syntax error was reported (but it can be + reinitialized by calling setup()). 
+ + """ + + def __init__(self, grammar, convert=None): + """Constructor. + + The grammar argument is a grammar.Grammar instance; see the + grammar module for more information. + + The parser is not ready yet for parsing; you must call the + setup() method to get it started. + + The optional convert argument is a function mapping concrete + syntax tree nodes to abstract syntax tree nodes. If not + given, no conversion is done and the syntax tree produced is + the concrete syntax tree. If given, it must be a function of + two arguments, the first being the grammar (a grammar.Grammar + instance), and the second being the concrete syntax tree node + to be converted. The syntax tree is converted from the bottom + up. + + A concrete syntax tree node is a (type, value, context, nodes) + tuple, where type is the node type (a token or symbol number), + value is None for symbols and a string for tokens, context is + None or an opaque value used for error reporting (typically a + (lineno, offset) pair), and nodes is a list of children for + symbols, and None for tokens. + + An abstract syntax tree node may be anything; this is entirely + up to the converter function. + + """ + self.grammar = grammar + self.convert = convert or (lambda grammar, node: node) + + def setup(self, start=None): + """Prepare for parsing. + + This *must* be called before starting to parse. + + The optional argument is an alternative start symbol; it + defaults to the grammar's start symbol. + + You can use a Parser instance to parse any number of programs; + each time you call setup() the parser is reset to an initial + state determined by the (implicit or explicit) start symbol. + + """ + if start is None: + start = self.grammar.start + # Each stack entry is a tuple: (dfa, state, node). + # A node is a tuple: (type, value, context, children), + # where children is a list of nodes or None, and context may be None. 
+ newnode = (start, None, None, []) + stackentry = (self.grammar.dfas[start], 0, newnode) + self.stack = [stackentry] + self.rootnode = None + self.used_names = set() # Aliased to self.rootnode.used_names in pop() + + def addtoken(self, type, value, context): + """Add a token; return True iff this is the end of the program.""" + # Map from token to label + ilabel = self.classify(type, value, context) + # Loop until the token is shifted; may raise exceptions + while True: + dfa, state, node = self.stack[-1] + states, first = dfa + arcs = states[state] + # Look for a state with this label + for i, newstate in arcs: + t, v = self.grammar.labels[i] + if ilabel == i: + # Look it up in the list of labels + assert t < 256 + # Shift a token; we're done with it + self.shift(type, value, newstate, context) + # Pop while we are in an accept-only state + state = newstate + while states[state] == [(0, state)]: + self.pop() + if not self.stack: + # Done parsing! + return True + dfa, state, node = self.stack[-1] + states, first = dfa + # Done with this token + return False + elif t >= 256: + # See if it's a symbol and if we're in its first set + itsdfa = self.grammar.dfas[t] + itsstates, itsfirst = itsdfa + if ilabel in itsfirst: + # Push a symbol + self.push(t, self.grammar.dfas[t], newstate, context) + break # To continue the outer while loop + else: + if (0, state) in arcs: + # An accepting state, pop it and try something else + self.pop() + if not self.stack: + # Done parsing, but another token is input + raise ParseError("too much input", + type, value, context) + else: + # No success finding a transition + raise ParseError("bad input", type, value, context) + + def classify(self, type, value, context): + """Turn a token into a label. 
(Internal)""" + if type == token.NAME: + # Keep a listing of all used names + self.used_names.add(value) + # Check for reserved words + ilabel = self.grammar.keywords.get(value) + if ilabel is not None: + return ilabel + ilabel = self.grammar.tokens.get(type) + if ilabel is None: + raise ParseError("bad token", type, value, context) + return ilabel + + def shift(self, type, value, newstate, context): + """Shift a token. (Internal)""" + dfa, state, node = self.stack[-1] + newnode = (type, value, context, None) + newnode = self.convert(self.grammar, newnode) + if newnode is not None: + node[-1].append(newnode) + self.stack[-1] = (dfa, newstate, node) + + def push(self, type, newdfa, newstate, context): + """Push a nonterminal. (Internal)""" + dfa, state, node = self.stack[-1] + newnode = (type, None, context, []) + self.stack[-1] = (dfa, newstate, node) + self.stack.append((newdfa, 0, newnode)) + + def pop(self): + """Pop a nonterminal. (Internal)""" + popdfa, popstate, popnode = self.stack.pop() + newnode = self.convert(self.grammar, popnode) + if newnode is not None: + if self.stack: + dfa, state, node = self.stack[-1] + node[-1].append(newnode) + else: + self.rootnode = newnode + self.rootnode.used_names = self.used_names diff --git a/sphinx/pycode/pgen2/pgen.py b/sphinx/pycode/pgen2/pgen.py new file mode 100644 index 00000000..d6895eae --- /dev/null +++ b/sphinx/pycode/pgen2/pgen.py @@ -0,0 +1,384 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. 
+ +# Pgen imports +from sphinx.pycode.pgen2 import grammar, token, tokenize + +class PgenGrammar(grammar.Grammar): + pass + +class ParserGenerator(object): + + def __init__(self, filename, stream=None): + close_stream = None + if stream is None: + stream = open(filename) + close_stream = stream.close + self.filename = filename + self.stream = stream + self.generator = tokenize.generate_tokens(stream.readline) + self.gettoken() # Initialize lookahead + self.dfas, self.startsymbol = self.parse() + if close_stream is not None: + close_stream() + self.first = {} # map from symbol name to set of tokens + self.addfirstsets() + + def make_grammar(self): + c = PgenGrammar() + names = self.dfas.keys() + names.sort() + names.remove(self.startsymbol) + names.insert(0, self.startsymbol) + for name in names: + i = 256 + len(c.symbol2number) + c.symbol2number[name] = i + c.number2symbol[i] = name + for name in names: + dfa = self.dfas[name] + states = [] + for state in dfa: + arcs = [] + for label, next in state.arcs.iteritems(): + arcs.append((self.make_label(c, label), dfa.index(next))) + if state.isfinal: + arcs.append((0, dfa.index(state))) + states.append(arcs) + c.states.append(states) + c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name)) + c.start = c.symbol2number[self.startsymbol] + return c + + def make_first(self, c, name): + rawfirst = self.first[name] + first = {} + for label in rawfirst: + ilabel = self.make_label(c, label) + ##assert ilabel not in first # XXX failed on <> ... != + first[ilabel] = 1 + return first + + def make_label(self, c, label): + # XXX Maybe this should be a method on a subclass of converter? 
+ ilabel = len(c.labels) + if label[0].isalpha(): + # Either a symbol name or a named token + if label in c.symbol2number: + # A symbol name (a non-terminal) + if label in c.symbol2label: + return c.symbol2label[label] + else: + c.labels.append((c.symbol2number[label], None)) + c.symbol2label[label] = ilabel + return ilabel + else: + # A named token (NAME, NUMBER, STRING) + itoken = getattr(token, label, None) + assert isinstance(itoken, int), label + assert itoken in token.tok_name, label + if itoken in c.tokens: + return c.tokens[itoken] + else: + c.labels.append((itoken, None)) + c.tokens[itoken] = ilabel + return ilabel + else: + # Either a keyword or an operator + assert label[0] in ('"', "'"), label + value = eval(label) + if value[0].isalpha(): + # A keyword + if value in c.keywords: + return c.keywords[value] + else: + c.labels.append((token.NAME, value)) + c.keywords[value] = ilabel + return ilabel + else: + # An operator (any non-numeric token) + itoken = grammar.opmap[value] # Fails if unknown token + if itoken in c.tokens: + return c.tokens[itoken] + else: + c.labels.append((itoken, None)) + c.tokens[itoken] = ilabel + return ilabel + + def addfirstsets(self): + names = self.dfas.keys() + names.sort() + for name in names: + if name not in self.first: + self.calcfirst(name) + #print name, self.first[name].keys() + + def calcfirst(self, name): + dfa = self.dfas[name] + self.first[name] = None # dummy to detect left recursion + state = dfa[0] + totalset = {} + overlapcheck = {} + for label, next in state.arcs.iteritems(): + if label in self.dfas: + if label in self.first: + fset = self.first[label] + if fset is None: + raise ValueError("recursion for rule %r" % name) + else: + self.calcfirst(label) + fset = self.first[label] + totalset.update(fset) + overlapcheck[label] = fset + else: + totalset[label] = 1 + overlapcheck[label] = {label: 1} + inverse = {} + for label, itsfirst in overlapcheck.iteritems(): + for symbol in itsfirst: + if symbol in inverse: + 
raise ValueError("rule %s is ambiguous; %s is in the" + " first sets of %s as well as %s" % + (name, symbol, label, inverse[symbol])) + inverse[symbol] = label + self.first[name] = totalset + + def parse(self): + dfas = {} + startsymbol = None + # MSTART: (NEWLINE | RULE)* ENDMARKER + while self.type != token.ENDMARKER: + while self.type == token.NEWLINE: + self.gettoken() + # RULE: NAME ':' RHS NEWLINE + name = self.expect(token.NAME) + self.expect(token.OP, ":") + a, z = self.parse_rhs() + self.expect(token.NEWLINE) + #self.dump_nfa(name, a, z) + dfa = self.make_dfa(a, z) + #self.dump_dfa(name, dfa) + oldlen = len(dfa) + self.simplify_dfa(dfa) + newlen = len(dfa) + dfas[name] = dfa + #print name, oldlen, newlen + if startsymbol is None: + startsymbol = name + return dfas, startsymbol + + def make_dfa(self, start, finish): + # To turn an NFA into a DFA, we define the states of the DFA + # to correspond to *sets* of states of the NFA. Then do some + # state reduction. Let's represent sets as dicts with 1 for + # values. 
+ assert isinstance(start, NFAState) + assert isinstance(finish, NFAState) + def closure(state): + base = {} + addclosure(state, base) + return base + def addclosure(state, base): + assert isinstance(state, NFAState) + if state in base: + return + base[state] = 1 + for label, next in state.arcs: + if label is None: + addclosure(next, base) + states = [DFAState(closure(start), finish)] + for state in states: # NB states grows while we're iterating + arcs = {} + for nfastate in state.nfaset: + for label, next in nfastate.arcs: + if label is not None: + addclosure(next, arcs.setdefault(label, {})) + for label, nfaset in arcs.iteritems(): + for st in states: + if st.nfaset == nfaset: + break + else: + st = DFAState(nfaset, finish) + states.append(st) + state.addarc(st, label) + return states # List of DFAState instances; first one is start + + def dump_nfa(self, name, start, finish): + print "Dump of NFA for", name + todo = [start] + for i, state in enumerate(todo): + print " State", i, state is finish and "(final)" or "" + for label, next in state.arcs: + if next in todo: + j = todo.index(next) + else: + j = len(todo) + todo.append(next) + if label is None: + print " -> %d" % j + else: + print " %s -> %d" % (label, j) + + def dump_dfa(self, name, dfa): + print "Dump of DFA for", name + for i, state in enumerate(dfa): + print " State", i, state.isfinal and "(final)" or "" + for label, next in state.arcs.iteritems(): + print " %s -> %d" % (label, dfa.index(next)) + + def simplify_dfa(self, dfa): + # This is not theoretically optimal, but works well enough. + # Algorithm: repeatedly look for two states that have the same + # set of arcs (same labels pointing to the same nodes) and + # unify them, until things stop changing. 
+ + # dfa is a list of DFAState instances + changes = True + while changes: + changes = False + for i, state_i in enumerate(dfa): + for j in range(i+1, len(dfa)): + state_j = dfa[j] + if state_i == state_j: + #print " unify", i, j + del dfa[j] + for state in dfa: + state.unifystate(state_j, state_i) + changes = True + break + + def parse_rhs(self): + # RHS: ALT ('|' ALT)* + a, z = self.parse_alt() + if self.value != "|": + return a, z + else: + aa = NFAState() + zz = NFAState() + aa.addarc(a) + z.addarc(zz) + while self.value == "|": + self.gettoken() + a, z = self.parse_alt() + aa.addarc(a) + z.addarc(zz) + return aa, zz + + def parse_alt(self): + # ALT: ITEM+ + a, b = self.parse_item() + while (self.value in ("(", "[") or + self.type in (token.NAME, token.STRING)): + c, d = self.parse_item() + b.addarc(c) + b = d + return a, b + + def parse_item(self): + # ITEM: '[' RHS ']' | ATOM ['+' | '*'] + if self.value == "[": + self.gettoken() + a, z = self.parse_rhs() + self.expect(token.OP, "]") + a.addarc(z) + return a, z + else: + a, z = self.parse_atom() + value = self.value + if value not in ("+", "*"): + return a, z + self.gettoken() + z.addarc(a) + if value == "+": + return a, z + else: + return a, a + + def parse_atom(self): + # ATOM: '(' RHS ')' | NAME | STRING + if self.value == "(": + self.gettoken() + a, z = self.parse_rhs() + self.expect(token.OP, ")") + return a, z + elif self.type in (token.NAME, token.STRING): + a = NFAState() + z = NFAState() + a.addarc(z, self.value) + self.gettoken() + return a, z + else: + self.raise_error("expected (...) 
or NAME or STRING, got %s/%s", + self.type, self.value) + + def expect(self, type, value=None): + if self.type != type or (value is not None and self.value != value): + self.raise_error("expected %s/%s, got %s/%s", + type, value, self.type, self.value) + value = self.value + self.gettoken() + return value + + def gettoken(self): + tup = self.generator.next() + while tup[0] in (tokenize.COMMENT, tokenize.NL): + tup = self.generator.next() + self.type, self.value, self.begin, self.end, self.line = tup + #print token.tok_name[self.type], repr(self.value) + + def raise_error(self, msg, *args): + if args: + try: + msg = msg % args + except: + msg = " ".join([msg] + map(str, args)) + raise SyntaxError(msg, (self.filename, self.end[0], + self.end[1], self.line)) + +class NFAState(object): + + def __init__(self): + self.arcs = [] # list of (label, NFAState) pairs + + def addarc(self, next, label=None): + assert label is None or isinstance(label, str) + assert isinstance(next, NFAState) + self.arcs.append((label, next)) + +class DFAState(object): + + def __init__(self, nfaset, final): + assert isinstance(nfaset, dict) + assert isinstance(iter(nfaset).next(), NFAState) + assert isinstance(final, NFAState) + self.nfaset = nfaset + self.isfinal = final in nfaset + self.arcs = {} # map from label to DFAState + + def addarc(self, next, label): + assert isinstance(label, str) + assert label not in self.arcs + assert isinstance(next, DFAState) + self.arcs[label] = next + + def unifystate(self, old, new): + for label, next in self.arcs.iteritems(): + if next is old: + self.arcs[label] = new + + def __eq__(self, other): + # Equality test -- ignore the nfaset instance variable + assert isinstance(other, DFAState) + if self.isfinal != other.isfinal: + return False + # Can't just return self.arcs == other.arcs, because that + # would invoke this method recursively, with cycles... 
+ if len(self.arcs) != len(other.arcs): + return False + for label, next in self.arcs.iteritems(): + if next is not other.arcs.get(label): + return False + return True + +def generate_grammar(filename="Grammar.txt"): + p = ParserGenerator(filename) + return p.make_grammar() diff --git a/sphinx/pycode/pgen2/token.py b/sphinx/pycode/pgen2/token.py new file mode 100755 index 00000000..61468b31 --- /dev/null +++ b/sphinx/pycode/pgen2/token.py @@ -0,0 +1,82 @@ +#! /usr/bin/env python + +"""Token constants (from "token.h").""" + +# Taken from Python (r53757) and modified to include some tokens +# originally monkeypatched in by pgen2.tokenize + +#--start constants-- +ENDMARKER = 0 +NAME = 1 +NUMBER = 2 +STRING = 3 +NEWLINE = 4 +INDENT = 5 +DEDENT = 6 +LPAR = 7 +RPAR = 8 +LSQB = 9 +RSQB = 10 +COLON = 11 +COMMA = 12 +SEMI = 13 +PLUS = 14 +MINUS = 15 +STAR = 16 +SLASH = 17 +VBAR = 18 +AMPER = 19 +LESS = 20 +GREATER = 21 +EQUAL = 22 +DOT = 23 +PERCENT = 24 +BACKQUOTE = 25 +LBRACE = 26 +RBRACE = 27 +EQEQUAL = 28 +NOTEQUAL = 29 +LESSEQUAL = 30 +GREATEREQUAL = 31 +TILDE = 32 +CIRCUMFLEX = 33 +LEFTSHIFT = 34 +RIGHTSHIFT = 35 +DOUBLESTAR = 36 +PLUSEQUAL = 37 +MINEQUAL = 38 +STAREQUAL = 39 +SLASHEQUAL = 40 +PERCENTEQUAL = 41 +AMPEREQUAL = 42 +VBAREQUAL = 43 +CIRCUMFLEXEQUAL = 44 +LEFTSHIFTEQUAL = 45 +RIGHTSHIFTEQUAL = 46 +DOUBLESTAREQUAL = 47 +DOUBLESLASH = 48 +DOUBLESLASHEQUAL = 49 +AT = 50 +OP = 51 +COMMENT = 52 +NL = 53 +RARROW = 54 +ERRORTOKEN = 55 +N_TOKENS = 56 +NT_OFFSET = 256 +#--end constants-- + +tok_name = {} +for _name, _value in globals().items(): + if type(_value) is type(0): + tok_name[_value] = _name + + +def ISTERMINAL(x): + return x < NT_OFFSET + +def ISNONTERMINAL(x): + return x >= NT_OFFSET + +def ISEOF(x): + return x == ENDMARKER diff --git a/sphinx/pycode/pgen2/tokenize.py b/sphinx/pycode/pgen2/tokenize.py new file mode 100644 index 00000000..46ee7842 --- /dev/null +++ b/sphinx/pycode/pgen2/tokenize.py @@ -0,0 +1,405 @@ +# Copyright (c) 2001, 2002, 2003, 2004, 
2005, 2006 Python Software Foundation. +# All rights reserved. + +"""Tokenization help for Python programs. + +generate_tokens(readline) is a generator that breaks a stream of +text into Python tokens. It accepts a readline-like method which is called +repeatedly to get the next line of input (or "" for EOF). It generates +5-tuples with these members: + + the token type (see token.py) + the token (a string) + the starting (row, column) indices of the token (a 2-tuple of ints) + the ending (row, column) indices of the token (a 2-tuple of ints) + the original line (string) + +It is designed to match the working of the Python tokenizer exactly, except +that it produces COMMENT tokens for comments and gives type OP for all +operators + +Older entry points + tokenize_loop(readline, tokeneater) + tokenize(readline, tokeneater=printtoken) +are the same, except instead of generating tokens, tokeneater is a callback +function to which the 5 fields described above are passed as 5 arguments, +each time a new token is found.""" + +__author__ = 'Ka-Ping Yee ' +__credits__ = \ + 'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro' + +import string, re +from sphinx.pycode.pgen2.token import * +from sphinx.pycode.pgen2 import token + +__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize", + "generate_tokens", "untokenize"] +del token + +def group(*choices): return '(' + '|'.join(choices) + ')' +def any(*choices): return group(*choices) + '*' +def maybe(*choices): return group(*choices) + '?' + +Whitespace = r'[ \f\t]*' +Comment = r'#[^\r\n]*' +Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) +Name = r'[a-zA-Z_]\w*' + +Binnumber = r'0[bB][01]*' +Hexnumber = r'0[xX][\da-fA-F]*[lL]?' +Octnumber = r'0[oO]?[0-7]*[lL]?' +Decnumber = r'[1-9]\d*[lL]?' 
+Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber) +Exponent = r'[eE][-+]?\d+' +Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent) +Expfloat = r'\d+' + Exponent +Floatnumber = group(Pointfloat, Expfloat) +Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]') +Number = group(Imagnumber, Floatnumber, Intnumber) + +# Tail end of ' string. +Single = r"[^'\\]*(?:\\.[^'\\]*)*'" +# Tail end of " string. +Double = r'[^"\\]*(?:\\.[^"\\]*)*"' +# Tail end of ''' string. +Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" +# Tail end of """ string. +Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' +Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""') +# Single-line ' or " string. +String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'", + r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"') + +# Because of leftmost-then-longest match semantics, be sure to put the +# longest operators first (e.g., if = came before ==, == would get +# recognized as two instances of =). +Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=", + r"//=?", r"->", + r"[+\-*/%&|^=<>]=?", + r"~") + +Bracket = '[][(){}]' +Special = group(r'\r?\n', r'[:;.,`@]') +Funny = group(Operator, Bracket, Special) + +PlainToken = group(Number, Funny, String, Name) +Token = Ignore + PlainToken + +# First (or only) line of ' or " string. 
+ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" + + group("'", r'\\\r?\n'), + r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' + + group('"', r'\\\r?\n')) +PseudoExtras = group(r'\\\r?\n', Comment, Triple) +PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) + +tokenprog, pseudoprog, single3prog, double3prog = map( + re.compile, (Token, PseudoToken, Single3, Double3)) +endprogs = {"'": re.compile(Single), '"': re.compile(Double), + "'''": single3prog, '"""': double3prog, + "r'''": single3prog, 'r"""': double3prog, + "u'''": single3prog, 'u"""': double3prog, + "b'''": single3prog, 'b"""': double3prog, + "ur'''": single3prog, 'ur"""': double3prog, + "br'''": single3prog, 'br"""': double3prog, + "R'''": single3prog, 'R"""': double3prog, + "U'''": single3prog, 'U"""': double3prog, + "B'''": single3prog, 'B"""': double3prog, + "uR'''": single3prog, 'uR"""': double3prog, + "Ur'''": single3prog, 'Ur"""': double3prog, + "UR'''": single3prog, 'UR"""': double3prog, + "bR'''": single3prog, 'bR"""': double3prog, + "Br'''": single3prog, 'Br"""': double3prog, + "BR'''": single3prog, 'BR"""': double3prog, + 'r': None, 'R': None, + 'u': None, 'U': None, + 'b': None, 'B': None} + +triple_quoted = {} +for t in ("'''", '"""', + "r'''", 'r"""', "R'''", 'R"""', + "u'''", 'u"""', "U'''", 'U"""', + "b'''", 'b"""', "B'''", 'B"""', + "ur'''", 'ur"""', "Ur'''", 'Ur"""', + "uR'''", 'uR"""', "UR'''", 'UR"""', + "br'''", 'br"""', "Br'''", 'Br"""', + "bR'''", 'bR"""', "BR'''", 'BR"""',): + triple_quoted[t] = t +single_quoted = {} +for t in ("'", '"', + "r'", 'r"', "R'", 'R"', + "u'", 'u"', "U'", 'U"', + "b'", 'b"', "B'", 'B"', + "ur'", 'ur"', "Ur'", 'Ur"', + "uR'", 'uR"', "UR'", 'UR"', + "br'", 'br"', "Br'", 'Br"', + "bR'", 'bR"', "BR'", 'BR"', ): + single_quoted[t] = t + +tabsize = 8 + +class TokenError(Exception): pass + +class StopTokenizing(Exception): pass + +def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing + print 
"%d,%d-%d,%d:\t%s\t%s" % \ + (srow, scol, erow, ecol, tok_name[type], repr(token)) + +def tokenize(readline, tokeneater=printtoken): + """ + The tokenize() function accepts two parameters: one representing the + input stream, and one providing an output mechanism for tokenize(). + + The first parameter, readline, must be a callable object which provides + the same interface as the readline() method of built-in file objects. + Each call to the function should return one line of input as a string. + + The second parameter, tokeneater, must also be a callable object. It is + called once for each token, with five arguments, corresponding to the + tuples generated by generate_tokens(). + """ + try: + tokenize_loop(readline, tokeneater) + except StopTokenizing: + pass + +# backwards compatible interface +def tokenize_loop(readline, tokeneater): + for token_info in generate_tokens(readline): + tokeneater(*token_info) + +class Untokenizer: + + def __init__(self): + self.tokens = [] + self.prev_row = 1 + self.prev_col = 0 + + def add_whitespace(self, start): + row, col = start + assert row <= self.prev_row + col_offset = col - self.prev_col + if col_offset: + self.tokens.append(" " * col_offset) + + def untokenize(self, iterable): + for t in iterable: + if len(t) == 2: + self.compat(t, iterable) + break + tok_type, token, start, end, line = t + self.add_whitespace(start) + self.tokens.append(token) + self.prev_row, self.prev_col = end + if tok_type in (NEWLINE, NL): + self.prev_row += 1 + self.prev_col = 0 + return "".join(self.tokens) + + def compat(self, token, iterable): + startline = False + indents = [] + toks_append = self.tokens.append + toknum, tokval = token + if toknum in (NAME, NUMBER): + tokval += ' ' + if toknum in (NEWLINE, NL): + startline = True + for tok in iterable: + toknum, tokval = tok[:2] + + if toknum in (NAME, NUMBER): + tokval += ' ' + + if toknum == INDENT: + indents.append(tokval) + continue + elif toknum == DEDENT: + indents.pop() + continue + 
elif toknum in (NEWLINE, NL): + startline = True + elif startline and indents: + toks_append(indents[-1]) + startline = False + toks_append(tokval) + +def untokenize(iterable): + """Transform tokens back into Python source code. + + Each element returned by the iterable must be a token sequence + with at least two elements, a token number and token value. If + only two tokens are passed, the resulting output is poor. + + Round-trip invariant for full input: + Untokenized source will match input source exactly + + Round-trip invariant for limited intput: + # Output text will tokenize the back to the input + t1 = [tok[:2] for tok in generate_tokens(f.readline)] + newcode = untokenize(t1) + readline = iter(newcode.splitlines(1)).next + t2 = [tok[:2] for tokin generate_tokens(readline)] + assert t1 == t2 + """ + ut = Untokenizer() + return ut.untokenize(iterable) + +def generate_tokens(readline): + """ + The generate_tokens() generator requires one argment, readline, which + must be a callable object which provides the same interface as the + readline() method of built-in file objects. Each call to the function + should return one line of input as a string. Alternately, readline + can be a callable function terminating with StopIteration: + readline = open(myfile).next # Example of alternate readline + + The generator produces 5-tuples with these members: the token type; the + token string; a 2-tuple (srow, scol) of ints specifying the row and + column where the token begins in the source; a 2-tuple (erow, ecol) of + ints specifying the row and column where the token ends in the source; + and the line on which the token was found. The line passed is the + logical line; continuation lines are included. 
+ """ + lnum = parenlev = continued = 0 + namechars, numchars = string.ascii_letters + '_', '0123456789' + contstr, needcont = '', 0 + contline = None + indents = [0] + + while 1: # loop over lines in stream + try: + line = readline() + except StopIteration: + line = '' + lnum = lnum + 1 + pos, max = 0, len(line) + + if contstr: # continued string + if not line: + raise TokenError, ("EOF in multi-line string", strstart) + endmatch = endprog.match(line) + if endmatch: + pos = end = endmatch.end(0) + yield (STRING, contstr + line[:end], + strstart, (lnum, end), contline + line) + contstr, needcont = '', 0 + contline = None + elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': + yield (ERRORTOKEN, contstr + line, + strstart, (lnum, len(line)), contline) + contstr = '' + contline = None + continue + else: + contstr = contstr + line + contline = contline + line + continue + + elif parenlev == 0 and not continued: # new statement + if not line: break + column = 0 + while pos < max: # measure leading whitespace + if line[pos] == ' ': column = column + 1 + elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize + elif line[pos] == '\f': column = 0 + else: break + pos = pos + 1 + if pos == max: break + + if line[pos] in '#\r\n': # skip comments or blank lines + if line[pos] == '#': + comment_token = line[pos:].rstrip('\r\n') + nl_pos = pos + len(comment_token) + yield (COMMENT, comment_token, + (lnum, pos), (lnum, pos + len(comment_token)), line) + yield (NL, line[nl_pos:], + (lnum, nl_pos), (lnum, len(line)), line) + else: + yield ((NL, COMMENT)[line[pos] == '#'], line[pos:], + (lnum, pos), (lnum, len(line)), line) + continue + + if column > indents[-1]: # count indents or dedents + indents.append(column) + yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line) + while column < indents[-1]: + if column not in indents: + raise IndentationError( + "unindent does not match any outer indentation level", + ("", lnum, pos, line)) + indents = indents[:-1] + 
yield (DEDENT, '', (lnum, pos), (lnum, pos), line) + + else: # continued statement + if not line: + raise TokenError, ("EOF in multi-line statement", (lnum, 0)) + continued = 0 + + while pos < max: + pseudomatch = pseudoprog.match(line, pos) + if pseudomatch: # scan for tokens + start, end = pseudomatch.span(1) + spos, epos, pos = (lnum, start), (lnum, end), end + token, initial = line[start:end], line[start] + + if initial in numchars or \ + (initial == '.' and token != '.'): # ordinary number + yield (NUMBER, token, spos, epos, line) + elif initial in '\r\n': + newline = NEWLINE + if parenlev > 0: + newline = NL + yield (newline, token, spos, epos, line) + elif initial == '#': + assert not token.endswith("\n") + yield (COMMENT, token, spos, epos, line) + elif token in triple_quoted: + endprog = endprogs[token] + endmatch = endprog.match(line, pos) + if endmatch: # all on one line + pos = endmatch.end(0) + token = line[start:pos] + yield (STRING, token, spos, (lnum, pos), line) + else: + strstart = (lnum, start) # multiple lines + contstr = line[start:] + contline = line + break + elif initial in single_quoted or \ + token[:2] in single_quoted or \ + token[:3] in single_quoted: + if token[-1] == '\n': # continued string + strstart = (lnum, start) + endprog = (endprogs[initial] or endprogs[token[1]] or + endprogs[token[2]]) + contstr, needcont = line[start:], 1 + contline = line + break + else: # ordinary string + yield (STRING, token, spos, epos, line) + elif initial in namechars: # ordinary name + yield (NAME, token, spos, epos, line) + elif initial == '\\': # continued stmt + # This yield is new; needed for better idempotency: + yield (NL, token, spos, (lnum, pos), line) + continued = 1 + else: + if initial in '([{': parenlev = parenlev + 1 + elif initial in ')]}': parenlev = parenlev - 1 + yield (OP, token, spos, epos, line) + else: + yield (ERRORTOKEN, line[pos], + (lnum, pos), (lnum, pos+1), line) + pos = pos + 1 + + for indent in indents[1:]: # pop remaining 
indent levels + yield (DEDENT, '', (lnum, 0), (lnum, 0), '') + yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '') + +if __name__ == '__main__': # testing + import sys + if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline) + else: tokenize(sys.stdin.readline) diff --git a/sphinx/pycode/pytree.py b/sphinx/pycode/pytree.py new file mode 100644 index 00000000..950319c5 --- /dev/null +++ b/sphinx/pycode/pytree.py @@ -0,0 +1,295 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Python parse tree definitions. + +This is a very concrete parse tree; we need to keep every token and +even the comments and whitespace between tokens. + +Adapted for read-only nodes from pytree.py in Python's 2to3 tool, and +added a few bits. +""" + +__author__ = "Guido van Rossum " + + +class Base(object): + + """Abstract base class for Node and Leaf. + + This provides some default functionality and boilerplate using the + template pattern. + + A node may be a subnode of at most one parent. + """ + + # Default values for instance variables + type = None # int: token number (< 256) or symbol number (>= 256) + parent = None # Parent node pointer, or None + children = () # Tuple of subnodes + was_changed = False + + def __new__(cls, *args, **kwds): + """Constructor that prevents Base from being instantiated.""" + assert cls is not Base, "Cannot instantiate Base" + return object.__new__(cls) + + def __eq__(self, other): + """Compares two nodes for equality. + + This calls the method _eq(). + """ + if self.__class__ is not other.__class__: + return NotImplemented + return self._eq(other) + + def __ne__(self, other): + """Compares two nodes for inequality. + + This calls the method _eq(). + """ + if self.__class__ is not other.__class__: + return NotImplemented + return not self._eq(other) + + def _eq(self, other): + """Compares two nodes for equality. + + This is called by __eq__ and __ne__. 
It is only called if the + two nodes have the same type. This must be implemented by the + concrete subclass. Nodes should be considered equal if they + have the same structure, ignoring the prefix string and other + context information. + """ + raise NotImplementedError + + def get_lineno(self): + """Returns the line number which generated the invocant node.""" + node = self + while not isinstance(node, Leaf): + if not node.children: + return + node = node.children[0] + return node.lineno + + def get_next_sibling(self): + """Return the node immediately following the invocant in their + parent's children list. If the invocant does not have a next + sibling, return None.""" + if self.parent is None: + return None + + # Can't use index(); we need to test by identity + for i, child in enumerate(self.parent.children): + if child is self: + try: + return self.parent.children[i+1] + except IndexError: + return None + + def get_prev_sibling(self): + """Return the node immediately preceding the invocant in their + parent's children list. If the invocant does not have a previous + sibling, return None.""" + if self.parent is None: + return None + + # Can't use index(); we need to test by identity + for i, child in enumerate(self.parent.children): + if child is self: + if i == 0: + return None + return self.parent.children[i-1] + + def get_prev_leaf(self): + """Return the leaf node that precedes this node in the parse tree.""" + def last_child(node): + if isinstance(node, Leaf): + return node + elif not node.children: + return None + else: + return last_child(node.children[-1]) + if self.parent is None: + return None + prev = self.get_prev_sibling() + if isinstance(prev, Leaf): + return prev + elif prev is not None: + return last_child(prev) + return self.parent.get_prev_leaf() + + def get_suffix(self): + """Return the string immediately following the invocant node. 
This + is effectively equivalent to node.get_next_sibling().get_prefix()""" + next_sib = self.get_next_sibling() + if next_sib is None: + return "" + return next_sib.get_prefix() + + +class Node(Base): + + """Concrete implementation for interior nodes.""" + + def __init__(self, type, children, context=None, prefix=None): + """Initializer. + + Takes a type constant (a symbol number >= 256), a sequence of + child nodes, and an optional context keyword argument. + + As a side effect, the parent pointers of the children are updated. + """ + assert type >= 256, type + self.type = type + self.children = list(children) + for ch in self.children: + assert ch.parent is None, repr(ch) + ch.parent = self + if prefix is not None: + self.set_prefix(prefix) + + def __repr__(self): + return "%s(%s, %r)" % (self.__class__.__name__, + self.type, self.children) + + def __str__(self): + """This reproduces the input source exactly.""" + return "".join(map(str, self.children)) + + def compact(self): + return ''.join(child.compact() for child in self.children) + + def __getitem__(self, index): + return self.children[index] + + def __iter__(self): + return iter(self.children) + + def _eq(self, other): + """Compares two nodes for equality.""" + return (self.type, self.children) == (other.type, other.children) + + def post_order(self): + """Returns a post-order iterator for the tree.""" + for child in self.children: + for node in child.post_order(): + yield node + yield self + + def pre_order(self): + """Returns a pre-order iterator for the tree.""" + yield self + for child in self.children: + for node in child.post_order(): + yield node + + def get_prefix(self): + """Returns the prefix for the node. + + This passes the call on to the first child. 
+ """ + if not self.children: + return "" + return self.children[0].get_prefix() + + +class Leaf(Base): + + """Concrete implementation for leaf nodes.""" + + # Default values for instance variables + prefix = "" # Whitespace and comments preceding this token in the input + lineno = 0 # Line where this token starts in the input + column = 0 # Column where this token tarts in the input + + def __init__(self, type, value, context=None, prefix=None): + """Initializer. + + Takes a type constant (a token number < 256), a string value, + and an optional context keyword argument. + """ + assert 0 <= type < 256, type + if context is not None: + self.prefix, (self.lineno, self.column) = context + self.type = type + self.value = value + if prefix is not None: + self.prefix = prefix + + def __repr__(self): + return "%s(%r, %r)" % (self.__class__.__name__, + self.type, self.value) + + def __str__(self): + """This reproduces the input source exactly.""" + return self.prefix + str(self.value) + + def compact(self): + return str(self.value) + + def _eq(self, other): + """Compares two nodes for equality.""" + return (self.type, self.value) == (other.type, other.value) + + def post_order(self): + """Returns a post-order iterator for the tree.""" + yield self + + def pre_order(self): + """Returns a pre-order iterator for the tree.""" + yield self + + def get_prefix(self): + """Returns the prefix for the node.""" + return self.prefix + + +def convert(grammar, raw_node): + """Convert raw node to a Node or Leaf instance.""" + type, value, context, children = raw_node + if children or type in grammar.number2symbol: + # If there's exactly one child, return that child instead of + # creating a new node. 
+ if len(children) == 1: + return children[0] + return Node(type, children, context=context) + else: + return Leaf(type, value, context=context) + + +def nice_repr(node, number2name, prefix=False): + def _repr(node): + if isinstance(node, Leaf): + return "%s(%r)" % (number2name[node.type], node.value) + else: + return "%s(%s)" % (number2name[node.type], + ', '.join(map(_repr, node.children))) + def _prepr(node): + if isinstance(node, Leaf): + return "%s(%r, %r)" % (number2name[node.type], node.prefix, node.value) + else: + return "%s(%s)" % (number2name[node.type], + ', '.join(map(_prepr, node.children))) + return (prefix and _prepr or _repr)(node) + + +class NodeVisitor(object): + def __init__(self, number2name): + self.number2name = number2name + self.init() + + def init(self): + pass + + def visit(self, node): + """Visit a node.""" + method = 'visit_' + self.number2name[node.type] + visitor = getattr(self, method, self.generic_visit) + return visitor(node) + + def generic_visit(self, node): + """Called if no explicit visitor function exists for a node.""" + if isinstance(node, Node): + for child in node: + self.visit(child) -- cgit v1.2.1 From 76d544d815fe9b3bb6f2150dbd4e11832fb8d47e Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 29 Dec 2008 20:37:30 +0100 Subject: Cleanup; add scoping to ClassAttrVisitor. 
--- sphinx/pycode/__init__.py | 61 ++++++++++++++++++++++------------------------- sphinx/pycode/pytree.py | 6 ++--- 2 files changed, 31 insertions(+), 36 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index b5d83e67..e006ce16 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -10,6 +10,7 @@ """ import sys +import time from os import path from sphinx.pycode import pytree @@ -45,8 +46,10 @@ _eq = pytree.Leaf(token.EQUAL, '=') class ClassAttrVisitor(pytree.NodeVisitor): - def init(self): + def init(self, scope): + self.scope = scope self.namespace = [] + self.collected = [] def visit_classdef(self, node): self.namespace.append(node[1].value) @@ -54,17 +57,21 @@ class ClassAttrVisitor(pytree.NodeVisitor): self.namespace.pop() def visit_expr_stmt(self, node): - if _eq in node.children: - prefix = node[0].get_prefix() - if not prefix: - prev = node[0].get_prev_leaf() - if prev and prev.type == token.INDENT: - prefix = prev.prefix - doc = prepare_commentdoc(prefix) - if doc: - targ = '.'.join(self.namespace + [node[0].compact()]) - print targ - print doc + if _eq not in node.children: + # not an assignment (we don't care for augmented assignments) + return + prefix = node[0].get_prefix() + if not prefix: + # if this assignment is the first thing in a class block, + # the comment will be the prefix of the preceding INDENT token + prev = node[0].get_prev_leaf() + if prev and prev.type == token.INDENT: + prefix = prev.prefix + doc = prepare_commentdoc(prefix) + if doc: + name = '.'.join(self.namespace + [node[0].compact()]) + if name.startswith(self.scope): + self.collected.append((name, doc)) def visit_funcdef(self, node): return @@ -115,28 +122,16 @@ class ModuleAnalyzer(object): return cls.for_file(filename, modname) def find_defs(self): - attr_visitor = ClassAttrVisitor(number2name) - attr_visitor.namespace = [self.modname] + attr_visitor = ClassAttrVisitor(number2name, '') attr_visitor.visit(self.tree) + for name, 
doc in attr_visitor.collected: + print '>>', name + print doc -class Test: - """doc""" - - #: testing... - x = 1 - """doc""" - - #: testing more... - x = 2 - -#ma = ModuleAnalyzer.for_file(__file__.rstrip('c')) -import time -x0=time.time() -ma = ModuleAnalyzer.for_module('sphinx.builders.latex') -x1=time.time() +x0 = time.time() +ma = ModuleAnalyzer.for_module('sphinx.builders.html') +x1 = time.time() ma.find_defs() -x2=time.time() -print "%.4f %.4f" % (x1-x0, x2-x1) - -#print pytree.nice_repr(ma.tree, number2name, True) +x2 = time.time() +print "parsing %.4f, finding %.4f" % (x1-x0, x2-x1) diff --git a/sphinx/pycode/pytree.py b/sphinx/pycode/pytree.py index 950319c5..a0e83b63 100644 --- a/sphinx/pycode/pytree.py +++ b/sphinx/pycode/pytree.py @@ -275,11 +275,11 @@ def nice_repr(node, number2name, prefix=False): class NodeVisitor(object): - def __init__(self, number2name): + def __init__(self, number2name, *args): self.number2name = number2name - self.init() + self.init(*args) - def init(self): + def init(self, *args): pass def visit(self, node): -- cgit v1.2.1 From d59cb5c2dfeae2282b804459b76a4ef5f9243ccf Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 29 Dec 2008 20:39:14 +0100 Subject: Add comment. --- sphinx/pycode/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index e006ce16..26d3579d 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -74,6 +74,7 @@ class ClassAttrVisitor(pytree.NodeVisitor): self.collected.append((name, doc)) def visit_funcdef(self, node): + # don't descend into functions -- nothing interesting there return -- cgit v1.2.1 From fb41d84eca01e3a622780dbd15b6c7606b0bbc4f Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 29 Dec 2008 20:47:32 +0100 Subject: Improve error handling. 
--- sphinx/pycode/__init__.py | 51 ++++++++++++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index 26d3579d..0ebf683c 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -14,7 +14,7 @@ import time from os import path from sphinx.pycode import pytree -from sphinx.pycode.pgen2 import driver, token +from sphinx.pycode.pgen2 import driver, token, parse # load the Python grammar @@ -78,6 +78,14 @@ class ClassAttrVisitor(pytree.NodeVisitor): return +class PycodeError(Exception): + def __str__(self): + res = self.args[0] + if len(self.args) > 1: + res += ' (exception was: %r)' % self.args[1] + return res + + class ModuleAnalyzer(object): def __init__(self, tree, modname, srcname): @@ -91,48 +99,55 @@ class ModuleAnalyzer(object): @classmethod def for_file(cls, filename, modname): - # XXX if raises - fileobj = open(filename, 'r') try: - return cls(pydriver.parse_stream(fileobj), modname, filename) + fileobj = open(filename, 'r') + except Exception, err: + raise PycodeError('error opening %r' % filename, err) + try: + try: + return cls(pydriver.parse_stream(fileobj), modname, filename) + except parse.ParseError, err: + raise PycodeError('error parsing %r' % filename, err) finally: fileobj.close() @classmethod def for_module(cls, modname): if modname not in sys.modules: - # XXX - __import__(modname) + try: + __import__(modname) + except ImportError, err: + raise PycodeError('error importing %r' % modname, err) mod = sys.modules[modname] if hasattr(mod, '__loader__'): - # XXX raises - source = mod.__loader__.get_source(modname) + try: + source = mod.__loader__.get_source(modname) + except Exception, err: + raise PycodeError('error getting source for %r' % modname, err) return cls.for_string(source, modname) filename = getattr(mod, '__file__', None) if filename is None: - # XXX - raise RuntimeError('no source found') + raise PycodeError('no source 
found for module %r' % modname) if filename.lower().endswith('.pyo') or \ filename.lower().endswith('.pyc'): filename = filename[:-1] elif not filename.lower().endswith('.py'): - raise RuntimeError('not a .py file') + raise PycodeError('source is not a .py file: %r' % filename) if not path.isfile(filename): - # XXX - raise RuntimeError('source not present') + raise PycodeError('source file is not present: %r' % filename) return cls.for_file(filename, modname) - def find_defs(self): + def find_attrs(self): attr_visitor = ClassAttrVisitor(number2name, '') attr_visitor.visit(self.tree) - for name, doc in attr_visitor.collected: - print '>>', name - print doc + return attr_visitor.collected x0 = time.time() ma = ModuleAnalyzer.for_module('sphinx.builders.html') x1 = time.time() -ma.find_defs() +for name, doc in ma.find_attrs(): + print '>>', name + print doc x2 = time.time() print "parsing %.4f, finding %.4f" % (x1-x0, x2-x1) -- cgit v1.2.1 From e51906609b6959386593da6fa2ede65ca9129b62 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 29 Dec 2008 22:25:15 +0100 Subject: Fix handling of INDENT/DEDENT tokens. --- sphinx/pycode/__init__.py | 39 ++++++++++++++++++++++++++++----------- sphinx/pycode/pytree.py | 3 +++ 2 files changed, 31 insertions(+), 11 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index 0ebf683c..ab34af1c 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -32,6 +32,10 @@ number2name.update(token.tok_name) def prepare_commentdoc(s): + """ + Extract documentation comment lines (starting with #:) and return them as a + list of lines. Returns an empty list if there is no documentation. + """ result = [] lines = [line.strip() for line in s.expandtabs().splitlines()] for line in lines: @@ -46,6 +50,10 @@ _eq = pytree.Leaf(token.EQUAL, '=') class ClassAttrVisitor(pytree.NodeVisitor): + """ + Visitor that collects comments appearing before attribute assignments + on toplevel and in classes. 
+ """ def init(self, scope): self.scope = scope self.namespace = [] @@ -60,18 +68,27 @@ class ClassAttrVisitor(pytree.NodeVisitor): if _eq not in node.children: # not an assignment (we don't care for augmented assignments) return - prefix = node[0].get_prefix() - if not prefix: - # if this assignment is the first thing in a class block, - # the comment will be the prefix of the preceding INDENT token - prev = node[0].get_prev_leaf() - if prev and prev.type == token.INDENT: - prefix = prev.prefix - doc = prepare_commentdoc(prefix) - if doc: - name = '.'.join(self.namespace + [node[0].compact()]) + pnode = node[0] + prefix = pnode.get_prefix() + # if the assignment is the first statement on a new indentation + # level, its preceding whitespace and comments are not assigned + # to that token, but the first INDENT or DEDENT token + while not prefix: + pnode = pnode.get_prev_leaf() + if not pnode or pnode.type not in (token.INDENT, token.DEDENT): + break + docstring = prepare_commentdoc(prefix) + if not docstring: + return + # add an item for each assignment target + for i in range(0, len(node) - 1, 2): + target = node[i] + if target.type != token.NAME: + # don't care about complex targets + continue + name = '.'.join(self.namespace + [target.value]) if name.startswith(self.scope): - self.collected.append((name, doc)) + self.collected.append((name, docstring)) def visit_funcdef(self, node): # don't descend into functions -- nothing interesting there diff --git a/sphinx/pycode/pytree.py b/sphinx/pycode/pytree.py index a0e83b63..bd72bc99 100644 --- a/sphinx/pycode/pytree.py +++ b/sphinx/pycode/pytree.py @@ -166,6 +166,9 @@ class Node(Base): def __iter__(self): return iter(self.children) + def __len__(self): + return len(self.children) + def _eq(self, other): """Compares two nodes for equality.""" return (self.type, self.children) == (other.type, other.children) -- cgit v1.2.1 From c5bd95ae02352a05a3899b0cbad8a85a95804935 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: 
Mon, 29 Dec 2008 22:30:00 +0100 Subject: Another fix for DEDENT/INDENT handling. --- sphinx/builders/html.py | 3 ++- sphinx/pycode/__init__.py | 1 + sphinx/pycode/pytree.py | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index 4ce30ad8..2bcd54ec 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -37,8 +37,9 @@ except ImportError: except ImportError: json = None - +#: the filename for the inventory of objects INVENTORY_FILENAME = 'objects.inv' +#: the filename for the "last build" file (for serializing builders) LAST_BUILD_FILENAME = 'last_build' diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index ab34af1c..dd32d470 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -77,6 +77,7 @@ class ClassAttrVisitor(pytree.NodeVisitor): pnode = pnode.get_prev_leaf() if not pnode or pnode.type not in (token.INDENT, token.DEDENT): break + prefix = pnode.get_prefix() docstring = prepare_commentdoc(prefix) if not docstring: return diff --git a/sphinx/pycode/pytree.py b/sphinx/pycode/pytree.py index bd72bc99..46017a95 100644 --- a/sphinx/pycode/pytree.py +++ b/sphinx/pycode/pytree.py @@ -221,8 +221,8 @@ class Leaf(Base): self.prefix = prefix def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, - self.type, self.value) + return "%s(%r, %r, %r)" % (self.__class__.__name__, + self.type, self.value, self.prefix) def __str__(self): """This reproduces the input source exactly.""" -- cgit v1.2.1 From e177eeb750aad12d964a16b750a8839f194a03bd Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 29 Dec 2008 22:30:41 +0100 Subject: Ignore grammar pickle files. 
--- .hgignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.hgignore b/.hgignore index a6b6e37a..37589422 100644 --- a/.hgignore +++ b/.hgignore @@ -2,6 +2,7 @@ .*\.egg build/ dist/ +sphinx/pycode/Grammar.*pickle Sphinx.egg-info/ doc/_build/ TAGS -- cgit v1.2.1 From 484625cc327f4fab73313f785b9aaeb94080e5be Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 01:29:49 +0100 Subject: Some speedups in pytree. Add Cython parse.py replacement, yielding a 2x speedup in parsing. --- sphinx/pycode/pgen2/parse.pyx | 156 ++++++++++++++++++++++++++++++++++++++++++ sphinx/pycode/pytree.py | 23 +++---- 2 files changed, 165 insertions(+), 14 deletions(-) create mode 100644 sphinx/pycode/pgen2/parse.pyx diff --git a/sphinx/pycode/pgen2/parse.pyx b/sphinx/pycode/pgen2/parse.pyx new file mode 100644 index 00000000..6a11ee6b --- /dev/null +++ b/sphinx/pycode/pgen2/parse.pyx @@ -0,0 +1,156 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Parser engine for the grammar tables generated by pgen. + +The grammar table must be loaded first. + +See Parser/parser.c in the Python distribution for additional info on +how this parsing engine works. 
+ +""" + +from sphinx.pycode.pytree import Node, Leaf + +DEF NAME = 1 + +class ParseError(Exception): + """Exception to signal the parser is stuck.""" + + def __init__(self, msg, type, value, context): + Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % + (msg, type, value, context)) + self.msg = msg + self.type = type + self.value = value + self.context = context + + +cdef class Parser: + cdef public grammar, stack, rootnode, used_names + cdef _grammar_dfas, _grammar_labels, _grammar_keywords, _grammar_tokens + cdef _grammar_number2symbol + + def __init__(self, grammar, convert=None): + self.grammar = grammar + #self.convert = convert or noconvert + + self._grammar_dfas = grammar.dfas + self._grammar_labels = grammar.labels + self._grammar_keywords = grammar.keywords + self._grammar_tokens = grammar.tokens + self._grammar_number2symbol = grammar.number2symbol + + def setup(self, start=None): + if start is None: + start = self.grammar.start + # Each stack entry is a tuple: (dfa, state, node). + # A node is a tuple: (type, value, context, children), + # where children is a list of nodes or None, and context may be None. 
+ newnode = (start, None, None, []) + stackentry = (self._grammar_dfas[start], 0, newnode) + self.stack = [stackentry] + self.rootnode = None + self.used_names = set() # Aliased to self.rootnode.used_names in pop() + + def addtoken(self, type, value, context): + """Add a token; return True iff this is the end of the program.""" + cdef int ilabel, i, t, state, newstate + # Map from token to label + ilabel = self.classify(type, value, context) + # Loop until the token is shifted; may raise exceptions + while True: + dfa, state, node = self.stack[-1] + states, first = dfa + arcs = states[state] + # Look for a state with this label + for i, newstate in arcs: + t, v = self._grammar_labels[i] + if ilabel == i: + # Look it up in the list of labels + ## assert t < 256 + # Shift a token; we're done with it + self.shift(type, value, newstate, context) + # Pop while we are in an accept-only state + state = newstate + while states[state] == [(0, state)]: + self.pop() + if not self.stack: + # Done parsing! + return True + dfa, state, node = self.stack[-1] + states, first = dfa + # Done with this token + return False + elif t >= 256: + # See if it's a symbol and if we're in its first set + itsdfa = self._grammar_dfas[t] + itsstates, itsfirst = itsdfa + if ilabel in itsfirst: + # Push a symbol + self.push(t, itsdfa, newstate, context) + break # To continue the outer while loop + else: + if (0, state) in arcs: + # An accepting state, pop it and try something else + self.pop() + if not self.stack: + # Done parsing, but another token is input + raise ParseError("too much input", + type, value, context) + else: + # No success finding a transition + raise ParseError("bad input", type, value, context) + + cdef int classify(self, type, value, context): + """Turn a token into a label. 
(Internal)""" + if type == NAME: + # Keep a listing of all used names + self.used_names.add(value) + # Check for reserved words + ilabel = self._grammar_keywords.get(value) + if ilabel is not None: + return ilabel + ilabel = self._grammar_tokens.get(type) + if ilabel is None: + raise ParseError("bad token", type, value, context) + return ilabel + + cdef void shift(self, type, value, newstate, context): + """Shift a token. (Internal)""" + dfa, state, node = self.stack[-1] + newnode = (type, value, context, None) + newnode = self.convert(newnode) + if newnode is not None: + node[-1].append(newnode) + self.stack[-1] = (dfa, newstate, node) + + cdef void push(self, type, newdfa, newstate, context): + """Push a nonterminal. (Internal)""" + dfa, state, node = self.stack[-1] + newnode = (type, None, context, []) + self.stack[-1] = (dfa, newstate, node) + self.stack.append((newdfa, 0, newnode)) + + cdef void pop(self): + """Pop a nonterminal. (Internal)""" + popdfa, popstate, popnode = self.stack.pop() + newnode = self.convert(popnode) + if newnode is not None: + if self.stack: + dfa, state, node = self.stack[-1] + node[-1].append(newnode) + else: + self.rootnode = newnode + self.rootnode.used_names = self.used_names + + cdef convert(self, raw_node): + type, value, context, children = raw_node + if children or type in self._grammar_number2symbol: + # If there's exactly one child, return that child instead of + # creating a new node. 
+ if len(children) == 1: + return children[0] + return Node(type, children, context=context) + else: + return Leaf(type, value, context=context) diff --git a/sphinx/pycode/pytree.py b/sphinx/pycode/pytree.py index 46017a95..063e39ec 100644 --- a/sphinx/pycode/pytree.py +++ b/sphinx/pycode/pytree.py @@ -29,11 +29,6 @@ class Base(object): children = () # Tuple of subnodes was_changed = False - def __new__(cls, *args, **kwds): - """Constructor that prevents Base from being instantiated.""" - assert cls is not Base, "Cannot instantiate Base" - return object.__new__(cls) - def __eq__(self, other): """Compares two nodes for equality. @@ -132,7 +127,7 @@ class Node(Base): """Concrete implementation for interior nodes.""" - def __init__(self, type, children, context=None, prefix=None): + def __init__(self, type, children, context=None): """Initializer. Takes a type constant (a symbol number >= 256), a sequence of @@ -140,14 +135,14 @@ class Node(Base): As a side effect, the parent pointers of the children are updated. """ - assert type >= 256, type + # assert type >= 256, type self.type = type self.children = list(children) for ch in self.children: - assert ch.parent is None, repr(ch) + # assert ch.parent is None, repr(ch) ch.parent = self - if prefix is not None: - self.set_prefix(prefix) + # if prefix is not None: + # self.set_prefix(prefix) def __repr__(self): return "%s(%s, %r)" % (self.__class__.__name__, @@ -206,19 +201,19 @@ class Leaf(Base): lineno = 0 # Line where this token starts in the input column = 0 # Column where this token tarts in the input - def __init__(self, type, value, context=None, prefix=None): + def __init__(self, type, value, context=None): """Initializer. Takes a type constant (a token number < 256), a string value, and an optional context keyword argument. 
""" - assert 0 <= type < 256, type + # assert 0 <= type < 256, type if context is not None: self.prefix, (self.lineno, self.column) = context self.type = type self.value = value - if prefix is not None: - self.prefix = prefix + # if prefix is not None: + # self.prefix = prefix def __repr__(self): return "%s(%r, %r, %r)" % (self.__class__.__name__, -- cgit v1.2.1 From bc4c6b9214603c8a04b965eae7ddebaa61727530 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 01:32:01 +0100 Subject: Move benchmark into __main__ block. --- sphinx/pycode/__init__.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index dd32d470..373d4a48 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -160,12 +160,12 @@ class ModuleAnalyzer(object): attr_visitor.visit(self.tree) return attr_visitor.collected - -x0 = time.time() -ma = ModuleAnalyzer.for_module('sphinx.builders.html') -x1 = time.time() -for name, doc in ma.find_attrs(): - print '>>', name - print doc -x2 = time.time() -print "parsing %.4f, finding %.4f" % (x1-x0, x2-x1) +if __name__ == '__main__': + x0 = time.time() + ma = ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') + x1 = time.time() + for name, doc in ma.find_attrs(): + print '>>', name + print doc + x2 = time.time() + print "parsing %.4f, finding %.4f" % (x1-x0, x2-x1) -- cgit v1.2.1 From b16fc1a0ae62ebb0c97511da7ae04d890c7a9ad3 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 02:09:29 +0100 Subject: First iteration of an autodoc that handles attribute documentation. 
--- sphinx/ext/autodoc.py | 55 +++++++++++++++++++++++++++++++++++------------ sphinx/pycode/__init__.py | 27 +++++++++++++++-------- 2 files changed, 59 insertions(+), 23 deletions(-) diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index f94afc75..ddea1e44 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -22,6 +22,7 @@ from docutils.parsers.rst import directives from docutils.statemachine import ViewList from sphinx.util import rpartition, nested_parse_with_titles +from sphinx.pycode import ModuleAnalyzer, PycodeError clstypes = (type, ClassType) try: @@ -313,7 +314,7 @@ class RstGenerator(object): 'for automodule %s' % name) return (path or '') + base, [], None, None - elif what in ('exception', 'function', 'class'): + elif what in ('exception', 'function', 'class', 'data'): if mod is None: if path: mod = path.rstrip('.') @@ -434,7 +435,9 @@ class RstGenerator(object): modfile = None # e.g. for builtin and C modules for part in objpath: todoc = getattr(todoc, part) - except (ImportError, AttributeError), err: + # also get a source code analyzer for attribute docs + analyzer = ModuleAnalyzer.for_module(mod) + except (ImportError, AttributeError, PycodeError), err: self.warn('autodoc can\'t import/find %s %r, it reported error: "%s", ' 'please check your spelling and sys.path' % (what, str(fullname), err)) @@ -503,6 +506,15 @@ class RstGenerator(object): else: sourcename = 'docstring of %s' % fullname + # add content from attribute documentation + attr_docs = analyzer.find_attr_docs() + if what in ('data', 'attribute'): + key = ('.'.join(objpath[:-1]), objpath[-1]) + if key in attr_docs: + no_docstring = True + for i, line in enumerate(attr_docs[key]): + self.result.append(indent + line, sourcename, i) + # add content from docstrings if not no_docstring: for i, line in enumerate(self.get_doc(what, fullname, todoc)): @@ -524,9 +536,9 @@ class RstGenerator(object): self.env.autodoc_current_class = objpath[0] # add members, if possible - 
_all = members == ['__all__'] + all_members = members == ['__all__'] members_check_module = False - if _all: + if all_members: # unqualified :members: given if what == 'module': if hasattr(todoc, '__all__'): @@ -555,14 +567,28 @@ class RstGenerator(object): else: all_members = [(mname, getattr(todoc, mname)) for mname in members] + # search for members in source code too + namespace = '.'.join(objpath) # will be empty for modules + for (membername, member) in all_members: - if _all and membername.startswith('_'): + # if isattr is True, the member is documented as an attribute + isattr = False + # if content is not None, no extra content from docstrings will be added + content = None + + if all_members and membername.startswith('_'): # ignore members whose name starts with _ by default skip = True else: - # ignore undocumented members if :undoc-members: is not given - doc = getattr(member, '__doc__', None) - skip = not self.options.undoc_members and not doc + if (namespace, membername) in attr_docs: + # keep documented attributes + skip = False + isattr = True + else: + # ignore undocumented members if :undoc-members: is not given + doc = getattr(member, '__doc__', None) + skip = not self.options.undoc_members and not doc + # give the user a chance to decide whether this member should be skipped if self.env.app: # let extensions preprocess docstrings @@ -573,10 +599,11 @@ class RstGenerator(object): if skip: continue - content = None if what == 'module': if isinstance(member, (FunctionType, BuiltinFunctionType)): memberwhat = 'function' + elif isattr: + memberwhat = 'attribute' elif isinstance(member, clstypes): if member.__name__ != membername: # assume it's aliased @@ -588,10 +615,13 @@ class RstGenerator(object): else: memberwhat = 'class' else: - # XXX: todo -- attribute docs continue else: - if isinstance(member, clstypes): + if inspect.isroutine(member): + memberwhat = 'method' + elif isattr: + memberwhat = 'attribute' + elif isinstance(member, clstypes): if 
member.__name__ != membername: # assume it's aliased memberwhat = 'attribute' @@ -599,12 +629,9 @@ class RstGenerator(object): source='') else: memberwhat = 'class' - elif inspect.isroutine(member): - memberwhat = 'method' elif isdescriptor(member): memberwhat = 'attribute' else: - # XXX: todo -- attribute docs continue # give explicitly separated module name, so that members of inner classes # can be documented diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index 373d4a48..0d4058bd 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -43,7 +43,7 @@ def prepare_commentdoc(s): result.append(line[3:]) if result and result[-1]: result.append('') - return '\n'.join(result) + return result _eq = pytree.Leaf(token.EQUAL, '=') @@ -57,7 +57,7 @@ class ClassAttrVisitor(pytree.NodeVisitor): def init(self, scope): self.scope = scope self.namespace = [] - self.collected = [] + self.collected = {} def visit_classdef(self, node): self.namespace.append(node[1].value) @@ -87,9 +87,9 @@ class ClassAttrVisitor(pytree.NodeVisitor): if target.type != token.NAME: # don't care about complex targets continue - name = '.'.join(self.namespace + [target.value]) - if name.startswith(self.scope): - self.collected.append((name, docstring)) + namespace = '.'.join(self.namespace) + if namespace.startswith(self.scope): + self.collected[namespace, target.value] = docstring def visit_funcdef(self, node): # don't descend into functions -- nothing interesting there @@ -105,6 +105,8 @@ class PycodeError(Exception): class ModuleAnalyzer(object): + # cache for analyzer objects + cache = {} def __init__(self, tree, modname, srcname): self.tree = tree @@ -131,6 +133,8 @@ class ModuleAnalyzer(object): @classmethod def for_module(cls, modname): + if modname in cls.cache: + return cls.cache[modname] if modname not in sys.modules: try: __import__(modname) @@ -142,7 +146,9 @@ class ModuleAnalyzer(object): source = mod.__loader__.get_source(modname) except Exception, 
err: raise PycodeError('error getting source for %r' % modname, err) - return cls.for_string(source, modname) + obj = cls.for_string(source, modname) + cls.cache[modname] = obj + return obj filename = getattr(mod, '__file__', None) if filename is None: raise PycodeError('no source found for module %r' % modname) @@ -153,13 +159,16 @@ class ModuleAnalyzer(object): raise PycodeError('source is not a .py file: %r' % filename) if not path.isfile(filename): raise PycodeError('source file is not present: %r' % filename) - return cls.for_file(filename, modname) + obj = cls.for_file(filename, modname) + cls.cache[modname] = obj + return obj - def find_attrs(self): - attr_visitor = ClassAttrVisitor(number2name, '') + def find_attr_docs(self, scope=''): + attr_visitor = ClassAttrVisitor(number2name, scope) attr_visitor.visit(self.tree) return attr_visitor.collected + if __name__ == '__main__': x0 = time.time() ma = ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') -- cgit v1.2.1 From 7124a1e1760c7704a555b2eda54d1970783b1d56 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 02:26:47 +0100 Subject: Also find attribute docs in the "other" style: docstrings after the assignment. 
--- sphinx/pycode/__init__.py | 75 ++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 61 insertions(+), 14 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index 0d4058bd..f4bd1b1d 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -14,7 +14,7 @@ import time from os import path from sphinx.pycode import pytree -from sphinx.pycode.pgen2 import driver, token, parse +from sphinx.pycode.pgen2 import driver, token, parse, literals # load the Python grammar @@ -46,13 +46,42 @@ def prepare_commentdoc(s): return result +def prepare_literaldoc(s): + # first, "evaluate" the string + s = literals.evalString(s) + # then, prepare (XXX copied from ext/autodoc) + lines = s.expandtabs().splitlines() + # Find minimum indentation of any non-blank lines after first line. + margin = sys.maxint + for line in lines[1:]: + content = len(line.lstrip()) + if content: + indent = len(line) - content + margin = min(margin, indent) + # Remove indentation. + if lines: + lines[0] = lines[0].lstrip() + if margin < sys.maxint: + for i in range(1, len(lines)): lines[i] = lines[i][margin:] + # Remove any leading blank lines. + while lines and not lines[0]: + lines.pop(0) + # make sure there is an empty line at the end + if lines and lines[-1]: + lines.append('') + return lines + + _eq = pytree.Leaf(token.EQUAL, '=') -class ClassAttrVisitor(pytree.NodeVisitor): +class AttrDocVisitor(pytree.NodeVisitor): """ - Visitor that collects comments appearing before attribute assignments - on toplevel and in classes. + Visitor that collects docstrings for attribute assignments on toplevel and + in classes. + + The docstrings can either be in special '#:' comments before the assignment + or in a docstring after it. 
""" def init(self, scope): self.scope = scope @@ -65,6 +94,7 @@ class ClassAttrVisitor(pytree.NodeVisitor): self.namespace.pop() def visit_expr_stmt(self, node): + """Visit an assignment which may have a special comment before it.""" if _eq not in node.children: # not an assignment (we don't care for augmented assignments) return @@ -79,8 +109,27 @@ class ClassAttrVisitor(pytree.NodeVisitor): break prefix = pnode.get_prefix() docstring = prepare_commentdoc(prefix) - if not docstring: + if docstring: + self.add_docstring(node, docstring) + + def visit_simple_stmt(self, node): + """Visit a docstring statement which may have an assignment before.""" + if node[0].type != token.STRING: + # not a docstring; but still need to visit children + return self.generic_visit(node) + prev = node.get_prev_sibling() + if not prev: return + if prev.type == sym.simple_stmt and \ + prev[0].type == sym.expr_stmt and _eq in prev[0].children: + docstring = prepare_literaldoc(node[0].value) + self.add_docstring(prev[0], docstring) + + def visit_funcdef(self, node): + # don't descend into functions -- nothing interesting there + return + + def add_docstring(self, node, docstring): # add an item for each assignment target for i in range(0, len(node) - 1, 2): target = node[i] @@ -91,10 +140,6 @@ class ClassAttrVisitor(pytree.NodeVisitor): if namespace.startswith(self.scope): self.collected[namespace, target.value] = docstring - def visit_funcdef(self, node): - # don't descend into functions -- nothing interesting there - return - class PycodeError(Exception): def __str__(self): @@ -164,17 +209,19 @@ class ModuleAnalyzer(object): return obj def find_attr_docs(self, scope=''): - attr_visitor = ClassAttrVisitor(number2name, scope) + attr_visitor = AttrDocVisitor(number2name, scope) attr_visitor.visit(self.tree) return attr_visitor.collected if __name__ == '__main__': x0 = time.time() - ma = ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') + #ma = 
ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') + ma = ModuleAnalyzer.for_file(__file__.rstrip('c'), 'sphinx.builders.html') x1 = time.time() - for name, doc in ma.find_attrs(): - print '>>', name - print doc + for (ns, name), doc in ma.find_attr_docs().iteritems(): + print '>>', ns, name + print '\n'.join(doc) x2 = time.time() + #print pytree.nice_repr(ma.tree, number2name) print "parsing %.4f, finding %.4f" % (x1-x0, x2-x1) -- cgit v1.2.1 From 47cf3c5630c790fff8b52e9c0350d47a067094cc Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 02:37:01 +0100 Subject: Ignore .so files. --- .hgignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.hgignore b/.hgignore index 37589422..99fc8d87 100644 --- a/.hgignore +++ b/.hgignore @@ -1,5 +1,6 @@ .*\.pyc .*\.egg +.*\.so build/ dist/ sphinx/pycode/Grammar.*pickle -- cgit v1.2.1 From 6bec10bbcf957d4a26dc5b3db2f4a099382abf56 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 02:37:20 +0100 Subject: Move docstring processing to an util module. --- sphinx/ext/autodoc.py | 30 +------------------------ sphinx/pycode/__init__.py | 51 +++++------------------------------------- sphinx/util/docstrings.py | 56 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 74 deletions(-) create mode 100644 sphinx/util/docstrings.py diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index ddea1e44..55f0d58f 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -23,6 +23,7 @@ from docutils.statemachine import ViewList from sphinx.util import rpartition, nested_parse_with_titles from sphinx.pycode import ModuleAnalyzer, PycodeError +from sphinx.util.docstrings import prepare_docstring clstypes = (type, ClassType) try: @@ -172,35 +173,6 @@ def isdescriptor(x): return False -def prepare_docstring(s): - """ - Convert a docstring into lines of parseable reST. 
Return it as a list of - lines usable for inserting into a docutils ViewList (used as argument - of nested_parse().) An empty line is added to act as a separator between - this docstring and following content. - """ - lines = s.expandtabs().splitlines() - # Find minimum indentation of any non-blank lines after first line. - margin = sys.maxint - for line in lines[1:]: - content = len(line.lstrip()) - if content: - indent = len(line) - content - margin = min(margin, indent) - # Remove indentation. - if lines: - lines[0] = lines[0].lstrip() - if margin < sys.maxint: - for i in range(1, len(lines)): lines[i] = lines[i][margin:] - # Remove any leading blank lines. - while lines and not lines[0]: - lines.pop(0) - # make sure there is an empty line at the end - if lines and lines[-1]: - lines.append('') - return lines - - def get_module_charset(module): """Return the charset of the given module (cached in _module_charsets).""" if module in _module_charsets: diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index f4bd1b1d..e52a231d 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -10,11 +10,11 @@ """ import sys -import time from os import path from sphinx.pycode import pytree from sphinx.pycode.pgen2 import driver, token, parse, literals +from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc # load the Python grammar @@ -31,47 +31,6 @@ number2name = pygrammar.number2symbol.copy() number2name.update(token.tok_name) -def prepare_commentdoc(s): - """ - Extract documentation comment lines (starting with #:) and return them as a - list of lines. Returns an empty list if there is no documentation. 
- """ - result = [] - lines = [line.strip() for line in s.expandtabs().splitlines()] - for line in lines: - if line.startswith('#: '): - result.append(line[3:]) - if result and result[-1]: - result.append('') - return result - - -def prepare_literaldoc(s): - # first, "evaluate" the string - s = literals.evalString(s) - # then, prepare (XXX copied from ext/autodoc) - lines = s.expandtabs().splitlines() - # Find minimum indentation of any non-blank lines after first line. - margin = sys.maxint - for line in lines[1:]: - content = len(line.lstrip()) - if content: - indent = len(line) - content - margin = min(margin, indent) - # Remove indentation. - if lines: - lines[0] = lines[0].lstrip() - if margin < sys.maxint: - for i in range(1, len(lines)): lines[i] = lines[i][margin:] - # Remove any leading blank lines. - while lines and not lines[0]: - lines.pop(0) - # make sure there is an empty line at the end - if lines and lines[-1]: - lines.append('') - return lines - - _eq = pytree.Leaf(token.EQUAL, '=') @@ -122,7 +81,8 @@ class AttrDocVisitor(pytree.NodeVisitor): return if prev.type == sym.simple_stmt and \ prev[0].type == sym.expr_stmt and _eq in prev[0].children: - docstring = prepare_literaldoc(node[0].value) + # need to "eval" the string because it's returned in its original form + docstring = prepare_docstring(literals.evalString(node[0].value)) self.add_docstring(prev[0], docstring) def visit_funcdef(self, node): @@ -215,9 +175,10 @@ class ModuleAnalyzer(object): if __name__ == '__main__': + import time x0 = time.time() - #ma = ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') - ma = ModuleAnalyzer.for_file(__file__.rstrip('c'), 'sphinx.builders.html') + ma = ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') + #ma = ModuleAnalyzer.for_file(__file__.rstrip('c'), 'sphinx.builders.html') x1 = time.time() for (ns, name), doc in ma.find_attr_docs().iteritems(): print '>>', ns, name diff --git 
a/sphinx/util/docstrings.py b/sphinx/util/docstrings.py new file mode 100644 index 00000000..d7e20e4c --- /dev/null +++ b/sphinx/util/docstrings.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +""" + sphinx.util.docstrings + ~~~~~~~~~~~~~~~~~~~~~~ + + Utilities for docstring processing. + + :copyright: 2008 by Georg Brandl. + :license: BSD, see LICENSE for details. +""" + +import sys + + +def prepare_docstring(s): + """ + Convert a docstring into lines of parseable reST. Return it as a list of + lines usable for inserting into a docutils ViewList (used as argument + of nested_parse().) An empty line is added to act as a separator between + this docstring and following content. + """ + lines = s.expandtabs().splitlines() + # Find minimum indentation of any non-blank lines after first line. + margin = sys.maxint + for line in lines[1:]: + content = len(line.lstrip()) + if content: + indent = len(line) - content + margin = min(margin, indent) + # Remove indentation. + if lines: + lines[0] = lines[0].lstrip() + if margin < sys.maxint: + for i in range(1, len(lines)): lines[i] = lines[i][margin:] + # Remove any leading blank lines. + while lines and not lines[0]: + lines.pop(0) + # make sure there is an empty line at the end + if lines and lines[-1]: + lines.append('') + return lines + + +def prepare_commentdoc(s): + """ + Extract documentation comment lines (starting with #:) and return them as a + list of lines. Returns an empty list if there is no documentation. + """ + result = [] + lines = [line.strip() for line in s.expandtabs().splitlines()] + for line in lines: + if line.startswith('#: '): + result.append(line[3:]) + if result and result[-1]: + result.append('') + return result -- cgit v1.2.1 From 998660e41e99b171d029a0e6aa7b85fc0ba62373 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 12:42:26 +0100 Subject: * Add a tag-finding method based on tokens. * Don't parse immediately if tokenizing suffices. * Also cache by file name. 
--- sphinx/pycode/__init__.py | 139 +++++++++++++++++++++++++++++++++--------- sphinx/pycode/pgen2/driver.py | 18 +++--- 2 files changed, 118 insertions(+), 39 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index e52a231d..f456cfe6 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -11,9 +11,10 @@ import sys from os import path +from cStringIO import StringIO from sphinx.pycode import pytree -from sphinx.pycode.pgen2 import driver, token, parse, literals +from sphinx.pycode.pgen2 import driver, token, tokenize, parse, literals from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc @@ -22,9 +23,12 @@ _grammarfile = path.join(path.dirname(__file__), 'Grammar.txt') pygrammar = driver.load_grammar(_grammarfile) pydriver = driver.Driver(pygrammar, convert=pytree.convert) +# an object with attributes corresponding to token and symbol names class sym: pass for k, v in pygrammar.symbol2number.iteritems(): setattr(sym, k, v) +for k, v in token.tok_name.iteritems(): + setattr(sym, v, k) # a dict mapping terminal and nonterminal numbers to their names number2name = pygrammar.number2symbol.copy() @@ -110,36 +114,29 @@ class PycodeError(Exception): class ModuleAnalyzer(object): - # cache for analyzer objects + # cache for analyzer objects -- caches both by module and file name cache = {} - def __init__(self, tree, modname, srcname): - self.tree = tree - self.modname = modname - self.srcname = srcname - @classmethod def for_string(cls, string, modname, srcname=''): - return cls(pydriver.parse_string(string), modname, srcname) + return cls(StringIO(string), modname, srcname) @classmethod def for_file(cls, filename, modname): + if ('file', filename) in cls.cache: + return cls.cache['file', filename] try: fileobj = open(filename, 'r') except Exception, err: raise PycodeError('error opening %r' % filename, err) - try: - try: - return cls(pydriver.parse_stream(fileobj), modname, filename) - except 
parse.ParseError, err: - raise PycodeError('error parsing %r' % filename, err) - finally: - fileobj.close() + obj = cls(fileobj, modname, filename) + cls.cache['file', filename] = obj + return obj @classmethod def for_module(cls, modname): - if modname in cls.cache: - return cls.cache[modname] + if ('module', modname) in cls.cache: + return cls.cache['module', modname] if modname not in sys.modules: try: __import__(modname) @@ -152,37 +149,119 @@ class ModuleAnalyzer(object): except Exception, err: raise PycodeError('error getting source for %r' % modname, err) obj = cls.for_string(source, modname) - cls.cache[modname] = obj + cls.cache['module', modname] = obj return obj filename = getattr(mod, '__file__', None) if filename is None: raise PycodeError('no source found for module %r' % modname) - if filename.lower().endswith('.pyo') or \ - filename.lower().endswith('.pyc'): + filename = path.normpath(filename) + lfilename = filename.lower() + if lfilename.endswith('.pyo') or lfilename.endswith('.pyc'): filename = filename[:-1] - elif not filename.lower().endswith('.py'): + elif not lfilename.endswith('.py'): raise PycodeError('source is not a .py file: %r' % filename) if not path.isfile(filename): raise PycodeError('source file is not present: %r' % filename) obj = cls.for_file(filename, modname) - cls.cache[modname] = obj + cls.cache['module', modname] = obj return obj + def __init__(self, source, modname, srcname): + self.modname = modname + self.srcname = srcname + # file-like object yielding source lines + self.source = source + + # will be filled by tokenize() + self.tokens = None + # will be filled by parse() + self.parsetree = None + + def tokenize(self): + """Generate tokens from the source.""" + if self.tokens is not None: + return + self.tokens = list(tokenize.generate_tokens(self.source.readline)) + self.source.close() + + def parse(self): + """Parse the generated source tokens.""" + if self.parsetree is not None: + return + self.tokenize() + 
self.parsetree = pydriver.parse_tokens(self.tokens) + def find_attr_docs(self, scope=''): + """Find class and module-level attributes and their documentation.""" + self.parse() attr_visitor = AttrDocVisitor(number2name, scope) - attr_visitor.visit(self.tree) + attr_visitor.visit(self.parsetree) return attr_visitor.collected + def find_tags(self): + """Find class, function and method definitions and their location.""" + self.tokenize() + result = {} + namespace = [] + stack = [] + indent = 0 + defline = False + expect_indent = False + def tokeniter(ignore = (token.COMMENT, token.NL)): + for tokentup in self.tokens: + if tokentup[0] not in ignore: + yield tokentup + tokeniter = tokeniter() + for type, tok, spos, epos, line in tokeniter: + if expect_indent: + if type != token.INDENT: + # no suite -- one-line definition + assert stack + dtype, fullname, startline, _ = stack.pop() + endline = epos[0] + namespace.pop() + result[dtype, fullname] = (startline, endline) + expect_indent = False + if tok in ('def', 'class'): + name = tokeniter.next()[1] + namespace.append(name) + fullname = '.'.join(namespace) + stack.append((tok, fullname, spos[0], indent)) + defline = True + elif type == token.INDENT: + expect_indent = False + indent += 1 + elif type == token.DEDENT: + indent -= 1 + # if the stacklevel is the same as it was before the last def/class block, + # this dedent closes that block + if stack and indent == stack[-1][3]: + dtype, fullname, startline, _ = stack.pop() + endline = spos[0] + namespace.pop() + result[dtype, fullname] = (startline, endline) + elif type == token.NEWLINE: + # if this line contained a definition, expect an INDENT to start the + # suite; if there is no such INDENT it's a one-line definition + if defline: + defline = False + expect_indent = True + return result + if __name__ == '__main__': - import time + import time, pprint x0 = time.time() - ma = ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') #ma = 
ModuleAnalyzer.for_file(__file__.rstrip('c'), 'sphinx.builders.html') + ma = ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') + ma.tokenize() x1 = time.time() - for (ns, name), doc in ma.find_attr_docs().iteritems(): - print '>>', ns, name - print '\n'.join(doc) + ma.parse() x2 = time.time() - #print pytree.nice_repr(ma.tree, number2name) - print "parsing %.4f, finding %.4f" % (x1-x0, x2-x1) + #for (ns, name), doc in ma.find_attr_docs().iteritems(): + # print '>>', ns, name + # print '\n'.join(doc) + pprint.pprint(ma.find_tags()) + x3 = time.time() + #print pytree.nice_repr(ma.parsetree, number2name) + print "tokenizing %.4f, parsing %.4f, finding %.4f" % (x1-x0, x2-x1, x3-x2) diff --git a/sphinx/pycode/pgen2/driver.py b/sphinx/pycode/pgen2/driver.py index 3e9e1043..edc882fa 100644 --- a/sphinx/pycode/pgen2/driver.py +++ b/sphinx/pycode/pgen2/driver.py @@ -42,8 +42,8 @@ class Driver(object): column = 0 type = value = start = end = line_text = None prefix = "" - for quintuple in tokens: - type, value, start, end, line_text = quintuple + opmap = grammar.opmap + for type, value, start, end, line_text in tokens: if start != (lineno, column): assert (lineno, column) <= start, ((lineno, column), start) s_lineno, s_column = start @@ -62,13 +62,13 @@ class Driver(object): column = 0 continue if type == token.OP: - type = grammar.opmap[value] - if debug: - self.logger.debug("%s %r (prefix=%r)", - token.tok_name[type], value, prefix) + type = opmap[value] + # if debug: + # self.logger.debug("%s %r (prefix=%r)", + # token.tok_name[type], value, prefix) if p.addtoken(type, value, (prefix, start)): - if debug: - self.logger.debug("Stop.") + # if debug: + # self.logger.debug("Stop.") break prefix = "" lineno, column = end @@ -77,7 +77,7 @@ class Driver(object): column = 0 else: # We never broke out -- EOF is too soon (how can this happen???) 
- raise parse.ParseError("incomplete input", t, v, x) + raise parse.ParseError("incomplete input", type, value, line_text) return p.rootnode def parse_stream_raw(self, stream, debug=False): -- cgit v1.2.1 From 2cfbedc3766a17d9991589f482eadc76e4b3d687 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 15:41:21 +0100 Subject: Add "object" option to literalinclude directive. --- sphinx/directives/code.py | 50 +++++++++++++++++++++++++++++++++-------------- sphinx/pycode/__init__.py | 4 ++-- 2 files changed, 37 insertions(+), 17 deletions(-) diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py index b4364535..7ac9a1c3 100644 --- a/sphinx/directives/code.py +++ b/sphinx/directives/code.py @@ -68,32 +68,52 @@ def literalinclude_directive(name, arguments, options, content, lineno, lineno - state_machine.input_offset - 1))) fn = path.normpath(path.join(source_dir, rel_fn)) + fromline = toline = None + objectname = options.get('object') + if objectname is not None: + from sphinx.pycode import ModuleAnalyzer + analyzer = ModuleAnalyzer.for_file(fn, '') + tags = analyzer.find_tags() + if objectname not in tags: + return [state.document.reporter.warning( + 'Object named %r not found in include file %r' % + (objectname, arguments[0]), line=lineno)] + else: + fromline = tags[objectname][1] - 1 + toline = tags[objectname][2] - 1 + encoding = options.get('encoding', env.config.source_encoding) try: f = codecs.open(fn, 'r', encoding) - text = f.read() + lines = f.readlines() f.close() except (IOError, OSError): - retnode = state.document.reporter.warning( - 'Include file %r not found or reading it failed' % arguments[0], line=lineno) + return [state.document.reporter.warning( + 'Include file %r not found or reading it failed' % arguments[0], + line=lineno)] except UnicodeError: - retnode = state.document.reporter.warning( + return [state.document.reporter.warning( 'Encoding %r used for reading included file %r seems to ' 'be wrong, try giving an 
:encoding: option' % - (encoding, arguments[0])) - else: - retnode = nodes.literal_block(text, text, source=fn) - retnode.line = 1 - if options.get('language', ''): - retnode['language'] = options['language'] - if 'linenos' in options: - retnode['linenos'] = True - state.document.settings.env.note_dependency(rel_fn) + (encoding, arguments[0]))] + text = ''.join(lines[fromline:toline]) + retnode = nodes.literal_block(text, text, source=fn) + retnode.line = 1 + if options.get('language', ''): + retnode['language'] = options['language'] + if 'linenos' in options: + retnode['linenos'] = True + state.document.settings.env.note_dependency(rel_fn) return [retnode] literalinclude_directive.options = {'linenos': directives.flag, - 'language': directives.unchanged, - 'encoding': directives.encoding} + 'language': directives.unchanged_required, + 'encoding': directives.encoding, + 'object': directives.unchanged_required, + #'lines': directives.unchanged_required, + #'start-after': directives.unchanged_required, + #'end-before': directives.unchanged_required, + } literalinclude_directive.content = 0 literalinclude_directive.arguments = (1, 0, 0) directives.register_directive('literalinclude', literalinclude_directive) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index f456cfe6..a61208d5 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -220,7 +220,7 @@ class ModuleAnalyzer(object): dtype, fullname, startline, _ = stack.pop() endline = epos[0] namespace.pop() - result[dtype, fullname] = (startline, endline) + result[fullname] = (dtype, startline, endline) expect_indent = False if tok in ('def', 'class'): name = tokeniter.next()[1] @@ -239,7 +239,7 @@ class ModuleAnalyzer(object): dtype, fullname, startline, _ = stack.pop() endline = spos[0] namespace.pop() - result[dtype, fullname] = (startline, endline) + result[fullname] = (dtype, startline, endline) elif type == token.NEWLINE: # if this line contained a definition, expect an 
INDENT to start the # suite; if there is no such INDENT it's a one-line definition -- cgit v1.2.1 From c9d52ded703813ca16c75702bd18b9fcbd2e751d Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 15:45:02 +0100 Subject: Rename "object" to "pyobject" and document it. --- doc/markup/code.rst | 12 ++++++++++++ sphinx/directives/code.py | 4 ++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/doc/markup/code.rst b/doc/markup/code.rst index 299ab0bc..2fd51c84 100644 --- a/doc/markup/code.rst +++ b/doc/markup/code.rst @@ -113,8 +113,20 @@ Includes .. literalinclude:: example.py :encoding: latin-1 + The directive also supports including only parts of the file. If it is a + Python module, you can select a class, function or method to include using + the ``pyobject`` option:: + + .. literalinclude:: example.py + :pyobject: Timer.start + + This would only include the code lines belonging to the ``start()`` method in + the ``Timer`` class within the file. + .. versionadded:: 0.4.3 The ``encoding`` option. + .. versionadded:: 0.6 + The ``pyobject`` option. .. 
rubric:: Footnotes diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py index 7ac9a1c3..cc74566d 100644 --- a/sphinx/directives/code.py +++ b/sphinx/directives/code.py @@ -69,7 +69,7 @@ def literalinclude_directive(name, arguments, options, content, lineno, fn = path.normpath(path.join(source_dir, rel_fn)) fromline = toline = None - objectname = options.get('object') + objectname = options.get('pyobject') if objectname is not None: from sphinx.pycode import ModuleAnalyzer analyzer = ModuleAnalyzer.for_file(fn, '') @@ -109,7 +109,7 @@ def literalinclude_directive(name, arguments, options, content, lineno, literalinclude_directive.options = {'linenos': directives.flag, 'language': directives.unchanged_required, 'encoding': directives.encoding, - 'object': directives.unchanged_required, + 'pyobject': directives.unchanged_required, #'lines': directives.unchanged_required, #'start-after': directives.unchanged_required, #'end-before': directives.unchanged_required, -- cgit v1.2.1 From c80f5f2c0969541faace962382011192dbf1fda2 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 23:02:47 +0100 Subject: Update Czech locale. --- sphinx/locale/cs/LC_MESSAGES/sphinx.js | 2 +- sphinx/locale/cs/LC_MESSAGES/sphinx.mo | Bin 7500 -> 9018 bytes sphinx/locale/cs/LC_MESSAGES/sphinx.po | 328 ++++++++++++++++----------------- 3 files changed, 161 insertions(+), 169 deletions(-) diff --git a/sphinx/locale/cs/LC_MESSAGES/sphinx.js b/sphinx/locale/cs/LC_MESSAGES/sphinx.js index 4b814e4f..0684899d 100644 --- a/sphinx/locale/cs/LC_MESSAGES/sphinx.js +++ b/sphinx/locale/cs/LC_MESSAGES/sphinx.js @@ -1 +1 @@ -Documentation.addTranslations({"locale": "cs", "plural_expr": "(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)", "messages": {"module, in ": "modul", "Preparing search...": "", "Your search did not match any documents. 
Please make sure that all words are spelled correctly and that you've selected enough categories.": "", "Search finished, found %s page(s) matching the search query.": "", ", in ": "", "Permalink to this headline": "Trval\u00fd odkaz na tento nadpis", "Searching": "hledej", "Permalink to this definition": "Trval\u00fd odkaz na tuto definici", "Hide Search Matches": "", "Search Results": "V\u00fdsledky hled\u00e1n\u00ed"}}); \ No newline at end of file +Documentation.addTranslations({"locale": "cs", "plural_expr": "(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)", "messages": {"module, in ": "modul, v", "Preparing search...": "Připravuji hledání...", "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories.": "Nenalezli jsme žádný dokument. Ujistěte se prosím, že všechna slova jsou správně.", "Search finished, found %s page(s) matching the search query.": "Vyhledávání skončilo, nalezeno %s stran.", ", in ": ", v", "Permalink to this headline": "Trvalý odkaz na tento nadpis", "Searching": "Hledám", "Permalink to this definition": "Trvalý odkaz na tuto definici", "Hide Search Matches": "Skrýt výsledky hledání", "Search Results": "Výsledky hledání"}}); diff --git a/sphinx/locale/cs/LC_MESSAGES/sphinx.mo b/sphinx/locale/cs/LC_MESSAGES/sphinx.mo index c50de3db..fd97a57e 100644 Binary files a/sphinx/locale/cs/LC_MESSAGES/sphinx.mo and b/sphinx/locale/cs/LC_MESSAGES/sphinx.mo differ diff --git a/sphinx/locale/cs/LC_MESSAGES/sphinx.po b/sphinx/locale/cs/LC_MESSAGES/sphinx.po index 758a4ff3..0d11dded 100644 --- a/sphinx/locale/cs/LC_MESSAGES/sphinx.po +++ b/sphinx/locale/cs/LC_MESSAGES/sphinx.po @@ -7,41 +7,108 @@ msgid "" msgstr "" "Project-Id-Version: Sphinx 0.5\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2008-08-10 11:43+0000\n" -"PO-Revision-Date: 2008-12-28 23:40+0100\n" +"POT-Creation-Date: 2008-11-27 18:39+0100\n" +"PO-Revision-Date: 
2008-12-25 05:59+0100\n" "Last-Translator: Pavel Kosina \n" "Language-Team: Pavel Kosina \n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " -"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\n" "MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" +"Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 0.9.3\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\n" +"Generated-By: Babel 0.9.4\n" +"X-Poedit-Language: Czech\n" +"X-Poedit-Country: CZECH REPUBLIC\n" -#: sphinx/environment.py:103 sphinx/writers/latex.py:170 +#: sphinx/builder.py:408 +#, python-format +msgid "%b %d, %Y" +msgstr "%d.%m.%Y" + +#: sphinx/builder.py:427 +#: sphinx/templates/defindex.html:21 +msgid "General Index" +msgstr "Rejstřík indexů" + +#: sphinx/builder.py:427 +msgid "index" +msgstr "index" + +#: sphinx/builder.py:429 +#: sphinx/htmlhelp.py:156 +#: sphinx/templates/defindex.html:19 +#: sphinx/templates/modindex.html:2 +#: sphinx/templates/modindex.html:13 +msgid "Global Module Index" +msgstr "Celkový rejstřík modulů" + +#: sphinx/builder.py:429 +msgid "modules" +msgstr "moduly" + +#: sphinx/builder.py:466 +msgid "next" +msgstr "další" + +#: sphinx/builder.py:473 +msgid "previous" +msgstr "předchozí" + +#: sphinx/builder.py:1054 +msgid " (in " +msgstr "(v" + +#: sphinx/builder.py:1129 +msgid "Builtins" +msgstr "Vestavěné funkce " + +#: sphinx/builder.py:1131 +msgid "Module level" +msgstr "Úroveň modulů" + +#: sphinx/environment.py:102 +#: sphinx/latexwriter.py:169 #, python-format msgid "%B %d, %Y" msgstr "%d.%m.%Y" -#: sphinx/environment.py:293 sphinx/templates/genindex-single.html:2 +#: sphinx/environment.py:291 +#: sphinx/latexwriter.py:175 +#: sphinx/templates/genindex-single.html:2 #: sphinx/templates/genindex-split.html:2 -#: sphinx/templates/genindex-split.html:5 sphinx/templates/genindex.html:2 -#: sphinx/templates/genindex.html:5 
sphinx/templates/genindex.html:48 -#: sphinx/templates/layout.html:117 sphinx/writers/latex.py:176 +#: sphinx/templates/genindex-split.html:5 +#: sphinx/templates/genindex.html:2 +#: sphinx/templates/genindex.html:5 +#: sphinx/templates/genindex.html:48 +#: sphinx/templates/layout.html:130 msgid "Index" msgstr "Index" -#: sphinx/environment.py:294 sphinx/writers/latex.py:175 -#, fuzzy +#: sphinx/environment.py:292 +#: sphinx/latexwriter.py:174 msgid "Module Index" -msgstr "Rejstřík modulů" +msgstr "Rejstřík modulů " -#: sphinx/environment.py:295 sphinx/templates/defindex.html:16 -#, fuzzy +#: sphinx/environment.py:293 +#: sphinx/templates/defindex.html:16 msgid "Search Page" msgstr "Vyhledávací stránka" -#: sphinx/roles.py:53 sphinx/directives/desc.py:564 +#: sphinx/htmlwriter.py:79 +#: sphinx/static/doctools.js:145 +msgid "Permalink to this definition" +msgstr "Trvalý odkaz na tuto definici" + +#: sphinx/htmlwriter.py:399 +#: sphinx/static/doctools.js:139 +msgid "Permalink to this headline" +msgstr "Trvalý odkaz na tento nadpis" + +#: sphinx/latexwriter.py:172 +msgid "Release" +msgstr "Vydání" + +#: sphinx/roles.py:53 +#: sphinx/directives/desc.py:537 #, python-format msgid "environment variable; %s" msgstr "promměná prostředí, %s" @@ -51,55 +118,22 @@ msgstr "promměná prostředí, %s" msgid "Python Enhancement Proposals!PEP %s" msgstr "Python Enhancement Proposals!PEP %s" -#: sphinx/builders/changes.py:64 -msgid "Builtins" -msgstr "Vestavěné funkce" - -#: sphinx/builders/changes.py:66 -msgid "Module level" -msgstr "Úroveň modulů" - -#: sphinx/builders/html.py:115 +#: sphinx/textwriter.py:166 #, python-format -msgid "%b %d, %Y" -msgstr "%d.%m.%Y" - -#: sphinx/builders/html.py:134 sphinx/templates/defindex.html:21 -msgid "General Index" -msgstr "Rejstřík indexů" - -#: sphinx/builders/html.py:134 -msgid "index" -msgstr "index" - -#: sphinx/builders/html.py:136 sphinx/builders/htmlhelp.py:180 -#: sphinx/builders/qthelp.py:129 sphinx/templates/defindex.html:19 -#: 
sphinx/templates/modindex.html:2 sphinx/templates/modindex.html:13 -msgid "Global Module Index" -msgstr "Rejstřík modulů" - -#: sphinx/builders/html.py:136 -msgid "modules" -msgstr "moduly" - -#: sphinx/builders/html.py:175 -msgid "next" -msgstr "další" - -#: sphinx/builders/html.py:182 -msgid "previous" -msgstr "předchozí" +msgid "Platform: %s" +msgstr "Platforma: %s" -#: sphinx/builders/latex.py:155 -msgid " (in " -msgstr "" +#: sphinx/textwriter.py:422 +msgid "[image]" +msgstr "[obrázek]" #: sphinx/directives/desc.py:25 #, python-format msgid "%s() (built-in function)" msgstr "%s() (vestavěná funkce)" -#: sphinx/directives/desc.py:26 sphinx/directives/desc.py:42 +#: sphinx/directives/desc.py:26 +#: sphinx/directives/desc.py:42 #: sphinx/directives/desc.py:54 #, python-format msgid "%s() (in module %s)" @@ -110,15 +144,16 @@ msgstr "%s() (v modulu %s)" msgid "%s (built-in variable)" msgstr "%s() (vestavěná proměnná)" -#: sphinx/directives/desc.py:30 sphinx/directives/desc.py:66 +#: sphinx/directives/desc.py:30 +#: sphinx/directives/desc.py:66 #, python-format msgid "%s (in module %s)" msgstr "%s() (v modulu %s)" #: sphinx/directives/desc.py:33 -#, fuzzy, python-format +#, python-format msgid "%s (built-in class)" -msgstr "%s() (vestavěná proměnná)" +msgstr "%s () (vestavěná proměnná)" #: sphinx/directives/desc.py:34 #, python-format @@ -196,19 +231,14 @@ msgstr "Vrací" msgid "Return type" msgstr "Typ navrácené hodnoty" -#: sphinx/directives/desc.py:201 -#, fuzzy -msgid "Parameter" -msgstr "Parametry" - -#: sphinx/directives/desc.py:205 +#: sphinx/directives/desc.py:143 msgid "Parameters" msgstr "Parametry" -#: sphinx/directives/desc.py:450 -#, fuzzy, python-format +#: sphinx/directives/desc.py:423 +#, python-format msgid "%scommand line option; %s" -msgstr "%sparametry příkazového řádku; %s" +msgstr "%s parametry příkazového řádku; %s" #: sphinx/directives/other.py:101 msgid "Platforms: " @@ -231,27 +261,22 @@ msgstr "Autor modulu: " msgid "Author: " msgstr 
"Autor: " -#: sphinx/directives/other.py:249 +#: sphinx/directives/other.py:246 msgid "See also" msgstr "Viz také" -#: sphinx/ext/autodoc.py:576 sphinx/ext/autodoc.py:590 -#, python-format -msgid "alias of :class:`%s`" -msgstr "" - #: sphinx/ext/todo.py:31 msgid "Todo" -msgstr "" +msgstr "Todo" #: sphinx/ext/todo.py:75 #, python-format msgid "(The original entry is located in %s, line %d and can be found " -msgstr "" +msgstr "(Původní záznam je v %s, řádka %d a lze jej nalézt" #: sphinx/ext/todo.py:81 msgid "here" -msgstr "" +msgstr "zde" #: sphinx/locale/__init__.py:15 msgid "Attention" @@ -336,50 +361,39 @@ msgstr "příkaz" msgid "built-in function" msgstr "vestavěná funkce" -#: sphinx/static/doctools.js:139 sphinx/writers/html.py:415 -msgid "Permalink to this headline" -msgstr "Trvalý odkaz na tento nadpis" - -#: sphinx/static/doctools.js:145 sphinx/writers/html.py:80 -msgid "Permalink to this definition" -msgstr "Trvalý odkaz na tuto definici" - #: sphinx/static/doctools.js:174 msgid "Hide Search Matches" -msgstr "" +msgstr "Skrýt výsledky vyhledávání" #: sphinx/static/searchtools.js:274 -#, fuzzy msgid "Searching" -msgstr "hledej" +msgstr "Hledám" #: sphinx/static/searchtools.js:279 msgid "Preparing search..." -msgstr "" +msgstr "Připravuji vyhledávání...." #: sphinx/static/searchtools.js:338 -#, fuzzy msgid "module, in " -msgstr "modul" +msgstr "modul, v" #: sphinx/static/searchtools.js:347 msgid ", in " -msgstr "" +msgstr ", v" -#: sphinx/static/searchtools.js:453 sphinx/templates/search.html:18 +#: sphinx/static/searchtools.js:447 +#: sphinx/templates/search.html:18 msgid "Search Results" msgstr "Výsledky hledání" -#: sphinx/static/searchtools.js:455 -msgid "" -"Your search did not match any documents. Please make sure that all words " -"are spelled correctly and that you've selected enough categories." -msgstr "" +#: sphinx/static/searchtools.js:449 +msgid "Your search did not match any documents. 
Please make sure that all words are spelled correctly and that you've selected enough categories." +msgstr "Nenalezli jsme žádný dokument. Ujistěte se prosím, že všechna slova jsou správně a že jste vybral dostatek kategorií." -#: sphinx/static/searchtools.js:457 +#: sphinx/static/searchtools.js:451 #, python-format msgid "Search finished, found %s page(s) matching the search query." -msgstr "" +msgstr "Vyhledávání skončilo, nalezeno %s stran." #: sphinx/templates/defindex.html:2 msgid "Overview" @@ -416,7 +430,8 @@ msgstr "Index – %(key)s" #: sphinx/templates/genindex-single.html:44 #: sphinx/templates/genindex-split.html:14 -#: sphinx/templates/genindex-split.html:27 sphinx/templates/genindex.html:54 +#: sphinx/templates/genindex-split.html:27 +#: sphinx/templates/genindex.html:54 msgid "Full index on one page" msgstr "Plný index na jedné stránce" @@ -452,70 +467,83 @@ msgstr "Další téma" msgid "next chapter" msgstr "další kapitola" -#: sphinx/templates/layout.html:56 +#: sphinx/templates/layout.html:55 msgid "This Page" msgstr "Tato stránka" -#: sphinx/templates/layout.html:58 +#: sphinx/templates/layout.html:59 +msgid "Suggest Change" +msgstr "Návrh změnu" + +#: sphinx/templates/layout.html:60 +#: sphinx/templates/layout.html:62 msgid "Show Source" msgstr "Ukázat zdroj" -#: sphinx/templates/layout.html:67 +#: sphinx/templates/layout.html:71 msgid "Quick search" msgstr "Rychlé vyhledávání" -#: sphinx/templates/layout.html:69 +#: sphinx/templates/layout.html:71 +msgid "Keyword search" +msgstr "Hledání dle klíče" + +#: sphinx/templates/layout.html:73 msgid "Go" msgstr "hledej" -#: sphinx/templates/layout.html:73 -#, fuzzy -msgid "Enter search terms or a module, class or function name." +#: sphinx/templates/layout.html:78 +msgid "Enter a module, class or function name." msgstr "Zadej jméno modulu, třídy nebo funkce." 
-#: sphinx/templates/layout.html:106 +#: sphinx/templates/layout.html:119 #, python-format msgid "Search within %(docstitle)s" msgstr "Hledání uvnitř %(docstitle)s" -#: sphinx/templates/layout.html:115 +#: sphinx/templates/layout.html:128 msgid "About these documents" msgstr "O těchto dokumentech" -#: sphinx/templates/layout.html:118 sphinx/templates/search.html:2 +#: sphinx/templates/layout.html:131 +#: sphinx/templates/search.html:2 #: sphinx/templates/search.html:5 msgid "Search" msgstr "Hledání" -#: sphinx/templates/layout.html:120 +#: sphinx/templates/layout.html:133 msgid "Copyright" msgstr "Veškerá práva vyhrazena" -#: sphinx/templates/layout.html:165 +#: sphinx/templates/layout.html:178 #, python-format msgid "© Copyright %(copyright)s." msgstr "© Copyright %(copyright)s." -#: sphinx/templates/layout.html:167 +#: sphinx/templates/layout.html:180 #, python-format msgid "© Copyright %(copyright)s." msgstr "© Copyright %(copyright)s." -#: sphinx/templates/layout.html:170 +#: sphinx/templates/layout.html:183 #, python-format msgid "Last updated on %(last_updated)s." -msgstr "Naposledy aktualizováno dne %(last_updated)s." +msgstr "Aktualizováno dne %(last_updated)s." -#: sphinx/templates/layout.html:173 +#: sphinx/templates/layout.html:186 #, python-format -msgid "" -"Created using Sphinx " -"%(sphinx_version)s." -msgstr "" -"Vytvořeno pomocí Sphinx " -"%(sphinx_version)s." +msgid "Created using Sphinx %(sphinx_version)s." +msgstr "Vytvořeno pomocí Sphinx %(sphinx_version)s." 
-#: sphinx/templates/modindex.html:36 +#: sphinx/templates/modindex.html:15 +msgid "Most popular modules:" +msgstr "Nejpopulárnější moduly:" + +#: sphinx/templates/modindex.html:24 +msgid "Show modules only available on these platforms" +msgstr "Zobrazit moduly dostupné na této platformě" + +#: sphinx/templates/modindex.html:56 msgid "Deprecated" msgstr "Zastaralé" @@ -524,18 +552,19 @@ msgstr "Zastaralé" msgid "Search %(docstitle)s" msgstr "Prohledat %(docstitle)s" +#: sphinx/templates/page.html:8 +msgid "Note: You requested an out-of-date URL from this server. We've tried to redirect you to the new location of this page, but it may not be the right one." +msgstr "Poznámka: Stránka, kterou hledáte, neexistuje.
    Snažili jsme se najít nové umístění této stránky, ale nepovedlo se." + #: sphinx/templates/search.html:7 -#, fuzzy msgid "" "From here you can search these documents. Enter your search\n" " words into the box below and click \"search\". Note that the search\n" " function will automatically search for all of the words. Pages\n" " containing fewer words won't appear in the result list." msgstr "" -"Toto je vyhledávací stránka. Zadejte klíčová slova do pole níže a " -"klikněte na \"hledej\". \n" -"Prohledávání funkcí hledá automaticky všechna slova. Stránky obsahující" -" slov méně, nebudou nalezeny." +"Toto je vyhledávací stránka. Zadejte klíčová slova a klikněte na \"hledej\". \n" +"Vyhledávání hledá automaticky všechna slova. Nebudou tedy nalezeny stránky, obsahující méně slov." #: sphinx/templates/search.html:14 msgid "search" @@ -573,40 +602,3 @@ msgstr "Změny API" msgid "Other changes" msgstr "Ostatní změny" -#: sphinx/writers/latex.py:173 -msgid "Release" -msgstr "Vydání" - -#: sphinx/writers/text.py:166 -#, python-format -msgid "Platform: %s" -msgstr "Platforma: %s" - -#: sphinx/writers/text.py:427 -msgid "[image]" -msgstr "[obrázek]" - -#~ msgid "Suggest Change" -#~ msgstr "Návrh změnu" - -#~ msgid "Keyword search" -#~ msgstr "Hledání dle klíče" - -#~ msgid "Most popular modules:" -#~ msgstr "Nejpopulárnější moduly:" - -#~ msgid "Show modules only available on these platforms" -#~ msgstr "Zobrazit moduly dostupné na této platformě" - -#~ msgid "" -#~ "Note: You requested an " -#~ "out-of-date URL from this server. " -#~ "We've tried to redirect you to the" -#~ " new location of this page, but " -#~ "it may not be the right one." -#~ msgstr "" -#~ "Poznámka: Stránka, kterou hledáte," -#~ " neexistuje.
    Snažili jsme se najít nové" -#~ " umístění této stránky, ale nepovedlo " -#~ "se." - -- cgit v1.2.1 From 60a2cb5719f8d03d7ce425a696cb7cf1c49298ae Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Wed, 31 Dec 2008 00:21:19 +0100 Subject: Add WFront. --- EXAMPLES | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/EXAMPLES b/EXAMPLES index 6b61ec04..1fef388e 100644 --- a/EXAMPLES +++ b/EXAMPLES @@ -22,6 +22,7 @@ included, please mail to `the Google group * Matplotlib: http://matplotlib.sourceforge.net/ * Mayavi: http://code.enthought.com/projects/mayavi/docs/development/html/mayavi * Mixin.com: http://dev.mixin.com/ +* mpmath: http://mpmath.googlecode.com/svn/trunk/doc/build/index.html * NetworkX: http://networkx.lanl.gov/ * NumPy: http://docs.scipy.org/doc/numpy/reference/ * ObjectListView: http://objectlistview.sourceforge.net/python @@ -40,6 +41,6 @@ included, please mail to `the Google group * SymPy: http://docs.sympy.org/ * tinyTiM: http://tinytim.sourceforge.net/docs/2.0/ * TurboGears: http://turbogears.org/2.0/docs/ +* WFront: http://discorporate.us/projects/WFront/ * Zope 3: e.g. 
http://docs.carduner.net/z3c-tutorial/ -* mpmath: http://mpmath.googlecode.com/svn/trunk/doc/build/index.html * zc.async: http://packages.python.org/zc.async/1.5.0/ -- cgit v1.2.1 From d9f337266f7118d7bfd80a66ee6ae10fa251f866 Mon Sep 17 00:00:00 2001 From: Benjamin Peterson Date: Thu, 1 Jan 2009 10:33:41 -0600 Subject: add a test for detecting python console sessions --- tests/test_highlighting.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/test_highlighting.py b/tests/test_highlighting.py index 1d16b982..c54a7c2c 100644 --- a/tests/test_highlighting.py +++ b/tests/test_highlighting.py @@ -28,6 +28,11 @@ class MyLexer(RegexLexer): ], } +class ComplainOnUnhighlighted(PygmentsBridge): + + def unhighlighted(self, source): + raise AssertionError("should highlight %r" % source) + class MyFormatter(HtmlFormatter): def format(self, tokensource, outfile): outfile.write('test') @@ -41,6 +46,18 @@ def test_add_lexer(app): ret = bridge.highlight_block('ab', 'test') assert 'ab' in ret +def test_detect_interactive(): + bridge = ComplainOnUnhighlighted('html') + blocks = [ + """ + >>> testing() + True + """, + ] + for block in blocks: + ret = bridge.highlight_block(block.lstrip(), 'python') + assert ret.startswith("
    ") + def test_set_formatter(): PygmentsBridge.html_formatter = MyFormatter try: -- cgit v1.2.1 From 45c8e6b4f22e44208206b124a685ba273df284e2 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Thu, 1 Jan 2009 23:48:10 +0100 Subject: Add Python license info, add parse.c source generated by Cython. --- LICENSE | 73 +- sphinx/pycode/__init__.py | 12 +- sphinx/pycode/nodes.py | 202 +++ sphinx/pycode/pgen2/parse.c | 3261 +++++++++++++++++++++++++++++++++++++++++ sphinx/pycode/pgen2/parse.pyx | 4 +- sphinx/pycode/pytree.py | 293 ---- 6 files changed, 3537 insertions(+), 308 deletions(-) create mode 100644 sphinx/pycode/nodes.py create mode 100644 sphinx/pycode/pgen2/parse.c delete mode 100644 sphinx/pycode/pytree.py diff --git a/LICENSE b/LICENSE index fd441170..34183cb5 100644 --- a/LICENSE +++ b/LICENSE @@ -1,17 +1,19 @@ -Copyright (c) 2007-2008 by the respective authors (see AUTHORS file). -All rights reserved. +Copyright (c) 2007-2008 by the Sphinx team (see AUTHORS file). All +rights reserved. + +License for Sphinx +================== Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -24,3 +26,58 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Licenses for incorporated software +================================== + +The pgen2 package, included in this distribution under the name +sphinx.pycode.pgen2, is available in the Python 2.6 distribution under +the PSF license agreement for Python: + +1. This LICENSE AGREEMENT is between the Python Software Foundation + ("PSF"), and the Individual or Organization ("Licensee") accessing + and otherwise using Python 2.6 software in source or binary form + and its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF + hereby grants Licensee a nonexclusive, royalty-free, world-wide + license to reproduce, analyze, test, perform and/or display + publicly, prepare derivative works, distribute, and otherwise use + Python 2.6 alone or in any derivative version, provided, however, + that PSF's License Agreement and PSF's notice of copyright, i.e., + "Copyright © 2001-2008 Python Software Foundation; All Rights + Reserved" are retained in Python 2.6 alone or in any derivative + version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on + or incorporates Python 2.6 or any part thereof, and wants to make + the derivative work available to others as provided herein, then + Licensee hereby agrees to include in any such work a brief summary + of the changes made to Python 2.6. + +4. PSF is making Python 2.6 available to Licensee on an "AS IS" basis. + PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. 
BY + WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY + REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY + PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 2.6 WILL NOT INFRINGE + ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON + 2.6 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS + AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON + 2.6, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY + THEREOF. + +6. This License Agreement will automatically terminate upon a material + breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any + relationship of agency, partnership, or joint venture between PSF + and Licensee. This License Agreement does not grant permission to + use PSF trademarks or trade name in a trademark sense to endorse or + promote products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python 2.6, Licensee + agrees to be bound by the terms and conditions of this License + Agreement. diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index a61208d5..d707db2d 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -5,7 +5,7 @@ Utilities parsing and analyzing Python code. - :copyright: 2008 by Georg Brandl. + :copyright: 2008-2009 by Georg Brandl. :license: BSD, see LICENSE for details. 
""" @@ -13,7 +13,7 @@ import sys from os import path from cStringIO import StringIO -from sphinx.pycode import pytree +from sphinx.pycode import nodes from sphinx.pycode.pgen2 import driver, token, tokenize, parse, literals from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc @@ -21,7 +21,7 @@ from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc # load the Python grammar _grammarfile = path.join(path.dirname(__file__), 'Grammar.txt') pygrammar = driver.load_grammar(_grammarfile) -pydriver = driver.Driver(pygrammar, convert=pytree.convert) +pydriver = driver.Driver(pygrammar, convert=nodes.convert) # an object with attributes corresponding to token and symbol names class sym: pass @@ -35,10 +35,10 @@ number2name = pygrammar.number2symbol.copy() number2name.update(token.tok_name) -_eq = pytree.Leaf(token.EQUAL, '=') +_eq = nodes.Leaf(token.EQUAL, '=') -class AttrDocVisitor(pytree.NodeVisitor): +class AttrDocVisitor(nodes.NodeVisitor): """ Visitor that collects docstrings for attribute assignments on toplevel and in classes. @@ -263,5 +263,5 @@ if __name__ == '__main__': # print '\n'.join(doc) pprint.pprint(ma.find_tags()) x3 = time.time() - #print pytree.nice_repr(ma.parsetree, number2name) + #print nodes.nice_repr(ma.parsetree, number2name) print "tokenizing %.4f, parsing %.4f, finding %.4f" % (x1-x0, x2-x1, x3-x2) diff --git a/sphinx/pycode/nodes.py b/sphinx/pycode/nodes.py new file mode 100644 index 00000000..d0fb522b --- /dev/null +++ b/sphinx/pycode/nodes.py @@ -0,0 +1,202 @@ +# -*- coding: utf-8 -*- +""" + sphinx.pycode.nodes + ~~~~~~~~~~~~~~~~~~~ + + Parse tree node implementations. + + :copyright: 2009 by Georg Brandl. + :license: BSD, see LICENSE for details. +""" + + +class BaseNode(object): + """ + Node superclass for both terminal and nonterminal nodes. 
+ """ + + def _eq(self, other): + raise NotImplementedError + + def __eq__(self, other): + if self.__class__ is not other.__class__: + return NotImplemented + return self._eq(other) + + def __ne__(self, other): + if self.__class__ is not other.__class__: + return NotImplemented + return not self._eq(other) + + def get_prev_sibling(self): + """Return previous child in parent's children, or None.""" + if self.parent is None: + return None + for i, child in enumerate(self.parent.children): + if child is self: + if i == 0: + return None + return self.parent.children[i-1] + + def get_next_sibling(self): + """Return next child in parent's children, or None.""" + if self.parent is None: + return None + for i, child in enumerate(self.parent.children): + if child is self: + try: + return self.parent.children[i+1] + except IndexError: + return None + + def get_prev_leaf(self): + """Return the leaf node that precedes this node in the parse tree.""" + def last_child(node): + if isinstance(node, Leaf): + return node + elif not node.children: + return None + else: + return last_child(node.children[-1]) + if self.parent is None: + return None + prev = self.get_prev_sibling() + if isinstance(prev, Leaf): + return prev + elif prev is not None: + return last_child(prev) + return self.parent.get_prev_leaf() + + def get_next_leaf(self): + """Return self if leaf, otherwise the leaf node that succeeds this + node in the parse tree. + """ + node = self + while not isinstance(node, Leaf): + assert node.children + node = node.children[0] + return node + + def get_lineno(self): + """Return the line number which generated the invocant node.""" + return self.get_next_leaf().lineno + + def get_prefix(self): + """Return the prefix of the next leaf node.""" + # only leaves carry a prefix + return self.get_next_leaf().prefix + + +class Node(BaseNode): + """ + Node implementation for nonterminals. 
+ """ + + def __init__(self, type, children, context=None): + # type of nonterminals is >= 256 + # assert type >= 256, type + self.type = type + self.children = list(children) + for ch in self.children: + # assert ch.parent is None, repr(ch) + ch.parent = self + + def __repr__(self): + return '%s(%s, %r)' % (self.__class__.__name__, self.type, self.children) + + def __str__(self): + """This reproduces the input source exactly.""" + return ''.join(map(str, self.children)) + + def _eq(self, other): + return (self.type, self.children) == (other.type, other.children) + + # support indexing the node directly instead of .children + + def __getitem__(self, index): + return self.children[index] + + def __iter__(self): + return iter(self.children) + + def __len__(self): + return len(self.children) + + +class Leaf(BaseNode): + """ + Node implementation for leaf nodes (terminals). + """ + prefix = '' # Whitespace and comments preceding this token in the input + lineno = 0 # Line where this token starts in the input + column = 0 # Column where this token tarts in the input + + def __init__(self, type, value, context=None): + # type of terminals is below 256 + # assert 0 <= type < 256, type + self.type = type + self.value = value + if context is not None: + self.prefix, (self.lineno, self.column) = context + + def __repr__(self): + return '%s(%r, %r, %r)' % (self.__class__.__name__, + self.type, self.value, self.prefix) + + def __str__(self): + """This reproduces the input source exactly.""" + return self.prefix + str(self.value) + + def _eq(self, other): + """Compares two nodes for equality.""" + return (self.type, self.value) == (other.type, other.value) + + +def convert(grammar, raw_node): + """Convert raw node to a Node or Leaf instance.""" + type, value, context, children = raw_node + if children or type in grammar.number2symbol: + # If there's exactly one child, return that child instead of + # creating a new node. 
+ if len(children) == 1: + return children[0] + return Node(type, children, context=context) + else: + return Leaf(type, value, context=context) + + +def nice_repr(node, number2name, prefix=False): + def _repr(node): + if isinstance(node, Leaf): + return "%s(%r)" % (number2name[node.type], node.value) + else: + return "%s(%s)" % (number2name[node.type], + ', '.join(map(_repr, node.children))) + def _prepr(node): + if isinstance(node, Leaf): + return "%s(%r, %r)" % (number2name[node.type], node.prefix, node.value) + else: + return "%s(%s)" % (number2name[node.type], + ', '.join(map(_prepr, node.children))) + return (prefix and _prepr or _repr)(node) + + +class NodeVisitor(object): + def __init__(self, number2name, *args): + self.number2name = number2name + self.init(*args) + + def init(self, *args): + pass + + def visit(self, node): + """Visit a node.""" + method = 'visit_' + self.number2name[node.type] + visitor = getattr(self, method, self.generic_visit) + return visitor(node) + + def generic_visit(self, node): + """Called if no explicit visitor function exists for a node.""" + if isinstance(node, Node): + for child in node: + self.visit(child) diff --git a/sphinx/pycode/pgen2/parse.c b/sphinx/pycode/pgen2/parse.c new file mode 100644 index 00000000..fd0e9ff9 --- /dev/null +++ b/sphinx/pycode/pgen2/parse.c @@ -0,0 +1,3261 @@ +/* Generated by Cython 0.9.8.1 on Thu Jan 1 23:45:38 2009 */ + +#define PY_SSIZE_T_CLEAN +#include "Python.h" +#include "structmember.h" +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#if PY_VERSION_HEX < 0x02040000 + #define METH_COEXIST 0 +#endif +#if PY_VERSION_HEX < 0x02050000 + typedef int Py_ssize_t; + #define PY_SSIZE_T_MAX INT_MAX + #define PY_SSIZE_T_MIN INT_MIN + #define PyInt_FromSsize_t(z) PyInt_FromLong(z) + #define PyInt_AsSsize_t(o) PyInt_AsLong(o) + #define PyNumber_Index(o) PyNumber_Int(o) + #define PyIndex_Check(o) PyNumber_Check(o) +#endif +#if 
PY_VERSION_HEX < 0x02060000 + #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) + #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) + #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) + #define PyVarObject_HEAD_INIT(type, size) \ + PyObject_HEAD_INIT(type) size, + #define PyType_Modified(t) + + typedef struct { + void *buf; + Py_ssize_t len; + int readonly; + const char *format; + int ndim; + Py_ssize_t *shape; + Py_ssize_t *strides; + Py_ssize_t *suboffsets; + Py_ssize_t itemsize; + void *internal; + } Py_buffer; + + #define PyBUF_SIMPLE 0 + #define PyBUF_WRITABLE 0x0001 + #define PyBUF_LOCK 0x0002 + #define PyBUF_FORMAT 0x0004 + #define PyBUF_ND 0x0008 + #define PyBUF_STRIDES (0x0010 | PyBUF_ND) + #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) + #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) + #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) + #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) + +#endif +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" +#endif +#if PY_MAJOR_VERSION >= 3 + #define Py_TPFLAGS_CHECKTYPES 0 + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyString_Type PyBytes_Type + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) 
PyNumber_Divide(x,y) + #define PyBytes_Type PyString_Type +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyMethod_New(func, self, klass) PyInstanceMethod_New(func) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif +#else + #define _USE_MATH_DEFINES +#endif +#ifdef __cplusplus +#define __PYX_EXTERN_C extern "C" +#else +#define __PYX_EXTERN_C extern +#endif +#include +#define __PYX_HAVE_API__sphinx__pycode__pgen2__parse + + +#ifdef __GNUC__ +#define INLINE __inline__ +#elif _WIN32 +#define INLINE __inline +#else +#define INLINE +#endif + +typedef struct {PyObject **p; char *s; long n; char is_unicode; char intern; char is_identifier;} __Pyx_StringTabEntry; /*proto*/ + + + +static int __pyx_skip_dispatch = 0; + + +/* Type Conversion Predeclarations */ + +#if PY_MAJOR_VERSION < 3 +#define __Pyx_PyBytes_FromString PyString_FromString +#define __Pyx_PyBytes_AsString PyString_AsString +#else +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_AsString PyBytes_AsString +#endif + +#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) +static INLINE int __Pyx_PyObject_IsTrue(PyObject* x); +static INLINE PY_LONG_LONG __pyx_PyInt_AsLongLong(PyObject* x); +static INLINE unsigned PY_LONG_LONG __pyx_PyInt_AsUnsignedLongLong(PyObject* x); +static INLINE Py_ssize_t __pyx_PyIndex_AsSsize_t(PyObject* b); + +#define __pyx_PyInt_AsLong(x) (PyInt_CheckExact(x) ? PyInt_AS_LONG(x) : PyInt_AsLong(x)) +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) + +static INLINE unsigned char __pyx_PyInt_unsigned_char(PyObject* x); +static INLINE unsigned short __pyx_PyInt_unsigned_short(PyObject* x); +static INLINE char __pyx_PyInt_char(PyObject* x); +static INLINE short __pyx_PyInt_short(PyObject* x); +static INLINE int __pyx_PyInt_int(PyObject* x); +static INLINE long __pyx_PyInt_long(PyObject* x); +static INLINE signed char __pyx_PyInt_signed_char(PyObject* x); +static INLINE signed short __pyx_PyInt_signed_short(PyObject* x); +static INLINE signed int __pyx_PyInt_signed_int(PyObject* x); +static INLINE signed long __pyx_PyInt_signed_long(PyObject* x); +static INLINE long double __pyx_PyInt_long_double(PyObject* x); +#ifdef __GNUC__ +/* Test for GCC > 2.95 */ +#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) +#else /* __GNUC__ > 2 ... */ +#define likely(x) (x) +#define unlikely(x) (x) +#endif /* __GNUC__ > 2 ... 
*/ +#else /* __GNUC__ */ +#define likely(x) (x) +#define unlikely(x) (x) +#endif /* __GNUC__ */ + +static PyObject *__pyx_m; +static PyObject *__pyx_b; +static PyObject *__pyx_empty_tuple; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; +static const char **__pyx_f; + +static INLINE void __Pyx_RaiseArgtupleTooLong(Py_ssize_t num_expected, Py_ssize_t num_found); /*proto*/ + +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list); /*proto*/ + +static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ + +static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name, char *modname); /*proto*/ + +static INLINE PyObject *__Pyx_GetItemInt(PyObject *o, Py_ssize_t i, int is_unsigned) { + PyObject *r; + if (PyList_CheckExact(o) && 0 <= i && i < PyList_GET_SIZE(o)) { + r = PyList_GET_ITEM(o, i); + Py_INCREF(r); + } + else if (PyTuple_CheckExact(o) && 0 <= i && i < PyTuple_GET_SIZE(o)) { + r = PyTuple_GET_ITEM(o, i); + Py_INCREF(r); + } + else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_item && (likely(i >= 0) || !is_unsigned)) + r = PySequence_GetItem(o, i); + else { + PyObject *j = (likely(i >= 0) || !is_unsigned) ? PyInt_FromLong(i) : PyLong_FromUnsignedLongLong((sizeof(unsigned long long) > sizeof(Py_ssize_t) ? 
(1ULL << (sizeof(Py_ssize_t)*8)) : 0) + i); + if (!j) + return 0; + r = PyObject_GetItem(o, j); + Py_DECREF(j); + } + return r; +} + +static PyObject *__Pyx_UnpackItem(PyObject *, Py_ssize_t index); /*proto*/ +static int __Pyx_EndUnpack(PyObject *); /*proto*/ + +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ + +static INLINE PyObject* __Pyx_PyObject_Append(PyObject* L, PyObject* x) { + if (likely(PyList_CheckExact(L))) { + if (PyList_Append(L, x) < 0) return NULL; + Py_INCREF(Py_None); + return Py_None; // this is just to have an accurate signature + } + else { + return PyObject_CallMethod(L, "append", "(O)", x); + } +} + +static INLINE int __Pyx_SetItemInt(PyObject *o, Py_ssize_t i, PyObject *v, int is_unsigned) { + int r; + if (PyList_CheckExact(o) && 0 <= i && i < PyList_GET_SIZE(o)) { + Py_DECREF(PyList_GET_ITEM(o, i)); + Py_INCREF(v); + PyList_SET_ITEM(o, i, v); + return 1; + } + else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_ass_item && (likely(i >= 0) || !is_unsigned)) + r = PySequence_SetItem(o, i, v); + else { + PyObject *j = (likely(i >= 0) || !is_unsigned) ? PyInt_FromLong(i) : PyLong_FromUnsignedLongLong((sizeof(unsigned long long) > sizeof(Py_ssize_t) ? 
(1ULL << (sizeof(Py_ssize_t)*8)) : 0) + i); + if (!j) + return -1; + r = PyObject_SetItem(o, j, v); + Py_DECREF(j); + } + return r; +} + +static void __Pyx_WriteUnraisable(const char *name); /*proto*/ + +static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/ + +static void __Pyx_AddTraceback(const char *funcname); /*proto*/ + +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ + +/* Type declarations */ + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":31 + * + * + * cdef class Parser: # <<<<<<<<<<<<<< + * cdef public grammar, stack, rootnode, used_names + * cdef _grammar_dfas, _grammar_labels, _grammar_keywords, _grammar_tokens + */ + +struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser { + PyObject_HEAD + struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_vtab; + PyObject *grammar; + PyObject *stack; + PyObject *rootnode; + PyObject *used_names; + PyObject *_grammar_dfas; + PyObject *_grammar_labels; + PyObject *_grammar_keywords; + PyObject *_grammar_tokens; + PyObject *_grammar_number2symbol; +}; + + +struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser { + int (*classify)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, PyObject *, PyObject *, PyObject *); + void (*shift)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, PyObject *, PyObject *, PyObject *, PyObject *); + void (*push)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, PyObject *, PyObject *, PyObject *, PyObject *); + void (*pop)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *); + PyObject *(*convert)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, PyObject *); +}; +static struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_vtabptr_6sphinx_6pycode_5pgen2_5parse_Parser; +/* Module declarations from sphinx.pycode.pgen2.parse */ + +static PyTypeObject *__pyx_ptype_6sphinx_6pycode_5pgen2_5parse_Parser = 0; + + +/* Implementation of sphinx.pycode.pgen2.parse */ +static char 
__pyx_k_2[] = "Exception to signal the parser is stuck."; +static PyObject *__pyx_int_0; +static PyObject *__pyx_int_1; +static char __pyx_k___init__[] = "__init__"; +static PyObject *__pyx_kp___init__; +static char __pyx_k_setup[] = "setup"; +static PyObject *__pyx_kp_setup; +static char __pyx_k_addtoken[] = "addtoken"; +static PyObject *__pyx_kp_addtoken; +static char __pyx_k_1[] = "sphinx.pycode.nodes"; +static PyObject *__pyx_kp_1; +static char __pyx_k_Node[] = "Node"; +static PyObject *__pyx_kp_Node; +static char __pyx_k_Leaf[] = "Leaf"; +static PyObject *__pyx_kp_Leaf; +static char __pyx_k_ParseError[] = "ParseError"; +static PyObject *__pyx_kp_ParseError; +static char __pyx_k_Exception[] = "Exception"; +static PyObject *__pyx_kp_Exception; +static char __pyx_k_msg[] = "msg"; +static PyObject *__pyx_kp_msg; +static char __pyx_k_type[] = "type"; +static PyObject *__pyx_kp_type; +static char __pyx_k_value[] = "value"; +static PyObject *__pyx_kp_value; +static char __pyx_k_context[] = "context"; +static PyObject *__pyx_kp_context; +static char __pyx_k_dfas[] = "dfas"; +static PyObject *__pyx_kp_dfas; +static char __pyx_k_labels[] = "labels"; +static PyObject *__pyx_kp_labels; +static char __pyx_k_keywords[] = "keywords"; +static PyObject *__pyx_kp_keywords; +static char __pyx_k_tokens[] = "tokens"; +static PyObject *__pyx_kp_tokens; +static char __pyx_k_4[] = "number2symbol"; +static PyObject *__pyx_kp_4; +static char __pyx_k_start[] = "start"; +static PyObject *__pyx_kp_start; +static char __pyx_k_add[] = "add"; +static PyObject *__pyx_kp_add; +static char __pyx_k_get[] = "get"; +static PyObject *__pyx_kp_get; +static char __pyx_k_append[] = "append"; +static PyObject *__pyx_kp_append; +static char __pyx_k_pop[] = "pop"; +static PyObject *__pyx_kp_pop; +static char __pyx_k_used_names[] = "used_names"; +static PyObject *__pyx_kp_used_names; +static PyObject *__pyx_kp_2; +static PyObject *__pyx_builtin_Exception; +static PyObject *__pyx_kp_3; +static char 
__pyx_k_3[] = "%s: type=%r, value=%r, context=%r"; +static PyObject *__pyx_kp_5; +static PyObject *__pyx_kp_6; +static char __pyx_k_5[] = "too much input"; +static char __pyx_k_6[] = "bad input"; +static PyObject *__pyx_kp_7; +static char __pyx_k_7[] = "bad token"; + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":22 + * """Exception to signal the parser is stuck.""" + * + * def __init__(self, msg, type, value, context): # <<<<<<<<<<<<<< + * Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % + * (msg, type, value, context)) + */ + +static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_10ParseError___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_6sphinx_6pycode_5pgen2_5parse_10ParseError___init__ = {"__init__", (PyCFunction)__pyx_pf_6sphinx_6pycode_5pgen2_5parse_10ParseError___init__, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_10ParseError___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_self = 0; + PyObject *__pyx_v_msg = 0; + PyObject *__pyx_v_type = 0; + PyObject *__pyx_v_value = 0; + PyObject *__pyx_v_context = 0; + PyObject *__pyx_r; + PyObject *__pyx_1 = 0; + PyObject *__pyx_2 = 0; + PyObject *__pyx_3 = 0; + static char *__pyx_argnames[] = {"self","msg","type","value","context",0}; + __pyx_self = __pyx_self; + if (likely(!__pyx_kwds) && likely(PyTuple_GET_SIZE(__pyx_args) == 5)) { + __pyx_v_self = PyTuple_GET_ITEM(__pyx_args, 0); + __pyx_v_msg = PyTuple_GET_ITEM(__pyx_args, 1); + __pyx_v_type = PyTuple_GET_ITEM(__pyx_args, 2); + __pyx_v_value = PyTuple_GET_ITEM(__pyx_args, 3); + __pyx_v_context = PyTuple_GET_ITEM(__pyx_args, 4); + } + else { + if (unlikely(!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OOOOO", __pyx_argnames, &__pyx_v_self, &__pyx_v_msg, &__pyx_v_type, &__pyx_v_value, &__pyx_v_context))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = 
__LINE__; goto __pyx_L3_error;} + } + goto __pyx_L4; + __pyx_L3_error:; + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.ParseError.__init__"); + return NULL; + __pyx_L4:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":23 + * + * def __init__(self, msg, type, value, context): + * Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % # <<<<<<<<<<<<<< + * (msg, type, value, context)) + * self.msg = msg + */ + __pyx_1 = PyObject_GetAttr(__pyx_builtin_Exception, __pyx_kp___init__); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":24 + * def __init__(self, msg, type, value, context): + * Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % + * (msg, type, value, context)) # <<<<<<<<<<<<<< + * self.msg = msg + * self.type = type + */ + __pyx_2 = PyTuple_New(4); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_msg); + PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_msg); + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_2, 1, __pyx_v_type); + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_2, 2, __pyx_v_value); + Py_INCREF(__pyx_v_context); + PyTuple_SET_ITEM(__pyx_2, 3, __pyx_v_context); + __pyx_3 = PyNumber_Remainder(__pyx_kp_3, ((PyObject *)__pyx_2)); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((PyObject *)__pyx_2)); __pyx_2 = 0; + __pyx_2 = PyTuple_New(2); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_self); + PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_self); + PyTuple_SET_ITEM(__pyx_2, 1, __pyx_3); + __pyx_3 = 0; + __pyx_3 = PyObject_Call(__pyx_1, ((PyObject *)__pyx_2), NULL); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno 
= 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + Py_DECREF(((PyObject *)__pyx_2)); __pyx_2 = 0; + Py_DECREF(__pyx_3); __pyx_3 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":25 + * Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % + * (msg, type, value, context)) + * self.msg = msg # <<<<<<<<<<<<<< + * self.type = type + * self.value = value + */ + if (PyObject_SetAttr(__pyx_v_self, __pyx_kp_msg, __pyx_v_msg) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":26 + * (msg, type, value, context)) + * self.msg = msg + * self.type = type # <<<<<<<<<<<<<< + * self.value = value + * self.context = context + */ + if (PyObject_SetAttr(__pyx_v_self, __pyx_kp_type, __pyx_v_type) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":27 + * self.msg = msg + * self.type = type + * self.value = value # <<<<<<<<<<<<<< + * self.context = context + * + */ + if (PyObject_SetAttr(__pyx_v_self, __pyx_kp_value, __pyx_v_value) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":28 + * self.type = type + * self.value = value + * self.context = context # <<<<<<<<<<<<<< + * + * + */ + if (PyObject_SetAttr(__pyx_v_self, __pyx_kp_context, __pyx_v_context) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + + __pyx_r = Py_None; Py_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_3); + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.ParseError.__init__"); + __pyx_r = NULL; + __pyx_L0:; + return __pyx_r; +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":36 + * 
cdef _grammar_number2symbol + * + * def __init__(self, grammar, convert=None): # <<<<<<<<<<<<<< + * self.grammar = grammar + * #self.convert = convert or noconvert + */ + +static int __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_grammar = 0; + PyObject *__pyx_v_convert = 0; + int __pyx_r; + PyObject *__pyx_1 = 0; + static char *__pyx_argnames[] = {"grammar","convert",0}; + __pyx_v_convert = Py_None; + if (likely(!__pyx_kwds) && likely(1 <= PyTuple_GET_SIZE(__pyx_args)) && likely(PyTuple_GET_SIZE(__pyx_args) <= 2)) { + __pyx_v_grammar = PyTuple_GET_ITEM(__pyx_args, 0); + if (PyTuple_GET_SIZE(__pyx_args) > 1) { + __pyx_v_convert = PyTuple_GET_ITEM(__pyx_args, 1); + } + } + else { + if (unlikely(!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O|O", __pyx_argnames, &__pyx_v_grammar, &__pyx_v_convert))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + } + goto __pyx_L4; + __pyx_L3_error:; + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.__init__"); + return -1; + __pyx_L4:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":37 + * + * def __init__(self, grammar, convert=None): + * self.grammar = grammar # <<<<<<<<<<<<<< + * #self.convert = convert or noconvert + * + */ + Py_INCREF(__pyx_v_grammar); + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->grammar); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->grammar = __pyx_v_grammar; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":40 + * #self.convert = convert or noconvert + * + * self._grammar_dfas = grammar.dfas # <<<<<<<<<<<<<< + * self._grammar_labels = grammar.labels + * self._grammar_keywords = grammar.keywords + */ + __pyx_1 = 
PyObject_GetAttr(__pyx_v_grammar, __pyx_kp_dfas); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_dfas); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_dfas = __pyx_1; + __pyx_1 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":41 + * + * self._grammar_dfas = grammar.dfas + * self._grammar_labels = grammar.labels # <<<<<<<<<<<<<< + * self._grammar_keywords = grammar.keywords + * self._grammar_tokens = grammar.tokens + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_grammar, __pyx_kp_labels); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_labels); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_labels = __pyx_1; + __pyx_1 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":42 + * self._grammar_dfas = grammar.dfas + * self._grammar_labels = grammar.labels + * self._grammar_keywords = grammar.keywords # <<<<<<<<<<<<<< + * self._grammar_tokens = grammar.tokens + * self._grammar_number2symbol = grammar.number2symbol + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_grammar, __pyx_kp_keywords); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_keywords); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_keywords = __pyx_1; + __pyx_1 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":43 + * self._grammar_labels = grammar.labels + * self._grammar_keywords = grammar.keywords + * self._grammar_tokens = grammar.tokens # <<<<<<<<<<<<<< + * 
self._grammar_number2symbol = grammar.number2symbol + * + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_grammar, __pyx_kp_tokens); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_tokens); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_tokens = __pyx_1; + __pyx_1 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":44 + * self._grammar_keywords = grammar.keywords + * self._grammar_tokens = grammar.tokens + * self._grammar_number2symbol = grammar.number2symbol # <<<<<<<<<<<<<< + * + * def setup(self, start=None): + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_grammar, __pyx_kp_4); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_number2symbol); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_number2symbol = __pyx_1; + __pyx_1 = 0; + + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.__init__"); + __pyx_r = -1; + __pyx_L0:; + return __pyx_r; +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":46 + * self._grammar_number2symbol = grammar.number2symbol + * + * def setup(self, start=None): # <<<<<<<<<<<<<< + * if start is None: + * start = self.grammar.start + */ + +static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_setup(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_setup(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_start = 0; + PyObject *__pyx_v_newnode; + PyObject *__pyx_v_stackentry; + PyObject *__pyx_r; + int __pyx_1; + 
PyObject *__pyx_2 = 0; + PyObject *__pyx_3 = 0; + static char *__pyx_argnames[] = {"start",0}; + __pyx_v_start = Py_None; + if (likely(!__pyx_kwds) && likely(0 <= PyTuple_GET_SIZE(__pyx_args)) && likely(PyTuple_GET_SIZE(__pyx_args) <= 1)) { + if (PyTuple_GET_SIZE(__pyx_args) > 0) { + __pyx_v_start = PyTuple_GET_ITEM(__pyx_args, 0); + } + } + else { + if (unlikely(!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|O", __pyx_argnames, &__pyx_v_start))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + } + goto __pyx_L4; + __pyx_L3_error:; + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.setup"); + return NULL; + __pyx_L4:; + Py_INCREF(__pyx_v_start); + __pyx_v_newnode = Py_None; Py_INCREF(Py_None); + __pyx_v_stackentry = Py_None; Py_INCREF(Py_None); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":47 + * + * def setup(self, start=None): + * if start is None: # <<<<<<<<<<<<<< + * start = self.grammar.start + * # Each stack entry is a tuple: (dfa, state, node). + */ + __pyx_1 = (__pyx_v_start == Py_None); + if (__pyx_1) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":48 + * def setup(self, start=None): + * if start is None: + * start = self.grammar.start # <<<<<<<<<<<<<< + * # Each stack entry is a tuple: (dfa, state, node). + * # A node is a tuple: (type, value, context, children), + */ + __pyx_2 = PyObject_GetAttr(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->grammar, __pyx_kp_start); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_start); + __pyx_v_start = __pyx_2; + __pyx_2 = 0; + goto __pyx_L5; + } + __pyx_L5:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":52 + * # A node is a tuple: (type, value, context, children), + * # where children is a list of nodes or None, and context may be None. 
+ * newnode = (start, None, None, []) # <<<<<<<<<<<<<< + * stackentry = (self._grammar_dfas[start], 0, newnode) + * self.stack = [stackentry] + */ + __pyx_2 = PyList_New(0); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = PyTuple_New(4); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_start); + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_start); + Py_INCREF(Py_None); + PyTuple_SET_ITEM(__pyx_3, 1, Py_None); + Py_INCREF(Py_None); + PyTuple_SET_ITEM(__pyx_3, 2, Py_None); + PyTuple_SET_ITEM(__pyx_3, 3, ((PyObject *)__pyx_2)); + __pyx_2 = 0; + Py_DECREF(__pyx_v_newnode); + __pyx_v_newnode = ((PyObject *)__pyx_3); + __pyx_3 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":53 + * # where children is a list of nodes or None, and context may be None. + * newnode = (start, None, None, []) + * stackentry = (self._grammar_dfas[start], 0, newnode) # <<<<<<<<<<<<<< + * self.stack = [stackentry] + * self.rootnode = None + */ + __pyx_2 = PyObject_GetItem(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_dfas, __pyx_v_start); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = PyTuple_New(3); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2); + Py_INCREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_3, 1, __pyx_int_0); + Py_INCREF(__pyx_v_newnode); + PyTuple_SET_ITEM(__pyx_3, 2, __pyx_v_newnode); + __pyx_2 = 0; + Py_DECREF(__pyx_v_stackentry); + __pyx_v_stackentry = ((PyObject *)__pyx_3); + __pyx_3 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":54 + * newnode = (start, None, None, []) + * stackentry = (self._grammar_dfas[start], 0, newnode) + * self.stack = [stackentry] # 
<<<<<<<<<<<<<< + * self.rootnode = None + * self.used_names = set() # Aliased to self.rootnode.used_names in pop() + */ + __pyx_2 = PyList_New(1); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_stackentry); + PyList_SET_ITEM(__pyx_2, 0, __pyx_v_stackentry); + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack = ((PyObject *)__pyx_2); + __pyx_2 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":55 + * stackentry = (self._grammar_dfas[start], 0, newnode) + * self.stack = [stackentry] + * self.rootnode = None # <<<<<<<<<<<<<< + * self.used_names = set() # Aliased to self.rootnode.used_names in pop() + * + */ + Py_INCREF(Py_None); + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->rootnode); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->rootnode = Py_None; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":56 + * self.stack = [stackentry] + * self.rootnode = None + * self.used_names = set() # Aliased to self.rootnode.used_names in pop() # <<<<<<<<<<<<<< + * + * def addtoken(self, type, value, context): + */ + __pyx_3 = PyObject_Call(((PyObject*)&PySet_Type), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->used_names); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->used_names = __pyx_3; + __pyx_3 = 0; + + __pyx_r = Py_None; Py_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_3); + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.setup"); + __pyx_r = NULL; + __pyx_L0:; + Py_DECREF(__pyx_v_newnode); + 
Py_DECREF(__pyx_v_stackentry); + Py_DECREF(__pyx_v_start); + return __pyx_r; +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":58 + * self.used_names = set() # Aliased to self.rootnode.used_names in pop() + * + * def addtoken(self, type, value, context): # <<<<<<<<<<<<<< + * """Add a token; return True iff this is the end of the program.""" + * cdef int ilabel, i, t, state, newstate + */ + +static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_addtoken(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_6sphinx_6pycode_5pgen2_5parse_6Parser_addtoken[] = "Add a token; return True iff this is the end of the program."; +static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_addtoken(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_type = 0; + PyObject *__pyx_v_value = 0; + PyObject *__pyx_v_context = 0; + int __pyx_v_ilabel; + int __pyx_v_i; + int __pyx_v_t; + int __pyx_v_state; + int __pyx_v_newstate; + PyObject *__pyx_v_dfa; + PyObject *__pyx_v_node; + PyObject *__pyx_v_states; + PyObject *__pyx_v_first; + PyObject *__pyx_v_arcs; + PyObject *__pyx_v_v; + PyObject *__pyx_v_itsdfa; + PyObject *__pyx_v_itsstates; + PyObject *__pyx_v_itsfirst; + PyObject *__pyx_r; + int __pyx_1; + PyObject *__pyx_2 = 0; + PyObject *__pyx_3 = 0; + PyObject *__pyx_4 = 0; + int __pyx_5; + Py_ssize_t __pyx_6 = 0; + PyObject *__pyx_7 = 0; + int __pyx_8; + static char *__pyx_argnames[] = {"type","value","context",0}; + if (likely(!__pyx_kwds) && likely(PyTuple_GET_SIZE(__pyx_args) == 3)) { + __pyx_v_type = PyTuple_GET_ITEM(__pyx_args, 0); + __pyx_v_value = PyTuple_GET_ITEM(__pyx_args, 1); + __pyx_v_context = PyTuple_GET_ITEM(__pyx_args, 2); + } + else { + if (unlikely(!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OOO", __pyx_argnames, &__pyx_v_type, &__pyx_v_value, &__pyx_v_context))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto 
__pyx_L3_error;} + } + goto __pyx_L4; + __pyx_L3_error:; + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.addtoken"); + return NULL; + __pyx_L4:; + __pyx_v_dfa = Py_None; Py_INCREF(Py_None); + __pyx_v_node = Py_None; Py_INCREF(Py_None); + __pyx_v_states = Py_None; Py_INCREF(Py_None); + __pyx_v_first = Py_None; Py_INCREF(Py_None); + __pyx_v_arcs = Py_None; Py_INCREF(Py_None); + __pyx_v_v = Py_None; Py_INCREF(Py_None); + __pyx_v_itsdfa = Py_None; Py_INCREF(Py_None); + __pyx_v_itsstates = Py_None; Py_INCREF(Py_None); + __pyx_v_itsfirst = Py_None; Py_INCREF(Py_None); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":62 + * cdef int ilabel, i, t, state, newstate + * # Map from token to label + * ilabel = self.classify(type, value, context) # <<<<<<<<<<<<<< + * # Loop until the token is shifted; may raise exceptions + * while True: + */ + __pyx_v_ilabel = ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->__pyx_vtab)->classify(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self), __pyx_v_type, __pyx_v_value, __pyx_v_context); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":64 + * ilabel = self.classify(type, value, context) + * # Loop until the token is shifted; may raise exceptions + * while True: # <<<<<<<<<<<<<< + * dfa, state, node = self.stack[-1] + * states, first = dfa + */ + while (1) { + __pyx_1 = 1; + if (!__pyx_1) break; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":65 + * # Loop until the token is shifted; may raise exceptions + * while True: + * dfa, state, node = self.stack[-1] # <<<<<<<<<<<<<< + * states, first = dfa + * arcs = states[state] + */ + __pyx_2 = __Pyx_GetItemInt(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack, -1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyTuple_CheckExact(__pyx_2) 
&& PyTuple_GET_SIZE(__pyx_2) == 3) { + PyObject* tuple = __pyx_2; + __pyx_4 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_4); + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_4; + __pyx_4 = 0; + __pyx_4 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_4); + __pyx_5 = __pyx_PyInt_int(__pyx_4); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + __pyx_v_state = __pyx_5; + __pyx_4 = PyTuple_GET_ITEM(tuple, 2); + Py_INCREF(__pyx_4); + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_4; + __pyx_4 = 0; + Py_DECREF(__pyx_2); __pyx_2 = 0; + } + else { + __pyx_3 = PyObject_GetIter(__pyx_2); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + __pyx_4 = __Pyx_UnpackItem(__pyx_3, 0); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_4; + __pyx_4 = 0; + __pyx_4 = __Pyx_UnpackItem(__pyx_3, 1); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_5 = __pyx_PyInt_int(__pyx_4); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + __pyx_v_state = __pyx_5; + __pyx_4 = __Pyx_UnpackItem(__pyx_3, 2); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_4; + __pyx_4 = 0; + if (__Pyx_EndUnpack(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_3); __pyx_3 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":66 + * 
while True: + * dfa, state, node = self.stack[-1] + * states, first = dfa # <<<<<<<<<<<<<< + * arcs = states[state] + * # Look for a state with this label + */ + if (PyTuple_CheckExact(__pyx_v_dfa) && PyTuple_GET_SIZE(__pyx_v_dfa) == 2) { + PyObject* tuple = __pyx_v_dfa; + __pyx_2 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_states); + __pyx_v_states = __pyx_2; + __pyx_2 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_first); + __pyx_v_first = __pyx_3; + __pyx_3 = 0; + } + else { + __pyx_4 = PyObject_GetIter(__pyx_v_dfa); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = __Pyx_UnpackItem(__pyx_4, 0); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_states); + __pyx_v_states = __pyx_2; + __pyx_2 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_4, 1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_first); + __pyx_v_first = __pyx_3; + __pyx_3 = 0; + if (__Pyx_EndUnpack(__pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":67 + * dfa, state, node = self.stack[-1] + * states, first = dfa + * arcs = states[state] # <<<<<<<<<<<<<< + * # Look for a state with this label + * for i, newstate in arcs: + */ + __pyx_2 = __Pyx_GetItemInt(__pyx_v_states, __pyx_v_state, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_arcs); + __pyx_v_arcs = __pyx_2; + __pyx_2 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":69 + * arcs = states[state] + * # Look for a state with this label + * for i, newstate in arcs: # 
<<<<<<<<<<<<<< + * t, v = self._grammar_labels[i] + * if ilabel == i: + */ + if (PyList_CheckExact(__pyx_v_arcs) || PyTuple_CheckExact(__pyx_v_arcs)) { + __pyx_6 = 0; __pyx_3 = __pyx_v_arcs; Py_INCREF(__pyx_3); + } else { + __pyx_6 = -1; __pyx_3 = PyObject_GetIter(__pyx_v_arcs); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + } + for (;;) { + if (likely(PyList_CheckExact(__pyx_3))) { + if (__pyx_6 >= PyList_GET_SIZE(__pyx_3)) break; + __pyx_4 = PyList_GET_ITEM(__pyx_3, __pyx_6); Py_INCREF(__pyx_4); __pyx_6++; + } else if (likely(PyTuple_CheckExact(__pyx_3))) { + if (__pyx_6 >= PyTuple_GET_SIZE(__pyx_3)) break; + __pyx_4 = PyTuple_GET_ITEM(__pyx_3, __pyx_6); Py_INCREF(__pyx_4); __pyx_6++; + } else { + __pyx_4 = PyIter_Next(__pyx_3); + if (!__pyx_4) { + if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + break; + } + } + if (PyTuple_CheckExact(__pyx_4) && PyTuple_GET_SIZE(__pyx_4) == 2) { + PyObject* tuple = __pyx_4; + __pyx_7 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_7); + __pyx_5 = __pyx_PyInt_int(__pyx_7); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + __pyx_v_i = __pyx_5; + __pyx_7 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_7); + __pyx_5 = __pyx_PyInt_int(__pyx_7); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + __pyx_v_newstate = __pyx_5; + Py_DECREF(__pyx_4); __pyx_4 = 0; + } + else { + __pyx_2 = PyObject_GetIter(__pyx_4); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + __pyx_7 = __Pyx_UnpackItem(__pyx_2, 0); if 
(unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_5 = __pyx_PyInt_int(__pyx_7); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + __pyx_v_i = __pyx_5; + __pyx_7 = __Pyx_UnpackItem(__pyx_2, 1); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_5 = __pyx_PyInt_int(__pyx_7); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + __pyx_v_newstate = __pyx_5; + if (__Pyx_EndUnpack(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":70 + * # Look for a state with this label + * for i, newstate in arcs: + * t, v = self._grammar_labels[i] # <<<<<<<<<<<<<< + * if ilabel == i: + * # Look it up in the list of labels + */ + __pyx_7 = __Pyx_GetItemInt(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_labels, __pyx_v_i, 0); if (!__pyx_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyTuple_CheckExact(__pyx_7) && PyTuple_GET_SIZE(__pyx_7) == 2) { + PyObject* tuple = __pyx_7; + __pyx_2 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_2); + __pyx_5 = __pyx_PyInt_int(__pyx_2); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + __pyx_v_t = __pyx_5; + __pyx_2 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_v); + __pyx_v_v = __pyx_2; + __pyx_2 = 0; + 
Py_DECREF(__pyx_7); __pyx_7 = 0; + } + else { + __pyx_4 = PyObject_GetIter(__pyx_7); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + __pyx_2 = __Pyx_UnpackItem(__pyx_4, 0); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_5 = __pyx_PyInt_int(__pyx_2); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + __pyx_v_t = __pyx_5; + __pyx_2 = __Pyx_UnpackItem(__pyx_4, 1); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_v); + __pyx_v_v = __pyx_2; + __pyx_2 = 0; + if (__Pyx_EndUnpack(__pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":71 + * for i, newstate in arcs: + * t, v = self._grammar_labels[i] + * if ilabel == i: # <<<<<<<<<<<<<< + * # Look it up in the list of labels + * ## assert t < 256 + */ + __pyx_1 = (__pyx_v_ilabel == __pyx_v_i); + if (__pyx_1) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":75 + * ## assert t < 256 + * # Shift a token; we're done with it + * self.shift(type, value, newstate, context) # <<<<<<<<<<<<<< + * # Pop while we are in an accept-only state + * state = newstate + */ + __pyx_2 = PyInt_FromLong(__pyx_v_newstate); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->__pyx_vtab)->shift(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser 
*)__pyx_v_self), __pyx_v_type, __pyx_v_value, __pyx_2, __pyx_v_context); + Py_DECREF(__pyx_2); __pyx_2 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":77 + * self.shift(type, value, newstate, context) + * # Pop while we are in an accept-only state + * state = newstate # <<<<<<<<<<<<<< + * while states[state] == [(0, state)]: + * self.pop() + */ + __pyx_v_state = __pyx_v_newstate; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":78 + * # Pop while we are in an accept-only state + * state = newstate + * while states[state] == [(0, state)]: # <<<<<<<<<<<<<< + * self.pop() + * if not self.stack: + */ + while (1) { + __pyx_7 = __Pyx_GetItemInt(__pyx_v_states, __pyx_v_state, 0); if (!__pyx_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_4 = PyInt_FromLong(__pyx_v_state); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = PyTuple_New(2); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_2, 0, __pyx_int_0); + PyTuple_SET_ITEM(__pyx_2, 1, __pyx_4); + __pyx_4 = 0; + __pyx_4 = PyList_New(1); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + PyList_SET_ITEM(__pyx_4, 0, ((PyObject *)__pyx_2)); + __pyx_2 = 0; + __pyx_2 = PyObject_RichCompare(__pyx_7, ((PyObject *)__pyx_4), Py_EQ); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + Py_DECREF(((PyObject *)__pyx_4)); __pyx_4 = 0; + __pyx_1 = __Pyx_PyObject_IsTrue(__pyx_2); if (unlikely(__pyx_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + if (!__pyx_1) break; + + /* 
"/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":79 + * state = newstate + * while states[state] == [(0, state)]: + * self.pop() # <<<<<<<<<<<<<< + * if not self.stack: + * # Done parsing! + */ + ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->__pyx_vtab)->pop(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":80 + * while states[state] == [(0, state)]: + * self.pop() + * if not self.stack: # <<<<<<<<<<<<<< + * # Done parsing! + * return True + */ + __pyx_1 = __Pyx_PyObject_IsTrue(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack); if (unlikely(__pyx_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_8 = (!__pyx_1); + if (__pyx_8) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":82 + * if not self.stack: + * # Done parsing! + * return True # <<<<<<<<<<<<<< + * dfa, state, node = self.stack[-1] + * states, first = dfa + */ + __pyx_7 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_r = __pyx_7; + __pyx_7 = 0; + Py_DECREF(__pyx_3); __pyx_3 = 0; + goto __pyx_L0; + goto __pyx_L12; + } + __pyx_L12:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":83 + * # Done parsing! 
+ * return True + * dfa, state, node = self.stack[-1] # <<<<<<<<<<<<<< + * states, first = dfa + * # Done with this token + */ + __pyx_4 = __Pyx_GetItemInt(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack, -1, 0); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyTuple_CheckExact(__pyx_4) && PyTuple_GET_SIZE(__pyx_4) == 3) { + PyObject* tuple = __pyx_4; + __pyx_7 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_7); + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_7; + __pyx_7 = 0; + __pyx_7 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_7); + __pyx_5 = __pyx_PyInt_int(__pyx_7); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + __pyx_v_state = __pyx_5; + __pyx_7 = PyTuple_GET_ITEM(tuple, 2); + Py_INCREF(__pyx_7); + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_7; + __pyx_7 = 0; + Py_DECREF(__pyx_4); __pyx_4 = 0; + } + else { + __pyx_2 = PyObject_GetIter(__pyx_4); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + __pyx_7 = __Pyx_UnpackItem(__pyx_2, 0); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_7; + __pyx_7 = 0; + __pyx_7 = __Pyx_UnpackItem(__pyx_2, 1); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_5 = __pyx_PyInt_int(__pyx_7); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + __pyx_v_state = __pyx_5; + __pyx_7 = __Pyx_UnpackItem(__pyx_2, 2); if (unlikely(!__pyx_7)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_7; + __pyx_7 = 0; + if (__Pyx_EndUnpack(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":84 + * return True + * dfa, state, node = self.stack[-1] + * states, first = dfa # <<<<<<<<<<<<<< + * # Done with this token + * return False + */ + if (PyTuple_CheckExact(__pyx_v_dfa) && PyTuple_GET_SIZE(__pyx_v_dfa) == 2) { + PyObject* tuple = __pyx_v_dfa; + __pyx_4 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_4); + Py_DECREF(__pyx_v_states); + __pyx_v_states = __pyx_4; + __pyx_4 = 0; + __pyx_2 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_first); + __pyx_v_first = __pyx_2; + __pyx_2 = 0; + } + else { + __pyx_7 = PyObject_GetIter(__pyx_v_dfa); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_4 = __Pyx_UnpackItem(__pyx_7, 0); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_states); + __pyx_v_states = __pyx_4; + __pyx_4 = 0; + __pyx_2 = __Pyx_UnpackItem(__pyx_7, 1); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_first); + __pyx_v_first = __pyx_2; + __pyx_2 = 0; + if (__Pyx_EndUnpack(__pyx_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + } + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":86 + * states, first = dfa + * # Done with this token + * return False # <<<<<<<<<<<<<< + * elif t >= 256: + * # See if it's a symbol and if we're in its first set + */ + __pyx_4 = __Pyx_PyBool_FromLong(0); if 
(unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_r = __pyx_4; + __pyx_4 = 0; + Py_DECREF(__pyx_3); __pyx_3 = 0; + goto __pyx_L0; + goto __pyx_L9; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":87 + * # Done with this token + * return False + * elif t >= 256: # <<<<<<<<<<<<<< + * # See if it's a symbol and if we're in its first set + * itsdfa = self._grammar_dfas[t] + */ + __pyx_1 = (__pyx_v_t >= 256); + if (__pyx_1) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":89 + * elif t >= 256: + * # See if it's a symbol and if we're in its first set + * itsdfa = self._grammar_dfas[t] # <<<<<<<<<<<<<< + * itsstates, itsfirst = itsdfa + * if ilabel in itsfirst: + */ + __pyx_2 = __Pyx_GetItemInt(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_dfas, __pyx_v_t, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_itsdfa); + __pyx_v_itsdfa = __pyx_2; + __pyx_2 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":90 + * # See if it's a symbol and if we're in its first set + * itsdfa = self._grammar_dfas[t] + * itsstates, itsfirst = itsdfa # <<<<<<<<<<<<<< + * if ilabel in itsfirst: + * # Push a symbol + */ + if (PyTuple_CheckExact(__pyx_v_itsdfa) && PyTuple_GET_SIZE(__pyx_v_itsdfa) == 2) { + PyObject* tuple = __pyx_v_itsdfa; + __pyx_4 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_4); + Py_DECREF(__pyx_v_itsstates); + __pyx_v_itsstates = __pyx_4; + __pyx_4 = 0; + __pyx_2 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_itsfirst); + __pyx_v_itsfirst = __pyx_2; + __pyx_2 = 0; + } + else { + __pyx_7 = PyObject_GetIter(__pyx_v_itsdfa); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_4 = __Pyx_UnpackItem(__pyx_7, 0); if (unlikely(!__pyx_4)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_itsstates); + __pyx_v_itsstates = __pyx_4; + __pyx_4 = 0; + __pyx_2 = __Pyx_UnpackItem(__pyx_7, 1); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_itsfirst); + __pyx_v_itsfirst = __pyx_2; + __pyx_2 = 0; + if (__Pyx_EndUnpack(__pyx_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":91 + * itsdfa = self._grammar_dfas[t] + * itsstates, itsfirst = itsdfa + * if ilabel in itsfirst: # <<<<<<<<<<<<<< + * # Push a symbol + * self.push(t, itsdfa, newstate, context) + */ + __pyx_4 = PyInt_FromLong(__pyx_v_ilabel); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_8 = (PySequence_Contains(__pyx_v_itsfirst, __pyx_4)); if (unlikely(__pyx_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + if (__pyx_8) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":93 + * if ilabel in itsfirst: + * # Push a symbol + * self.push(t, itsdfa, newstate, context) # <<<<<<<<<<<<<< + * break # To continue the outer while loop + * else: + */ + __pyx_2 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_7 = PyInt_FromLong(__pyx_v_newstate); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->__pyx_vtab)->push(((struct 
__pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self), __pyx_2, __pyx_v_itsdfa, __pyx_7, __pyx_v_context); + Py_DECREF(__pyx_2); __pyx_2 = 0; + Py_DECREF(__pyx_7); __pyx_7 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":94 + * # Push a symbol + * self.push(t, itsdfa, newstate, context) + * break # To continue the outer while loop # <<<<<<<<<<<<<< + * else: + * if (0, state) in arcs: + */ + goto __pyx_L8; + goto __pyx_L13; + } + __pyx_L13:; + goto __pyx_L9; + } + __pyx_L9:; + } + /*else*/ { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":96 + * break # To continue the outer while loop + * else: + * if (0, state) in arcs: # <<<<<<<<<<<<<< + * # An accepting state, pop it and try something else + * self.pop() + */ + __pyx_4 = PyInt_FromLong(__pyx_v_state); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = PyTuple_New(2); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_2, 0, __pyx_int_0); + PyTuple_SET_ITEM(__pyx_2, 1, __pyx_4); + __pyx_4 = 0; + __pyx_1 = (PySequence_Contains(__pyx_v_arcs, ((PyObject *)__pyx_2))); if (unlikely(__pyx_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((PyObject *)__pyx_2)); __pyx_2 = 0; + if (__pyx_1) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":98 + * if (0, state) in arcs: + * # An accepting state, pop it and try something else + * self.pop() # <<<<<<<<<<<<<< + * if not self.stack: + * # Done parsing, but another token is input + */ + ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->__pyx_vtab)->pop(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)); + + /* 
"/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":99 + * # An accepting state, pop it and try something else + * self.pop() + * if not self.stack: # <<<<<<<<<<<<<< + * # Done parsing, but another token is input + * raise ParseError("too much input", + */ + __pyx_8 = __Pyx_PyObject_IsTrue(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack); if (unlikely(__pyx_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_1 = (!__pyx_8); + if (__pyx_1) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":101 + * if not self.stack: + * # Done parsing, but another token is input + * raise ParseError("too much input", # <<<<<<<<<<<<<< + * type, value, context) + * else: + */ + __pyx_7 = __Pyx_GetName(__pyx_m, __pyx_kp_ParseError); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":102 + * # Done parsing, but another token is input + * raise ParseError("too much input", + * type, value, context) # <<<<<<<<<<<<<< + * else: + * # No success finding a transition + */ + __pyx_4 = PyTuple_New(4); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_kp_5); + PyTuple_SET_ITEM(__pyx_4, 0, __pyx_kp_5); + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_4, 1, __pyx_v_type); + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_4, 2, __pyx_v_value); + Py_INCREF(__pyx_v_context); + PyTuple_SET_ITEM(__pyx_4, 3, __pyx_v_context); + __pyx_2 = PyObject_Call(__pyx_7, ((PyObject *)__pyx_4), NULL); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + Py_DECREF(((PyObject *)__pyx_4)); __pyx_4 = 0; + __Pyx_Raise(__pyx_2, 0, 0); + Py_DECREF(__pyx_2); __pyx_2 = 0; + {__pyx_filename 
= __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + goto __pyx_L15; + } + __pyx_L15:; + goto __pyx_L14; + } + /*else*/ { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":105 + * else: + * # No success finding a transition + * raise ParseError("bad input", type, value, context) # <<<<<<<<<<<<<< + * + * cdef int classify(self, type, value, context): + */ + __pyx_7 = __Pyx_GetName(__pyx_m, __pyx_kp_ParseError); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_4 = PyTuple_New(4); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_kp_6); + PyTuple_SET_ITEM(__pyx_4, 0, __pyx_kp_6); + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_4, 1, __pyx_v_type); + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_4, 2, __pyx_v_value); + Py_INCREF(__pyx_v_context); + PyTuple_SET_ITEM(__pyx_4, 3, __pyx_v_context); + __pyx_2 = PyObject_Call(__pyx_7, ((PyObject *)__pyx_4), NULL); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + Py_DECREF(((PyObject *)__pyx_4)); __pyx_4 = 0; + __Pyx_Raise(__pyx_2, 0, 0); + Py_DECREF(__pyx_2); __pyx_2 = 0; + {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + } + __pyx_L14:; + } + __pyx_L8:; + Py_DECREF(__pyx_3); __pyx_3 = 0; + } + + __pyx_r = Py_None; Py_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_3); + Py_XDECREF(__pyx_4); + Py_XDECREF(__pyx_7); + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.addtoken"); + __pyx_r = NULL; + __pyx_L0:; + Py_DECREF(__pyx_v_dfa); + Py_DECREF(__pyx_v_node); + Py_DECREF(__pyx_v_states); + Py_DECREF(__pyx_v_first); + Py_DECREF(__pyx_v_arcs); + Py_DECREF(__pyx_v_v); + Py_DECREF(__pyx_v_itsdfa); + 
Py_DECREF(__pyx_v_itsstates); + Py_DECREF(__pyx_v_itsfirst); + return __pyx_r; +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":107 + * raise ParseError("bad input", type, value, context) + * + * cdef int classify(self, type, value, context): # <<<<<<<<<<<<<< + * """Turn a token into a label. (Internal)""" + * if type == NAME: + */ + +static int __pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_classify(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_v_self, PyObject *__pyx_v_type, PyObject *__pyx_v_value, PyObject *__pyx_v_context) { + PyObject *__pyx_v_ilabel; + int __pyx_r; + PyObject *__pyx_1 = 0; + int __pyx_2; + PyObject *__pyx_3 = 0; + PyObject *__pyx_4 = 0; + int __pyx_5; + __pyx_v_ilabel = Py_None; Py_INCREF(Py_None); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":109 + * cdef int classify(self, type, value, context): + * """Turn a token into a label. (Internal)""" + * if type == NAME: # <<<<<<<<<<<<<< + * # Keep a listing of all used names + * self.used_names.add(value) + */ + __pyx_1 = PyObject_RichCompare(__pyx_v_type, __pyx_int_1, Py_EQ); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = __Pyx_PyObject_IsTrue(__pyx_1); if (unlikely(__pyx_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + if (__pyx_2) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":111 + * if type == NAME: + * # Keep a listing of all used names + * self.used_names.add(value) # <<<<<<<<<<<<<< + * # Check for reserved words + * ilabel = self._grammar_keywords.get(value) + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_self->used_names, __pyx_kp_add); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = PyTuple_New(1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 
111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_value); + __pyx_4 = PyObject_Call(__pyx_1, ((PyObject *)__pyx_3), NULL); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + Py_DECREF(((PyObject *)__pyx_3)); __pyx_3 = 0; + Py_DECREF(__pyx_4); __pyx_4 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":113 + * self.used_names.add(value) + * # Check for reserved words + * ilabel = self._grammar_keywords.get(value) # <<<<<<<<<<<<<< + * if ilabel is not None: + * return ilabel + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_self->_grammar_keywords, __pyx_kp_get); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = PyTuple_New(1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_value); + __pyx_4 = PyObject_Call(__pyx_1, ((PyObject *)__pyx_3), NULL); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + Py_DECREF(((PyObject *)__pyx_3)); __pyx_3 = 0; + Py_DECREF(__pyx_v_ilabel); + __pyx_v_ilabel = __pyx_4; + __pyx_4 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":114 + * # Check for reserved words + * ilabel = self._grammar_keywords.get(value) + * if ilabel is not None: # <<<<<<<<<<<<<< + * return ilabel + * ilabel = self._grammar_tokens.get(type) + */ + __pyx_2 = (__pyx_v_ilabel != Py_None); + if (__pyx_2) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":115 + * ilabel = self._grammar_keywords.get(value) + * if ilabel is not None: + * return ilabel # <<<<<<<<<<<<<< + * ilabel = self._grammar_tokens.get(type) + * if ilabel is None: + 
*/ + __pyx_5 = __pyx_PyInt_int(__pyx_v_ilabel); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_r = __pyx_5; + goto __pyx_L0; + goto __pyx_L4; + } + __pyx_L4:; + goto __pyx_L3; + } + __pyx_L3:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":116 + * if ilabel is not None: + * return ilabel + * ilabel = self._grammar_tokens.get(type) # <<<<<<<<<<<<<< + * if ilabel is None: + * raise ParseError("bad token", type, value, context) + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_self->_grammar_tokens, __pyx_kp_get); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = PyTuple_New(1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_type); + __pyx_4 = PyObject_Call(__pyx_1, ((PyObject *)__pyx_3), NULL); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + Py_DECREF(((PyObject *)__pyx_3)); __pyx_3 = 0; + Py_DECREF(__pyx_v_ilabel); + __pyx_v_ilabel = __pyx_4; + __pyx_4 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":117 + * return ilabel + * ilabel = self._grammar_tokens.get(type) + * if ilabel is None: # <<<<<<<<<<<<<< + * raise ParseError("bad token", type, value, context) + * return ilabel + */ + __pyx_2 = (__pyx_v_ilabel == Py_None); + if (__pyx_2) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":118 + * ilabel = self._grammar_tokens.get(type) + * if ilabel is None: + * raise ParseError("bad token", type, value, context) # <<<<<<<<<<<<<< + * return ilabel + * + */ + __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_kp_ParseError); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = PyTuple_New(4); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_kp_7); + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_kp_7); + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_3, 1, __pyx_v_type); + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_3, 2, __pyx_v_value); + Py_INCREF(__pyx_v_context); + PyTuple_SET_ITEM(__pyx_3, 3, __pyx_v_context); + __pyx_4 = PyObject_Call(__pyx_1, ((PyObject *)__pyx_3), NULL); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + Py_DECREF(((PyObject *)__pyx_3)); __pyx_3 = 0; + __Pyx_Raise(__pyx_4, 0, 0); + Py_DECREF(__pyx_4); __pyx_4 = 0; + {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + goto __pyx_L5; + } + __pyx_L5:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":119 + * if ilabel is None: + * raise ParseError("bad token", type, value, context) + * return ilabel # <<<<<<<<<<<<<< + * + * cdef void shift(self, type, value, newstate, context): + */ + __pyx_5 = __pyx_PyInt_int(__pyx_v_ilabel); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_r = __pyx_5; + goto __pyx_L0; + + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + Py_XDECREF(__pyx_3); + Py_XDECREF(__pyx_4); + __Pyx_WriteUnraisable("sphinx.pycode.pgen2.parse.Parser.classify"); + __pyx_r = 0; + __pyx_L0:; + Py_DECREF(__pyx_v_ilabel); + return __pyx_r; +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":121 + * return ilabel + * + * cdef void shift(self, type, value, newstate, context): # <<<<<<<<<<<<<< + * """Shift a token. 
(Internal)""" + * dfa, state, node = self.stack[-1] + */ + +static void __pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_shift(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_v_self, PyObject *__pyx_v_type, PyObject *__pyx_v_value, PyObject *__pyx_v_newstate, PyObject *__pyx_v_context) { + PyObject *__pyx_v_dfa; + PyObject *__pyx_v_state; + PyObject *__pyx_v_node; + PyObject *__pyx_v_newnode; + PyObject *__pyx_1 = 0; + PyObject *__pyx_2 = 0; + PyObject *__pyx_3 = 0; + int __pyx_4; + __pyx_v_dfa = Py_None; Py_INCREF(Py_None); + __pyx_v_state = Py_None; Py_INCREF(Py_None); + __pyx_v_node = Py_None; Py_INCREF(Py_None); + __pyx_v_newnode = Py_None; Py_INCREF(Py_None); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":123 + * cdef void shift(self, type, value, newstate, context): + * """Shift a token. (Internal)""" + * dfa, state, node = self.stack[-1] # <<<<<<<<<<<<<< + * newnode = (type, value, context, None) + * newnode = self.convert(newnode) + */ + __pyx_1 = __Pyx_GetItemInt(__pyx_v_self->stack, -1, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyTuple_CheckExact(__pyx_1) && PyTuple_GET_SIZE(__pyx_1) == 3) { + PyObject* tuple = __pyx_1; + __pyx_3 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_state); + __pyx_v_state = __pyx_3; + __pyx_3 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 2); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_3; + __pyx_3 = 0; + Py_DECREF(__pyx_1); __pyx_1 = 0; + } + else { + __pyx_2 = PyObject_GetIter(__pyx_1); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_2, 0); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 
123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_2, 1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_state); + __pyx_v_state = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_2, 2); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_3; + __pyx_3 = 0; + if (__Pyx_EndUnpack(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":124 + * """Shift a token. (Internal)""" + * dfa, state, node = self.stack[-1] + * newnode = (type, value, context, None) # <<<<<<<<<<<<<< + * newnode = self.convert(newnode) + * if newnode is not None: + */ + __pyx_3 = PyTuple_New(4); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_type); + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_3, 1, __pyx_v_value); + Py_INCREF(__pyx_v_context); + PyTuple_SET_ITEM(__pyx_3, 2, __pyx_v_context); + Py_INCREF(Py_None); + PyTuple_SET_ITEM(__pyx_3, 3, Py_None); + Py_DECREF(__pyx_v_newnode); + __pyx_v_newnode = ((PyObject *)__pyx_3); + __pyx_3 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":125 + * dfa, state, node = self.stack[-1] + * newnode = (type, value, context, None) + * newnode = self.convert(newnode) # <<<<<<<<<<<<<< + * if newnode is not None: + * node[-1].append(newnode) + */ + __pyx_1 = ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self->__pyx_vtab)->convert(__pyx_v_self, __pyx_v_newnode); if (unlikely(!__pyx_1)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_newnode); + __pyx_v_newnode = __pyx_1; + __pyx_1 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":126 + * newnode = (type, value, context, None) + * newnode = self.convert(newnode) + * if newnode is not None: # <<<<<<<<<<<<<< + * node[-1].append(newnode) + * self.stack[-1] = (dfa, newstate, node) + */ + __pyx_4 = (__pyx_v_newnode != Py_None); + if (__pyx_4) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":127 + * newnode = self.convert(newnode) + * if newnode is not None: + * node[-1].append(newnode) # <<<<<<<<<<<<<< + * self.stack[-1] = (dfa, newstate, node) + * + */ + __pyx_2 = __Pyx_GetItemInt(__pyx_v_node, -1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = __Pyx_PyObject_Append(__pyx_2, __pyx_v_newnode); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + Py_DECREF(__pyx_3); __pyx_3 = 0; + goto __pyx_L3; + } + __pyx_L3:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":128 + * if newnode is not None: + * node[-1].append(newnode) + * self.stack[-1] = (dfa, newstate, node) # <<<<<<<<<<<<<< + * + * cdef void push(self, type, newdfa, newstate, context): + */ + __pyx_1 = PyTuple_New(3); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_dfa); + PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_dfa); + Py_INCREF(__pyx_v_newstate); + PyTuple_SET_ITEM(__pyx_1, 1, __pyx_v_newstate); + Py_INCREF(__pyx_v_node); + PyTuple_SET_ITEM(__pyx_1, 2, __pyx_v_node); + if (__Pyx_SetItemInt(__pyx_v_self->stack, -1, ((PyObject *)__pyx_1), 0) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + 
Py_DECREF(((PyObject *)__pyx_1)); __pyx_1 = 0; + + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_3); + __Pyx_WriteUnraisable("sphinx.pycode.pgen2.parse.Parser.shift"); + __pyx_L0:; + Py_DECREF(__pyx_v_dfa); + Py_DECREF(__pyx_v_state); + Py_DECREF(__pyx_v_node); + Py_DECREF(__pyx_v_newnode); +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":130 + * self.stack[-1] = (dfa, newstate, node) + * + * cdef void push(self, type, newdfa, newstate, context): # <<<<<<<<<<<<<< + * """Push a nonterminal. (Internal)""" + * dfa, state, node = self.stack[-1] + */ + +static void __pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_push(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_v_self, PyObject *__pyx_v_type, PyObject *__pyx_v_newdfa, PyObject *__pyx_v_newstate, PyObject *__pyx_v_context) { + PyObject *__pyx_v_dfa; + PyObject *__pyx_v_state; + PyObject *__pyx_v_node; + PyObject *__pyx_v_newnode; + PyObject *__pyx_1 = 0; + PyObject *__pyx_2 = 0; + PyObject *__pyx_3 = 0; + __pyx_v_dfa = Py_None; Py_INCREF(Py_None); + __pyx_v_state = Py_None; Py_INCREF(Py_None); + __pyx_v_node = Py_None; Py_INCREF(Py_None); + __pyx_v_newnode = Py_None; Py_INCREF(Py_None); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":132 + * cdef void push(self, type, newdfa, newstate, context): + * """Push a nonterminal. 
(Internal)""" + * dfa, state, node = self.stack[-1] # <<<<<<<<<<<<<< + * newnode = (type, None, context, []) + * self.stack[-1] = (dfa, newstate, node) + */ + __pyx_1 = __Pyx_GetItemInt(__pyx_v_self->stack, -1, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyTuple_CheckExact(__pyx_1) && PyTuple_GET_SIZE(__pyx_1) == 3) { + PyObject* tuple = __pyx_1; + __pyx_3 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_state); + __pyx_v_state = __pyx_3; + __pyx_3 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 2); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_3; + __pyx_3 = 0; + Py_DECREF(__pyx_1); __pyx_1 = 0; + } + else { + __pyx_2 = PyObject_GetIter(__pyx_1); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_2, 0); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_2, 1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_state); + __pyx_v_state = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_2, 2); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_3; + __pyx_3 = 0; + if (__Pyx_EndUnpack(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":133 + * 
"""Push a nonterminal. (Internal)""" + * dfa, state, node = self.stack[-1] + * newnode = (type, None, context, []) # <<<<<<<<<<<<<< + * self.stack[-1] = (dfa, newstate, node) + * self.stack.append((newdfa, 0, newnode)) + */ + __pyx_3 = PyList_New(0); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_1 = PyTuple_New(4); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_type); + Py_INCREF(Py_None); + PyTuple_SET_ITEM(__pyx_1, 1, Py_None); + Py_INCREF(__pyx_v_context); + PyTuple_SET_ITEM(__pyx_1, 2, __pyx_v_context); + PyTuple_SET_ITEM(__pyx_1, 3, ((PyObject *)__pyx_3)); + __pyx_3 = 0; + Py_DECREF(__pyx_v_newnode); + __pyx_v_newnode = ((PyObject *)__pyx_1); + __pyx_1 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":134 + * dfa, state, node = self.stack[-1] + * newnode = (type, None, context, []) + * self.stack[-1] = (dfa, newstate, node) # <<<<<<<<<<<<<< + * self.stack.append((newdfa, 0, newnode)) + * + */ + __pyx_2 = PyTuple_New(3); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_dfa); + PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_dfa); + Py_INCREF(__pyx_v_newstate); + PyTuple_SET_ITEM(__pyx_2, 1, __pyx_v_newstate); + Py_INCREF(__pyx_v_node); + PyTuple_SET_ITEM(__pyx_2, 2, __pyx_v_node); + if (__Pyx_SetItemInt(__pyx_v_self->stack, -1, ((PyObject *)__pyx_2), 0) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((PyObject *)__pyx_2)); __pyx_2 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":135 + * newnode = (type, None, context, []) + * self.stack[-1] = (dfa, newstate, node) + * self.stack.append((newdfa, 0, newnode)) # <<<<<<<<<<<<<< + * + * cdef void pop(self): + */ + 
__pyx_3 = PyTuple_New(3); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_newdfa); + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_newdfa); + Py_INCREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_3, 1, __pyx_int_0); + Py_INCREF(__pyx_v_newnode); + PyTuple_SET_ITEM(__pyx_3, 2, __pyx_v_newnode); + __pyx_1 = __Pyx_PyObject_Append(__pyx_v_self->stack, ((PyObject *)__pyx_3)); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((PyObject *)__pyx_3)); __pyx_3 = 0; + Py_DECREF(__pyx_1); __pyx_1 = 0; + + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_3); + __Pyx_WriteUnraisable("sphinx.pycode.pgen2.parse.Parser.push"); + __pyx_L0:; + Py_DECREF(__pyx_v_dfa); + Py_DECREF(__pyx_v_state); + Py_DECREF(__pyx_v_node); + Py_DECREF(__pyx_v_newnode); +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":137 + * self.stack.append((newdfa, 0, newnode)) + * + * cdef void pop(self): # <<<<<<<<<<<<<< + * """Pop a nonterminal. 
(Internal)""" + * popdfa, popstate, popnode = self.stack.pop() + */ + +static void __pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_pop(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_v_self) { + PyObject *__pyx_v_popdfa; + PyObject *__pyx_v_popstate; + PyObject *__pyx_v_popnode; + PyObject *__pyx_v_newnode; + PyObject *__pyx_v_dfa; + PyObject *__pyx_v_state; + PyObject *__pyx_v_node; + PyObject *__pyx_1 = 0; + PyObject *__pyx_2 = 0; + PyObject *__pyx_3 = 0; + int __pyx_4; + __pyx_v_popdfa = Py_None; Py_INCREF(Py_None); + __pyx_v_popstate = Py_None; Py_INCREF(Py_None); + __pyx_v_popnode = Py_None; Py_INCREF(Py_None); + __pyx_v_newnode = Py_None; Py_INCREF(Py_None); + __pyx_v_dfa = Py_None; Py_INCREF(Py_None); + __pyx_v_state = Py_None; Py_INCREF(Py_None); + __pyx_v_node = Py_None; Py_INCREF(Py_None); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":139 + * cdef void pop(self): + * """Pop a nonterminal. (Internal)""" + * popdfa, popstate, popnode = self.stack.pop() # <<<<<<<<<<<<<< + * newnode = self.convert(popnode) + * if newnode is not None: + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_self->stack, __pyx_kp_pop); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = PyObject_Call(__pyx_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + if (PyTuple_CheckExact(__pyx_2) && PyTuple_GET_SIZE(__pyx_2) == 3) { + PyObject* tuple = __pyx_2; + __pyx_3 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_popdfa); + __pyx_v_popdfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_popstate); + __pyx_v_popstate = __pyx_3; + __pyx_3 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 2); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_popnode); + __pyx_v_popnode = __pyx_3; 
+ __pyx_3 = 0; + Py_DECREF(__pyx_2); __pyx_2 = 0; + } + else { + __pyx_1 = PyObject_GetIter(__pyx_2); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_1, 0); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_popdfa); + __pyx_v_popdfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_1, 1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_popstate); + __pyx_v_popstate = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_1, 2); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_popnode); + __pyx_v_popnode = __pyx_3; + __pyx_3 = 0; + if (__Pyx_EndUnpack(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":140 + * """Pop a nonterminal. 
(Internal)""" + * popdfa, popstate, popnode = self.stack.pop() + * newnode = self.convert(popnode) # <<<<<<<<<<<<<< + * if newnode is not None: + * if self.stack: + */ + __pyx_3 = ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self->__pyx_vtab)->convert(__pyx_v_self, __pyx_v_popnode); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_newnode); + __pyx_v_newnode = __pyx_3; + __pyx_3 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":141 + * popdfa, popstate, popnode = self.stack.pop() + * newnode = self.convert(popnode) + * if newnode is not None: # <<<<<<<<<<<<<< + * if self.stack: + * dfa, state, node = self.stack[-1] + */ + __pyx_4 = (__pyx_v_newnode != Py_None); + if (__pyx_4) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":142 + * newnode = self.convert(popnode) + * if newnode is not None: + * if self.stack: # <<<<<<<<<<<<<< + * dfa, state, node = self.stack[-1] + * node[-1].append(newnode) + */ + __pyx_4 = __Pyx_PyObject_IsTrue(__pyx_v_self->stack); if (unlikely(__pyx_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__pyx_4) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":143 + * if newnode is not None: + * if self.stack: + * dfa, state, node = self.stack[-1] # <<<<<<<<<<<<<< + * node[-1].append(newnode) + * else: + */ + __pyx_2 = __Pyx_GetItemInt(__pyx_v_self->stack, -1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyTuple_CheckExact(__pyx_2) && PyTuple_GET_SIZE(__pyx_2) == 3) { + PyObject* tuple = __pyx_2; + __pyx_3 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_state); + __pyx_v_state = __pyx_3; + __pyx_3 = 
0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 2); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_3; + __pyx_3 = 0; + Py_DECREF(__pyx_2); __pyx_2 = 0; + } + else { + __pyx_1 = PyObject_GetIter(__pyx_2); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_1, 0); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_1, 1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_state); + __pyx_v_state = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_1, 2); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_3; + __pyx_3 = 0; + if (__Pyx_EndUnpack(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":144 + * if self.stack: + * dfa, state, node = self.stack[-1] + * node[-1].append(newnode) # <<<<<<<<<<<<<< + * else: + * self.rootnode = newnode + */ + __pyx_3 = __Pyx_GetItemInt(__pyx_v_node, -1, 0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = __Pyx_PyObject_Append(__pyx_3, __pyx_v_newnode); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_3); __pyx_3 = 0; + Py_DECREF(__pyx_2); __pyx_2 = 0; + goto __pyx_L4; + } + /*else*/ { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":146 + * 
node[-1].append(newnode) + * else: + * self.rootnode = newnode # <<<<<<<<<<<<<< + * self.rootnode.used_names = self.used_names + * + */ + Py_INCREF(__pyx_v_newnode); + Py_DECREF(__pyx_v_self->rootnode); + __pyx_v_self->rootnode = __pyx_v_newnode; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":147 + * else: + * self.rootnode = newnode + * self.rootnode.used_names = self.used_names # <<<<<<<<<<<<<< + * + * cdef convert(self, raw_node): + */ + if (PyObject_SetAttr(__pyx_v_self->rootnode, __pyx_kp_used_names, __pyx_v_self->used_names) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + } + __pyx_L4:; + goto __pyx_L3; + } + __pyx_L3:; + + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_3); + __Pyx_WriteUnraisable("sphinx.pycode.pgen2.parse.Parser.pop"); + __pyx_L0:; + Py_DECREF(__pyx_v_popdfa); + Py_DECREF(__pyx_v_popstate); + Py_DECREF(__pyx_v_popnode); + Py_DECREF(__pyx_v_newnode); + Py_DECREF(__pyx_v_dfa); + Py_DECREF(__pyx_v_state); + Py_DECREF(__pyx_v_node); +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":149 + * self.rootnode.used_names = self.used_names + * + * cdef convert(self, raw_node): # <<<<<<<<<<<<<< + * type, value, context, children = raw_node + * if children or type in self._grammar_number2symbol: + */ + +static PyObject *__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_convert(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_v_self, PyObject *__pyx_v_raw_node) { + PyObject *__pyx_v_type; + PyObject *__pyx_v_value; + PyObject *__pyx_v_context; + PyObject *__pyx_v_children; + PyObject *__pyx_r; + PyObject *__pyx_1 = 0; + PyObject *__pyx_2 = 0; + int __pyx_3; + Py_ssize_t __pyx_4 = 0; + PyObject *__pyx_5 = 0; + PyObject *__pyx_6 = 0; + __pyx_v_type = Py_None; Py_INCREF(Py_None); + __pyx_v_value = Py_None; Py_INCREF(Py_None); + __pyx_v_context = Py_None; Py_INCREF(Py_None); + __pyx_v_children = Py_None; 
Py_INCREF(Py_None); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":150 + * + * cdef convert(self, raw_node): + * type, value, context, children = raw_node # <<<<<<<<<<<<<< + * if children or type in self._grammar_number2symbol: + * # If there's exactly one child, return that child instead of + */ + if (PyTuple_CheckExact(__pyx_v_raw_node) && PyTuple_GET_SIZE(__pyx_v_raw_node) == 4) { + PyObject* tuple = __pyx_v_raw_node; + __pyx_2 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_type); + __pyx_v_type = __pyx_2; + __pyx_2 = 0; + __pyx_2 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_value); + __pyx_v_value = __pyx_2; + __pyx_2 = 0; + __pyx_2 = PyTuple_GET_ITEM(tuple, 2); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_context); + __pyx_v_context = __pyx_2; + __pyx_2 = 0; + __pyx_2 = PyTuple_GET_ITEM(tuple, 3); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_children); + __pyx_v_children = __pyx_2; + __pyx_2 = 0; + } + else { + __pyx_1 = PyObject_GetIter(__pyx_v_raw_node); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = __Pyx_UnpackItem(__pyx_1, 0); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_type); + __pyx_v_type = __pyx_2; + __pyx_2 = 0; + __pyx_2 = __Pyx_UnpackItem(__pyx_1, 1); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_value); + __pyx_v_value = __pyx_2; + __pyx_2 = 0; + __pyx_2 = __Pyx_UnpackItem(__pyx_1, 2); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_context); + __pyx_v_context = __pyx_2; + __pyx_2 = 0; + __pyx_2 = __Pyx_UnpackItem(__pyx_1, 3); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_children); + __pyx_v_children = __pyx_2; + __pyx_2 = 0; + if (__Pyx_EndUnpack(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":151 + * cdef convert(self, raw_node): + * type, value, context, children = raw_node + * if children or type in self._grammar_number2symbol: # <<<<<<<<<<<<<< + * # If there's exactly one child, return that child instead of + * # creating a new node. + */ + __pyx_2 = __pyx_v_children; + Py_INCREF(__pyx_2); + __pyx_3 = __Pyx_PyObject_IsTrue(__pyx_2); if (unlikely(__pyx_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!__pyx_3) { + Py_DECREF(__pyx_2); __pyx_2 = 0; + __pyx_3 = (PySequence_Contains(__pyx_v_self->_grammar_number2symbol, __pyx_v_type)); if (unlikely(__pyx_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = __Pyx_PyBool_FromLong(__pyx_3); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + } + __pyx_3 = __Pyx_PyObject_IsTrue(__pyx_2); if (unlikely(__pyx_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + if (__pyx_3) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":154 + * # If there's exactly one child, return that child instead of + * # creating a new node. 
+ * if len(children) == 1: # <<<<<<<<<<<<<< + * return children[0] + * return Node(type, children, context=context) + */ + __pyx_4 = PyObject_Length(__pyx_v_children); if (unlikely(__pyx_4 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = (__pyx_4 == 1); + if (__pyx_3) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":155 + * # creating a new node. + * if len(children) == 1: + * return children[0] # <<<<<<<<<<<<<< + * return Node(type, children, context=context) + * else: + */ + __pyx_1 = __Pyx_GetItemInt(__pyx_v_children, 0, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_r = __pyx_1; + __pyx_1 = 0; + goto __pyx_L0; + goto __pyx_L4; + } + __pyx_L4:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":156 + * if len(children) == 1: + * return children[0] + * return Node(type, children, context=context) # <<<<<<<<<<<<<< + * else: + * return Leaf(type, value, context=context) + */ + __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_kp_Node); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_1 = PyTuple_New(2); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_type); + Py_INCREF(__pyx_v_children); + PyTuple_SET_ITEM(__pyx_1, 1, __pyx_v_children); + __pyx_5 = PyDict_New(); if (unlikely(!__pyx_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyDict_SetItem(__pyx_5, __pyx_kp_context, __pyx_v_context) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_6 = PyEval_CallObjectWithKeywords(__pyx_2, ((PyObject *)__pyx_1), ((PyObject *)__pyx_5)); if (unlikely(!__pyx_6)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + Py_DECREF(((PyObject *)__pyx_1)); __pyx_1 = 0; + Py_DECREF(((PyObject *)__pyx_5)); __pyx_5 = 0; + __pyx_r = __pyx_6; + __pyx_6 = 0; + goto __pyx_L0; + goto __pyx_L3; + } + /*else*/ { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":158 + * return Node(type, children, context=context) + * else: + * return Leaf(type, value, context=context) # <<<<<<<<<<<<<< + */ + __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_kp_Leaf); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_1 = PyTuple_New(2); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_type); + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_1, 1, __pyx_v_value); + __pyx_5 = PyDict_New(); if (unlikely(!__pyx_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyDict_SetItem(__pyx_5, __pyx_kp_context, __pyx_v_context) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_6 = PyEval_CallObjectWithKeywords(__pyx_2, ((PyObject *)__pyx_1), ((PyObject *)__pyx_5)); if (unlikely(!__pyx_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + Py_DECREF(((PyObject *)__pyx_1)); __pyx_1 = 0; + Py_DECREF(((PyObject *)__pyx_5)); __pyx_5 = 0; + __pyx_r = __pyx_6; + __pyx_6 = 0; + goto __pyx_L0; + } + __pyx_L3:; + + __pyx_r = Py_None; Py_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_5); + Py_XDECREF(__pyx_6); + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.convert"); + __pyx_r = 0; + __pyx_L0:; + Py_DECREF(__pyx_v_type); + 
Py_DECREF(__pyx_v_value); + Py_DECREF(__pyx_v_context); + Py_DECREF(__pyx_v_children); + return __pyx_r; +} +static struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser __pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser; + +static PyObject *__pyx_tp_new_6sphinx_6pycode_5pgen2_5parse_Parser(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *p; + PyObject *o = (*t->tp_alloc)(t, 0); + if (!o) return 0; + p = ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)o); + p->__pyx_vtab = __pyx_vtabptr_6sphinx_6pycode_5pgen2_5parse_Parser; + p->grammar = Py_None; Py_INCREF(Py_None); + p->stack = Py_None; Py_INCREF(Py_None); + p->rootnode = Py_None; Py_INCREF(Py_None); + p->used_names = Py_None; Py_INCREF(Py_None); + p->_grammar_dfas = Py_None; Py_INCREF(Py_None); + p->_grammar_labels = Py_None; Py_INCREF(Py_None); + p->_grammar_keywords = Py_None; Py_INCREF(Py_None); + p->_grammar_tokens = Py_None; Py_INCREF(Py_None); + p->_grammar_number2symbol = Py_None; Py_INCREF(Py_None); + return o; +} + +static void __pyx_tp_dealloc_6sphinx_6pycode_5pgen2_5parse_Parser(PyObject *o) { + struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *p = (struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)o; + Py_XDECREF(p->grammar); + Py_XDECREF(p->stack); + Py_XDECREF(p->rootnode); + Py_XDECREF(p->used_names); + Py_XDECREF(p->_grammar_dfas); + Py_XDECREF(p->_grammar_labels); + Py_XDECREF(p->_grammar_keywords); + Py_XDECREF(p->_grammar_tokens); + Py_XDECREF(p->_grammar_number2symbol); + (*Py_TYPE(o)->tp_free)(o); +} + +static int __pyx_tp_traverse_6sphinx_6pycode_5pgen2_5parse_Parser(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *p = (struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)o; + if (p->grammar) { + e = (*v)(p->grammar, a); if (e) return e; + } + if (p->stack) { + e = (*v)(p->stack, a); if (e) return e; + } + if (p->rootnode) { + e = (*v)(p->rootnode, a); if 
(e) return e; + } + if (p->used_names) { + e = (*v)(p->used_names, a); if (e) return e; + } + if (p->_grammar_dfas) { + e = (*v)(p->_grammar_dfas, a); if (e) return e; + } + if (p->_grammar_labels) { + e = (*v)(p->_grammar_labels, a); if (e) return e; + } + if (p->_grammar_keywords) { + e = (*v)(p->_grammar_keywords, a); if (e) return e; + } + if (p->_grammar_tokens) { + e = (*v)(p->_grammar_tokens, a); if (e) return e; + } + if (p->_grammar_number2symbol) { + e = (*v)(p->_grammar_number2symbol, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_6sphinx_6pycode_5pgen2_5parse_Parser(PyObject *o) { + struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *p = (struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)o; + PyObject* tmp; + tmp = ((PyObject*)p->grammar); + p->grammar = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->stack); + p->stack = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->rootnode); + p->rootnode = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->used_names); + p->used_names = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_grammar_dfas); + p->_grammar_dfas = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_grammar_labels); + p->_grammar_labels = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_grammar_keywords); + p->_grammar_keywords = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_grammar_tokens); + p->_grammar_tokens = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_grammar_number2symbol); + p->_grammar_number2symbol = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + return 0; +} + +static struct PyMethodDef __pyx_methods_6sphinx_6pycode_5pgen2_5parse_Parser[] = { + {"setup", (PyCFunction)__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_setup, METH_VARARGS|METH_KEYWORDS, 0}, + {"addtoken", 
(PyCFunction)__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_addtoken, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6sphinx_6pycode_5pgen2_5parse_6Parser_addtoken}, + {0, 0, 0, 0} +}; + +static struct PyMemberDef __pyx_members_6sphinx_6pycode_5pgen2_5parse_Parser[] = { + {"grammar", T_OBJECT, offsetof(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser, grammar), 0, 0}, + {"stack", T_OBJECT, offsetof(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser, stack), 0, 0}, + {"rootnode", T_OBJECT, offsetof(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser, rootnode), 0, 0}, + {"used_names", T_OBJECT, offsetof(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser, used_names), 0, 0}, + {0, 0, 0, 0, 0} +}; + +static PyNumberMethods __pyx_tp_as_number_Parser = { + 0, /*nb_add*/ + 0, /*nb_subtract*/ + 0, /*nb_multiply*/ + #if PY_MAJOR_VERSION < 3 + 0, /*nb_divide*/ + #endif + 0, /*nb_remainder*/ + 0, /*nb_divmod*/ + 0, /*nb_power*/ + 0, /*nb_negative*/ + 0, /*nb_positive*/ + 0, /*nb_absolute*/ + 0, /*nb_nonzero*/ + 0, /*nb_invert*/ + 0, /*nb_lshift*/ + 0, /*nb_rshift*/ + 0, /*nb_and*/ + 0, /*nb_xor*/ + 0, /*nb_or*/ + #if PY_MAJOR_VERSION < 3 + 0, /*nb_coerce*/ + #endif + 0, /*nb_int*/ + 0, /*nb_long*/ + 0, /*nb_float*/ + #if PY_MAJOR_VERSION < 3 + 0, /*nb_oct*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*nb_hex*/ + #endif + 0, /*nb_inplace_add*/ + 0, /*nb_inplace_subtract*/ + 0, /*nb_inplace_multiply*/ + #if PY_MAJOR_VERSION < 3 + 0, /*nb_inplace_divide*/ + #endif + 0, /*nb_inplace_remainder*/ + 0, /*nb_inplace_power*/ + 0, /*nb_inplace_lshift*/ + 0, /*nb_inplace_rshift*/ + 0, /*nb_inplace_and*/ + 0, /*nb_inplace_xor*/ + 0, /*nb_inplace_or*/ + 0, /*nb_floor_divide*/ + 0, /*nb_true_divide*/ + 0, /*nb_inplace_floor_divide*/ + 0, /*nb_inplace_true_divide*/ + #if (PY_MAJOR_VERSION >= 3) || (Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_INDEX) + 0, /*nb_index*/ + #endif +}; + +static PySequenceMethods __pyx_tp_as_sequence_Parser = { + 0, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ 
+ 0, /*sq_item*/ + 0, /*sq_slice*/ + 0, /*sq_ass_item*/ + 0, /*sq_ass_slice*/ + 0, /*sq_contains*/ + 0, /*sq_inplace_concat*/ + 0, /*sq_inplace_repeat*/ +}; + +static PyMappingMethods __pyx_tp_as_mapping_Parser = { + 0, /*mp_length*/ + 0, /*mp_subscript*/ + 0, /*mp_ass_subscript*/ +}; + +static PyBufferProcs __pyx_tp_as_buffer_Parser = { + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getreadbuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getwritebuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getsegcount*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getcharbuffer*/ + #endif + #if (PY_MAJOR_VERSION >= 3) || (Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_NEWBUFFER) + 0, /*bf_getbuffer*/ + #endif + #if (PY_MAJOR_VERSION >= 3) || (Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_NEWBUFFER) + 0, /*bf_releasebuffer*/ + #endif +}; + +PyTypeObject __pyx_type_6sphinx_6pycode_5pgen2_5parse_Parser = { + PyVarObject_HEAD_INIT(0, 0) + "sphinx.pycode.pgen2.parse.Parser", /*tp_name*/ + sizeof(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + &__pyx_tp_as_number_Parser, /*tp_as_number*/ + &__pyx_tp_as_sequence_Parser, /*tp_as_sequence*/ + &__pyx_tp_as_mapping_Parser, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + &__pyx_tp_as_buffer_Parser, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + __pyx_tp_traverse_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_traverse*/ + __pyx_tp_clear_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_methods*/ + __pyx_members_6sphinx_6pycode_5pgen2_5parse_Parser, 
/*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser___init__, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ +}; + +static struct PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +static void __pyx_init_filenames(void); /*proto*/ + +#if PY_MAJOR_VERSION >= 3 +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "parse", + 0, /* m_doc */ + -1, /* m_size */ + __pyx_methods /* m_methods */, + NULL, /* m_reload */ + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_kp___init__, __pyx_k___init__, sizeof(__pyx_k___init__), 0, 1, 1}, + {&__pyx_kp_setup, __pyx_k_setup, sizeof(__pyx_k_setup), 0, 1, 1}, + {&__pyx_kp_addtoken, __pyx_k_addtoken, sizeof(__pyx_k_addtoken), 0, 1, 1}, + {&__pyx_kp_1, __pyx_k_1, sizeof(__pyx_k_1), 1, 1, 1}, + {&__pyx_kp_Node, __pyx_k_Node, sizeof(__pyx_k_Node), 1, 1, 1}, + {&__pyx_kp_Leaf, __pyx_k_Leaf, sizeof(__pyx_k_Leaf), 1, 1, 1}, + {&__pyx_kp_ParseError, __pyx_k_ParseError, sizeof(__pyx_k_ParseError), 0, 1, 1}, + {&__pyx_kp_Exception, __pyx_k_Exception, sizeof(__pyx_k_Exception), 1, 1, 1}, + {&__pyx_kp_msg, __pyx_k_msg, sizeof(__pyx_k_msg), 1, 1, 1}, + {&__pyx_kp_type, __pyx_k_type, sizeof(__pyx_k_type), 1, 1, 1}, + {&__pyx_kp_value, __pyx_k_value, sizeof(__pyx_k_value), 1, 1, 1}, + {&__pyx_kp_context, __pyx_k_context, sizeof(__pyx_k_context), 1, 1, 1}, + {&__pyx_kp_dfas, __pyx_k_dfas, sizeof(__pyx_k_dfas), 1, 1, 1}, + {&__pyx_kp_labels, __pyx_k_labels, sizeof(__pyx_k_labels), 1, 1, 1}, + {&__pyx_kp_keywords, __pyx_k_keywords, sizeof(__pyx_k_keywords), 1, 1, 1}, + {&__pyx_kp_tokens, __pyx_k_tokens, sizeof(__pyx_k_tokens), 1, 
1, 1}, + {&__pyx_kp_4, __pyx_k_4, sizeof(__pyx_k_4), 1, 1, 1}, + {&__pyx_kp_start, __pyx_k_start, sizeof(__pyx_k_start), 1, 1, 1}, + {&__pyx_kp_add, __pyx_k_add, sizeof(__pyx_k_add), 1, 1, 1}, + {&__pyx_kp_get, __pyx_k_get, sizeof(__pyx_k_get), 1, 1, 1}, + {&__pyx_kp_append, __pyx_k_append, sizeof(__pyx_k_append), 1, 1, 1}, + {&__pyx_kp_pop, __pyx_k_pop, sizeof(__pyx_k_pop), 1, 1, 1}, + {&__pyx_kp_used_names, __pyx_k_used_names, sizeof(__pyx_k_used_names), 1, 1, 1}, + {&__pyx_kp_2, __pyx_k_2, sizeof(__pyx_k_2), 0, 0, 0}, + {&__pyx_kp_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 0, 0}, + {&__pyx_kp_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 0, 0}, + {&__pyx_kp_6, __pyx_k_6, sizeof(__pyx_k_6), 0, 0, 0}, + {&__pyx_kp_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 0, 0}, + {0, 0, 0, 0, 0, 0} +}; +static int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_Exception = __Pyx_GetName(__pyx_b, __pyx_kp_Exception); if (!__pyx_builtin_Exception) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + return 0; + __pyx_L1_error:; + return -1; +} + +static int __Pyx_InitGlobals(void) { + __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + return 0; + __pyx_L1_error:; + return -1; +} + +#if PY_MAJOR_VERSION < 3 +PyMODINIT_FUNC initparse(void); /*proto*/ +PyMODINIT_FUNC initparse(void) +#else +PyMODINIT_FUNC PyInit_parse(void); /*proto*/ +PyMODINIT_FUNC PyInit_parse(void) +#endif +{ + PyObject *__pyx_1 = 0; + PyObject *__pyx_2 = 0; + PyObject *__pyx_3 = 0; + PyObject *__pyx_4 = 0; + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + /*--- Libary function declarations ---*/ + __pyx_init_filenames(); + /*--- Initialize various global constants etc. ---*/ + if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + /*--- Module creation code ---*/ + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("parse", __pyx_methods, 0, 0, PYTHON_API_VERSION); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + #if PY_MAJOR_VERSION < 3 + Py_INCREF(__pyx_m); + #endif + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); + if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + /*--- Builtin init code ---*/ + if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_skip_dispatch = 0; + /*--- Global init code ---*/ + /*--- Function export code ---*/ + /*--- Type init code ---*/ + __pyx_vtabptr_6sphinx_6pycode_5pgen2_5parse_Parser = &__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser; + *(void(**)(void))&__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.classify = (void(*)(void))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_classify; + *(void(**)(void))&__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.shift = (void(*)(void))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_shift; + *(void(**)(void))&__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.push = (void(*)(void))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_push; + *(void(**)(void))&__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.pop = 
(void(*)(void))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_pop; + *(void(**)(void))&__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.convert = (void(*)(void))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_convert; + if (PyType_Ready(&__pyx_type_6sphinx_6pycode_5pgen2_5parse_Parser) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__Pyx_SetVtable(__pyx_type_6sphinx_6pycode_5pgen2_5parse_Parser.tp_dict, __pyx_vtabptr_6sphinx_6pycode_5pgen2_5parse_Parser) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetAttrString(__pyx_m, "Parser", (PyObject *)&__pyx_type_6sphinx_6pycode_5pgen2_5parse_Parser) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_ptype_6sphinx_6pycode_5pgen2_5parse_Parser = &__pyx_type_6sphinx_6pycode_5pgen2_5parse_Parser; + /*--- Type import code ---*/ + /*--- Function import code ---*/ + /*--- Execution code ---*/ + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":15 + * """ + * + * from sphinx.pycode.nodes import Node, Leaf # <<<<<<<<<<<<<< + * + * DEF NAME = 1 + */ + __pyx_1 = PyList_New(2); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_kp_Node); + PyList_SET_ITEM(__pyx_1, 0, __pyx_kp_Node); + Py_INCREF(__pyx_kp_Leaf); + PyList_SET_ITEM(__pyx_1, 1, __pyx_kp_Leaf); + __pyx_2 = __Pyx_Import(__pyx_kp_1, ((PyObject *)__pyx_1)); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((PyObject *)__pyx_1)); __pyx_1 = 0; + __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_kp_Node); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetAttr(__pyx_m, __pyx_kp_Node, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_kp_Leaf); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetAttr(__pyx_m, __pyx_kp_Leaf, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + Py_DECREF(__pyx_2); __pyx_2 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":19 + * DEF NAME = 1 + * + * class ParseError(Exception): # <<<<<<<<<<<<<< + * """Exception to signal the parser is stuck.""" + * + */ + __pyx_2 = PyDict_New(); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_1 = PyTuple_New(1); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_builtin_Exception); + PyTuple_SET_ITEM(__pyx_1, 0, __pyx_builtin_Exception); + if (PyDict_SetItemString(((PyObject *)__pyx_2), "__doc__", __pyx_kp_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = __Pyx_CreateClass(((PyObject *)__pyx_1), ((PyObject *)__pyx_2), __pyx_kp_ParseError, "sphinx.pycode.pgen2.parse"); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((PyObject *)__pyx_1)); __pyx_1 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":22 + * """Exception to signal the parser is stuck.""" + * + * def __init__(self, msg, type, value, context): # <<<<<<<<<<<<<< + * Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % + * (msg, type, value, context)) + */ + __pyx_1 = PyCFunction_New(&__pyx_mdef_6sphinx_6pycode_5pgen2_5parse_10ParseError___init__, 0); if (unlikely(!__pyx_1)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_4 = PyMethod_New(__pyx_1, 0, __pyx_3); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + if (PyObject_SetAttr(__pyx_3, __pyx_kp___init__, __pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + if (PyObject_SetAttr(__pyx_m, __pyx_kp_ParseError, __pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_3); __pyx_3 = 0; + Py_DECREF(((PyObject *)__pyx_2)); __pyx_2 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":149 + * self.rootnode.used_names = self.used_names + * + * cdef convert(self, raw_node): # <<<<<<<<<<<<<< + * type, value, context, children = raw_node + * if children or type in self._grammar_number2symbol: + */ + #if PY_MAJOR_VERSION < 3 + return; + #else + return __pyx_m; + #endif + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_3); + Py_XDECREF(__pyx_4); + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse"); + #if PY_MAJOR_VERSION >= 3 + return NULL; + #endif +} + +static const char *__pyx_filenames[] = { + "parse.pyx", +}; + +/* Runtime support code */ + +static void __pyx_init_filenames(void) { + __pyx_f = __pyx_filenames; +} + +static INLINE void __Pyx_RaiseArgtupleTooLong( + Py_ssize_t num_expected, + Py_ssize_t num_found) +{ + const char* error_message = + #if PY_VERSION_HEX < 0x02050000 + "function takes at most %d positional arguments (%d given)"; + #else + "function takes at most %zd positional arguments (%zd given)"; + #endif + PyErr_Format(PyExc_TypeError, error_message, num_expected, num_found); +} + +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) { + PyObject *__import__ = 0; + PyObject *empty_list = 0; + PyObject *module = 
0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + __import__ = PyObject_GetAttrString(__pyx_b, "__import__"); + if (!__import__) + goto bad; + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + module = PyObject_CallFunction(__import__, "OOOO", + name, global_dict, empty_dict, list); +bad: + Py_XDECREF(empty_list); + Py_XDECREF(__import__); + Py_XDECREF(empty_dict); + return module; +} + +static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { + PyObject *result; + result = PyObject_GetAttr(dict, name); + if (!result) + PyErr_SetObject(PyExc_NameError, name); + return result; +} + +static PyObject *__Pyx_CreateClass( + PyObject *bases, PyObject *dict, PyObject *name, char *modname) +{ + PyObject *py_modname; + PyObject *result = 0; + + #if PY_MAJOR_VERSION < 3 + py_modname = PyString_FromString(modname); + #else + py_modname = PyUnicode_FromString(modname); + #endif + if (!py_modname) + goto bad; + if (PyDict_SetItemString(dict, "__module__", py_modname) < 0) + goto bad; + #if PY_MAJOR_VERSION < 3 + result = PyClass_New(bases, dict, name); + #else + result = PyObject_CallFunctionObjArgs((PyObject *)&PyType_Type, name, bases, dict, NULL); + #endif +bad: + Py_XDECREF(py_modname); + return result; +} + + +static PyObject *__Pyx_UnpackItem(PyObject *iter, Py_ssize_t index) { + PyObject *item; + if (!(item = PyIter_Next(iter))) { + if (!PyErr_Occurred()) { + PyErr_Format(PyExc_ValueError, + #if PY_VERSION_HEX < 0x02050000 + "need more than %d values to unpack", (int)index); + #else + "need more than %zd values to unpack", index); + #endif + } + } + return item; +} + +static int __Pyx_EndUnpack(PyObject *iter) { + PyObject *item; + if ((item = PyIter_Next(iter))) { + Py_DECREF(item); + PyErr_SetString(PyExc_ValueError, 
"too many values to unpack"); + return -1; + } + else if (!PyErr_Occurred()) + return 0; + else + return -1; +} + +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) { + Py_XINCREF(type); + Py_XINCREF(value); + Py_XINCREF(tb); + /* First, check the traceback argument, replacing None with NULL. */ + if (tb == Py_None) { + Py_DECREF(tb); + tb = 0; + } + else if (tb != NULL && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + /* Next, replace a missing value with None */ + if (value == NULL) { + value = Py_None; + Py_INCREF(value); + } + #if PY_VERSION_HEX < 0x02050000 + if (!PyClass_Check(type)) + #else + if (!PyType_Check(type)) + #endif + { + /* Raising an instance. The value should be a dummy. */ + if (value != Py_None) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + /* Normalize to raise , */ + Py_DECREF(value); + value = type; + #if PY_VERSION_HEX < 0x02050000 + if (PyInstance_Check(type)) { + type = (PyObject*) ((PyInstanceObject*)type)->in_class; + Py_INCREF(type); + } + else { + type = 0; + PyErr_SetString(PyExc_TypeError, + "raise: exception must be an old-style class or instance"); + goto raise_error; + } + #else + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + #endif + } + PyErr_Restore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} + + +static void __Pyx_WriteUnraisable(const char *name) { + PyObject *old_exc, *old_val, *old_tb; + PyObject *ctx; + PyErr_Fetch(&old_exc, &old_val, &old_tb); + #if PY_MAJOR_VERSION < 3 + ctx = PyString_FromString(name); + #else + ctx = PyUnicode_FromString(name); + #endif + 
PyErr_Restore(old_exc, old_val, old_tb); + if (!ctx) + ctx = Py_None; + PyErr_WriteUnraisable(ctx); +} + +static int __Pyx_SetVtable(PyObject *dict, void *vtable) { + PyObject *pycobj = 0; + int result; + + pycobj = PyCObject_FromVoidPtr(vtable, 0); + if (!pycobj) + goto bad; + if (PyDict_SetItemString(dict, "__pyx_vtable__", pycobj) < 0) + goto bad; + result = 0; + goto done; + +bad: + result = -1; +done: + Py_XDECREF(pycobj); + return result; +} + +#include "compile.h" +#include "frameobject.h" +#include "traceback.h" + +static void __Pyx_AddTraceback(const char *funcname) { + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + PyObject *py_globals = 0; + PyObject *empty_string = 0; + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(__pyx_filename); + #else + py_srcfile = PyUnicode_FromString(__pyx_filename); + #endif + if (!py_srcfile) goto bad; + if (__pyx_clineno) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_globals = PyModule_GetDict(__pyx_m); + if (!py_globals) goto bad; + #if PY_MAJOR_VERSION < 3 + empty_string = PyString_FromStringAndSize("", 0); + #else + empty_string = PyBytes_FromStringAndSize("", 0); + #endif + if (!empty_string) goto bad; + py_code = PyCode_New( + 0, /*int argcount,*/ + #if PY_MAJOR_VERSION >= 3 + 0, /*int kwonlyargcount,*/ + #endif + 0, /*int nlocals,*/ + 0, /*int stacksize,*/ + 0, /*int flags,*/ + empty_string, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, 
/*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + __pyx_lineno, /*int firstlineno,*/ + empty_string /*PyObject *lnotab*/ + ); + if (!py_code) goto bad; + py_frame = PyFrame_New( + PyThreadState_Get(), /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + py_globals, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + py_frame->f_lineno = __pyx_lineno; + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + Py_XDECREF(empty_string); + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode && (!t->is_identifier)) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else /* Python 3+ has unicode identifiers */ + if (t->is_identifier || (t->is_unicode && t->intern)) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->is_unicode) { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + ++t; + } + return 0; +} + +/* Type Conversion Functions */ + +static INLINE Py_ssize_t __pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject* x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} + +static INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + if (x == Py_True) return 1; + else if (x == Py_False) return 0; + else return PyObject_IsTrue(x); +} + +static INLINE PY_LONG_LONG __pyx_PyInt_AsLongLong(PyObject* x) { + if (PyInt_CheckExact(x)) { + return PyInt_AS_LONG(x); + } + else if (PyLong_CheckExact(x)) { + return PyLong_AsLongLong(x); + } + else { + PY_LONG_LONG val; + 
PyObject* tmp = PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; + val = __pyx_PyInt_AsLongLong(tmp); + Py_DECREF(tmp); + return val; + } +} + +static INLINE unsigned PY_LONG_LONG __pyx_PyInt_AsUnsignedLongLong(PyObject* x) { + if (PyInt_CheckExact(x)) { + long val = PyInt_AS_LONG(x); + if (unlikely(val < 0)) { + PyErr_SetString(PyExc_TypeError, "Negative assignment to unsigned type."); + return (unsigned PY_LONG_LONG)-1; + } + return val; + } + else if (PyLong_CheckExact(x)) { + return PyLong_AsUnsignedLongLong(x); + } + else { + PY_LONG_LONG val; + PyObject* tmp = PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; + val = __pyx_PyInt_AsUnsignedLongLong(tmp); + Py_DECREF(tmp); + return val; + } +} + + +static INLINE unsigned char __pyx_PyInt_unsigned_char(PyObject* x) { + if (sizeof(unsigned char) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + unsigned char val = (unsigned char)long_val; + if (unlikely((val != long_val) || (long_val < 0))) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to unsigned char"); + return (unsigned char)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE unsigned short __pyx_PyInt_unsigned_short(PyObject* x) { + if (sizeof(unsigned short) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + unsigned short val = (unsigned short)long_val; + if (unlikely((val != long_val) || (long_val < 0))) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to unsigned short"); + return (unsigned short)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE char __pyx_PyInt_char(PyObject* x) { + if (sizeof(char) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + char val = (char)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); + return (char)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE 
short __pyx_PyInt_short(PyObject* x) { + if (sizeof(short) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + short val = (short)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to short"); + return (short)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE int __pyx_PyInt_int(PyObject* x) { + if (sizeof(int) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + int val = (int)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); + return (int)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE long __pyx_PyInt_long(PyObject* x) { + if (sizeof(long) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + long val = (long)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); + return (long)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE signed char __pyx_PyInt_signed_char(PyObject* x) { + if (sizeof(signed char) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + signed char val = (signed char)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to signed char"); + return (signed char)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE signed short __pyx_PyInt_signed_short(PyObject* x) { + if (sizeof(signed short) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + signed short val = (signed short)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to signed short"); + return (signed short)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE signed int __pyx_PyInt_signed_int(PyObject* x) { + if 
(sizeof(signed int) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + signed int val = (signed int)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to signed int"); + return (signed int)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE signed long __pyx_PyInt_signed_long(PyObject* x) { + if (sizeof(signed long) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + signed long val = (signed long)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to signed long"); + return (signed long)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE long double __pyx_PyInt_long_double(PyObject* x) { + if (sizeof(long double) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + long double val = (long double)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to long double"); + return (long double)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + diff --git a/sphinx/pycode/pgen2/parse.pyx b/sphinx/pycode/pgen2/parse.pyx index 6a11ee6b..537d7393 100644 --- a/sphinx/pycode/pgen2/parse.pyx +++ b/sphinx/pycode/pgen2/parse.pyx @@ -1,6 +1,8 @@ # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. +# Adapted from parse.py to be compiled with Cython by Georg Brandl. + """Parser engine for the grammar tables generated by pgen. The grammar table must be loaded first. @@ -10,7 +12,7 @@ how this parsing engine works. 
""" -from sphinx.pycode.pytree import Node, Leaf +from sphinx.pycode.nodes import Node, Leaf DEF NAME = 1 diff --git a/sphinx/pycode/pytree.py b/sphinx/pycode/pytree.py deleted file mode 100644 index 063e39ec..00000000 --- a/sphinx/pycode/pytree.py +++ /dev/null @@ -1,293 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Python parse tree definitions. - -This is a very concrete parse tree; we need to keep every token and -even the comments and whitespace between tokens. - -Adapted for read-only nodes from pytree.py in Python's 2to3 tool, and -added a few bits. -""" - -__author__ = "Guido van Rossum " - - -class Base(object): - - """Abstract base class for Node and Leaf. - - This provides some default functionality and boilerplate using the - template pattern. - - A node may be a subnode of at most one parent. - """ - - # Default values for instance variables - type = None # int: token number (< 256) or symbol number (>= 256) - parent = None # Parent node pointer, or None - children = () # Tuple of subnodes - was_changed = False - - def __eq__(self, other): - """Compares two nodes for equality. - - This calls the method _eq(). - """ - if self.__class__ is not other.__class__: - return NotImplemented - return self._eq(other) - - def __ne__(self, other): - """Compares two nodes for inequality. - - This calls the method _eq(). - """ - if self.__class__ is not other.__class__: - return NotImplemented - return not self._eq(other) - - def _eq(self, other): - """Compares two nodes for equality. - - This is called by __eq__ and __ne__. It is only called if the - two nodes have the same type. This must be implemented by the - concrete subclass. Nodes should be considered equal if they - have the same structure, ignoring the prefix string and other - context information. 
- """ - raise NotImplementedError - - def get_lineno(self): - """Returns the line number which generated the invocant node.""" - node = self - while not isinstance(node, Leaf): - if not node.children: - return - node = node.children[0] - return node.lineno - - def get_next_sibling(self): - """Return the node immediately following the invocant in their - parent's children list. If the invocant does not have a next - sibling, return None.""" - if self.parent is None: - return None - - # Can't use index(); we need to test by identity - for i, child in enumerate(self.parent.children): - if child is self: - try: - return self.parent.children[i+1] - except IndexError: - return None - - def get_prev_sibling(self): - """Return the node immediately preceding the invocant in their - parent's children list. If the invocant does not have a previous - sibling, return None.""" - if self.parent is None: - return None - - # Can't use index(); we need to test by identity - for i, child in enumerate(self.parent.children): - if child is self: - if i == 0: - return None - return self.parent.children[i-1] - - def get_prev_leaf(self): - """Return the leaf node that precedes this node in the parse tree.""" - def last_child(node): - if isinstance(node, Leaf): - return node - elif not node.children: - return None - else: - return last_child(node.children[-1]) - if self.parent is None: - return None - prev = self.get_prev_sibling() - if isinstance(prev, Leaf): - return prev - elif prev is not None: - return last_child(prev) - return self.parent.get_prev_leaf() - - def get_suffix(self): - """Return the string immediately following the invocant node. This - is effectively equivalent to node.get_next_sibling().get_prefix()""" - next_sib = self.get_next_sibling() - if next_sib is None: - return "" - return next_sib.get_prefix() - - -class Node(Base): - - """Concrete implementation for interior nodes.""" - - def __init__(self, type, children, context=None): - """Initializer. 
- - Takes a type constant (a symbol number >= 256), a sequence of - child nodes, and an optional context keyword argument. - - As a side effect, the parent pointers of the children are updated. - """ - # assert type >= 256, type - self.type = type - self.children = list(children) - for ch in self.children: - # assert ch.parent is None, repr(ch) - ch.parent = self - # if prefix is not None: - # self.set_prefix(prefix) - - def __repr__(self): - return "%s(%s, %r)" % (self.__class__.__name__, - self.type, self.children) - - def __str__(self): - """This reproduces the input source exactly.""" - return "".join(map(str, self.children)) - - def compact(self): - return ''.join(child.compact() for child in self.children) - - def __getitem__(self, index): - return self.children[index] - - def __iter__(self): - return iter(self.children) - - def __len__(self): - return len(self.children) - - def _eq(self, other): - """Compares two nodes for equality.""" - return (self.type, self.children) == (other.type, other.children) - - def post_order(self): - """Returns a post-order iterator for the tree.""" - for child in self.children: - for node in child.post_order(): - yield node - yield self - - def pre_order(self): - """Returns a pre-order iterator for the tree.""" - yield self - for child in self.children: - for node in child.post_order(): - yield node - - def get_prefix(self): - """Returns the prefix for the node. - - This passes the call on to the first child. - """ - if not self.children: - return "" - return self.children[0].get_prefix() - - -class Leaf(Base): - - """Concrete implementation for leaf nodes.""" - - # Default values for instance variables - prefix = "" # Whitespace and comments preceding this token in the input - lineno = 0 # Line where this token starts in the input - column = 0 # Column where this token tarts in the input - - def __init__(self, type, value, context=None): - """Initializer. 
- - Takes a type constant (a token number < 256), a string value, - and an optional context keyword argument. - """ - # assert 0 <= type < 256, type - if context is not None: - self.prefix, (self.lineno, self.column) = context - self.type = type - self.value = value - # if prefix is not None: - # self.prefix = prefix - - def __repr__(self): - return "%s(%r, %r, %r)" % (self.__class__.__name__, - self.type, self.value, self.prefix) - - def __str__(self): - """This reproduces the input source exactly.""" - return self.prefix + str(self.value) - - def compact(self): - return str(self.value) - - def _eq(self, other): - """Compares two nodes for equality.""" - return (self.type, self.value) == (other.type, other.value) - - def post_order(self): - """Returns a post-order iterator for the tree.""" - yield self - - def pre_order(self): - """Returns a pre-order iterator for the tree.""" - yield self - - def get_prefix(self): - """Returns the prefix for the node.""" - return self.prefix - - -def convert(grammar, raw_node): - """Convert raw node to a Node or Leaf instance.""" - type, value, context, children = raw_node - if children or type in grammar.number2symbol: - # If there's exactly one child, return that child instead of - # creating a new node. 
- if len(children) == 1: - return children[0] - return Node(type, children, context=context) - else: - return Leaf(type, value, context=context) - - -def nice_repr(node, number2name, prefix=False): - def _repr(node): - if isinstance(node, Leaf): - return "%s(%r)" % (number2name[node.type], node.value) - else: - return "%s(%s)" % (number2name[node.type], - ', '.join(map(_repr, node.children))) - def _prepr(node): - if isinstance(node, Leaf): - return "%s(%r, %r)" % (number2name[node.type], node.prefix, node.value) - else: - return "%s(%s)" % (number2name[node.type], - ', '.join(map(_prepr, node.children))) - return (prefix and _prepr or _repr)(node) - - -class NodeVisitor(object): - def __init__(self, number2name, *args): - self.number2name = number2name - self.init(*args) - - def init(self, *args): - pass - - def visit(self, node): - """Visit a node.""" - method = 'visit_' + self.number2name[node.type] - visitor = getattr(self, method, self.generic_visit) - return visitor(node) - - def generic_visit(self, node): - """Called if no explicit visitor function exists for a node.""" - if isinstance(node, Node): - for child in node: - self.visit(child) -- cgit v1.2.1 From 57ad5365be590f8e65ac4a85fef5cb685fa08b55 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Thu, 1 Jan 2009 23:49:32 +0100 Subject: Fix long line. 
--- sphinx/pycode/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index d707db2d..03481233 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -233,8 +233,8 @@ class ModuleAnalyzer(object): indent += 1 elif type == token.DEDENT: indent -= 1 - # if the stacklevel is the same as it was before the last def/class block, - # this dedent closes that block + # if the stacklevel is the same as it was before the last + # def/class block, this dedent closes that block if stack and indent == stack[-1][3]: dtype, fullname, startline, _ = stack.pop() endline = spos[0] -- cgit v1.2.1 From 5d006c40aabdf9aad10f7871a55db4416d80d98a Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Fri, 2 Jan 2009 00:25:29 +0100 Subject: Add lines and start-after/end-before options to literalinclude. --- sphinx/directives/code.py | 63 +++++++++++++++++++++++++++++++++-------------- sphinx/util/__init__.py | 25 ++++++++++++++++++- 2 files changed, 69 insertions(+), 19 deletions(-) diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py index cc74566d..07fceaae 100644 --- a/sphinx/directives/code.py +++ b/sphinx/directives/code.py @@ -3,7 +3,7 @@ sphinx.directives.code ~~~~~~~~~~~~~~~~~~~~~~ - :copyright: 2007-2008 by Georg Brandl. + :copyright: 2007-2009 by Georg Brandl. :license: BSD, see LICENSE for details. 
""" @@ -15,6 +15,7 @@ from docutils import nodes from docutils.parsers.rst import directives from sphinx import addnodes +from sphinx.util import parselinenos # ------ highlight directive -------------------------------------------------------- @@ -68,19 +69,9 @@ def literalinclude_directive(name, arguments, options, content, lineno, lineno - state_machine.input_offset - 1))) fn = path.normpath(path.join(source_dir, rel_fn)) - fromline = toline = None - objectname = options.get('pyobject') - if objectname is not None: - from sphinx.pycode import ModuleAnalyzer - analyzer = ModuleAnalyzer.for_file(fn, '') - tags = analyzer.find_tags() - if objectname not in tags: - return [state.document.reporter.warning( - 'Object named %r not found in include file %r' % - (objectname, arguments[0]), line=lineno)] - else: - fromline = tags[objectname][1] - 1 - toline = tags[objectname][2] - 1 + if 'pyobject' in options and 'lines' in options: + return [state.document.reporter.warning( + 'Cannot use both "pyobject" and "lines" options', line=lineno)] encoding = options.get('encoding', env.config.source_encoding) try: @@ -96,7 +87,43 @@ def literalinclude_directive(name, arguments, options, content, lineno, 'Encoding %r used for reading included file %r seems to ' 'be wrong, try giving an :encoding: option' % (encoding, arguments[0]))] - text = ''.join(lines[fromline:toline]) + + objectname = options.get('pyobject') + if objectname is not None: + from sphinx.pycode import ModuleAnalyzer + analyzer = ModuleAnalyzer.for_file(fn, '') + tags = analyzer.find_tags() + if objectname not in tags: + return [state.document.reporter.warning( + 'Object named %r not found in include file %r' % + (objectname, arguments[0]), line=lineno)] + else: + lines = lines[tags[objectname][1] - 1 : tags[objectname][2]] + + linespec = options.get('lines') + if linespec is not None: + try: + linelist = parselinenos(linespec, len(lines)) + except ValueError, err: + return 
[state.document.reporter.warning(str(err), line=lineno)] + lines = [lines[i] for i in linelist] + + startafter = options.get('start-after') + endbefore = options.get('end-before') + if startafter is not None or endbefore is not None: + use = not startafter + res = [] + for line in lines: + if not use and startafter in line: + use = True + elif use and endbefore in line: + use = False + break + elif use: + res.append(line) + lines = res + + text = ''.join(lines) retnode = nodes.literal_block(text, text, source=fn) retnode.line = 1 if options.get('language', ''): @@ -110,9 +137,9 @@ literalinclude_directive.options = {'linenos': directives.flag, 'language': directives.unchanged_required, 'encoding': directives.encoding, 'pyobject': directives.unchanged_required, - #'lines': directives.unchanged_required, - #'start-after': directives.unchanged_required, - #'end-before': directives.unchanged_required, + 'lines': directives.unchanged_required, + 'start-after': directives.unchanged_required, + 'end-before': directives.unchanged_required, } literalinclude_directive.content = 0 literalinclude_directive.arguments = (1, 0, 0) diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py index e25bc5a1..fbd7d243 100644 --- a/sphinx/util/__init__.py +++ b/sphinx/util/__init__.py @@ -5,7 +5,7 @@ Utility functions for Sphinx. - :copyright: 2007-2008 by Georg Brandl. + :copyright: 2007-2009 by Georg Brandl. :license: BSD, see LICENSE for details. """ @@ -324,3 +324,26 @@ class FilenameUniqDict(dict): def __setstate__(self, state): self._existing = state + + +def parselinenos(spec, total): + """ + Parse a line number spec (such as "1,2,4-6") and return a list of + wanted line numbers. 
+ """ + items = list() + parts = spec.split(',') + for part in parts: + try: + begend = part.strip().split('-') + if len(begend) > 2: + raise ValueError + if len(begend) == 1: + items.append(int(begend[0])-1) + else: + start = (begend[0] == '') and 0 or int(begend[0])-1 + end = (begend[1] == '') and total or int(begend[1]) + items.extend(xrange(start, end)) + except Exception, err: + raise ValueError('invalid line number spec: %r' % spec) + return items -- cgit v1.2.1 From 8429841e5c11fa1e3ce5c56dd7070c1bd21a51ec Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Fri, 2 Jan 2009 00:32:02 +0100 Subject: Document new literalinclude options. --- doc/markup/code.rst | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/doc/markup/code.rst b/doc/markup/code.rst index 2fd51c84..0bf8343b 100644 --- a/doc/markup/code.rst +++ b/doc/markup/code.rst @@ -123,10 +123,25 @@ Includes This would only include the code lines belonging to the ``start()`` method in the ``Timer`` class within the file. + Alternately, you can specify exactly which lines to include by giving a + ``lines`` option:: + + .. literalinclude:: example.py + :lines: 1,3,5-10,20- + + This includes the lines 1, 3, 5 to 10 and lines 20 to the last line. + + Another way to control which part of the file is included is to use the + ``start-after`` and ``end-before`` options (or only one of them). If + ``start-after`` is given as a string option, only lines that follow the first + line containing that string are included. If ``end-before`` is given as a + string option, only lines that precede the first lines containing that string + are included. + .. versionadded:: 0.4.3 The ``encoding`` option. .. versionadded:: 0.6 - The ``pyobject`` option. + The ``pyobject``, ``lines``, ``start-after`` and ``end-before`` options. .. 
rubric:: Footnotes -- cgit v1.2.1 From 531600d1bb35018f59774a7ff89a433a84aeb81e Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sat, 3 Jan 2009 12:29:42 +0100 Subject: Add tests for new literalinclude options, and fix an off-by-one bug. --- sphinx/directives/code.py | 2 +- tests/root/includes.txt | 22 ++++++++++++++++++++++ tests/root/literal.inc | 9 +++++++++ tests/test_build.py | 36 +++++++++++++++++++++++++----------- tests/test_markup.py | 2 ++ 5 files changed, 59 insertions(+), 12 deletions(-) diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py index 07fceaae..80836689 100644 --- a/sphinx/directives/code.py +++ b/sphinx/directives/code.py @@ -98,7 +98,7 @@ def literalinclude_directive(name, arguments, options, content, lineno, 'Object named %r not found in include file %r' % (objectname, arguments[0]), line=lineno)] else: - lines = lines[tags[objectname][1] - 1 : tags[objectname][2]] + lines = lines[tags[objectname][1] - 1 : tags[objectname][2] - 1] linespec = options.get('lines') if linespec is not None: diff --git a/tests/root/includes.txt b/tests/root/includes.txt index d2964d3f..44e33af0 100644 --- a/tests/root/includes.txt +++ b/tests/root/includes.txt @@ -15,6 +15,28 @@ Test file and literal inclusion .. include:: wrongenc.inc :encoding: latin-1 +Literalinclude options +====================== + +.. highlight:: text + +.. cssclass:: inc-pyobj1 +.. literalinclude:: literal.inc + :pyobject: Foo + +.. cssclass:: inc-pyobj2 +.. literalinclude:: literal.inc + :pyobject: Bar.baz + +.. cssclass:: inc-lines +.. literalinclude:: literal.inc + :lines: 6-7,9 + +.. cssclass:: inc-startend +.. 
literalinclude:: literal.inc + :start-after: coding: utf-8 + :end-before: class Foo + Testing downloadable files ========================== diff --git a/tests/root/literal.inc b/tests/root/literal.inc index a4ce93d2..d5b9890c 100644 --- a/tests/root/literal.inc +++ b/tests/root/literal.inc @@ -2,3 +2,12 @@ # -*- coding: utf-8 -*- foo = u"Including Unicode characters: üöä" + +class Foo: + pass + +class Bar: + def baz(): + pass + +def bar(): pass diff --git a/tests/test_build.py b/tests/test_build.py index 91506dad..0ac5380e 100644 --- a/tests/test_build.py +++ b/tests/test_build.py @@ -10,6 +10,7 @@ """ import os +import re import sys import difflib import htmlentitydefs @@ -32,7 +33,7 @@ WARNING: %(root)s/images.txt:9: Image file not readable: foo.png WARNING: %(root)s/images.txt:23: Nonlocal image URI found: http://www.python.org/logo.png WARNING: %(root)s/includes.txt:: (WARNING/2) Encoding 'utf-8' used for reading included \ file u'wrongenc.inc' seems to be wrong, try giving an :encoding: option -WARNING: %(root)s/includes.txt:34: Download file not readable: nonexisting.png +WARNING: %(root)s/includes.txt:56: Download file not readable: nonexisting.png """ HTML_WARNINGS = ENV_WARNINGS + """\ @@ -61,11 +62,19 @@ HTML_XPATH = { ".//pre": u'Max Strauß', ".//a[@href='_downloads/img.png']": '', ".//a[@href='_downloads/img1.png']": '', + ".//div[@class='inc-pyobj1 highlight-text']/div/pre": + r'^class Foo:\n pass\n\s*$', + ".//div[@class='inc-pyobj2 highlight-text']/div/pre": + r'^ def baz\(\):\n pass\n\s*$', + ".//div[@class='inc-lines highlight-text']/div/pre": + r'^class Foo:\n pass\nclass Bar:\n$', + ".//div[@class='inc-startend highlight-text']/div/pre": + ur'^foo = u"Including Unicode characters: üöä"\n$', }, 'autodoc.html': { ".//dt[@id='test_autodoc.Class']": '', - ".//dt[@id='test_autodoc.function']/em": '**kwds', - ".//dd": 'Return spam.', + ".//dt[@id='test_autodoc.function']/em": r'\*\*kwds', + ".//dd": r'Return spam\.', }, 'markup.html': { 
".//meta[@name='author'][@content='Me']": '', @@ -81,7 +90,7 @@ HTML_XPATH = { }, 'contents.html': { ".//meta[@name='hc'][@content='hcval']": '', - ".//td[@class='label']": '[Ref1]', + ".//td[@class='label']": r'\[Ref1\]', ".//li[@class='toctree-l1']/a": 'Testing various markup', ".//li[@class='toctree-l2']/a": 'Admonitions', ".//title": 'Sphinx ', @@ -117,18 +126,23 @@ def test_html(app): parser = NslessParser() parser.entity.update(htmlentitydefs.entitydefs) etree = ET.parse(os.path.join(app.outdir, fname), parser) - for path, text in paths.iteritems(): + for path, check in paths.iteritems(): nodes = list(etree.findall(path)) assert nodes != [] - if not text: + if callable(check): + check(nodes) + elif not check: # only check for node presence continue - for node in nodes: - if node.text and text in node.text: - break else: - assert False, ('%r not found in any node matching ' - 'path %s in %s' % (text, path, fname)) + rex = re.compile(check) + for node in nodes: + if node.text and rex.search(node.text): + break + else: + assert False, ('%r not found in any node matching ' + 'path %s in %s: %r' % (check, path, fname, + [node.text for node in nodes])) @with_app(buildername='latex', warning=latex_warnfile) diff --git a/tests/test_markup.py b/tests/test_markup.py index 86889a2a..29583866 100644 --- a/tests/test_markup.py +++ b/tests/test_markup.py @@ -17,11 +17,13 @@ from docutils import frontend, utils, nodes from docutils.parsers import rst from sphinx import addnodes +from sphinx.util import texescape from sphinx.writers.html import HTMLWriter, SmartyPantsHTMLTranslator from sphinx.writers.latex import LaTeXWriter, LaTeXTranslator def setup_module(): global app, settings, parser + texescape.init() # otherwise done by the latex builder app = TestApp(cleanenv=True) optparser = frontend.OptionParser(components=(rst.Parser, HTMLWriter, LaTeXWriter)) settings = optparser.get_default_values() -- cgit v1.2.1 From 01ca953cbeae8082d8d28c0e3ee2d856eef8a865 Mon Sep 17 
00:00:00 2001 From: Georg Brandl Date: Sat, 3 Jan 2009 13:29:26 +0100 Subject: Add OpenLayers. --- EXAMPLES | 1 + 1 file changed, 1 insertion(+) diff --git a/EXAMPLES b/EXAMPLES index 1fef388e..d3ac299c 100644 --- a/EXAMPLES +++ b/EXAMPLES @@ -26,6 +26,7 @@ included, please mail to `the Google group * NetworkX: http://networkx.lanl.gov/ * NumPy: http://docs.scipy.org/doc/numpy/reference/ * ObjectListView: http://objectlistview.sourceforge.net/python +* OpenLayers: http://docs.openlayers.org/ * Paste: http://pythonpaste.org/script/ * Paver: http://www.blueskyonmars.com/projects/paver/ * Py on Windows: http://timgolden.me.uk/python-on-windows/ -- cgit v1.2.1 From 9b7a7f6c10dd58776179bd405c3eed7b3d81dc29 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sat, 3 Jan 2009 21:18:28 +0100 Subject: Fix bad node value for makevar. --- sphinx/roles.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/roles.py b/sphinx/roles.py index 2bbdab94..d2e9558e 100644 --- a/sphinx/roles.py +++ b/sphinx/roles.py @@ -24,7 +24,7 @@ generic_docroles = { 'guilabel' : nodes.strong, 'kbd' : nodes.literal, 'mailheader' : addnodes.literal_emphasis, - 'makevar' : nodes.Text, + 'makevar' : nodes.strong, 'manpage' : addnodes.literal_emphasis, 'mimetype' : addnodes.literal_emphasis, 'newsgroup' : addnodes.literal_emphasis, -- cgit v1.2.1 From 615da933a90527588029a1164894c6a8f56ecfce Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 14:39:36 +0100 Subject: Fix a few remaining copyrights and add 2009 to license. 
--- LICENSE | 2 +- doc/conf.py | 2 +- sphinx/builders/__init__.py | 2 +- sphinx/environment.py | 4 ---- sphinx/jinja2glue.py | 2 +- sphinx/pycode/__init__.py | 2 +- sphinx/pycode/nodes.py | 2 +- sphinx/util/docstrings.py | 2 +- sphinx/util/jsdump.py | 2 +- 9 files changed, 8 insertions(+), 12 deletions(-) diff --git a/LICENSE b/LICENSE index 8b1ad263..faac79f0 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2007-2008 by the Sphinx team (see AUTHORS file). +Copyright (c) 2007-2009 by the Sphinx team (see AUTHORS file). All rights reserved. License for Sphinx diff --git a/doc/conf.py b/doc/conf.py index 89247821..709d6f75 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -36,7 +36,7 @@ master_doc = 'contents' # General substitutions. project = 'Sphinx' -copyright = '2008, Georg Brandl' +copyright = '2007-2009, Georg Brandl' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index 52b4b0ac..8847b6dc 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -5,7 +5,7 @@ Builder superclass for all builders. - :copyright: 2007-2008 by Georg Brandl, Sebastian Wiesner, Horst Gutmann. + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ diff --git a/sphinx/environment.py b/sphinx/environment.py index 0562ce66..b70bd250 100644 --- a/sphinx/environment.py +++ b/sphinx/environment.py @@ -5,11 +5,7 @@ Global creation environment. -<<<<<<< local - :copyright: 2007-2009 by Georg Brandl. -======= :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. ->>>>>>> other :license: BSD, see LICENSE for details. """ diff --git a/sphinx/jinja2glue.py b/sphinx/jinja2glue.py index 996df70b..0c7c5d72 100644 --- a/sphinx/jinja2glue.py +++ b/sphinx/jinja2glue.py @@ -5,7 +5,7 @@ Glue code for the jinja2 templating engine. 
- :copyright: 2008 by Sebastian Wiesner. + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index 03481233..a61a051a 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -5,7 +5,7 @@ Utilities parsing and analyzing Python code. - :copyright: 2008-2009 by Georg Brandl. + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ diff --git a/sphinx/pycode/nodes.py b/sphinx/pycode/nodes.py index d0fb522b..4d27fc66 100644 --- a/sphinx/pycode/nodes.py +++ b/sphinx/pycode/nodes.py @@ -5,7 +5,7 @@ Parse tree node implementations. - :copyright: 2009 by Georg Brandl. + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ diff --git a/sphinx/util/docstrings.py b/sphinx/util/docstrings.py index d7e20e4c..1b0a599a 100644 --- a/sphinx/util/docstrings.py +++ b/sphinx/util/docstrings.py @@ -5,7 +5,7 @@ Utilities for docstring processing. - :copyright: 2008 by Georg Brandl. + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ diff --git a/sphinx/util/jsdump.py b/sphinx/util/jsdump.py index 2eb4619e..8c760b68 100644 --- a/sphinx/util/jsdump.py +++ b/sphinx/util/jsdump.py @@ -6,7 +6,7 @@ This module implements a simple JavaScript serializer. Uses the basestring encode function from simplejson by Bob Ippolito. - :copyright: Copyright 2008 by the Sphinx team, see AUTHORS. + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ -- cgit v1.2.1 From 939e3f37d96e29c260244476caf60ee33c2761ed Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 19:16:52 +0100 Subject: Cache tags and attribute docs in the analyzer. 
--- sphinx/pycode/__init__.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index a61a051a..0141ede4 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -176,6 +176,10 @@ class ModuleAnalyzer(object): self.tokens = None # will be filled by parse() self.parsetree = None + # will be filled by find_attr_docs() + self.attr_docs = None + # will be filled by find_tags() + self.tags = None def tokenize(self): """Generate tokens from the source.""" @@ -193,13 +197,18 @@ class ModuleAnalyzer(object): def find_attr_docs(self, scope=''): """Find class and module-level attributes and their documentation.""" + if self.attr_docs is not None: + return self.attr_docs self.parse() attr_visitor = AttrDocVisitor(number2name, scope) attr_visitor.visit(self.parsetree) + self.attr_docs = attr_visitor.collected return attr_visitor.collected def find_tags(self): """Find class, function and method definitions and their location.""" + if self.tags is not None: + return self.tags self.tokenize() result = {} namespace = [] @@ -246,6 +255,7 @@ class ModuleAnalyzer(object): if defline: defline = False expect_indent = True + self.tags = result return result -- cgit v1.2.1 From 686c154eea969fe7eecc4aec27d922d301be46fc Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 19:35:03 +0100 Subject: Support all types of string literals in literals.py. --- sphinx/pycode/pgen2/literals.py | 54 +++++++++++++++++++++++++++++++++-------- 1 file changed, 44 insertions(+), 10 deletions(-) diff --git a/sphinx/pycode/pgen2/literals.py b/sphinx/pycode/pgen2/literals.py index 0b3948a5..78667df0 100644 --- a/sphinx/pycode/pgen2/literals.py +++ b/sphinx/pycode/pgen2/literals.py @@ -1,6 +1,8 @@ # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. +# Extended to handle raw and unicode literals by Georg Brandl. 
+ """Safely evaluate Python string literals without using eval().""" import re @@ -16,28 +18,60 @@ simple_escapes = {"a": "\a", '"': '"', "\\": "\\"} +def convert_hex(x, n): + if len(x) < n+1: + raise ValueError("invalid hex string escape ('\\%s')" % x) + try: + return int(x[1:], 16) + except ValueError: + raise ValueError("invalid hex string escape ('\\%s')" % x) + def escape(m): all, tail = m.group(0, 1) assert all.startswith("\\") esc = simple_escapes.get(tail) if esc is not None: return esc - if tail.startswith("x"): - hexes = tail[1:] - if len(hexes) < 2: - raise ValueError("invalid hex string escape ('\\%s')" % tail) + elif tail.startswith("x"): + return chr(convert_hex(tail, 2)) + elif tail.startswith('u'): + return unichr(convert_hex(tail, 4)) + elif tail.startswith('U'): + return unichr(convert_hex(tail, 8)) + elif tail.startswith('N'): + import unicodedata try: - i = int(hexes, 16) - except ValueError: - raise ValueError("invalid hex string escape ('\\%s')" % tail) + return unicodedata.lookup(tail[1:-1]) + except KeyError: + raise ValueError("undefined character name %r" % tail[1:-1]) else: try: - i = int(tail, 8) + return chr(int(tail, 8)) except ValueError: raise ValueError("invalid octal string escape ('\\%s')" % tail) - return chr(i) + +def escaperaw(m): + all, tail = m.group(0, 1) + if tail.startswith('u'): + return unichr(convert_hex(tail, 4)) + elif tail.startswith('U'): + return unichr(convert_hex(tail, 8)) + else: + return all + +escape_re = re.compile(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})") +uni_escape_re = re.compile(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3}|" + r"u[0-9a-fA-F]{0,4}|U[0-9a-fA-F]{0,8}|N\{.+?\})") def evalString(s): + regex = escape_re + repl = escape + if s.startswith('u') or s.startswith('U'): + regex = uni_escape_re + s = s[1:] + if s.startswith('r') or s.startswith('R'): + repl = escaperaw + s = s[1:] assert s.startswith("'") or s.startswith('"'), repr(s[:1]) q = s[0] if s[:3] == q*3: @@ -45,7 +79,7 @@ def 
evalString(s): assert s.endswith(q), repr(s[-len(q):]) assert len(s) >= 2*len(q) s = s[len(q):-len(q)] - return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s) + return regex.sub(repl, s) def test(): for i in range(256): -- cgit v1.2.1 From 93c8edb25dbab5c219258662ab750aa346aeb0e9 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 20:02:24 +0100 Subject: Add support for decoding strings and comments to the analyzer. --- sphinx/pycode/__init__.py | 86 +++++++++++++++++++++++++++-------------- sphinx/pycode/pgen2/literals.py | 4 +- 2 files changed, 59 insertions(+), 31 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index 0141ede4..17dc6afb 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -9,6 +9,7 @@ :license: BSD, see LICENSE for details. """ +import re import sys from os import path from cStringIO import StringIO @@ -35,6 +36,9 @@ number2name = pygrammar.number2symbol.copy() number2name.update(token.tok_name) +# a regex to recognize coding cookies +_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)') + _eq = nodes.Leaf(token.EQUAL, '=') @@ -46,8 +50,9 @@ class AttrDocVisitor(nodes.NodeVisitor): The docstrings can either be in special '#:' comments before the assignment or in a docstring after it. 
""" - def init(self, scope): + def init(self, scope, encoding): self.scope = scope + self.encoding = encoding self.namespace = [] self.collected = {} @@ -71,6 +76,7 @@ class AttrDocVisitor(nodes.NodeVisitor): if not pnode or pnode.type not in (token.INDENT, token.DEDENT): break prefix = pnode.get_prefix() + prefix = prefix.decode(self.encoding) docstring = prepare_commentdoc(prefix) if docstring: self.add_docstring(node, docstring) @@ -86,7 +92,8 @@ class AttrDocVisitor(nodes.NodeVisitor): if prev.type == sym.simple_stmt and \ prev[0].type == sym.expr_stmt and _eq in prev[0].children: # need to "eval" the string because it's returned in its original form - docstring = prepare_docstring(literals.evalString(node[0].value)) + docstring = literals.evalString(node[0].value, self.encoding) + docstring = prepare_docstring(docstring) self.add_docstring(prev[0], docstring) def visit_funcdef(self, node): @@ -136,38 +143,48 @@ class ModuleAnalyzer(object): @classmethod def for_module(cls, modname): if ('module', modname) in cls.cache: - return cls.cache['module', modname] - if modname not in sys.modules: - try: - __import__(modname) - except ImportError, err: - raise PycodeError('error importing %r' % modname, err) - mod = sys.modules[modname] - if hasattr(mod, '__loader__'): - try: - source = mod.__loader__.get_source(modname) - except Exception, err: - raise PycodeError('error getting source for %r' % modname, err) - obj = cls.for_string(source, modname) - cls.cache['module', modname] = obj - return obj - filename = getattr(mod, '__file__', None) - if filename is None: - raise PycodeError('no source found for module %r' % modname) - filename = path.normpath(filename) - lfilename = filename.lower() - if lfilename.endswith('.pyo') or lfilename.endswith('.pyc'): - filename = filename[:-1] - elif not lfilename.endswith('.py'): - raise PycodeError('source is not a .py file: %r' % filename) - if not path.isfile(filename): - raise PycodeError('source file is not present: %r' % 
filename) - obj = cls.for_file(filename, modname) + entry = cls.cache['module', modname] + if isinstance(entry, PycodeError): + raise entry + return entry + + try: + if modname not in sys.modules: + try: + __import__(modname) + except ImportError, err: + raise PycodeError('error importing %r' % modname, err) + mod = sys.modules[modname] + if hasattr(mod, '__loader__'): + try: + source = mod.__loader__.get_source(modname) + except Exception, err: + raise PycodeError('error getting source for %r' % modname, err) + obj = cls.for_string(source, modname) + cls.cache['module', modname] = obj + return obj + filename = getattr(mod, '__file__', None) + if filename is None: + raise PycodeError('no source found for module %r' % modname) + filename = path.normpath(filename) + lfilename = filename.lower() + if lfilename.endswith('.pyo') or lfilename.endswith('.pyc'): + filename = filename[:-1] + elif not lfilename.endswith('.py'): + raise PycodeError('source is not a .py file: %r' % filename) + if not path.isfile(filename): + raise PycodeError('source file is not present: %r' % filename) + obj = cls.for_file(filename, modname) + except PycodeError, err: + cls.cache['module', modname] = err + raise cls.cache['module', modname] = obj return obj def __init__(self, source, modname, srcname): + # name of the module self.modname = modname + # name of the source file self.srcname = srcname # file-like object yielding source lines self.source = source @@ -194,13 +211,22 @@ class ModuleAnalyzer(object): return self.tokenize() self.parsetree = pydriver.parse_tokens(self.tokens) + # find the source code encoding + encoding = sys.getdefaultencoding() + comments = self.parsetree.get_prefix() + for line in comments.splitlines()[:2]: + match = _coding_re.search(line) + if match is not None: + encoding = match.group(1) + break + self.encoding = encoding def find_attr_docs(self, scope=''): """Find class and module-level attributes and their documentation.""" if self.attr_docs is not None: 
return self.attr_docs self.parse() - attr_visitor = AttrDocVisitor(number2name, scope) + attr_visitor = AttrDocVisitor(number2name, scope, self.encoding) attr_visitor.visit(self.parsetree) self.attr_docs = attr_visitor.collected return attr_visitor.collected diff --git a/sphinx/pycode/pgen2/literals.py b/sphinx/pycode/pgen2/literals.py index 78667df0..31900291 100644 --- a/sphinx/pycode/pgen2/literals.py +++ b/sphinx/pycode/pgen2/literals.py @@ -63,9 +63,11 @@ escape_re = re.compile(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})") uni_escape_re = re.compile(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3}|" r"u[0-9a-fA-F]{0,4}|U[0-9a-fA-F]{0,8}|N\{.+?\})") -def evalString(s): +def evalString(s, encoding=None): regex = escape_re repl = escape + if encoding: + s = s.decode(encoding) if s.startswith('u') or s.startswith('U'): regex = uni_escape_re s = s[1:] -- cgit v1.2.1 From 7570f70ab99020eeacc640aa69ff03da58341901 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 20:27:37 +0100 Subject: Add new tests for the attribute feature and fix existing tests. 
--- sphinx/ext/autodoc.py | 108 +++++++++++++++++++----------------------------- sphinx/util/__init__.py | 14 +++++++ tests/test_autodoc.py | 65 ++++++++++++++++++++--------- 3 files changed, 102 insertions(+), 85 deletions(-) diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index c8faa0da..60ebd5b5 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -21,7 +21,7 @@ from docutils import nodes from docutils.parsers.rst import directives from docutils.statemachine import ViewList -from sphinx.util import rpartition, nested_parse_with_titles +from sphinx.util import rpartition, nested_parse_with_titles, force_decode from sphinx.pycode import ModuleAnalyzer, PycodeError from sphinx.util.docstrings import prepare_docstring @@ -31,8 +31,6 @@ try: except NameError: base_exception = Exception -_charset_re = re.compile(r'coding[:=]\s*([-\w.]+)') -_module_charsets = {} py_ext_sig_re = re.compile( r'''^ ([\w.]+::)? # explicit module name @@ -173,27 +171,6 @@ def isdescriptor(x): return False -def get_module_charset(module): - """Return the charset of the given module (cached in _module_charsets).""" - if module in _module_charsets: - return _module_charsets[module] - try: - filename = __import__(module, None, None, ['foo']).__file__ - except (ImportError, AttributeError): - return None - if filename[-4:].lower() in ('.pyc', '.pyo'): - filename = filename[:-1] - for line in [linecache.getline(filename, x) for x in (1, 2)]: - match = _charset_re.search(line) - if match is not None: - charset = match.group(1) - break - else: - charset = 'ascii' - _module_charsets[module] = charset - return charset - - class RstGenerator(object): def __init__(self, options, document, lineno): self.options = options @@ -207,15 +184,19 @@ class RstGenerator(object): def warn(self, msg): self.warnings.append(self.reporter.warning(msg, line=self.lineno)) - def get_doc(self, what, name, obj): - """Format and yield lines of the docstring(s) for the object.""" + def get_doc(self, 
what, obj, encoding=None): + """Decode and return lines of the docstring(s) for the object.""" docstrings = [] + + # add the regular docstring if present if getattr(obj, '__doc__', None): docstrings.append(obj.__doc__) - # skip some lines in module docstrings if configured + + # skip some lines in module docstrings if configured (deprecated!) if what == 'module' and self.env.config.automodule_skip_lines and docstrings: docstrings[0] = '\n'.join(docstrings[0].splitlines() [self.env.config.automodule_skip_lines:]) + # for classes, what the "docstring" is can be controlled via an option if what in ('class', 'exception'): content = self.env.config.autoclass_content @@ -231,24 +212,12 @@ class RstGenerator(object): docstrings.append(initdocstring) # the default is only the class docstring - # decode the docstrings using the module's source encoding - charset = None - module = getattr(obj, '__module__', None) - if module is not None: - charset = get_module_charset(module) + # make sure we get Unicode docstrings + return [force_decode(docstring, encoding) for docstring in docstrings] - for docstring in docstrings: - if isinstance(docstring, str): - if charset: - docstring = docstring.decode(charset) - else: - try: - # try decoding with utf-8, should only work for real UTF-8 - docstring = docstring.decode('utf-8') - except UnicodeError: - # last resort -- can't fail - docstring = docstring.decode('latin1') - docstringlines = prepare_docstring(docstring) + def process_doc(self, docstrings, what, name, obj): + """Let the user process the docstrings.""" + for docstringlines in docstrings: if self.env.app: # let extensions preprocess docstrings self.env.app.emit('autodoc-process-docstring', @@ -397,24 +366,25 @@ class RstGenerator(object): # now, import the module and get object to document try: - todoc = module = __import__(mod, None, None, ['foo']) - if hasattr(module, '__file__') and module.__file__: - modfile = module.__file__ - if modfile[-4:].lower() in ('.pyc', '.pyo'): 
- modfile = modfile[:-1] - self.filename_set.add(modfile) - else: - modfile = None # e.g. for builtin and C modules + __import__(mod) + todoc = module = sys.modules[mod] for part in objpath: todoc = getattr(todoc, part) - # also get a source code analyzer for attribute docs - analyzer = ModuleAnalyzer.for_module(mod) except (ImportError, AttributeError, PycodeError), err: self.warn('autodoc can\'t import/find %s %r, it reported error: "%s", ' 'please check your spelling and sys.path' % (what, str(fullname), err)) return + # try to also get a source code analyzer for attribute docs + try: + analyzer = ModuleAnalyzer.for_module(mod) + except PycodeError, err: + # no source file -- e.g. for builtin and C modules + analyzer = None + else: + self.filename_set.add(analyzer.srcname) + # check __module__ of object if wanted (for members not given explicitly) if check_module: if hasattr(todoc, '__module__'): @@ -473,23 +443,29 @@ class RstGenerator(object): if what != 'module': indent += u' ' - if modfile: - sourcename = '%s:docstring of %s' % (modfile, fullname) + # add content from attribute documentation + if analyzer: + sourcename = '%s:docstring of %s' % (analyzer.srcname, fullname) + attr_docs = analyzer.find_attr_docs() + if what in ('data', 'attribute'): + key = ('.'.join(objpath[:-1]), objpath[-1]) + if key in attr_docs: + no_docstring = True + docstrings = [attr_docs[key]] + for i, line in enumerate(self.process_doc(docstrings, what, + fullname, todoc)): + self.result.append(indent + line, sourcename, i) else: sourcename = 'docstring of %s' % fullname - - # add content from attribute documentation - attr_docs = analyzer.find_attr_docs() - if what in ('data', 'attribute'): - key = ('.'.join(objpath[:-1]), objpath[-1]) - if key in attr_docs: - no_docstring = True - for i, line in enumerate(attr_docs[key]): - self.result.append(indent + line, sourcename, i) + attr_docs = {} # add content from docstrings if not no_docstring: - for i, line in 
enumerate(self.get_doc(what, fullname, todoc)): + encoding = analyzer and analyzer.encoding + docstrings = map(prepare_docstring, + self.get_doc(what, todoc, encoding)) + for i, line in enumerate(self.process_doc(docstrings, what, + fullname, todoc)): self.result.append(indent + line, sourcename, i) # add source content, if present diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py index b851e484..4aacf67b 100644 --- a/sphinx/util/__init__.py +++ b/sphinx/util/__init__.py @@ -347,3 +347,17 @@ def parselinenos(spec, total): except Exception, err: raise ValueError('invalid line number spec: %r' % spec) return items + + +def force_decode(string, encoding): + if isinstance(string, str): + if encoding: + string = string.decode(encoding) + else: + try: + # try decoding with utf-8, should only work for real UTF-8 + string = string.decode('utf-8') + except UnicodeError: + # last resort -- can't fail + string = string.decode('latin1') + return string diff --git a/tests/test_autodoc.py b/tests/test_autodoc.py index 837b5695..dd9934f5 100644 --- a/tests/test_autodoc.py +++ b/tests/test_autodoc.py @@ -15,6 +15,7 @@ from util import * from docutils.statemachine import ViewList from sphinx.ext.autodoc import RstGenerator, cut_lines, between +from sphinx.util.docstrings import prepare_docstring def setup_module(): @@ -173,13 +174,14 @@ def test_format_signature(): def test_get_doc(): def getdocl(*args): - # strip the empty line at the end - return list(gen.get_doc(*args))[:-1] + ds = map(prepare_docstring, gen.get_doc(*args)) + # for testing purposes, concat them and strip the empty line at the end + return sum(ds, [])[:-1] # objects without docstring def f(): pass - assert getdocl('function', 'f', f) == [] + assert getdocl('function', f) == [] # standard function, diverse docstring styles... 
def f(): @@ -189,7 +191,7 @@ def test_get_doc(): Docstring """ for func in (f, g): - assert getdocl('function', 'f', func) == ['Docstring'] + assert getdocl('function', func) == ['Docstring'] # first line vs. other lines indentation def f(): @@ -198,17 +200,17 @@ def test_get_doc(): Other lines """ - assert getdocl('function', 'f', f) == ['First line', '', 'Other', ' lines'] + assert getdocl('function', f) == ['First line', '', 'Other', ' lines'] # charset guessing (this module is encoded in utf-8) def f(): """Döcstring""" - assert getdocl('function', 'f', f) == [u'Döcstring'] + assert getdocl('function', f) == [u'Döcstring'] # already-unicode docstrings must be taken literally def f(): u"""Döcstring""" - assert getdocl('function', 'f', f) == [u'Döcstring'] + assert getdocl('function', f) == [u'Döcstring'] # class docstring: depends on config value which one is taken class C: @@ -216,11 +218,11 @@ def test_get_doc(): def __init__(self): """Init docstring""" gen.env.config.autoclass_content = 'class' - assert getdocl('class', 'C', C) == ['Class docstring'] + assert getdocl('class', C) == ['Class docstring'] gen.env.config.autoclass_content = 'init' - assert getdocl('class', 'C', C) == ['Init docstring'] + assert getdocl('class', C) == ['Init docstring'] gen.env.config.autoclass_content = 'both' - assert getdocl('class', 'C', C) == ['Class docstring', '', 'Init docstring'] + assert getdocl('class', C) == ['Class docstring', '', 'Init docstring'] class D: """Class docstring""" @@ -232,18 +234,22 @@ def test_get_doc(): """ # Indentation is normalized for 'both' - assert getdocl('class', 'D', D) == ['Class docstring', '', 'Init docstring', - '', 'Other', ' lines'] + assert getdocl('class', D) == ['Class docstring', '', 'Init docstring', + '', 'Other', ' lines'] + + +def test_docstring_processing(): + def process(what, name, obj): + return list(gen.process_doc(map(prepare_docstring, gen.get_doc(what, obj)), + what, name, obj)) class E: def __init__(self): """Init 
docstring""" # docstring processing by event handler - assert getdocl('class', 'bar', E) == ['Init docstring', '', '42'] + assert process('class', 'bar', E) == ['Init docstring', '', '42', ''] - -def test_docstring_processing_functions(): lid = app.connect('autodoc-process-docstring', cut_lines(1, 1, ['function'])) def f(): """ @@ -251,7 +257,7 @@ def test_docstring_processing_functions(): second line third line """ - assert list(gen.get_doc('function', 'f', f)) == ['second line', ''] + assert process('function', 'f', f) == ['second line', ''] app.disconnect(lid) lid = app.connect('autodoc-process-docstring', between('---', ['function'])) @@ -263,7 +269,7 @@ def test_docstring_processing_functions(): --- third line """ - assert list(gen.get_doc('function', 'f', f)) == ['second line', ''] + assert process('function', 'f', f) == ['second line', ''] app.disconnect(lid) @@ -289,7 +295,7 @@ def test_generate(): def assert_result_contains(item, *args): gen.generate(*args) - print '\n'.join(gen.result) + #print '\n'.join(gen.result) assert len(gen.warnings) == 0, gen.warnings assert item in gen.result del gen.result[:] @@ -325,7 +331,10 @@ def test_generate(): assert_processes(should, 'class', 'Class', [], None) should.extend([('method', 'test_autodoc.Class.meth')]) assert_processes(should, 'class', 'Class', ['meth'], None) - should.extend([('attribute', 'test_autodoc.Class.prop')]) + should.extend([('attribute', 'test_autodoc.Class.prop'), + ('attribute', 'test_autodoc.Class.attr'), + ('attribute', 'test_autodoc.Class.docattr'), + ('attribute', 'test_autodoc.Class.udocattr')]) assert_processes(should, 'class', 'Class', ['__all__'], None) options.undoc_members = True should.append(('method', 'test_autodoc.Class.undocmeth')) @@ -369,6 +378,11 @@ def test_generate(): ('method', 'test_autodoc.Outer.Inner.meth')], 'class', 'Outer', ['__all__'], None) + # test generation for C modules (which have no source file) + gen.env.currmodule = 'time' + assert_processes([('function', 
'time.asctime')], 'function', 'asctime', [], None) + assert_processes([('function', 'time.asctime')], 'function', 'asctime', [], None) + # --- generate fodder ------------ @@ -398,10 +412,22 @@ class Class(Base): """Method that should be skipped.""" pass + # should not be documented + skipattr = 'foo' + + #: should be documented -- süß + attr = 'bar' + @property def prop(self): """Property.""" + docattr = 'baz' + """should likewise be documented -- süß""" + + udocattr = 'quux' + u"""should be documented as well - süß""" + class CustomDict(dict): """Docstring.""" @@ -421,4 +447,5 @@ class Outer(object): def meth(self): """Foo""" + # should be documented as an alias factory = dict -- cgit v1.2.1 From 34c771ec24fc8ac1e8aa6bf82471962692eef20b Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 20:43:15 +0100 Subject: Small API change. --- sphinx/ext/autodoc.py | 8 ++++---- tests/test_autodoc.py | 6 ++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index 60ebd5b5..35d96bda 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -212,8 +212,9 @@ class RstGenerator(object): docstrings.append(initdocstring) # the default is only the class docstring - # make sure we get Unicode docstrings - return [force_decode(docstring, encoding) for docstring in docstrings] + # make sure we have Unicode docstrings, then sanitize and split into lines + return [prepare_docstring(force_decode(docstring, encoding)) + for docstring in docstrings] def process_doc(self, docstrings, what, name, obj): """Let the user process the docstrings.""" @@ -462,8 +463,7 @@ class RstGenerator(object): # add content from docstrings if not no_docstring: encoding = analyzer and analyzer.encoding - docstrings = map(prepare_docstring, - self.get_doc(what, todoc, encoding)) + docstrings = self.get_doc(what, todoc, encoding) for i, line in enumerate(self.process_doc(docstrings, what, fullname, todoc)): 
self.result.append(indent + line, sourcename, i) diff --git a/tests/test_autodoc.py b/tests/test_autodoc.py index dd9934f5..67220180 100644 --- a/tests/test_autodoc.py +++ b/tests/test_autodoc.py @@ -15,7 +15,6 @@ from util import * from docutils.statemachine import ViewList from sphinx.ext.autodoc import RstGenerator, cut_lines, between -from sphinx.util.docstrings import prepare_docstring def setup_module(): @@ -174,7 +173,7 @@ def test_format_signature(): def test_get_doc(): def getdocl(*args): - ds = map(prepare_docstring, gen.get_doc(*args)) + ds = gen.get_doc(*args) # for testing purposes, concat them and strip the empty line at the end return sum(ds, [])[:-1] @@ -240,8 +239,7 @@ def test_get_doc(): def test_docstring_processing(): def process(what, name, obj): - return list(gen.process_doc(map(prepare_docstring, gen.get_doc(what, obj)), - what, name, obj)) + return list(gen.process_doc(gen.get_doc(what, obj), what, name, obj)) class E: def __init__(self): -- cgit v1.2.1 From 78a34be10800b6ce834212c83a7dfa000778e351 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 20:52:35 +0100 Subject: A few more fixes in autodoc. --- sphinx/ext/autodoc.py | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index 35d96bda..be3e3f0f 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -371,7 +371,7 @@ class RstGenerator(object): todoc = module = sys.modules[mod] for part in objpath: todoc = getattr(todoc, part) - except (ImportError, AttributeError, PycodeError), err: + except (ImportError, AttributeError), err: self.warn('autodoc can\'t import/find %s %r, it reported error: "%s", ' 'please check your spelling and sys.path' % (what, str(fullname), err)) @@ -392,6 +392,11 @@ class RstGenerator(object): if todoc.__module__ != mod: return + # make sure that the result starts with an empty line. 
This is + # necessary for some situations where another directive preprocesses + # reST and no starting newline is present + self.result.append(u'', '') + # format the object's signature, if any try: sig = self.format_signature(what, fullname, todoc, args, retann) @@ -400,11 +405,6 @@ class RstGenerator(object): (fullname, err)) sig = '' - # make sure that the result starts with an empty line. This is - # necessary for some situations where another directive preprocesses - # reST and no starting newline is present - self.result.append(u'', '') - # now, create the directive header if what == 'method': directive = get_method_type(todoc) @@ -430,13 +430,14 @@ class RstGenerator(object): self.result.append(indent + u' :noindex:', '') self.result.append(u'', '') + # add inheritance info, if wanted if self.options.show_inheritance and what in ('class', 'exception'): if len(todoc.__bases__): bases = [b.__module__ == '__builtin__' and u':class:`%s`' % b.__name__ or u':class:`%s.%s`' % (b.__module__, b.__name__) for b in todoc.__bases__] - self.result.append(indent + u' Bases: %s' % ', '.join(bases), + self.result.append(indent + _(u' Bases: %s') % ', '.join(bases), '') self.result.append(u'', '') @@ -468,7 +469,7 @@ class RstGenerator(object): fullname, todoc)): self.result.append(indent + line, sourcename, i) - # add source content, if present + # add additional content (e.g. 
from document), if present if add_content: for line, src in zip(add_content.data, add_content.items): self.result.append(indent + line, src[0], src[1]) @@ -483,10 +484,10 @@ class RstGenerator(object): if objpath: self.env.autodoc_current_class = objpath[0] - # add members, if possible - all_members = members == ['__all__'] + # look for members to include + want_all_members = members == ['__all__'] members_check_module = False - if all_members: + if want_all_members: # unqualified :members: given if what == 'module': if hasattr(todoc, '__all__'): @@ -524,7 +525,7 @@ class RstGenerator(object): # if content is not None, no extra content from docstrings will be added content = None - if all_members and membername.startswith('_'): + if want_all_members and membername.startswith('_'): # ignore members whose name starts with _ by default skip = True else: @@ -547,6 +548,7 @@ class RstGenerator(object): if skip: continue + # determine member type if what == 'module': if isinstance(member, (FunctionType, BuiltinFunctionType)): memberwhat = 'function' @@ -581,6 +583,7 @@ class RstGenerator(object): memberwhat = 'attribute' else: continue + # give explicitly separated module name, so that members of inner classes # can be documented full_membername = mod + '::' + '.'.join(objpath + [membername]) -- cgit v1.2.1 From 15a06e44095f77c1fdbe3485f837480c2309988b Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 21:07:25 +0100 Subject: Add changelog entries for news in pycode branch. --- CHANGES | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGES b/CHANGES index d43a044c..f5b53f9f 100644 --- a/CHANGES +++ b/CHANGES @@ -29,6 +29,9 @@ New features added - #4: Added a ``:download:`` role that marks a non-document file for inclusion into the HTML output and links to it. + - The ``literalinclude`` directive now supports several more + options, to include only parts of a file. 
+ - The ``toctree`` directive now supports a ``:hidden:`` flag, which will prevent links from being generated in place of the directive -- this allows you to define your document @@ -68,6 +71,8 @@ New features added * Extensions and API: + - Autodoc now handles documented attributes. + - Autodoc now handles inner classes and their methods. - There is now a ``Sphinx.add_lexer()`` method to be able to use -- cgit v1.2.1 From afaacbd66760f8ce11ec8d7ea2cdd3d7b22fe3e8 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 21:20:51 +0100 Subject: Close #33: add manpages for command-line tools. --- doc/sphinx-build.1 | 96 +++++++++++++++++++++++++++++++++++++++++++++++++ doc/sphinx-quickstart.1 | 17 +++++++++ 2 files changed, 113 insertions(+) create mode 100644 doc/sphinx-build.1 create mode 100644 doc/sphinx-quickstart.1 diff --git a/doc/sphinx-build.1 b/doc/sphinx-build.1 new file mode 100644 index 00000000..16f8bd34 --- /dev/null +++ b/doc/sphinx-build.1 @@ -0,0 +1,96 @@ +.TH sphinx-build 1 "Jan 2009" "Sphinx 0.6" "User Commands" +.SH NAME +sphinx-build \- Sphinx documentation generator tool +.SH SYNOPSIS +.B sphinx-build +[\fIoptions\fR] <\fIsourcedir\fR> <\fIoutdir\fR> [\fIfilenames\fR...] +.SH DESCRIPTION +sphinx-build generates documentation from the files in and places it +in the . + +sphinx-build looks for /conf.py for the configuration settings. +.B sphinx-quickstart(1) +may be used to generate template files, including conf.py. + +sphinx-build can create documentation in different formats. A format is +selected by specifying the builder name on the command line; it defaults to +HTML. Builders can also perform other tasks related to documentation +processing. + +By default, everything that is outdated is built. Output only for selected +files can be built by specifying individual filenames. + +List of available builders: +.TP +\fBhtml\fR +HTML files generation. This is default builder. +.TP +\fBhtmlhelp\fR +Generates files for CHM generation. 
+.TP +\fBqthelp\fR +Generates files for Qt help collection generation. +.TP +\fBlatex\fR +Generates a LaTeX version of the documentation. +.TP +\fBtext\fR +Generates a plain-text version of the documentation. +.TP +\fBchanges\fR +Generates HTML files listing changed/added/deprecated items for the +current version. +.TP +\fBlinkcheck\fR +Checks the integrity of all external links in the documentation. +.TP +\fBpickle / json\fR +Generates serialized HTML files in the selected format. + +.SH OPTIONS +.TP +\fB-b\fR +Builder to use; defaults to html. See the full list of builders above. +.TP +\fB-a\fR +Generates output for all files; without this option only output for +new and changed files is generated. +.TP +\fB-E\fR +Ignores cached files, forces to re-read all source files from disk. +.TP +\fB-c\fR +Locates the conf.py file in the specified path instead of . +.TP +\fB-C\fR +Specifies that no conf.py file at all is to be used. Configuration can +only be set with the -D option. +.TP +\fB-D\fR = +Overrides a setting from the configuration file. +.TP +\fB-d\fR +Path to cached files; defaults to /.doctrees. +.TP +\fB-A\fR = +Passes a value into the HTML templates (only for html builders). +.TP +\fB-N\fR +Prevents colored output. +.TP +\fB-q\fR +Quiet operation, just prints warnings and errors on stderr. +.TP +\fB-Q\fR +Very quiet operation, doesn't print anything except for errors. +.TP +\fB-P\fR +Runs Pdb on exception. +.SH "SEE ALSO" +.BR sphinx-quickstart(1) +.SH AUTHOR +Georg Brandl , Armin Ronacher et +al. +.PP +This manual page was initially written by Mikhail Gusarov +, for the Debian project. 
diff --git a/doc/sphinx-quickstart.1 b/doc/sphinx-quickstart.1 new file mode 100644 index 00000000..93b0a4a5 --- /dev/null +++ b/doc/sphinx-quickstart.1 @@ -0,0 +1,17 @@ +.TH sphinx-quickstart 1 "Jan 2009" "Sphinx 0.6" "User Commands" +.SH NAME +sphinx-quickstart \- Sphinx documentation template generator +.SH SYNOPSIS +.B sphinx-quickstart +.SH DESCRIPTION +sphinx-quickstart is an interactive tool that asks some questions about your +project and then generates a complete documentation directory and sample +Makefile to be used with \fBsphinx-build(1)\fR. +.SH "SEE ALSO" +.BR sphinx-build(1) +.SH AUTHOR +Georg Brandl , Armin Ronacher et +al. +.PP +This manual page was initially written by Mikhail Gusarov + for the Debian project. -- cgit v1.2.1 From afc0d3fbc313eaca17aee86cbb0993d4f92d55c9 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 22:00:40 +0100 Subject: Close #52: There is now a ``hlist`` directive, creating a compact list by placing distributing items into multiple columns. --- CHANGES | 3 +++ doc/markup/para.rst | 21 +++++++++++++++++++++ sphinx/addnodes.py | 6 +++++- sphinx/directives/other.py | 28 ++++++++++++++++++++++++++++ sphinx/writers/html.py | 10 ++++++++++ sphinx/writers/latex.py | 22 ++++++++++++++++++++-- sphinx/writers/text.py | 10 ++++++++++ tests/root/markup.txt | 10 ++++++++++ 8 files changed, 107 insertions(+), 3 deletions(-) diff --git a/CHANGES b/CHANGES index f5b53f9f..0d2ffbbb 100644 --- a/CHANGES +++ b/CHANGES @@ -37,6 +37,9 @@ New features added the directive -- this allows you to define your document structure, but place the links yourself. + - #52: There is now a ``hlist`` directive, creating a compact + list by placing distributing items into multiple columns. + - #77: If a description environment with info field list only contains one ``:param:`` entry, no bullet list is generated. 
diff --git a/doc/markup/para.rst b/doc/markup/para.rst index c60eb258..b071e46c 100644 --- a/doc/markup/para.rst +++ b/doc/markup/para.rst @@ -100,6 +100,27 @@ units as well as normal text: .. centered:: LICENSE AGREEMENT +.. directive:: hlist + + This directive must contain a bullet list. It will transform it into a more + compact list by either distributing more than one item horizontally, or + reducing spacing between items, depending on the builder. + + For builders that support the horizontal distribution, there is a ``columns`` + option that specifies the number of columns; it defaults to 2. Example:: + + .. hlist:: + :columns: 3 + + * A list of + * short items + * that should be + * displayed + * horizontally + + .. versionadded:: 0.6 + + Table-of-contents markup ------------------------ diff --git a/sphinx/addnodes.py b/sphinx/addnodes.py index b267913b..a4af584c 100644 --- a/sphinx/addnodes.py +++ b/sphinx/addnodes.py @@ -74,6 +74,10 @@ class download_reference(nodes.reference): pass # for the ACKS list class acks(nodes.Element): pass +# for horizontal lists +class hlist(nodes.Element): pass +class hlistcol(nodes.Element): pass + # sets the highlighting language for literal blocks class highlightlang(nodes.Element): pass @@ -99,7 +103,7 @@ class meta(nodes.Special, nodes.PreBibliographic, nodes.Element): pass # will choke at some point if these are not added nodes._add_node_class_names("""index desc desc_content desc_signature desc_type desc_returns desc_addname desc_name desc_parameterlist - desc_parameter desc_optional download_reference + desc_parameter desc_optional download_reference hlist hlistcol centered versionmodified seealso productionlist production toctree pending_xref compact_paragraph highlightlang literal_emphasis glossary acks module start_of_file tabular_col_spec meta""".split()) diff --git a/sphinx/directives/other.py b/sphinx/directives/other.py index f19c4f9b..cbf548be 100644 --- a/sphinx/directives/other.py +++ 
b/sphinx/directives/other.py @@ -383,6 +383,34 @@ acks_directive.arguments = (0, 0, 0) directives.register_directive('acks', acks_directive) +def hlist_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + ncolumns = options.get('columns', 2) + node = nodes.paragraph() + state.nested_parse(content, content_offset, node) + if len(node.children) != 1 or not isinstance(node.children[0], nodes.bullet_list): + return [state.document.reporter.warning('.. hlist content is not a list', + line=lineno)] + fulllist = node.children[0] + # create a hlist node where the items are distributed + npercol, nmore = divmod(len(fulllist), ncolumns) + index = 0 + newnode = addnodes.hlist() + for column in range(ncolumns): + endindex = index + (column < nmore and (npercol+1) or npercol) + col = addnodes.hlistcol() + col += nodes.bullet_list() + col[0] += fulllist.children[index:endindex] + index = endindex + newnode += col + return [newnode] + +hlist_directive.content = 1 +hlist_directive.arguments = (0, 0, 0) +hlist_directive.options = {'columns': int} +directives.register_directive('hlist', hlist_directive) + + def tabularcolumns_directive(name, arguments, options, content, lineno, content_offset, block_text, state, state_machine): # support giving explicit tabulary column definition to latex diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py index e34b8fa9..8c1a87ba 100644 --- a/sphinx/writers/html.py +++ b/sphinx/writers/html.py @@ -321,6 +321,16 @@ class HTMLTranslator(BaseTranslator): def depart_module(self, node): pass + def visit_hlist(self, node): + self.body.append('') + def depart_hlist(self, node): + self.body.append('
    \n') + + def visit_hlistcol(self, node): + self.body.append('') + def depart_hlistcol(self, node): + self.body.append('') + def bulk_text_processor(self, text): return text diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index c16a4271..885cac44 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -223,6 +223,7 @@ class LaTeXTranslator(nodes.NodeVisitor): self.this_is_the_title = 1 self.literal_whitespace = 0 self.no_contractions = 0 + self.compact_list = 0 def astext(self): return (HEADER % self.elements + self.highlighter.get_stylesheet() + @@ -652,9 +653,11 @@ class LaTeXTranslator(nodes.NodeVisitor): raise nodes.SkipNode def visit_bullet_list(self, node): - self.body.append('\\begin{itemize}\n' ) + if not self.compact_list: + self.body.append('\\begin{itemize}\n' ) def depart_bullet_list(self, node): - self.body.append('\\end{itemize}\n' ) + if not self.compact_list: + self.body.append('\\end{itemize}\n' ) def visit_enumerated_list(self, node): self.body.append('\\begin{enumerate}\n' ) @@ -723,6 +726,21 @@ class LaTeXTranslator(nodes.NodeVisitor): def depart_centered(self, node): self.body.append('\n\\end{centering}') + def visit_hlist(self, node): + # for now, we don't support a more compact list format + # don't add individual itemize environments, but one for all columns + self.compact_list += 1 + self.body.append('\\begin{itemize}\\setlength{\\itemsep}{0pt}' + '\\setlength{\\parskip}{0pt}\n') + def depart_hlist(self, node): + self.compact_list -= 1 + self.body.append('\\end{itemize}\n') + + def visit_hlistcol(self, node): + pass + def depart_hlistcol(self, node): + pass + def visit_module(self, node): modname = node['modname'] self.body.append('\n\\declaremodule[%s]{}{%s}' % (modname.replace('_', ''), diff --git a/sphinx/writers/text.py b/sphinx/writers/text.py index 5c8500fb..4718d41d 100644 --- a/sphinx/writers/text.py +++ b/sphinx/writers/text.py @@ -516,6 +516,16 @@ class TextTranslator(nodes.NodeVisitor): def 
depart_centered(self, node): pass + def visit_hlist(self, node): + pass + def depart_hlist(self, node): + pass + + def visit_hlistcol(self, node): + pass + def depart_hlistcol(self, node): + pass + def visit_admonition(self, node): self.new_state(0) def depart_admonition(self, node): diff --git a/tests/root/markup.txt b/tests/root/markup.txt index 454762e3..777fbd2f 100644 --- a/tests/root/markup.txt +++ b/tests/root/markup.txt @@ -104,6 +104,16 @@ Reference lookup: [Ref1]_ (defined in another file). `Google `_ For everything. +.. hlist:: + :columns: 4 + + * This + * is + * a horizontal + * list + * with several + * items + .. rubric:: Side note This is a side note. -- cgit v1.2.1 From c663958f6064cc325e271ca5416167716aa65e49 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 22:34:00 +0100 Subject: Add changelog entry for #32. --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 0d2ffbbb..47cfa672 100644 --- a/CHANGES +++ b/CHANGES @@ -91,6 +91,8 @@ New features added - Source links in HTML are now generated with ``rel="nofollow"``. + - Quickstart can now generate a Windows ``make.bat`` file. + Release 0.5.2 (in development) ============================== -- cgit v1.2.1 From 0d6fd15228d6192dcd4beb763dcdc9ac4652b185 Mon Sep 17 00:00:00 2001 From: Benoit Boissinot Date: Sun, 4 Jan 2009 23:15:38 +0100 Subject: fix #30: disable the search box when javascript isn't available --- sphinx/templates/layout.html | 21 +++++++++++++-------- sphinx/templates/search.html | 6 ++++++ 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/sphinx/templates/layout.html b/sphinx/templates/layout.html index e011f643..29ddf014 100644 --- a/sphinx/templates/layout.html +++ b/sphinx/templates/layout.html @@ -63,14 +63,19 @@ {% include customsidebar %} {%- endif %} {%- block sidebarsearch %} - {%- if pagename != "search" %} -

    {{ _('Quick search') }}

    - -

    {{ _('Enter search terms or a module, class or function name.') }}

    + {%- if pagename != "search" %} + + {%- endif %} {%- endblock %}
    diff --git a/sphinx/templates/search.html b/sphinx/templates/search.html index 545a459b..363f641f 100644 --- a/sphinx/templates/search.html +++ b/sphinx/templates/search.html @@ -3,6 +3,12 @@ {% set script_files = script_files + ['_static/searchtools.js'] %} {% block body %}

    {{ _('Search') }}

    +
    + +

    + {% trans %}Please activate Javascript to enable the search functionality.{% endtrans %} +

    +

    {% trans %}From here you can search these documents. Enter your search words into the box below and click "search". Note that the search -- cgit v1.2.1 From b94c9985e98ca0dca829942c3aac099685296349 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 23:23:54 +0100 Subject: Simplify script snippet, break long lines. --- sphinx/templates/layout.html | 16 +++++++++------- sphinx/templates/search.html | 5 +++-- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/sphinx/templates/layout.html b/sphinx/templates/layout.html index 29ddf014..fb0df9e0 100644 --- a/sphinx/templates/layout.html +++ b/sphinx/templates/layout.html @@ -44,18 +44,21 @@ {%- block sidebarrel %} {%- if prev %}

    {{ _('Previous topic') }}

    -

    {{ prev.title }}

    +

    {{ prev.title }}

    {%- endif %} {%- if next %}

    {{ _('Next topic') }}

    -

    {{ next.title }}

    +

    {{ next.title }}

    {%- endif %} {%- endblock %} {%- block sidebarsourcelink %} {%- if show_source and has_source and sourcename %}

    {{ _('This Page') }}

    {%- endif %} {%- endblock %} @@ -67,15 +70,14 @@ - + {%- endif %} {%- endblock %} diff --git a/sphinx/templates/search.html b/sphinx/templates/search.html index 363f641f..224d87b8 100644 --- a/sphinx/templates/search.html +++ b/sphinx/templates/search.html @@ -3,10 +3,11 @@ {% set script_files = script_files + ['_static/searchtools.js'] %} {% block body %}

    {{ _('Search') }}

    -
    +

    - {% trans %}Please activate Javascript to enable the search functionality.{% endtrans %} + {% trans %}Please activate JavaScript to enable the search + functionality.{% endtrans %}

    -- cgit v1.2.1 From a104927be918b503769e298df9c4ba43a7e1a3e9 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 5 Jan 2009 00:04:53 +0100 Subject: Add JQuery license; add PSF copyright statement. --- LICENSE | 37 ++++++++++++++++++++++++++++++++++--- 1 file changed, 34 insertions(+), 3 deletions(-) diff --git a/LICENSE b/LICENSE index faac79f0..fb2049a8 100644 --- a/LICENSE +++ b/LICENSE @@ -1,9 +1,9 @@ -Copyright (c) 2007-2009 by the Sphinx team (see AUTHORS file). -All rights reserved. - License for Sphinx ================== +Copyright (c) 2007-2009 by the Sphinx team (see AUTHORS file). +All rights reserved. + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: @@ -36,6 +36,11 @@ sphinx.pycode.pgen2, is available in the Python 2.6 distribution under the PSF license agreement for Python: ---------------------------------------------------------------------- +Copyright © 2001-2008 Python Software Foundation; All Rights Reserved. + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization ("Licensee") accessing and otherwise using Python 2.6 software in source or binary form @@ -186,3 +191,29 @@ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
---------------------------------------------------------------------- + +The included JQuery JavaScript library is available under the MIT +license: + +---------------------------------------------------------------------- +Copyright (c) 2008 John Resig, http://jquery.com/ + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +---------------------------------------------------------------------- -- cgit v1.2.1 From 5c28c0bd44c42ddd7e01150d363a0ca0bdcf6b83 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 5 Jan 2009 19:11:52 +0100 Subject: Remove old Jinja 1 bridge. --- sphinx/_jinja.py | 114 ------------------------------------------------------- 1 file changed, 114 deletions(-) delete mode 100644 sphinx/_jinja.py diff --git a/sphinx/_jinja.py b/sphinx/_jinja.py deleted file mode 100644 index fba432c4..00000000 --- a/sphinx/_jinja.py +++ /dev/null @@ -1,114 +0,0 @@ -# -*- coding: utf-8 -*- -""" - sphinx._jinja - ~~~~~~~~~~~~~ - - Jinja glue. 
- - :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import codecs -from os import path - -from sphinx import package_dir -from sphinx.util import mtimes_of_files -from sphinx.application import TemplateBridge - -from jinja import Environment -from jinja.loaders import BaseLoader -from jinja.exceptions import TemplateNotFound - - -def babel_extract(fileobj, keywords, comment_tags, options): - """ - Simple extractor to get some basic Babel support. - """ - env = Environment() - for lineno, sg, pl in env.get_translations_for_string(fileobj.read()): - yield lineno, None, (sg, pl), '' - - -class SphinxFileSystemLoader(BaseLoader): - """ - A loader that loads templates either relative to one of a list of given - paths, or from an absolute path. - """ - - def __init__(self, basepath, extpaths): - self.basepath = path.abspath(basepath) - self.extpaths = map(path.abspath, extpaths) - self.searchpaths = self.extpaths + [self.basepath] - - def get_source(self, environment, name, parent): - name = name.replace('/', path.sep) - if name.startswith('!'): - name = name[1:] - if not path.exists(path.join(self.basepath, name)): - raise TemplateNotFound(name) - filename = path.join(self.basepath, name) - elif path.isabs(name): - if not path.exists(name): - raise TemplateNotFound(name) - filename = name - else: - for searchpath in self.searchpaths: - if path.exists(path.join(searchpath, name)): - filename = path.join(searchpath, name) - break - else: - raise TemplateNotFound(name) - f = codecs.open(filename, 'r', environment.template_charset) - try: - return f.read() - finally: - f.close() - - -class TranslatorEnvironment(Environment): - class _Translator(object): - def __init__(self, translator): - self.trans = translator - - def gettext(self, string): - return self.trans.ugettext(string) - - def ngettext(self, singular, plural, n): - return self.trans.ungettext(singular, plural, n) - - def __init__(self, *args, 
**kwargs): - self.translator = kwargs['translator'] - del kwargs['translator'] - super(TranslatorEnvironment, self).__init__(*args, **kwargs) - - def get_translator(self, context): - return TranslatorEnvironment._Translator(self.translator) - - -class BuiltinTemplates(TemplateBridge): - def init(self, builder): - self.templates = {} - base_templates_path = path.join(package_dir, 'templates') - ext_templates_path = [path.join(builder.confdir, dir) - for dir in builder.config.templates_path] - self.templates_path = [base_templates_path] + ext_templates_path - loader = SphinxFileSystemLoader(base_templates_path, ext_templates_path) - if builder.translator is not None: - self.jinja_env = TranslatorEnvironment(loader=loader, - friendly_traceback=False, translator=builder.translator) - else: - self.jinja_env = Environment(loader=loader, - # disable traceback, more likely that something - # in the application is broken than in the templates - friendly_traceback=False) - - def newest_template_mtime(self): - return max(mtimes_of_files(self.templates_path, '.html')) - - def render(self, template, context): - if template in self.templates: - return self.templates[template].render(context) - templateobj = self.templates[template] = \ - self.jinja_env.get_template(template) - return templateobj.render(context) -- cgit v1.2.1 From 9d7be98a2d69b979d8c1be5b408ba15c657b37b6 Mon Sep 17 00:00:00 2001 From: gbrandl Date: Mon, 5 Jan 2009 19:19:28 +0100 Subject: Create themes/ subdirectory and move stuff from templates/ and static/ there. 
--- sphinx/static/contents.png | Bin 202 -> 0 bytes sphinx/static/default.css | 657 --------------------- sphinx/static/doctools.js | 232 -------- sphinx/static/file.png | Bin 392 -> 0 bytes sphinx/static/jquery.js | 32 -- sphinx/static/minus.png | Bin 199 -> 0 bytes sphinx/static/navigation.png | Bin 218 -> 0 bytes sphinx/static/plus.png | Bin 199 -> 0 bytes sphinx/static/rightsidebar.css | 16 - sphinx/static/searchtools.js | 467 --------------- sphinx/static/sphinxdoc.css | 557 ------------------ sphinx/static/stickysidebar.css | 19 - sphinx/static/traditional.css | 700 ----------------------- sphinx/templates/changes/frameset.html | 11 - sphinx/templates/changes/rstsource.html | 15 - sphinx/templates/changes/versionchanges.html | 33 -- sphinx/templates/defindex.html | 26 - sphinx/templates/genindex-single.html | 46 -- sphinx/templates/genindex-split.html | 30 - sphinx/templates/genindex.html | 57 -- sphinx/templates/layout.html | 187 ------ sphinx/templates/modindex.html | 42 -- sphinx/templates/opensearch.xml | 10 - sphinx/templates/page.html | 4 - sphinx/templates/search.html | 45 -- sphinx/themes/basic/changes/frameset.html | 11 + sphinx/themes/basic/changes/rstsource.html | 15 + sphinx/themes/basic/changes/versionchanges.html | 33 ++ sphinx/themes/basic/defindex.html | 26 + sphinx/themes/basic/genindex-single.html | 46 ++ sphinx/themes/basic/genindex-split.html | 30 + sphinx/themes/basic/genindex.html | 57 ++ sphinx/themes/basic/layout.html | 187 ++++++ sphinx/themes/basic/modindex.html | 42 ++ sphinx/themes/basic/opensearch.xml | 10 + sphinx/themes/basic/page.html | 4 + sphinx/themes/basic/search.html | 45 ++ sphinx/themes/basic/static/basic.css | 657 +++++++++++++++++++++ sphinx/themes/basic/static/doctools.js | 232 ++++++++ sphinx/themes/basic/static/file.png | Bin 0 -> 392 bytes sphinx/themes/basic/static/jquery.js | 32 ++ sphinx/themes/basic/static/minus.png | Bin 0 -> 199 bytes sphinx/themes/basic/static/plus.png | Bin 0 -> 199 bytes 
sphinx/themes/basic/static/searchtools.js | 467 +++++++++++++++ sphinx/themes/default/static/default.css | 657 +++++++++++++++++++++ sphinx/themes/default/static/rightsidebar.css | 16 + sphinx/themes/default/static/stickysidebar.css | 19 + sphinx/themes/sphinxdoc/static/contents.png | Bin 0 -> 202 bytes sphinx/themes/sphinxdoc/static/navigation.png | Bin 0 -> 218 bytes sphinx/themes/sphinxdoc/static/sphinxdoc.css | 557 ++++++++++++++++++ sphinx/themes/traditional/static/traditional.css | 700 +++++++++++++++++++++++ 51 files changed, 3843 insertions(+), 3186 deletions(-) delete mode 100644 sphinx/static/contents.png delete mode 100644 sphinx/static/default.css delete mode 100644 sphinx/static/doctools.js delete mode 100644 sphinx/static/file.png delete mode 100644 sphinx/static/jquery.js delete mode 100644 sphinx/static/minus.png delete mode 100644 sphinx/static/navigation.png delete mode 100644 sphinx/static/plus.png delete mode 100644 sphinx/static/rightsidebar.css delete mode 100644 sphinx/static/searchtools.js delete mode 100644 sphinx/static/sphinxdoc.css delete mode 100644 sphinx/static/stickysidebar.css delete mode 100644 sphinx/static/traditional.css delete mode 100644 sphinx/templates/changes/frameset.html delete mode 100644 sphinx/templates/changes/rstsource.html delete mode 100644 sphinx/templates/changes/versionchanges.html delete mode 100644 sphinx/templates/defindex.html delete mode 100644 sphinx/templates/genindex-single.html delete mode 100644 sphinx/templates/genindex-split.html delete mode 100644 sphinx/templates/genindex.html delete mode 100644 sphinx/templates/layout.html delete mode 100644 sphinx/templates/modindex.html delete mode 100644 sphinx/templates/opensearch.xml delete mode 100644 sphinx/templates/page.html delete mode 100644 sphinx/templates/search.html create mode 100644 sphinx/themes/basic/changes/frameset.html create mode 100644 sphinx/themes/basic/changes/rstsource.html create mode 100644 
sphinx/themes/basic/changes/versionchanges.html create mode 100644 sphinx/themes/basic/defindex.html create mode 100644 sphinx/themes/basic/genindex-single.html create mode 100644 sphinx/themes/basic/genindex-split.html create mode 100644 sphinx/themes/basic/genindex.html create mode 100644 sphinx/themes/basic/layout.html create mode 100644 sphinx/themes/basic/modindex.html create mode 100644 sphinx/themes/basic/opensearch.xml create mode 100644 sphinx/themes/basic/page.html create mode 100644 sphinx/themes/basic/search.html create mode 100644 sphinx/themes/basic/static/basic.css create mode 100644 sphinx/themes/basic/static/doctools.js create mode 100644 sphinx/themes/basic/static/file.png create mode 100644 sphinx/themes/basic/static/jquery.js create mode 100644 sphinx/themes/basic/static/minus.png create mode 100644 sphinx/themes/basic/static/plus.png create mode 100644 sphinx/themes/basic/static/searchtools.js create mode 100644 sphinx/themes/default/static/default.css create mode 100644 sphinx/themes/default/static/rightsidebar.css create mode 100644 sphinx/themes/default/static/stickysidebar.css create mode 100644 sphinx/themes/sphinxdoc/static/contents.png create mode 100644 sphinx/themes/sphinxdoc/static/navigation.png create mode 100644 sphinx/themes/sphinxdoc/static/sphinxdoc.css create mode 100644 sphinx/themes/traditional/static/traditional.css diff --git a/sphinx/static/contents.png b/sphinx/static/contents.png deleted file mode 100644 index 7fb82154..00000000 Binary files a/sphinx/static/contents.png and /dev/null differ diff --git a/sphinx/static/default.css b/sphinx/static/default.css deleted file mode 100644 index 005caa1f..00000000 --- a/sphinx/static/default.css +++ /dev/null @@ -1,657 +0,0 @@ -/** - * Sphinx Doc Design - */ - -body { - font-family: sans-serif; - font-size: 100%; - background-color: #11303d; - color: #000; - margin: 0; - padding: 0; -} - -/* :::: LAYOUT :::: */ - -div.document { - background-color: #1c4e63; -} - 
-div.documentwrapper { - float: left; - width: 100%; -} - -div.bodywrapper { - margin: 0 0 0 230px; -} - -div.body { - background-color: white; - padding: 0 20px 30px 20px; -} - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; -} - -div.clearer { - clear: both; -} - -div.footer { - color: #fff; - width: 100%; - padding: 9px 0 9px 0; - text-align: center; - font-size: 75%; -} - -div.footer a { - color: #fff; - text-decoration: underline; -} - -div.related { - background-color: #133f52; - color: #fff; - width: 100%; - line-height: 30px; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -div.related a { - color: white; -} - -/* ::: TOC :::: */ -div.sphinxsidebar h3 { - font-family: 'Trebuchet MS', sans-serif; - color: white; - font-size: 1.4em; - font-weight: normal; - margin: 0; - padding: 0; -} - -div.sphinxsidebar h3 a { - color: white; -} - -div.sphinxsidebar h4 { - font-family: 'Trebuchet MS', sans-serif; - color: white; - font-size: 1.3em; - font-weight: normal; - margin: 5px 0 0 0; - padding: 0; -} - -div.sphinxsidebar p { - color: white; -} - -div.sphinxsidebar p.topless { - margin: 5px 10px 10px 10px; -} - -div.sphinxsidebar ul { - margin: 10px; - padding: 0; - list-style: none; - color: white; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar a { - color: #98dbcc; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -/* :::: MODULE CLOUD :::: */ -div.modulecloud { - margin: -5px 10px 5px 10px; - 
padding: 10px; - line-height: 160%; - border: 1px solid #cbe7e5; - background-color: #f2fbfd; -} - -div.modulecloud a { - padding: 0 5px 0 5px; -} - -/* :::: SEARCH :::: */ -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* :::: COMMON FORM STYLES :::: */ - -div.actions { - padding: 5px 10px 5px 10px; - border-top: 1px solid #cbe7e5; - border-bottom: 1px solid #cbe7e5; - background-color: #e0f6f4; -} - -form dl { - color: #333; -} - -form dt { - clear: both; - float: left; - min-width: 110px; - margin-right: 10px; - padding-top: 2px; -} - -input#homepage { - display: none; -} - -div.error { - margin: 5px 20px 0 0; - padding: 5px; - border: 1px solid #d00; - font-weight: bold; -} - -/* :::: INDEX PAGE :::: */ - -table.contentstable { - width: 90%; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* :::: INDEX STYLES :::: */ - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable dl, table.indextable dd { - margin-top: 0; - margin-bottom: 0; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -form.pfform { - margin: 10px 0 20px 0; -} - -/* :::: GLOBAL STYLES :::: */ - -.docwarning { - background-color: #ffe4e4; - padding: 10px; - margin: 0 -20px 0 -20px; - border-bottom: 1px solid #f66; -} - -p.subhead { - font-weight: bold; - margin-top: 20px; -} - -a { - color: #355f7c; - text-decoration: 
none; -} - -a:hover { - text-decoration: underline; -} - -div.body h1, -div.body h2, -div.body h3, -div.body h4, -div.body h5, -div.body h6 { - font-family: 'Trebuchet MS', sans-serif; - background-color: #f2f2f2; - font-weight: normal; - color: #20435c; - border-bottom: 1px solid #ccc; - margin: 20px -20px 10px -20px; - padding: 3px 0 3px 10px; -} - -div.body h1 { margin-top: 0; font-size: 200%; } -div.body h2 { font-size: 160%; } -div.body h3 { font-size: 140%; } -div.body h4 { font-size: 120%; } -div.body h5 { font-size: 110%; } -div.body h6 { font-size: 100%; } - -a.headerlink { - color: #c60f0f; - font-size: 0.8em; - padding: 0 4px 0 4px; - text-decoration: none; - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink { - visibility: visible; -} - -a.headerlink:hover { - background-color: #c60f0f; - color: white; -} - -div.body p, div.body dd, div.body li { - text-align: justify; - line-height: 130%; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -ul.fakelist { - list-style: none; - margin: 10px 0 10px 20px; - padding: 0; -} - -.field-list ul { - padding-left: 1em; -} - -.first { - margin-top: 0 !important; -} - -/* "Footnotes" heading */ -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -/* Sidebars */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* "Topics" */ - -div.topic { - background-color: #eee; - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* Admonitions */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: 
bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -div.admonition p.admonition-title + p { - display: inline; -} - -div.seealso { - background-color: #ffc; - border: 1px solid #ff6; -} - -div.warning { - background-color: #ffe4e4; - border: 1px solid #f66; -} - -div.note { - background-color: #eee; - border: 1px solid #ccc; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; - display: inline; -} - -p.admonition-title:after { - content: ":"; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -table.docutils { - border: 0; -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 0; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.field-list td, table.field-list th { - border: 0 !important; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -dl { - margin-bottom: 15px; - clear: both; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -.refcount { - color: #060; -} - -dt:target, -.highlight { - background-color: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -th { - text-align: left; - padding-right: 5px; -} - -pre { - padding: 5px; - background-color: #efc; - color: #333; - border: 1px solid #ac9; - border-left: none; - border-right: none; - overflow: auto; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -tt { - background-color: #ecf0f3; - padding: 0 1px 0 1px; - font-size: 0.95em; -} - -tt.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -tt.descclassname { - background-color: transparent; -} - 
-tt.xref, a tt { - background-color: transparent; - font-weight: bold; -} - -.footnote:target { background-color: #ffa } - -h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { - background-color: transparent; -} - -.optional { - font-size: 1.3em; -} - -.versionmodified { - font-style: italic; -} - -form.comment { - margin: 0; - padding: 10px 30px 10px 30px; - background-color: #eee; -} - -form.comment h3 { - background-color: #326591; - color: white; - margin: -10px -30px 10px -30px; - padding: 5px; - font-size: 1.4em; -} - -form.comment input, -form.comment textarea { - border: 1px solid #ccc; - padding: 2px; - font-family: sans-serif; - font-size: 100%; -} - -form.comment input[type="text"] { - width: 240px; -} - -form.comment textarea { - width: 100%; - height: 200px; - margin-bottom: 10px; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -img.math { - vertical-align: middle; -} - -div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -img.logo { - border: 0; -} - -/* :::: PRINT :::: */ -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0; - width : 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - div#comments div.new-comment-box, - #top-link { - display: none; - } -} diff --git a/sphinx/static/doctools.js b/sphinx/static/doctools.js deleted file mode 100644 index be4bdc88..00000000 --- a/sphinx/static/doctools.js +++ /dev/null @@ -1,232 +0,0 @@ -/// XXX: make it cross browser - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger - */ -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", "dirxml", - "group", "groupEnd", "time", "timeEnd", "count", "trace", "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {} -} - -/** - * small helper function to urldecode strings - */ 
-jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -} - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s == 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -} - -/** - * small function to check if an array contains - * a given item. - */ -jQuery.contains = function(arr, item) { - for (var i = 0; i < arr.length; i++) { - if (arr[i] == item) - return true; - } - return false; -} - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. 
- */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node) { - if (node.nodeType == 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && !jQuery.className.has(node.parentNode, className)) { - var span = document.createElement("span"); - span.className = className; - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this) - }); - } - } - return this.each(function() { - highlight(this); - }); -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initModIndex(); - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can savely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated == 'undefined') - return string; - return (typeof translated == 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated == 'undefined') - return (n == 1) ? 
singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[@id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[@id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlight'); - }); - }, 10); - $('

  • ') - .appendTo($('.sidebar .this-page-menu')); - } - }, - - /** - * init the modindex toggle buttons - */ - initModIndex : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - console.log($('tr.cg-' + idnum).toggle()); - if (src.substr(-9) == 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_MODINDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('.sidebar .this-page-menu li.highlight-link').fadeOut(300); - $('span.highlight').removeClass('highlight'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this == '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); diff --git a/sphinx/static/file.png b/sphinx/static/file.png deleted file mode 100644 index d18082e3..00000000 Binary files a/sphinx/static/file.png and /dev/null differ diff --git a/sphinx/static/jquery.js b/sphinx/static/jquery.js deleted file mode 100644 index 82b98e1d..00000000 --- a/sphinx/static/jquery.js +++ /dev/null @@ -1,32 +0,0 @@ -/* - * jQuery 1.2.6 - New Wave Javascript - * - * Copyright (c) 2008 John Resig (jquery.com) - * Dual licensed under the MIT (MIT-LICENSE.txt) - * and GPL (GPL-LICENSE.txt) licenses. 
- * - * $Date: 2008-05-24 14:22:17 -0400 (Sat, 24 May 2008) $ - * $Rev: 5685 $ - */ -(function(){var _jQuery=window.jQuery,_$=window.$;var jQuery=window.jQuery=window.$=function(selector,context){return new jQuery.fn.init(selector,context);};var quickExpr=/^[^<]*(<(.|\s)+>)[^>]*$|^#(\w+)$/,isSimple=/^.[^:#\[\.]*$/,undefined;jQuery.fn=jQuery.prototype={init:function(selector,context){selector=selector||document;if(selector.nodeType){this[0]=selector;this.length=1;return this;}if(typeof selector=="string"){var match=quickExpr.exec(selector);if(match&&(match[1]||!context)){if(match[1])selector=jQuery.clean([match[1]],context);else{var elem=document.getElementById(match[3]);if(elem){if(elem.id!=match[3])return jQuery().find(selector);return jQuery(elem);}selector=[];}}else -return jQuery(context).find(selector);}else if(jQuery.isFunction(selector))return jQuery(document)[jQuery.fn.ready?"ready":"load"](selector);return this.setArray(jQuery.makeArray(selector));},jquery:"1.2.6",size:function(){return this.length;},length:0,get:function(num){return num==undefined?jQuery.makeArray(this):this[num];},pushStack:function(elems){var ret=jQuery(elems);ret.prevObject=this;return ret;},setArray:function(elems){this.length=0;Array.prototype.push.apply(this,elems);return this;},each:function(callback,args){return jQuery.each(this,callback,args);},index:function(elem){var ret=-1;return jQuery.inArray(elem&&elem.jquery?elem[0]:elem,this);},attr:function(name,value,type){var options=name;if(name.constructor==String)if(value===undefined)return this[0]&&jQuery[type||"attr"](this[0],name);else{options={};options[name]=value;}return this.each(function(i){for(name in options)jQuery.attr(type?this.style:this,name,jQuery.prop(this,options[name],type,i,name));});},css:function(key,value){if((key=='width'||key=='height')&&parseFloat(value)<0)value=undefined;return this.attr(key,value,"curCSS");},text:function(text){if(typeof text!="object"&&text!=null)return 
this.empty().append((this[0]&&this[0].ownerDocument||document).createTextNode(text));var ret="";jQuery.each(text||this,function(){jQuery.each(this.childNodes,function(){if(this.nodeType!=8)ret+=this.nodeType!=1?this.nodeValue:jQuery.fn.text([this]);});});return ret;},wrapAll:function(html){if(this[0])jQuery(html,this[0].ownerDocument).clone().insertBefore(this[0]).map(function(){var elem=this;while(elem.firstChild)elem=elem.firstChild;return elem;}).append(this);return this;},wrapInner:function(html){return this.each(function(){jQuery(this).contents().wrapAll(html);});},wrap:function(html){return this.each(function(){jQuery(this).wrapAll(html);});},append:function(){return this.domManip(arguments,true,false,function(elem){if(this.nodeType==1)this.appendChild(elem);});},prepend:function(){return this.domManip(arguments,true,true,function(elem){if(this.nodeType==1)this.insertBefore(elem,this.firstChild);});},before:function(){return this.domManip(arguments,false,false,function(elem){this.parentNode.insertBefore(elem,this);});},after:function(){return this.domManip(arguments,false,true,function(elem){this.parentNode.insertBefore(elem,this.nextSibling);});},end:function(){return this.prevObject||jQuery([]);},find:function(selector){var elems=jQuery.map(this,function(elem){return jQuery.find(selector,elem);});return this.pushStack(/[^+>] [^+>]/.test(selector)||selector.indexOf("..")>-1?jQuery.unique(elems):elems);},clone:function(events){var ret=this.map(function(){if(jQuery.browser.msie&&!jQuery.isXMLDoc(this)){var clone=this.cloneNode(true),container=document.createElement("div");container.appendChild(clone);return jQuery.clean([container.innerHTML])[0];}else -return this.cloneNode(true);});var clone=ret.find("*").andSelf().each(function(){if(this[expando]!=undefined)this[expando]=null;});if(events===true)this.find("*").andSelf().each(function(i){if(this.nodeType==3)return;var events=jQuery.data(this,"events");for(var type in events)for(var handler in 
events[type])jQuery.event.add(clone[i],type,events[type][handler],events[type][handler].data);});return ret;},filter:function(selector){return this.pushStack(jQuery.isFunction(selector)&&jQuery.grep(this,function(elem,i){return selector.call(elem,i);})||jQuery.multiFilter(selector,this));},not:function(selector){if(selector.constructor==String)if(isSimple.test(selector))return this.pushStack(jQuery.multiFilter(selector,this,true));else -selector=jQuery.multiFilter(selector,this);var isArrayLike=selector.length&&selector[selector.length-1]!==undefined&&!selector.nodeType;return this.filter(function(){return isArrayLike?jQuery.inArray(this,selector)<0:this!=selector;});},add:function(selector){return this.pushStack(jQuery.unique(jQuery.merge(this.get(),typeof selector=='string'?jQuery(selector):jQuery.makeArray(selector))));},is:function(selector){return!!selector&&jQuery.multiFilter(selector,this).length>0;},hasClass:function(selector){return this.is("."+selector);},val:function(value){if(value==undefined){if(this.length){var elem=this[0];if(jQuery.nodeName(elem,"select")){var index=elem.selectedIndex,values=[],options=elem.options,one=elem.type=="select-one";if(index<0)return null;for(var i=one?index:0,max=one?index+1:options.length;i=0||jQuery.inArray(this.name,value)>=0);else if(jQuery.nodeName(this,"select")){var values=jQuery.makeArray(value);jQuery("option",this).each(function(){this.selected=(jQuery.inArray(this.value,values)>=0||jQuery.inArray(this.text,values)>=0);});if(!values.length)this.selectedIndex=-1;}else -this.value=value;});},html:function(value){return value==undefined?(this[0]?this[0].innerHTML:null):this.empty().append(value);},replaceWith:function(value){return this.after(value).remove();},eq:function(i){return this.slice(i,i+1);},slice:function(){return this.pushStack(Array.prototype.slice.apply(this,arguments));},map:function(callback){return this.pushStack(jQuery.map(this,function(elem,i){return 
callback.call(elem,i,elem);}));},andSelf:function(){return this.add(this.prevObject);},data:function(key,value){var parts=key.split(".");parts[1]=parts[1]?"."+parts[1]:"";if(value===undefined){var data=this.triggerHandler("getData"+parts[1]+"!",[parts[0]]);if(data===undefined&&this.length)data=jQuery.data(this[0],key);return data===undefined&&parts[1]?this.data(parts[0]):data;}else -return this.trigger("setData"+parts[1]+"!",[parts[0],value]).each(function(){jQuery.data(this,key,value);});},removeData:function(key){return this.each(function(){jQuery.removeData(this,key);});},domManip:function(args,table,reverse,callback){var clone=this.length>1,elems;return this.each(function(){if(!elems){elems=jQuery.clean(args,this.ownerDocument);if(reverse)elems.reverse();}var obj=this;if(table&&jQuery.nodeName(this,"table")&&jQuery.nodeName(elems[0],"tr"))obj=this.getElementsByTagName("tbody")[0]||this.appendChild(this.ownerDocument.createElement("tbody"));var scripts=jQuery([]);jQuery.each(elems,function(){var elem=clone?jQuery(this).clone(true)[0]:this;if(jQuery.nodeName(elem,"script"))scripts=scripts.add(elem);else{if(elem.nodeType==1)scripts=scripts.add(jQuery("script",elem).remove());callback.call(obj,elem);}});scripts.each(evalScript);});}};jQuery.fn.init.prototype=jQuery.fn;function evalScript(i,elem){if(elem.src)jQuery.ajax({url:elem.src,async:false,dataType:"script"});else -jQuery.globalEval(elem.text||elem.textContent||elem.innerHTML||"");if(elem.parentNode)elem.parentNode.removeChild(elem);}function now(){return+new Date;}jQuery.extend=jQuery.fn.extend=function(){var target=arguments[0]||{},i=1,length=arguments.length,deep=false,options;if(target.constructor==Boolean){deep=target;target=arguments[1]||{};i=2;}if(typeof target!="object"&&typeof target!="function")target={};if(length==i){target=this;--i;}for(;i-1;}},swap:function(elem,options,callback){var old={};for(var name in 
options){old[name]=elem.style[name];elem.style[name]=options[name];}callback.call(elem);for(var name in options)elem.style[name]=old[name];},css:function(elem,name,force){if(name=="width"||name=="height"){var val,props={position:"absolute",visibility:"hidden",display:"block"},which=name=="width"?["Left","Right"]:["Top","Bottom"];function getWH(){val=name=="width"?elem.offsetWidth:elem.offsetHeight;var padding=0,border=0;jQuery.each(which,function(){padding+=parseFloat(jQuery.curCSS(elem,"padding"+this,true))||0;border+=parseFloat(jQuery.curCSS(elem,"border"+this+"Width",true))||0;});val-=Math.round(padding+border);}if(jQuery(elem).is(":visible"))getWH();else -jQuery.swap(elem,props,getWH);return Math.max(0,val);}return jQuery.curCSS(elem,name,force);},curCSS:function(elem,name,force){var ret,style=elem.style;function color(elem){if(!jQuery.browser.safari)return false;var ret=defaultView.getComputedStyle(elem,null);return!ret||ret.getPropertyValue("color")=="";}if(name=="opacity"&&jQuery.browser.msie){ret=jQuery.attr(style,"opacity");return ret==""?"1":ret;}if(jQuery.browser.opera&&name=="display"){var save=style.outline;style.outline="0 solid black";style.outline=save;}if(name.match(/float/i))name=styleFloat;if(!force&&style&&style[name])ret=style[name];else if(defaultView.getComputedStyle){if(name.match(/float/i))name="float";name=name.replace(/([A-Z])/g,"-$1").toLowerCase();var computedStyle=defaultView.getComputedStyle(elem,null);if(computedStyle&&!color(elem))ret=computedStyle.getPropertyValue(name);else{var swap=[],stack=[],a=elem,i=0;for(;a&&color(a);a=a.parentNode)stack.unshift(a);for(;i]*?)\/>/g,function(all,front,tag){return tag.match(/^(abbr|br|col|img|input|link|meta|param|hr|area|embed)$/i)?all:front+">";});var tags=jQuery.trim(elem).toLowerCase(),div=context.createElement("div");var wrap=!tags.indexOf("",""]||!tags.indexOf("",""]||tags.match(/^<(thead|tbody|tfoot|colg|cap)/)&&[1,"","
    "]||!tags.indexOf("",""]||(!tags.indexOf("",""]||!tags.indexOf("",""]||jQuery.browser.msie&&[1,"div
    ","
    "]||[0,"",""];div.innerHTML=wrap[1]+elem+wrap[2];while(wrap[0]--)div=div.lastChild;if(jQuery.browser.msie){var tbody=!tags.indexOf(""&&tags.indexOf("=0;--j)if(jQuery.nodeName(tbody[j],"tbody")&&!tbody[j].childNodes.length)tbody[j].parentNode.removeChild(tbody[j]);if(/^\s/.test(elem))div.insertBefore(context.createTextNode(elem.match(/^\s*/)[0]),div.firstChild);}elem=jQuery.makeArray(div.childNodes);}if(elem.length===0&&(!jQuery.nodeName(elem,"form")&&!jQuery.nodeName(elem,"select")))return;if(elem[0]==undefined||jQuery.nodeName(elem,"form")||elem.options)ret.push(elem);else -ret=jQuery.merge(ret,elem);});return ret;},attr:function(elem,name,value){if(!elem||elem.nodeType==3||elem.nodeType==8)return undefined;var notxml=!jQuery.isXMLDoc(elem),set=value!==undefined,msie=jQuery.browser.msie;name=notxml&&jQuery.props[name]||name;if(elem.tagName){var special=/href|src|style/.test(name);if(name=="selected"&&jQuery.browser.safari)elem.parentNode.selectedIndex;if(name in elem&¬xml&&!special){if(set){if(name=="type"&&jQuery.nodeName(elem,"input")&&elem.parentNode)throw"type property can't be changed";elem[name]=value;}if(jQuery.nodeName(elem,"form")&&elem.getAttributeNode(name))return elem.getAttributeNode(name).nodeValue;return elem[name];}if(msie&¬xml&&name=="style")return jQuery.attr(elem.style,"cssText",value);if(set)elem.setAttribute(name,""+value);var attr=msie&¬xml&&special?elem.getAttribute(name,2):elem.getAttribute(name);return attr===null?undefined:attr;}if(msie&&name=="opacity"){if(set){elem.zoom=1;elem.filter=(elem.filter||"").replace(/alpha\([^)]*\)/,"")+(parseInt(value)+''=="NaN"?"":"alpha(opacity="+value*100+")");}return elem.filter&&elem.filter.indexOf("opacity=")>=0?(parseFloat(elem.filter.match(/opacity=([^)]*)/)[1])/100)+'':"";}name=name.replace(/-([a-z])/ig,function(all,letter){return letter.toUpperCase();});if(set)elem[name]=value;return elem[name];},trim:function(text){return(text||"").replace(/^\s+|\s+$/g,"");},makeArray:function(array){var 
ret=[];if(array!=null){var i=array.length;if(i==null||array.split||array.setInterval||array.call)ret[0]=array;else -while(i)ret[--i]=array[i];}return ret;},inArray:function(elem,array){for(var i=0,length=array.length;i*",this).remove();while(this.firstChild)this.removeChild(this.firstChild);}},function(name,fn){jQuery.fn[name]=function(){return this.each(fn,arguments);};});jQuery.each(["Height","Width"],function(i,name){var type=name.toLowerCase();jQuery.fn[type]=function(size){return this[0]==window?jQuery.browser.opera&&document.body["client"+name]||jQuery.browser.safari&&window["inner"+name]||document.compatMode=="CSS1Compat"&&document.documentElement["client"+name]||document.body["client"+name]:this[0]==document?Math.max(Math.max(document.body["scroll"+name],document.documentElement["scroll"+name]),Math.max(document.body["offset"+name],document.documentElement["offset"+name])):size==undefined?(this.length?jQuery.css(this[0],type):null):this.css(type,size.constructor==String?size:size+"px");};});function num(elem,prop){return elem[0]&&parseInt(jQuery.curCSS(elem[0],prop,true),10)||0;}var chars=jQuery.browser.safari&&parseInt(jQuery.browser.version)<417?"(?:[\\w*_-]|\\\\.)":"(?:[\\w\u0128-\uFFFF*_-]|\\\\.)",quickChild=new RegExp("^>\\s*("+chars+"+)"),quickID=new RegExp("^("+chars+"+)(#)("+chars+"+)"),quickClass=new RegExp("^([#.]?)("+chars+"*)");jQuery.extend({expr:{"":function(a,i,m){return m[2]=="*"||jQuery.nodeName(a,m[2]);},"#":function(a,i,m){return a.getAttribute("id")==m[2];},":":{lt:function(a,i,m){return im[3]-0;},nth:function(a,i,m){return m[3]-0==i;},eq:function(a,i,m){return m[3]-0==i;},first:function(a,i){return i==0;},last:function(a,i,m,r){return i==r.length-1;},even:function(a,i){return i%2==0;},odd:function(a,i){return i%2;},"first-child":function(a){return a.parentNode.getElementsByTagName("*")[0]==a;},"last-child":function(a){return 
jQuery.nth(a.parentNode.lastChild,1,"previousSibling")==a;},"only-child":function(a){return!jQuery.nth(a.parentNode.lastChild,2,"previousSibling");},parent:function(a){return a.firstChild;},empty:function(a){return!a.firstChild;},contains:function(a,i,m){return(a.textContent||a.innerText||jQuery(a).text()||"").indexOf(m[3])>=0;},visible:function(a){return"hidden"!=a.type&&jQuery.css(a,"display")!="none"&&jQuery.css(a,"visibility")!="hidden";},hidden:function(a){return"hidden"==a.type||jQuery.css(a,"display")=="none"||jQuery.css(a,"visibility")=="hidden";},enabled:function(a){return!a.disabled;},disabled:function(a){return a.disabled;},checked:function(a){return a.checked;},selected:function(a){return a.selected||jQuery.attr(a,"selected");},text:function(a){return"text"==a.type;},radio:function(a){return"radio"==a.type;},checkbox:function(a){return"checkbox"==a.type;},file:function(a){return"file"==a.type;},password:function(a){return"password"==a.type;},submit:function(a){return"submit"==a.type;},image:function(a){return"image"==a.type;},reset:function(a){return"reset"==a.type;},button:function(a){return"button"==a.type||jQuery.nodeName(a,"button");},input:function(a){return/input|select|textarea|button/i.test(a.nodeName);},has:function(a,i,m){return jQuery.find(m[3],a).length;},header:function(a){return/h\d/i.test(a.nodeName);},animated:function(a){return jQuery.grep(jQuery.timers,function(fn){return a==fn.elem;}).length;}}},parse:[/^(\[) *@?([\w-]+) *([!*$^~=]*) *('?"?)(.*?)\4 *\]/,/^(:)([\w-]+)\("?'?(.*?(\(.*?\))?[^(]*?)"?'?\)/,new RegExp("^([:.#]*)("+chars+"+)")],multiFilter:function(expr,elems,not){var old,cur=[];while(expr&&expr!=old){old=expr;var f=jQuery.filter(expr,elems,not);expr=f.t.replace(/^\s*,\s*/,"");cur=not?elems=f.r:jQuery.merge(cur,f.r);}return cur;},find:function(t,context){if(typeof t!="string")return[t];if(context&&context.nodeType!=1&&context.nodeType!=9)return[];context=context||document;var 
ret=[context],done=[],last,nodeName;while(t&&last!=t){var r=[];last=t;t=jQuery.trim(t);var foundToken=false,re=quickChild,m=re.exec(t);if(m){nodeName=m[1].toUpperCase();for(var i=0;ret[i];i++)for(var c=ret[i].firstChild;c;c=c.nextSibling)if(c.nodeType==1&&(nodeName=="*"||c.nodeName.toUpperCase()==nodeName))r.push(c);ret=r;t=t.replace(re,"");if(t.indexOf(" ")==0)continue;foundToken=true;}else{re=/^([>+~])\s*(\w*)/i;if((m=re.exec(t))!=null){r=[];var merge={};nodeName=m[2].toUpperCase();m=m[1];for(var j=0,rl=ret.length;j=0;if(!not&&pass||not&&!pass)tmp.push(r[i]);}return tmp;},filter:function(t,r,not){var last;while(t&&t!=last){last=t;var p=jQuery.parse,m;for(var i=0;p[i];i++){m=p[i].exec(t);if(m){t=t.substring(m[0].length);m[2]=m[2].replace(/\\/g,"");break;}}if(!m)break;if(m[1]==":"&&m[2]=="not")r=isSimple.test(m[3])?jQuery.filter(m[3],r,true).r:jQuery(r).not(m[3]);else if(m[1]==".")r=jQuery.classFilter(r,m[2],not);else if(m[1]=="["){var tmp=[],type=m[3];for(var i=0,rl=r.length;i=0)^not)tmp.push(a);}r=tmp;}else if(m[1]==":"&&m[2]=="nth-child"){var merge={},tmp=[],test=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(m[3]=="even"&&"2n"||m[3]=="odd"&&"2n+1"||!/\D/.test(m[3])&&"0n+"+m[3]||m[3]),first=(test[1]+(test[2]||1))-0,last=test[3]-0;for(var i=0,rl=r.length;i=0)add=true;if(add^not)tmp.push(node);}r=tmp;}else{var fn=jQuery.expr[m[1]];if(typeof fn=="object")fn=fn[m[2]];if(typeof fn=="string")fn=eval("false||function(a,i){return "+fn+";}");r=jQuery.grep(r,function(elem,i){return fn(elem,i,m,r);},not);}}return{r:r,t:t};},dir:function(elem,dir){var matched=[],cur=elem[dir];while(cur&&cur!=document){if(cur.nodeType==1)matched.push(cur);cur=cur[dir];}return matched;},nth:function(cur,result,dir,elem){result=result||1;var num=0;for(;cur;cur=cur[dir])if(cur.nodeType==1&&++num==result)break;return cur;},sibling:function(n,elem){var r=[];for(;n;n=n.nextSibling){if(n.nodeType==1&&n!=elem)r.push(n);}return 
r;}});jQuery.event={add:function(elem,types,handler,data){if(elem.nodeType==3||elem.nodeType==8)return;if(jQuery.browser.msie&&elem.setInterval)elem=window;if(!handler.guid)handler.guid=this.guid++;if(data!=undefined){var fn=handler;handler=this.proxy(fn,function(){return fn.apply(this,arguments);});handler.data=data;}var events=jQuery.data(elem,"events")||jQuery.data(elem,"events",{}),handle=jQuery.data(elem,"handle")||jQuery.data(elem,"handle",function(){if(typeof jQuery!="undefined"&&!jQuery.event.triggered)return jQuery.event.handle.apply(arguments.callee.elem,arguments);});handle.elem=elem;jQuery.each(types.split(/\s+/),function(index,type){var parts=type.split(".");type=parts[0];handler.type=parts[1];var handlers=events[type];if(!handlers){handlers=events[type]={};if(!jQuery.event.special[type]||jQuery.event.special[type].setup.call(elem)===false){if(elem.addEventListener)elem.addEventListener(type,handle,false);else if(elem.attachEvent)elem.attachEvent("on"+type,handle);}}handlers[handler.guid]=handler;jQuery.event.global[type]=true;});elem=null;},guid:1,global:{},remove:function(elem,types,handler){if(elem.nodeType==3||elem.nodeType==8)return;var events=jQuery.data(elem,"events"),ret,index;if(events){if(types==undefined||(typeof types=="string"&&types.charAt(0)=="."))for(var type in events)this.remove(elem,type+(types||""));else{if(types.type){handler=types.handler;types=types.type;}jQuery.each(types.split(/\s+/),function(index,type){var parts=type.split(".");type=parts[0];if(events[type]){if(handler)delete events[type][handler.guid];else -for(handler in events[type])if(!parts[1]||events[type][handler].type==parts[1])delete events[type][handler];for(ret in events[type])break;if(!ret){if(!jQuery.event.special[type]||jQuery.event.special[type].teardown.call(elem)===false){if(elem.removeEventListener)elem.removeEventListener(type,jQuery.data(elem,"handle"),false);else if(elem.detachEvent)elem.detachEvent("on"+type,jQuery.data(elem,"handle"));}ret=null;delete 
events[type];}}});}for(ret in events)break;if(!ret){var handle=jQuery.data(elem,"handle");if(handle)handle.elem=null;jQuery.removeData(elem,"events");jQuery.removeData(elem,"handle");}}},trigger:function(type,data,elem,donative,extra){data=jQuery.makeArray(data);if(type.indexOf("!")>=0){type=type.slice(0,-1);var exclusive=true;}if(!elem){if(this.global[type])jQuery("*").add([window,document]).trigger(type,data);}else{if(elem.nodeType==3||elem.nodeType==8)return undefined;var val,ret,fn=jQuery.isFunction(elem[type]||null),event=!data[0]||!data[0].preventDefault;if(event){data.unshift({type:type,target:elem,preventDefault:function(){},stopPropagation:function(){},timeStamp:now()});data[0][expando]=true;}data[0].type=type;if(exclusive)data[0].exclusive=true;var handle=jQuery.data(elem,"handle");if(handle)val=handle.apply(elem,data);if((!fn||(jQuery.nodeName(elem,'a')&&type=="click"))&&elem["on"+type]&&elem["on"+type].apply(elem,data)===false)val=false;if(event)data.shift();if(extra&&jQuery.isFunction(extra)){ret=extra.apply(elem,val==null?data:data.concat(val));if(ret!==undefined)val=ret;}if(fn&&donative!==false&&val!==false&&!(jQuery.nodeName(elem,'a')&&type=="click")){this.triggered=true;try{elem[type]();}catch(e){}}this.triggered=false;}return val;},handle:function(event){var val,ret,namespace,all,handlers;event=arguments[0]=jQuery.event.fix(event||window.event);namespace=event.type.split(".");event.type=namespace[0];namespace=namespace[1];all=!namespace&&!event.exclusive;handlers=(jQuery.data(this,"events")||{})[event.type];for(var j in handlers){var handler=handlers[j];if(all||handler.type==namespace){event.handler=handler;event.data=handler.data;ret=handler.apply(this,arguments);if(val!==false)val=ret;if(ret===false){event.preventDefault();event.stopPropagation();}}}return val;},fix:function(event){if(event[expando]==true)return event;var originalEvent=event;event={originalEvent:originalEvent};var props="altKey attrChange attrName bubbles button cancelable 
charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode metaKey newValue originalTarget pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target timeStamp toElement type view wheelDelta which".split(" ");for(var i=props.length;i;i--)event[props[i]]=originalEvent[props[i]];event[expando]=true;event.preventDefault=function(){if(originalEvent.preventDefault)originalEvent.preventDefault();originalEvent.returnValue=false;};event.stopPropagation=function(){if(originalEvent.stopPropagation)originalEvent.stopPropagation();originalEvent.cancelBubble=true;};event.timeStamp=event.timeStamp||now();if(!event.target)event.target=event.srcElement||document;if(event.target.nodeType==3)event.target=event.target.parentNode;if(!event.relatedTarget&&event.fromElement)event.relatedTarget=event.fromElement==event.target?event.toElement:event.fromElement;if(event.pageX==null&&event.clientX!=null){var doc=document.documentElement,body=document.body;event.pageX=event.clientX+(doc&&doc.scrollLeft||body&&body.scrollLeft||0)-(doc.clientLeft||0);event.pageY=event.clientY+(doc&&doc.scrollTop||body&&body.scrollTop||0)-(doc.clientTop||0);}if(!event.which&&((event.charCode||event.charCode===0)?event.charCode:event.keyCode))event.which=event.charCode||event.keyCode;if(!event.metaKey&&event.ctrlKey)event.metaKey=event.ctrlKey;if(!event.which&&event.button)event.which=(event.button&1?1:(event.button&2?3:(event.button&4?2:0)));return event;},proxy:function(fn,proxy){proxy.guid=fn.guid=fn.guid||proxy.guid||this.guid++;return proxy;},special:{ready:{setup:function(){bindReady();return;},teardown:function(){return;}},mouseenter:{setup:function(){if(jQuery.browser.msie)return false;jQuery(this).bind("mouseover",jQuery.event.special.mouseenter.handler);return true;},teardown:function(){if(jQuery.browser.msie)return false;jQuery(this).unbind("mouseover",jQuery.event.special.mouseenter.handler);return 
true;},handler:function(event){if(withinElement(event,this))return true;event.type="mouseenter";return jQuery.event.handle.apply(this,arguments);}},mouseleave:{setup:function(){if(jQuery.browser.msie)return false;jQuery(this).bind("mouseout",jQuery.event.special.mouseleave.handler);return true;},teardown:function(){if(jQuery.browser.msie)return false;jQuery(this).unbind("mouseout",jQuery.event.special.mouseleave.handler);return true;},handler:function(event){if(withinElement(event,this))return true;event.type="mouseleave";return jQuery.event.handle.apply(this,arguments);}}}};jQuery.fn.extend({bind:function(type,data,fn){return type=="unload"?this.one(type,data,fn):this.each(function(){jQuery.event.add(this,type,fn||data,fn&&data);});},one:function(type,data,fn){var one=jQuery.event.proxy(fn||data,function(event){jQuery(this).unbind(event,one);return(fn||data).apply(this,arguments);});return this.each(function(){jQuery.event.add(this,type,one,fn&&data);});},unbind:function(type,fn){return this.each(function(){jQuery.event.remove(this,type,fn);});},trigger:function(type,data,fn){return this.each(function(){jQuery.event.trigger(type,data,this,true,fn);});},triggerHandler:function(type,data,fn){return this[0]&&jQuery.event.trigger(type,data,this[0],false,fn);},toggle:function(fn){var args=arguments,i=1;while(i=0){var selector=url.slice(off,url.length);url=url.slice(0,off);}callback=callback||function(){};var type="GET";if(params)if(jQuery.isFunction(params)){callback=params;params=null;}else{params=jQuery.param(params);type="POST";}var self=this;jQuery.ajax({url:url,type:type,dataType:"html",data:params,complete:function(res,status){if(status=="success"||status=="notmodified")self.html(selector?jQuery("
    ").append(res.responseText.replace(//g,"")).find(selector):res.responseText);self.each(callback,[res.responseText,status,res]);}});return this;},serialize:function(){return jQuery.param(this.serializeArray());},serializeArray:function(){return this.map(function(){return jQuery.nodeName(this,"form")?jQuery.makeArray(this.elements):this;}).filter(function(){return this.name&&!this.disabled&&(this.checked||/select|textarea/i.test(this.nodeName)||/text|hidden|password/i.test(this.type));}).map(function(i,elem){var val=jQuery(this).val();return val==null?null:val.constructor==Array?jQuery.map(val,function(val,i){return{name:elem.name,value:val};}):{name:elem.name,value:val};}).get();}});jQuery.each("ajaxStart,ajaxStop,ajaxComplete,ajaxError,ajaxSuccess,ajaxSend".split(","),function(i,o){jQuery.fn[o]=function(f){return this.bind(o,f);};});var jsc=now();jQuery.extend({get:function(url,data,callback,type){if(jQuery.isFunction(data)){callback=data;data=null;}return jQuery.ajax({type:"GET",url:url,data:data,success:callback,dataType:type});},getScript:function(url,callback){return jQuery.get(url,null,callback,"script");},getJSON:function(url,data,callback){return jQuery.get(url,data,callback,"json");},post:function(url,data,callback,type){if(jQuery.isFunction(data)){callback=data;data={};}return jQuery.ajax({type:"POST",url:url,data:data,success:callback,dataType:type});},ajaxSetup:function(settings){jQuery.extend(jQuery.ajaxSettings,settings);},ajaxSettings:{url:location.href,global:true,type:"GET",timeout:0,contentType:"application/x-www-form-urlencoded",processData:true,async:true,data:null,username:null,password:null,accepts:{xml:"application/xml, text/xml",html:"text/html",script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},lastModified:{},ajax:function(s){s=jQuery.extend(true,s,jQuery.extend(true,{},jQuery.ajaxSettings,s));var 
jsonp,jsre=/=\?(&|$)/g,status,data,type=s.type.toUpperCase();if(s.data&&s.processData&&typeof s.data!="string")s.data=jQuery.param(s.data);if(s.dataType=="jsonp"){if(type=="GET"){if(!s.url.match(jsre))s.url+=(s.url.match(/\?/)?"&":"?")+(s.jsonp||"callback")+"=?";}else if(!s.data||!s.data.match(jsre))s.data=(s.data?s.data+"&":"")+(s.jsonp||"callback")+"=?";s.dataType="json";}if(s.dataType=="json"&&(s.data&&s.data.match(jsre)||s.url.match(jsre))){jsonp="jsonp"+jsc++;if(s.data)s.data=(s.data+"").replace(jsre,"="+jsonp+"$1");s.url=s.url.replace(jsre,"="+jsonp+"$1");s.dataType="script";window[jsonp]=function(tmp){data=tmp;success();complete();window[jsonp]=undefined;try{delete window[jsonp];}catch(e){}if(head)head.removeChild(script);};}if(s.dataType=="script"&&s.cache==null)s.cache=false;if(s.cache===false&&type=="GET"){var ts=now();var ret=s.url.replace(/(\?|&)_=.*?(&|$)/,"$1_="+ts+"$2");s.url=ret+((ret==s.url)?(s.url.match(/\?/)?"&":"?")+"_="+ts:"");}if(s.data&&type=="GET"){s.url+=(s.url.match(/\?/)?"&":"?")+s.data;s.data=null;}if(s.global&&!jQuery.active++)jQuery.event.trigger("ajaxStart");var remote=/^(?:\w+:)?\/\/([^\/?#]+)/;if(s.dataType=="script"&&type=="GET"&&remote.test(s.url)&&remote.exec(s.url)[1]!=location.host){var head=document.getElementsByTagName("head")[0];var script=document.createElement("script");script.src=s.url;if(s.scriptCharset)script.charset=s.scriptCharset;if(!jsonp){var done=false;script.onload=script.onreadystatechange=function(){if(!done&&(!this.readyState||this.readyState=="loaded"||this.readyState=="complete")){done=true;success();complete();head.removeChild(script);}};}head.appendChild(script);return undefined;}var requestDone=false;var xhr=window.ActiveXObject?new ActiveXObject("Microsoft.XMLHTTP"):new XMLHttpRequest();if(s.username)xhr.open(type,s.url,s.async,s.username,s.password);else 
-xhr.open(type,s.url,s.async);try{if(s.data)xhr.setRequestHeader("Content-Type",s.contentType);if(s.ifModified)xhr.setRequestHeader("If-Modified-Since",jQuery.lastModified[s.url]||"Thu, 01 Jan 1970 00:00:00 GMT");xhr.setRequestHeader("X-Requested-With","XMLHttpRequest");xhr.setRequestHeader("Accept",s.dataType&&s.accepts[s.dataType]?s.accepts[s.dataType]+", */*":s.accepts._default);}catch(e){}if(s.beforeSend&&s.beforeSend(xhr,s)===false){s.global&&jQuery.active--;xhr.abort();return false;}if(s.global)jQuery.event.trigger("ajaxSend",[xhr,s]);var onreadystatechange=function(isTimeout){if(!requestDone&&xhr&&(xhr.readyState==4||isTimeout=="timeout")){requestDone=true;if(ival){clearInterval(ival);ival=null;}status=isTimeout=="timeout"&&"timeout"||!jQuery.httpSuccess(xhr)&&"error"||s.ifModified&&jQuery.httpNotModified(xhr,s.url)&&"notmodified"||"success";if(status=="success"){try{data=jQuery.httpData(xhr,s.dataType,s.dataFilter);}catch(e){status="parsererror";}}if(status=="success"){var modRes;try{modRes=xhr.getResponseHeader("Last-Modified");}catch(e){}if(s.ifModified&&modRes)jQuery.lastModified[s.url]=modRes;if(!jsonp)success();}else -jQuery.handleError(s,xhr,status);complete();if(s.async)xhr=null;}};if(s.async){var ival=setInterval(onreadystatechange,13);if(s.timeout>0)setTimeout(function(){if(xhr){xhr.abort();if(!requestDone)onreadystatechange("timeout");}},s.timeout);}try{xhr.send(s.data);}catch(e){jQuery.handleError(s,xhr,null,e);}if(!s.async)onreadystatechange();function success(){if(s.success)s.success(data,status);if(s.global)jQuery.event.trigger("ajaxSuccess",[xhr,s]);}function complete(){if(s.complete)s.complete(xhr,status);if(s.global)jQuery.event.trigger("ajaxComplete",[xhr,s]);if(s.global&&!--jQuery.active)jQuery.event.trigger("ajaxStop");}return 
xhr;},handleError:function(s,xhr,status,e){if(s.error)s.error(xhr,status,e);if(s.global)jQuery.event.trigger("ajaxError",[xhr,s,e]);},active:0,httpSuccess:function(xhr){try{return!xhr.status&&location.protocol=="file:"||(xhr.status>=200&&xhr.status<300)||xhr.status==304||xhr.status==1223||jQuery.browser.safari&&xhr.status==undefined;}catch(e){}return false;},httpNotModified:function(xhr,url){try{var xhrRes=xhr.getResponseHeader("Last-Modified");return xhr.status==304||xhrRes==jQuery.lastModified[url]||jQuery.browser.safari&&xhr.status==undefined;}catch(e){}return false;},httpData:function(xhr,type,filter){var ct=xhr.getResponseHeader("content-type"),xml=type=="xml"||!type&&ct&&ct.indexOf("xml")>=0,data=xml?xhr.responseXML:xhr.responseText;if(xml&&data.documentElement.tagName=="parsererror")throw"parsererror";if(filter)data=filter(data,type);if(type=="script")jQuery.globalEval(data);if(type=="json")data=eval("("+data+")");return data;},param:function(a){var s=[];if(a.constructor==Array||a.jquery)jQuery.each(a,function(){s.push(encodeURIComponent(this.name)+"="+encodeURIComponent(this.value));});else -for(var j in a)if(a[j]&&a[j].constructor==Array)jQuery.each(a[j],function(){s.push(encodeURIComponent(j)+"="+encodeURIComponent(this));});else -s.push(encodeURIComponent(j)+"="+encodeURIComponent(jQuery.isFunction(a[j])?a[j]():a[j]));return s.join("&").replace(/%20/g,"+");}});jQuery.fn.extend({show:function(speed,callback){return speed?this.animate({height:"show",width:"show",opacity:"show"},speed,callback):this.filter(":hidden").each(function(){this.style.display=this.oldblock||"";if(jQuery.css(this,"display")=="none"){var elem=jQuery("<"+this.tagName+" />").appendTo("body");this.style.display=elem.css("display");if(this.style.display=="none")this.style.display="block";elem.remove();}}).end();},hide:function(speed,callback){return 
speed?this.animate({height:"hide",width:"hide",opacity:"hide"},speed,callback):this.filter(":visible").each(function(){this.oldblock=this.oldblock||jQuery.css(this,"display");this.style.display="none";}).end();},_toggle:jQuery.fn.toggle,toggle:function(fn,fn2){return jQuery.isFunction(fn)&&jQuery.isFunction(fn2)?this._toggle.apply(this,arguments):fn?this.animate({height:"toggle",width:"toggle",opacity:"toggle"},fn,fn2):this.each(function(){jQuery(this)[jQuery(this).is(":hidden")?"show":"hide"]();});},slideDown:function(speed,callback){return this.animate({height:"show"},speed,callback);},slideUp:function(speed,callback){return this.animate({height:"hide"},speed,callback);},slideToggle:function(speed,callback){return this.animate({height:"toggle"},speed,callback);},fadeIn:function(speed,callback){return this.animate({opacity:"show"},speed,callback);},fadeOut:function(speed,callback){return this.animate({opacity:"hide"},speed,callback);},fadeTo:function(speed,to,callback){return this.animate({opacity:to},speed,callback);},animate:function(prop,speed,easing,callback){var optall=jQuery.speed(speed,easing,callback);return this[optall.queue===false?"each":"queue"](function(){if(this.nodeType!=1)return false;var opt=jQuery.extend({},optall),p,hidden=jQuery(this).is(":hidden"),self=this;for(p in prop){if(prop[p]=="hide"&&hidden||prop[p]=="show"&&!hidden)return opt.complete.call(this);if(p=="height"||p=="width"){opt.display=jQuery.css(this,"display");opt.overflow=this.style.overflow;}}if(opt.overflow!=null)this.style.overflow="hidden";opt.curAnim=jQuery.extend({},prop);jQuery.each(prop,function(name,val){var e=new jQuery.fx(self,opt,name);if(/toggle|show|hide/.test(val))e[val=="toggle"?hidden?"show":"hide":val](prop);else{var parts=val.toString().match(/^([+-]=)?([\d+-.]+)(.*)$/),start=e.cur(true)||0;if(parts){var 
end=parseFloat(parts[2]),unit=parts[3]||"px";if(unit!="px"){self.style[name]=(end||1)+unit;start=((end||1)/e.cur(true))*start;self.style[name]=start+unit;}if(parts[1])end=((parts[1]=="-="?-1:1)*end)+start;e.custom(start,end,unit);}else -e.custom(start,val,"");}});return true;});},queue:function(type,fn){if(jQuery.isFunction(type)||(type&&type.constructor==Array)){fn=type;type="fx";}if(!type||(typeof type=="string"&&!fn))return queue(this[0],type);return this.each(function(){if(fn.constructor==Array)queue(this,type,fn);else{queue(this,type).push(fn);if(queue(this,type).length==1)fn.call(this);}});},stop:function(clearQueue,gotoEnd){var timers=jQuery.timers;if(clearQueue)this.queue([]);this.each(function(){for(var i=timers.length-1;i>=0;i--)if(timers[i].elem==this){if(gotoEnd)timers[i](true);timers.splice(i,1);}});if(!gotoEnd)this.dequeue();return this;}});var queue=function(elem,type,array){if(elem){type=type||"fx";var q=jQuery.data(elem,type+"queue");if(!q||array)q=jQuery.data(elem,type+"queue",jQuery.makeArray(array));}return q;};jQuery.fn.dequeue=function(type){type=type||"fx";return this.each(function(){var q=queue(this,type);q.shift();if(q.length)q[0].call(this);});};jQuery.extend({speed:function(speed,easing,fn){var opt=speed&&speed.constructor==Object?speed:{complete:fn||!fn&&easing||jQuery.isFunction(speed)&&speed,duration:speed,easing:fn&&easing||easing&&easing.constructor!=Function&&easing};opt.duration=(opt.duration&&opt.duration.constructor==Number?opt.duration:jQuery.fx.speeds[opt.duration])||jQuery.fx.speeds.def;opt.old=opt.complete;opt.complete=function(){if(opt.queue!==false)jQuery(this).dequeue();if(jQuery.isFunction(opt.old))opt.old.call(this);};return opt;},easing:{linear:function(p,n,firstNum,diff){return 
firstNum+diff*p;},swing:function(p,n,firstNum,diff){return((-Math.cos(p*Math.PI)/2)+0.5)*diff+firstNum;}},timers:[],timerId:null,fx:function(elem,options,prop){this.options=options;this.elem=elem;this.prop=prop;if(!options.orig)options.orig={};}});jQuery.fx.prototype={update:function(){if(this.options.step)this.options.step.call(this.elem,this.now,this);(jQuery.fx.step[this.prop]||jQuery.fx.step._default)(this);if(this.prop=="height"||this.prop=="width")this.elem.style.display="block";},cur:function(force){if(this.elem[this.prop]!=null&&this.elem.style[this.prop]==null)return this.elem[this.prop];var r=parseFloat(jQuery.css(this.elem,this.prop,force));return r&&r>-10000?r:parseFloat(jQuery.curCSS(this.elem,this.prop))||0;},custom:function(from,to,unit){this.startTime=now();this.start=from;this.end=to;this.unit=unit||this.unit||"px";this.now=this.start;this.pos=this.state=0;this.update();var self=this;function t(gotoEnd){return self.step(gotoEnd);}t.elem=this.elem;jQuery.timers.push(t);if(jQuery.timerId==null){jQuery.timerId=setInterval(function(){var timers=jQuery.timers;for(var i=0;ithis.options.duration+this.startTime){this.now=this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;var done=true;for(var i in this.options.curAnim)if(this.options.curAnim[i]!==true)done=false;if(done){if(this.options.display!=null){this.elem.style.overflow=this.options.overflow;this.elem.style.display=this.options.display;if(jQuery.css(this.elem,"display")=="none")this.elem.style.display="block";}if(this.options.hide)this.elem.style.display="none";if(this.options.hide||this.options.show)for(var p in this.options.curAnim)jQuery.attr(this.elem.style,p,this.options.orig[p]);}if(done)this.options.complete.call(this.elem);return false;}else{var 
n=t-this.startTime;this.state=n/this.options.duration;this.pos=jQuery.easing[this.options.easing||(jQuery.easing.swing?"swing":"linear")](this.state,n,0,1,this.options.duration);this.now=this.start+((this.end-this.start)*this.pos);this.update();}return true;}};jQuery.extend(jQuery.fx,{speeds:{slow:600,fast:200,def:400},step:{scrollLeft:function(fx){fx.elem.scrollLeft=fx.now;},scrollTop:function(fx){fx.elem.scrollTop=fx.now;},opacity:function(fx){jQuery.attr(fx.elem.style,"opacity",fx.now);},_default:function(fx){fx.elem.style[fx.prop]=fx.now+fx.unit;}}});jQuery.fn.offset=function(){var left=0,top=0,elem=this[0],results;if(elem)with(jQuery.browser){var parent=elem.parentNode,offsetChild=elem,offsetParent=elem.offsetParent,doc=elem.ownerDocument,safari2=safari&&parseInt(version)<522&&!/adobeair/i.test(userAgent),css=jQuery.curCSS,fixed=css(elem,"position")=="fixed";if(elem.getBoundingClientRect){var box=elem.getBoundingClientRect();add(box.left+Math.max(doc.documentElement.scrollLeft,doc.body.scrollLeft),box.top+Math.max(doc.documentElement.scrollTop,doc.body.scrollTop));add(-doc.documentElement.clientLeft,-doc.documentElement.clientTop);}else{add(elem.offsetLeft,elem.offsetTop);while(offsetParent){add(offsetParent.offsetLeft,offsetParent.offsetTop);if(mozilla&&!/^t(able|d|h)$/i.test(offsetParent.tagName)||safari&&!safari2)border(offsetParent);if(!fixed&&css(offsetParent,"position")=="fixed")fixed=true;offsetChild=/^body$/i.test(offsetParent.tagName)?offsetChild:offsetParent;offsetParent=offsetParent.offsetParent;}while(parent&&parent.tagName&&!/^body|html$/i.test(parent.tagName)){if(!/^inline|table.*$/i.test(css(parent,"display")))add(-parent.scrollLeft,-parent.scrollTop);if(mozilla&&css(parent,"overflow")!="visible")border(parent);parent=parent.parentNode;}if((safari2&&(fixed||css(offsetChild,"position")=="absolute"))||(mozilla&&css(offsetChild,"position")!="absolute"))add(-doc.body.offsetLeft,-doc.body.offsetTop);if(fixed)add(Math.max(doc.documentElement.scrollLeft
,doc.body.scrollLeft),Math.max(doc.documentElement.scrollTop,doc.body.scrollTop));}results={top:top,left:left};}function border(elem){add(jQuery.curCSS(elem,"borderLeftWidth",true),jQuery.curCSS(elem,"borderTopWidth",true));}function add(l,t){left+=parseInt(l,10)||0;top+=parseInt(t,10)||0;}return results;};jQuery.fn.extend({position:function(){var left=0,top=0,results;if(this[0]){var offsetParent=this.offsetParent(),offset=this.offset(),parentOffset=/^body|html$/i.test(offsetParent[0].tagName)?{top:0,left:0}:offsetParent.offset();offset.top-=num(this,'marginTop');offset.left-=num(this,'marginLeft');parentOffset.top+=num(offsetParent,'borderTopWidth');parentOffset.left+=num(offsetParent,'borderLeftWidth');results={top:offset.top-parentOffset.top,left:offset.left-parentOffset.left};}return results;},offsetParent:function(){var offsetParent=this[0].offsetParent;while(offsetParent&&(!/^body|html$/i.test(offsetParent.tagName)&&jQuery.css(offsetParent,'position')=='static'))offsetParent=offsetParent.offsetParent;return jQuery(offsetParent);}});jQuery.each(['Left','Top'],function(i,name){var method='scroll'+name;jQuery.fn[method]=function(val){if(!this[0])return;return val!=undefined?this.each(function(){this==window||this==document?window.scrollTo(!i?val:jQuery(window).scrollLeft(),i?val:jQuery(window).scrollTop()):this[method]=val;}):this[0]==window||this[0]==document?self[i?'pageYOffset':'pageXOffset']||jQuery.boxModel&&document.documentElement[method]||document.body[method]:this[0][method];};});jQuery.each(["Height","Width"],function(i,name){var tl=i?"Left":"Top",br=i?"Right":"Bottom";jQuery.fn["inner"+name]=function(){return this[name.toLowerCase()]()+num(this,"padding"+tl)+num(this,"padding"+br);};jQuery.fn["outer"+name]=function(margin){return this["inner"+name]()+num(this,"border"+tl+"Width")+num(this,"border"+br+"Width")+(margin?num(this,"margin"+tl)+num(this,"margin"+br):0);};});})(); \ No newline at end of file diff --git a/sphinx/static/minus.png 
b/sphinx/static/minus.png deleted file mode 100644 index da1c5620..00000000 Binary files a/sphinx/static/minus.png and /dev/null differ diff --git a/sphinx/static/navigation.png b/sphinx/static/navigation.png deleted file mode 100644 index 1081dc14..00000000 Binary files a/sphinx/static/navigation.png and /dev/null differ diff --git a/sphinx/static/plus.png b/sphinx/static/plus.png deleted file mode 100644 index b3cb3742..00000000 Binary files a/sphinx/static/plus.png and /dev/null differ diff --git a/sphinx/static/rightsidebar.css b/sphinx/static/rightsidebar.css deleted file mode 100644 index bc604a89..00000000 --- a/sphinx/static/rightsidebar.css +++ /dev/null @@ -1,16 +0,0 @@ -/** - * Sphinx Doc Design -- Right Side Bar Overrides - */ - - -div.sphinxsidebar { - float: right; -} - -div.bodywrapper { - margin: 0 230px 0 0; -} - -div.inlinecomments { - right: 250px; -} diff --git a/sphinx/static/searchtools.js b/sphinx/static/searchtools.js deleted file mode 100644 index f9d9b6c3..00000000 --- a/sphinx/static/searchtools.js +++ /dev/null @@ -1,467 +0,0 @@ -/** - * helper function to return a node containing the - * search summary for a given text. keywords is a list - * of stemmed words, hlwords is the list of normal, unstemmed - * words. the first one is used to find the occurance, the - * latter for highlighting it. - */ - -jQuery.makeSearchSummary = function(text, keywords, hlwords) { - var textLower = text.toLowerCase(); - var start = 0; - $.each(keywords, function() { - var i = textLower.indexOf(this.toLowerCase()); - if (i > -1) - start = i; - }); - start = Math.max(start - 120, 0); - var excerpt = ((start > 0) ? '...' : '') + - $.trim(text.substr(start, 240)) + - ((start + 240 - text.length) ? '...' : ''); - var rv = $('
    ').text(excerpt); - $.each(hlwords, function() { - rv = rv.highlightText(this, 'highlight'); - }); - return rv; -} - -/** - * Porter Stemmer - */ -var PorterStemmer = function() { - - var step2list = { - ational: 'ate', - tional: 'tion', - enci: 'ence', - anci: 'ance', - izer: 'ize', - bli: 'ble', - alli: 'al', - entli: 'ent', - eli: 'e', - ousli: 'ous', - ization: 'ize', - ation: 'ate', - ator: 'ate', - alism: 'al', - iveness: 'ive', - fulness: 'ful', - ousness: 'ous', - aliti: 'al', - iviti: 'ive', - biliti: 'ble', - logi: 'log' - }; - - var step3list = { - icate: 'ic', - ative: '', - alize: 'al', - iciti: 'ic', - ical: 'ic', - ful: '', - ness: '' - }; - - var c = "[^aeiou]"; // consonant - var v = "[aeiouy]"; // vowel - var C = c + "[^aeiouy]*"; // consonant sequence - var V = v + "[aeiou]*"; // vowel sequence - - var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 - var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 - var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 - var s_v = "^(" + C + ")?" 
+ v; // vowel in stem - - this.stemWord = function (w) { - var stem; - var suffix; - var firstch; - var origword = w; - - if (w.length < 3) - return w; - - var re; - var re2; - var re3; - var re4; - - firstch = w.substr(0,1); - if (firstch == "y") - w = firstch.toUpperCase() + w.substr(1); - - // Step 1a - re = /^(.+?)(ss|i)es$/; - re2 = /^(.+?)([^s])s$/; - - if (re.test(w)) - w = w.replace(re,"$1$2"); - else if (re2.test(w)) - w = w.replace(re2,"$1$2"); - - // Step 1b - re = /^(.+?)eed$/; - re2 = /^(.+?)(ed|ing)$/; - if (re.test(w)) { - var fp = re.exec(w); - re = new RegExp(mgr0); - if (re.test(fp[1])) { - re = /.$/; - w = w.replace(re,""); - } - } - else if (re2.test(w)) { - var fp = re2.exec(w); - stem = fp[1]; - re2 = new RegExp(s_v); - if (re2.test(stem)) { - w = stem; - re2 = /(at|bl|iz)$/; - re3 = new RegExp("([^aeiouylsz])\\1$"); - re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); - if (re2.test(w)) - w = w + "e"; - else if (re3.test(w)) { - re = /.$/; - w = w.replace(re,""); - } - else if (re4.test(w)) - w = w + "e"; - } - } - - // Step 1c - re = /^(.+?)y$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(s_v); - if (re.test(stem)) - w = stem + "i"; - } - - // Step 2 - re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - suffix = fp[2]; - re = new RegExp(mgr0); - if (re.test(stem)) - w = stem + step2list[suffix]; - } - - // Step 3 - re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - suffix = fp[2]; - re = new RegExp(mgr0); - if (re.test(stem)) - w = stem + step3list[suffix]; - } - - // Step 4 - re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; - re2 = /^(.+?)(s|t)(ion)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(mgr1); - if 
(re.test(stem)) - w = stem; - } - else if (re2.test(w)) { - var fp = re2.exec(w); - stem = fp[1] + fp[2]; - re2 = new RegExp(mgr1); - if (re2.test(stem)) - w = stem; - } - - // Step 5 - re = /^(.+?)e$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(mgr1); - re2 = new RegExp(meq1); - re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); - if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) - w = stem; - } - re = /ll$/; - re2 = new RegExp(mgr1); - if (re.test(w) && re2.test(w)) { - re = /.$/; - w = w.replace(re,""); - } - - // and turn initial Y back to y - if (firstch == "y") - w = firstch.toLowerCase() + w.substr(1); - return w; - } -} - - -/** - * Search Module - */ -var Search = { - - _index : null, - _queued_query : null, - _pulse_status : -1, - - init : function() { - var params = $.getQueryParameters(); - if (params.q) { - var query = params.q[0]; - $('input[@name="q"]')[0].value = query; - this.performSearch(query); - } - }, - - /** - * Sets the index - */ - setIndex : function(index) { - var q; - this._index = index; - if ((q = this._queued_query) !== null) { - this._queued_query = null; - Search.query(q); - } - }, - - hasIndex : function() { - return this._index !== null; - }, - - deferQuery : function(query) { - this._queued_query = query; - }, - - stopPulse : function() { - this._pulse_status = 0; - }, - - startPulse : function() { - if (this._pulse_status >= 0) - return; - function pulse() { - Search._pulse_status = (Search._pulse_status + 1) % 4; - var dotString = ''; - for (var i = 0; i < Search._pulse_status; i++) - dotString += '.'; - Search.dots.text(dotString); - if (Search._pulse_status > -1) - window.setTimeout(pulse, 500); - }; - pulse(); - }, - - /** - * perform a search for something - */ - performSearch : function(query) { - // create the required interface elements - this.out = $('#search-results'); - this.title = $('

    ' + _('Searching') + '

    ').appendTo(this.out); - this.dots = $('').appendTo(this.title); - this.status = $('

    ').appendTo(this.out); - this.output = $(' -- cgit v1.2.1 From 5e096f48b5ea718f36546176d36fa8497b8393ad Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Fri, 27 Feb 2009 12:11:35 +0100 Subject: Add headlink/headbgcolors. --- doc/theming.rst | 4 +++- sphinx/themes/default/static/default.css_t | 8 ++++---- sphinx/themes/default/theme.conf | 4 +++- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/doc/theming.rst b/doc/theming.rst index 4696df5a..3e3a33c8 100644 --- a/doc/theming.rst +++ b/doc/theming.rst @@ -86,7 +86,9 @@ Sphinx comes with a selection of themes to choose from: - **bgcolor** (CSS color): Body background color. - **textcolor** (CSS color): Body text color. - **linkcolor** (CSS color): Body link color. - - **headcolor** (CSS color): Text color for headings. + - **headbgcolor** (CSS color): Background color for headings. + - **headtextcolor** (CSS color): Text color for headings. + - **headlinkcolor** (CSS color): Link color for headings. - **codebgcolor** (CSS color): Background color for code blocks. - **codetextcolor** (CSS color): Default text color for code blocks, if not set differently by the highlighting style. 
diff --git a/sphinx/themes/default/static/default.css_t b/sphinx/themes/default/static/default.css_t index 77307084..ab2aeb0c 100644 --- a/sphinx/themes/default/static/default.css_t +++ b/sphinx/themes/default/static/default.css_t @@ -154,9 +154,9 @@ div.body h4, div.body h5, div.body h6 { font-family: {{ theme_headfont }}; - background-color: #f2f2f2; + background-color: {{ theme_headbgcolor }}; font-weight: normal; - color: {{ theme_headcolor }}; + color: {{ theme_headtextcolor }}; border-bottom: 1px solid #ccc; margin: 20px -20px 10px -20px; padding: 3px 0 3px 10px; @@ -170,14 +170,14 @@ div.body h5 { font-size: 110%; } div.body h6 { font-size: 100%; } a.headerlink { - color: #c60f0f; + color: {{ theme_headlinkcolor }}; font-size: 0.8em; padding: 0 4px 0 4px; text-decoration: none; } a.headerlink:hover { - background-color: #c60f0f; + background-color: {{ theme_headlinkcolor }}; color: white; } diff --git a/sphinx/themes/default/theme.conf b/sphinx/themes/default/theme.conf index 9a24b978..812330f8 100644 --- a/sphinx/themes/default/theme.conf +++ b/sphinx/themes/default/theme.conf @@ -17,7 +17,9 @@ relbartextcolor = #ffffff relbarlinkcolor = #ffffff bgcolor = #ffffff textcolor = #000000 -headcolor = #20435c +headbgcolor = #f2f2f2 +headtextcolor = #20435c +headlinkcolor = #c60f0f linkcolor = #355f7c codebgcolor = #eeffcc codetextcolor = #333333 -- cgit v1.2.1 From 16ba2e5411025d8d8f8e29a1502f8620238965c3 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Fri, 27 Feb 2009 15:09:54 +0100 Subject: Add Quex. 
--- EXAMPLES | 1 + 1 file changed, 1 insertion(+) diff --git a/EXAMPLES b/EXAMPLES index a6535498..3ccb2936 100644 --- a/EXAMPLES +++ b/EXAMPLES @@ -48,6 +48,7 @@ included, please mail to `the Google group * Python: http://docs.python.org/ * python-apt: http://people.debian.org/~jak/python-apt-doc/ * PyUblas: http://documen.tician.de/pyublas/ +* Quex: http://quex.sourceforge.net/ * Reteisi: http://docs.argolinux.org/reteisi/ * Roundup: http://www.roundup-tracker.org/ * Satchmo: http://www.satchmoproject.com/docs/svn/ -- cgit v1.2.1 From 50efec8efc3674961909dafce3303e5a00828433 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Fri, 27 Feb 2009 15:22:45 +0100 Subject: Remove docs for removed automodule_skip_lines setting. --- doc/ext/autodoc.rst | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/doc/ext/autodoc.rst b/doc/ext/autodoc.rst index 00fafdb7..c54125de 100644 --- a/doc/ext/autodoc.rst +++ b/doc/ext/autodoc.rst @@ -182,19 +182,6 @@ directive. There are also new config values that you can set: -.. confval:: automodule_skip_lines - - This value (whose default is ``0``) can be used to skip an amount of lines in - every module docstring that is processed by an :dir:`automodule` directive. - This is provided because some projects like to put headings in the module - docstring, which would then interfere with your sectioning, or automatic - fields with version control tags, that you don't want to put in the generated - documentation. - - .. deprecated:: 0.4 - Use the more versatile docstring processing provided by - :event:`autodoc-process-docstring`. - .. 
confval:: autoclass_content This value selects what content will be inserted into the main body of an -- cgit v1.2.1 From 0aa7aea4a50c51062325bec02896006369435d8a Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Fri, 27 Feb 2009 15:48:41 +0100 Subject: Autodoc can now order members either alphabetically (like previously) or by member type; configurable either with the config value ``autodoc_member_order`` or a ``member-order`` option per directive. Also fix a bug that documented module-level functions as attributes. --- CHANGES | 5 +++++ doc/ext/autodoc.rst | 14 ++++++++++++++ sphinx/ext/autodoc.py | 41 +++++++++++++++++++++++++++++++++-------- 3 files changed, 52 insertions(+), 8 deletions(-) diff --git a/CHANGES b/CHANGES index 3f71d1ae..4ca63c28 100644 --- a/CHANGES +++ b/CHANGES @@ -148,6 +148,11 @@ New features added - Autodoc can document classes as functions now if explicitly marked with `autofunction`. + - Autodoc can now order members either alphabetically (like + previously) or by member type; configurable either with the + config value ``autodoc_member_order`` or a ``member-order`` + option per directive. + - The function ``Sphinx.add_directive()`` now also supports docutils 0.5-style directive classes. If they inherit from ``sphinx.util.compat.Directive``, they also work with diff --git a/doc/ext/autodoc.rst b/doc/ext/autodoc.rst index c54125de..17a2766b 100644 --- a/doc/ext/autodoc.rst +++ b/doc/ext/autodoc.rst @@ -135,6 +135,12 @@ directive. .. versionadded:: 0.5 + * :dir:`automodule` and :dir:`autoclass` also has an ``member-order`` option + that can be used to override the global value of + :confval:`autodoc_member_order` for one directive. + + .. versionadded:: 0.6 + .. note:: In an :dir:`automodule` directive with the ``members`` option set, only @@ -199,6 +205,14 @@ There are also new config values that you can set: .. versionadded:: 0.3 +.. 
confval:: autodoc_member_order + + This value selects if automatically documented members are sorted + alphabetical (value ``'alphabetical'``) or by member type (value + ``'groupwise'``). The default is alphabetical. + + .. versionadded:: 0.6 + Docstring preprocessing ----------------------- diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index d07ab465..10f2bb32 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -218,6 +218,8 @@ class Documenter(object): content_indent = u' ' #: priority if multiple documenters return True from can_document_member priority = 0 + #: order if autodoc_member_order is set to 'groupwise' + member_order = 0 option_spec = {'noindex': bool_option} @@ -548,6 +550,7 @@ class Documenter(object): members_check_module, members = self.get_object_members(want_all) # document non-skipped members + memberdocumenters = [] for (mname, member, isattr) in self.filter_members(members, want_all): classes = [cls for cls in AutoDirective._registry.itervalues() if cls.can_document_member(member, mname, isattr, self)] @@ -560,11 +563,19 @@ class Documenter(object): # of inner classes can be documented full_mname = self.modname + '::' + \ '.'.join(self.objpath + [mname]) - memberdocmtr = classes[-1](self.directive, full_mname, - self.indent) - memberdocmtr.generate(all_members=True, - real_modname=self.real_modname, - check_module=members_check_module) + memberdocumenters.append( + classes[-1](self.directive, full_mname, self.indent)) + + if (self.options.member_order or self.env.config.autodoc_member_order) \ + == 'groupwise': + # sort by group; relies on stable sort to keep items in the + # same group sorted alphabetically + memberdocumenters.sort(key=lambda d: d.member_order) + + for documenter in memberdocumenters: + documenter.generate(all_members=True, + real_modname=self.real_modname, + check_module=members_check_module) # reset current objects self.env.autodoc_current_module = None @@ -655,6 +666,7 @@ class 
ModuleDocumenter(Documenter): 'noindex': bool_option, 'inherited-members': bool_option, 'show-inheritance': bool_option, 'synopsis': identity, 'platform': identity, 'deprecated': bool_option, + 'member-order': identity, } @classmethod @@ -767,6 +779,7 @@ class FunctionDocumenter(ModuleLevelDocumenter): Specialized Documenter subclass for functions. """ objtype = 'function' + member_order = 30 @classmethod def can_document_member(cls, member, membername, isattr, parent): @@ -800,10 +813,11 @@ class ClassDocumenter(ModuleLevelDocumenter): Specialized Documenter subclass for classes. """ objtype = 'class' + member_order = 20 option_spec = { 'members': members_option, 'undoc-members': bool_option, 'noindex': bool_option, 'inherited-members': bool_option, - 'show-inheritance': bool_option, + 'show-inheritance': bool_option, 'member-order': identity, } @classmethod @@ -897,6 +911,7 @@ class ExceptionDocumenter(ClassDocumenter): Specialized ClassDocumenter subclass for exceptions. """ objtype = 'exception' + member_order = 10 # needs a higher priority than ClassDocumenter priority = 10 @@ -912,6 +927,7 @@ class DataDocumenter(ModuleLevelDocumenter): Specialized Documenter subclass for data items. """ objtype = 'data' + member_order = 40 @classmethod def can_document_member(cls, member, membername, isattr, parent): @@ -926,6 +942,7 @@ class MethodDocumenter(ClassLevelDocumenter): Specialized Documenter subclass for methods (normal, static and class). 
""" objtype = 'method' + member_order = 50 @classmethod def can_document_member(cls, member, membername, isattr, parent): @@ -939,10 +956,14 @@ class MethodDocumenter(ClassLevelDocumenter): (isinstance(self.object, MethodType) and self.object.im_self is not None): self.directivetype = 'classmethod' + # document class and static members before ordinary ones + self.member_order = self.member_order - 1 elif isinstance(self.object, FunctionType) or \ (isinstance(self.object, BuiltinFunctionType) and self.object.__self__ is not None): self.directivetype = 'staticmethod' + # document class and static members before ordinary ones + self.member_order = self.member_order - 1 else: self.directivetype = 'method' return ret @@ -966,11 +987,13 @@ class AttributeDocumenter(ClassLevelDocumenter): Specialized Documenter subclass for attributes. """ objtype = 'attribute' + member_order = 60 @classmethod def can_document_member(cls, member, membername, isattr, parent): - return isdescriptor(member) or \ - (not isinstance(parent, ModuleDocumenter) and isattr) + return (isdescriptor(member) and not + isinstance(member, (FunctionType, BuiltinFunctionType))) \ + or (not isinstance(parent, ModuleDocumenter) and isattr) def document_members(self, all_members=False): pass @@ -1033,6 +1056,7 @@ class AutoDirective(Directive): old_reporter = self.state.memo.reporter self.state.memo.reporter = AutodocReporter(self.result, self.state.memo.reporter) + if self.name == 'automodule': node = nodes.section() # necessary so that the child nodes get the right source/line set @@ -1068,6 +1092,7 @@ def setup(app): app.add_autodocumenter(AttributeDocumenter) app.add_config_value('autoclass_content', 'class', True) + app.add_config_value('autodoc_member_order', 'alphabetic', True) app.add_event('autodoc-process-docstring') app.add_event('autodoc-process-signature') app.add_event('autodoc-skip-member') -- cgit v1.2.1 From 59f7fac652529a2b66d1ff2e863a6556213849aa Mon Sep 17 00:00:00 2001 From: Georg Brandl 
Date: Fri, 27 Feb 2009 17:13:34 +0100 Subject: Add PyMOTW. --- EXAMPLES | 1 + 1 file changed, 1 insertion(+) diff --git a/EXAMPLES b/EXAMPLES index 3ccb2936..2a5dc6f3 100644 --- a/EXAMPLES +++ b/EXAMPLES @@ -42,6 +42,7 @@ included, please mail to `the Google group * Pyevolve: http://pyevolve.sourceforge.net/ * Pylo: http://documen.tician.de/pylo/ * Pylons: http://docs.pylonshq.com/ +* PyMOTW: http://www.doughellmann.com/PyMOTW/ * PyPubSub: http://pubsub.sourceforge.net/ * pyrticle: http://documen.tician.de/pyrticle/ * Pysparse: http://pysparse.sourceforge.net/ -- cgit v1.2.1 From 2ce68ce1e23ab18069d99b3b774c99d4a780af72 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 1 Mar 2009 16:40:01 +0100 Subject: Re-add dependency recording in autodoc which was lost during the refactoring. --- sphinx/ext/autodoc.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index 10f2bb32..43c4b26a 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -1051,6 +1051,11 @@ class AutoDirective(Directive): if not self.result: return self.warnings + # record all filenames as dependencies -- this will at least + # partially make automatic invalidation possible + for fn in self.filename_set: + self.env.note_dependency(fn) + # use a custom reporter that correctly assigns lines to source # filename/description and lineno old_reporter = self.state.memo.reporter -- cgit v1.2.1 From 4fdcb75be0417e2c9085a69454ff02af0784114e Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 3 Mar 2009 08:55:34 +0100 Subject: Add default parent attribute to Node classes. --- sphinx/pycode/nodes.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sphinx/pycode/nodes.py b/sphinx/pycode/nodes.py index efdf8f06..f8f57740 100644 --- a/sphinx/pycode/nodes.py +++ b/sphinx/pycode/nodes.py @@ -14,6 +14,7 @@ class BaseNode(object): """ Node superclass for both terminal and nonterminal nodes. 
""" + parent = None def _eq(self, other): raise NotImplementedError -- cgit v1.2.1 From 5736b6d92a75237b4aad021157dab6c125718aa6 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Wed, 4 Mar 2009 23:49:32 +0100 Subject: Fix grammar. --- doc/ext/math.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/ext/math.rst b/doc/ext/math.rst index e538edcd..a214b41e 100644 --- a/doc/ext/math.rst +++ b/doc/ext/math.rst @@ -108,8 +108,8 @@ There are various config values you can set to influence how the images are buil .. confval:: pngmath_latex The command name with which to invoke LaTeX. The default is ``'latex'``; you - may need to set this to a full path if ``latex`` not in the executable search - path. + may need to set this to a full path if ``latex`` is not in the executable + search path. Since this setting is not portable from system to system, it is normally not useful to set it in ``conf.py``; rather, giving it on the -- cgit v1.2.1 From e444166923eb4e5cc16d15f5ccfe84adf9d01ba4 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Wed, 4 Mar 2009 23:51:36 +0100 Subject: Add two comments. --- sphinx/directives/other.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sphinx/directives/other.py b/sphinx/directives/other.py index f1d67a3c..bc9a54df 100644 --- a/sphinx/directives/other.py +++ b/sphinx/directives/other.py @@ -89,7 +89,9 @@ class TocTree(Directive): % entry, line=self.lineno)) subnode = addnodes.toctree() subnode['parent'] = env.docname + # entries contains all entries (self references, external links etc.) subnode['entries'] = entries + # includefiles only entries that are documents subnode['includefiles'] = includefiles subnode['maxdepth'] = self.options.get('maxdepth', -1) subnode['glob'] = glob -- cgit v1.2.1 From 2c004ae5097979e06029a7ce0df6fad6301c3874 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Wed, 4 Mar 2009 23:52:56 +0100 Subject: New ``graphviz`` extension to embed graphviz graphs. 
--- AUTHORS | 1 + CHANGES | 2 + doc/ext/graphviz.rst | 77 +++++++++++++++++++++ doc/extensions.rst | 2 + sphinx/ext/graphviz.py | 182 +++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 264 insertions(+) create mode 100644 doc/ext/graphviz.rst create mode 100644 sphinx/ext/graphviz.py diff --git a/AUTHORS b/AUTHORS index 0d8afda5..d4bc729f 100644 --- a/AUTHORS +++ b/AUTHORS @@ -6,6 +6,7 @@ Substantial parts of the templates were written by Armin Ronacher Other contributors, listed alphabetically, are: * Daniel Bültmann -- todo extension +* Charles Duffy -- original graphviz extension * Josip Dzolonga -- coverage builder * Horst Gutmann -- internationalization support * Martin Hans -- autodoc improvements diff --git a/CHANGES b/CHANGES index 4ca63c28..44152b1d 100644 --- a/CHANGES +++ b/CHANGES @@ -137,6 +137,8 @@ New features added * Extensions and API: + - New ``graphviz`` extension to embed graphviz graphs. + - Autodoc now has a reusable Python API, which can be used to create custom types of objects to auto-document (e.g. Zope interfaces). See also ``Sphinx.add_autodocumenter()``. diff --git a/doc/ext/graphviz.rst b/doc/ext/graphviz.rst new file mode 100644 index 00000000..1d4ed807 --- /dev/null +++ b/doc/ext/graphviz.rst @@ -0,0 +1,77 @@ +.. highlight:: rest + +The Graphviz extension +====================== + +.. module:: sphinx.ext.graphviz + :synopsis: Support for Graphviz graphs. + +.. versionadded:: 0.6 + +This extension allows you to embed `Graphviz `_ graphs in +your documents. + +It adds these directives: + + +.. directive:: graphviz + + Directive to embed graphviz code. The input code for ``dot`` is given as the + content. For example:: + + .. graphviz:: + + digraph foo { + "bar" -> "baz"; + } + + In HTML output, the code will be rendered to a PNG image. In LaTeX output, + the code will be rendered to an embeddable PDF file. + + +.. directive:: graph + + Directive for embedding a single undirected graph. 
The name is given as a + directive argument, the contents of the graph are the directive content. + This is a convenience directive to generate ``graph { }``. + + For example:: + + .. graph:: foo + + "bar" -- "baz"; + + +.. directive:: digraph + + Directive for embedding a single directed graph. The name is given as a + directive argument, the contents of the graph are the directive content. + This is a convenience directive to generate ``digraph { }``. + + For example:: + + .. digraph:: foo + + "bar" -> "baz" -> "quux"; + + +There are also these new config values: + +.. confval:: graphviz_dot + + The command name with which to invoke ``dot``. The default is ``'dot'``; you + may need to set this to a full path if ``dot`` is not in the executable + search path. + + Since this setting is not portable from system to system, it is normally not + useful to set it in ``conf.py``; rather, giving it on the + :program:`sphinx-build` command line via the :option:`-D` option should be + preferable, like this:: + + sphinx-build -b html -D graphviz_dot=C:\graphviz\bin\dot.exe . _build/html + +.. confval:: graphviz_dot_args + + Additional command-line arguments to give to dot, as a list. The default is + an empty list. This is the right place to set global graph, node or edge + attributes via dot's ``-G``, ``-N`` and ``-E`` options. 
diff --git a/doc/extensions.rst b/doc/extensions.rst index 12c82da5..21ba0fd8 100644 --- a/doc/extensions.rst +++ b/doc/extensions.rst @@ -44,6 +44,8 @@ These extensions are built in and can be activated by respective entries in the ext/doctest ext/intersphinx ext/math + ext/graphviz + ext/inheritance ext/refcounting ext/ifconfig ext/coverage diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py new file mode 100644 index 00000000..084797b7 --- /dev/null +++ b/sphinx/ext/graphviz.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- +""" + sphinx.ext.graphviz + ~~~~~~~~~~~~~~~~~~~ + + Allow graphviz-formatted graphs to be included in Sphinx-generated + documents inline. + + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import os +import re +import sys +import posixpath +from os import path +from subprocess import Popen, PIPE +try: + from hashlib import sha1 as sha +except ImportError: + from sha import sha + +from docutils import nodes + +from sphinx.errors import SphinxError +from sphinx.util import ensuredir +from sphinx.util.compat import Directive + + +mapname_re = re.compile(r' and ) + self.body.append('%s\n' % + (fname, self.encode(code).strip(), imgcss)) + else: + # has a map: get the name of the map and connect the parts + mapname = mapname_re.match(imgmap[0]).group(1) + self.body.append('%s\n' % + (fname, self.encode(code).strip(), + mapname, imgcss)) + self.body.extend(imgmap) + self.body.append('

    \n') + raise nodes.SkipNode + + +def html_visit_graphviz(self, node): + render_dot_html(self, node, node['code'], node['options']) + + +def render_dot_latex(self, node, code, options, prefix='graphviz'): + try: + fname = render_dot(self, code, options, 'pdf', prefix) + except GraphvizError, exc: + self.builder.warn('dot code %r: ' % code + str(exc)) + raise nodes.SkipNode + + if fname is not None: + self.body.append('\\includegraphics{%s}' % fname) + raise nodes.SkipNode + + +def latex_visit_graphviz(self, node): + render_dot_latex(self, node, node['code'], node['options']) + +def setup(app): + app.add_node(graphviz, + html=(html_visit_graphviz, None), + latex=(latex_visit_graphviz, None)) + app.add_directive('graphviz', Graphviz) + app.add_directive('graph', GraphvizSimple) + app.add_directive('digraph', GraphvizSimple) + app.add_config_value('graphviz_dot', 'dot', 'html') + app.add_config_value('graphviz_dot_args', [], 'html') -- cgit v1.2.1 From 1f1c6f7d9ea151d07610b7cb9a8bd958c7bf7740 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Thu, 5 Mar 2009 00:14:29 +0100 Subject: New ``inheritance_diagram`` extension to embed... inheritance diagrams! 
--- AUTHORS | 1 + CHANGES | 3 + doc/ext/inheritance.rst | 46 +++++ sphinx/ext/inheritance_diagram.py | 367 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 417 insertions(+) create mode 100644 doc/ext/inheritance.rst create mode 100644 sphinx/ext/inheritance_diagram.py diff --git a/AUTHORS b/AUTHORS index d4bc729f..73ec5ed2 100644 --- a/AUTHORS +++ b/AUTHORS @@ -6,6 +6,7 @@ Substantial parts of the templates were written by Armin Ronacher Other contributors, listed alphabetically, are: * Daniel Bültmann -- todo extension +* Michael Droettboom -- inheritance_diagram extension * Charles Duffy -- original graphviz extension * Josip Dzolonga -- coverage builder * Horst Gutmann -- internationalization support diff --git a/CHANGES b/CHANGES index 44152b1d..3518cb03 100644 --- a/CHANGES +++ b/CHANGES @@ -139,6 +139,9 @@ New features added - New ``graphviz`` extension to embed graphviz graphs. + - New ``inheritance_diagram`` extension to embed... inheritance + diagrams! + - Autodoc now has a reusable Python API, which can be used to create custom types of objects to auto-document (e.g. Zope interfaces). See also ``Sphinx.add_autodocumenter()``. diff --git a/doc/ext/inheritance.rst b/doc/ext/inheritance.rst new file mode 100644 index 00000000..edec6c8e --- /dev/null +++ b/doc/ext/inheritance.rst @@ -0,0 +1,46 @@ +.. highlight:: rest + +The inheritance diagram extension +================================= + +.. module:: sphinx.ext.inheritance_diagram + :synopsis: Support for displaying inheritance diagrams via graphviz. + +.. versionadded:: 0.6 + +This extension allows you to include inheritance diagrams, rendered via the +:mod:`Graphviz extension `. + +It adds this directive: + +.. directive:: inheritance-diagram + + This directive has one or more arguments, each giving a module or class + name. Class names can be unqualified; in that case they are taken to exist + in the currently described module (see :dir:`module`). 
+ + For each given class, and each class in each given module, the base classes + are determined. Then, from all classes and their base classes, a graph is + generated which is then rendered via the graphviz extension to a directed + graph. + + This directive supports an option called ``parts`` that, if given, must be an + integer, advising the directive to remove that many parts of module names + from the displayed names. (For example, if all your class names start with + ``lib.``, you can give ``:parts: 1`` to remove that prefix from the displayed + node names.) + + +New config values are: + +.. confval:: inheritance_graph_attrs + + A dictionary of graphviz graph attributes for inheritance diagrams. + +.. confval:: inheritance_node_attrs + + A dictionary of graphviz node attributes for inheritance diagrams. + +.. confval:: inheritance_edge_attrs + + A dictionary of graphviz edge attributes for inheritance diagrams. diff --git a/sphinx/ext/inheritance_diagram.py b/sphinx/ext/inheritance_diagram.py new file mode 100644 index 00000000..8183359d --- /dev/null +++ b/sphinx/ext/inheritance_diagram.py @@ -0,0 +1,367 @@ +""" + sphinx.ext.inheritance_diagram + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Defines a docutils directive for inserting inheritance diagrams. + + Provide the directive with one or more classes or modules (separated + by whitespace). For modules, all of the classes in that module will + be used. + + Example:: + + Given the following classes: + + class A: pass + class B(A): pass + class C(A): pass + class D(B, C): pass + class E(B): pass + + .. inheritance-diagram: D E + + Produces a graph like the following: + + A + / \ + B C + / \ / + E D + + The graph is inserted as a PNG+image map into HTML and a PDF in + LaTeX. + + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import os +import re +import sys +import inspect +import subprocess +try: + from hashlib import md5 +except ImportError: + from md5 import md5 + +from docutils import nodes +from docutils.parsers.rst import directives + +from sphinx.roles import xfileref_role +from sphinx.ext.graphviz import render_dot_html, render_dot_latex +from sphinx.util.compat import Directive + + +class_sig_re = re.compile(r'''^([\w.]*\.)? # module names + (\w+) \s* $ # class/final module name + ''', re.VERBOSE) + + +class InheritanceException(Exception): + pass + + +class InheritanceGraph(object): + """ + Given a list of classes, determines the set of classes that they inherit + from all the way to the root "object", and then is able to generate a + graphviz dot graph from them. + """ + def __init__(self, class_names, currmodule, show_builtins=False): + """ + *class_names* is a list of child classes to show bases from. + + If *show_builtins* is True, then Python builtins will be shown + in the graph. + """ + self.class_names = class_names + self.classes = self._import_classes(class_names, currmodule) + self.all_classes = self._all_classes(self.classes) + if len(self.all_classes) == 0: + raise InheritanceException('No classes found for ' + 'inheritance diagram') + self.show_builtins = show_builtins + + def _import_class_or_module(self, name, currmodule): + """ + Import a class using its fully-qualified *name*. 
+ """ + try: + path, base = class_sig_re.match(name).groups() + except ValueError: + raise InheritanceException('Invalid class or module %r specified ' + 'for inheritance diagram' % name) + + fullname = (path or '') + base + path = (path and path.rstrip('.') or '') + + # two possibilities: either it is a module, then import it + try: + module = __import__(fullname) + todoc = sys.modules[fullname] + except ImportError: + # else it is a class, then import the module + if not path: + if currmodule: + # try the current module + path = currmodule + else: + raise InheritanceException( + 'Could not import class %r specified for ' + 'inheritance diagram' % base) + try: + module = __import__(path) + todoc = getattr(sys.modules[path], base) + except (ImportError, AttributeError): + raise InheritanceException( + 'Could not import class or module %r specified for ' + 'inheritance diagram' % (path + '.' + base)) + + # If a class, just return it + if inspect.isclass(todoc): + return [todoc] + elif inspect.ismodule(todoc): + classes = [] + for cls in todoc.__dict__.values(): + if inspect.isclass(cls) and cls.__module__ == todoc.__name__: + classes.append(cls) + return classes + raise InheritanceException('%r specified for inheritance diagram is ' + 'not a class or module' % name) + + def _import_classes(self, class_names, currmodule): + """ + Import a list of classes. + """ + classes = [] + for name in class_names: + classes.extend(self._import_class_or_module(name, currmodule)) + return classes + + def _all_classes(self, classes): + """ + Return a list of all classes that are ancestors of *classes*. + """ + all_classes = {} + + def recurse(cls): + all_classes[cls] = None + for c in cls.__bases__: + if c not in all_classes: + recurse(c) + + for cls in classes: + recurse(cls) + + return all_classes.keys() + + def class_name(self, cls, parts=0): + """ + Given a class object, return a fully-qualified name. 
This + works for things I've tested in matplotlib so far, but may not + be completely general. + """ + module = cls.__module__ + if module == '__builtin__': + fullname = cls.__name__ + else: + fullname = '%s.%s' % (module, cls.__name__) + if parts == 0: + return fullname + name_parts = fullname.split('.') + return '.'.join(name_parts[-parts:]) + + def get_all_class_names(self): + """ + Get all of the class names involved in the graph. + """ + return [self.class_name(x) for x in self.all_classes] + + # These are the default attrs for graphviz + default_graph_attrs = { + 'rankdir': 'LR', + 'size': '"8.0, 12.0"', + } + default_node_attrs = { + 'shape': 'box', + 'fontsize': 10, + 'height': 0.25, + 'fontname': 'Vera Sans, DejaVu Sans, Liberation Sans, ' + 'Arial, Helvetica, sans', + 'style': '"setlinewidth(0.5)"', + } + default_edge_attrs = { + 'arrowsize': 0.5, + 'style': '"setlinewidth(0.5)"', + } + + def _format_node_attrs(self, attrs): + return ','.join(['%s=%s' % x for x in attrs.items()]) + + def _format_graph_attrs(self, attrs): + return ''.join(['%s=%s;\n' % x for x in attrs.items()]) + + def generate_dot(self, name, parts=0, urls={}, env=None, + graph_attrs={}, node_attrs={}, edge_attrs={}): + """ + Generate a graphviz dot graph from the classes that + were passed in to __init__. + + *name* is the name of the graph. + + *urls* is a dictionary mapping class names to HTTP URLs. + + *graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing + key/value pairs to pass on as graphviz properties. 
+ """ + g_attrs = self.default_graph_attrs.copy() + n_attrs = self.default_node_attrs.copy() + e_attrs = self.default_edge_attrs.copy() + g_attrs.update(graph_attrs) + n_attrs.update(node_attrs) + e_attrs.update(edge_attrs) + if env: + g_attrs.update(env.config.inheritance_graph_attrs) + n_attrs.update(env.config.inheritance_node_attrs) + e_attrs.update(env.config.inheritance_edge_attrs) + + res = [] + res.append('digraph %s {\n' % name) + res.append(self._format_graph_attrs(g_attrs)) + + for cls in self.all_classes: + if not self.show_builtins and cls in __builtins__.values(): + continue + + name = self.class_name(cls, parts) + + # Write the node + this_node_attrs = n_attrs.copy() + url = urls.get(self.class_name(cls)) + if url is not None: + this_node_attrs['URL'] = '"%s"' % url + res.append(' "%s" [%s];\n' % + (name, self._format_node_attrs(this_node_attrs))) + + # Write the edges + for base in cls.__bases__: + if not self.show_builtins and base in __builtins__.values(): + continue + + base_name = self.class_name(base, parts) + res.append(' "%s" -> "%s" [%s];\n' % + (base_name, name, + self._format_node_attrs(e_attrs))) + res.append('}\n') + return ''.join(res) + + +class inheritance_diagram(nodes.General, nodes.Element): + """ + A docutils node to use as a placeholder for the inheritance diagram. + """ + pass + + +class InheritanceDiagram(Directive): + """ + Run when the inheritance_diagram directive is first encountered. 
+ """ + has_content = False + required_arguments = 1 + optional_arguments = 0 + final_argument_whitespace = True + option_spec = { + 'parts': directives.nonnegative_int, + } + + def run(self): + node = inheritance_diagram() + node.document = self.state.document + env = self.state.document.settings.env + class_names = self.arguments[0].split() + + # Create a graph starting with the list of classes + try: + graph = InheritanceGraph(class_names, env.currmodule) + except InheritanceException, err: + return [node.document.reporter.warning(err.args[0], + line=self.lineno)] + + # Create xref nodes for each target of the graph's image map and + # add them to the doc tree so that Sphinx can resolve the + # references to real URLs later. These nodes will eventually be + # removed from the doctree after we're done with them. + for name in graph.get_all_class_names(): + refnodes, x = xfileref_role( + 'class', ':class:`%s`' % name, name, 0, self.state) + node.extend(refnodes) + # Store the graph object so we can use it to generate the + # dot file later + node['graph'] = graph + # Store the original content for use as a hash + node['parts'] = self.options.get('parts', 0) + node['content'] = ' '.join(class_names) + return [node] + + +def get_graph_hash(node): + return md5(node['content'] + str(node['parts'])).hexdigest()[-10:] + + +def html_visit_inheritance_diagram(self, node): + """ + Output the graph for HTML. This will insert a PNG with clickable + image map. + """ + graph = node['graph'] + parts = node['parts'] + + graph_hash = get_graph_hash(node) + name = 'inheritance%s' % graph_hash + + # Create a mapping from fully-qualified class names to URLs. 
+ urls = {} + for child in node: + if child.get('refuri') is not None: + urls[child['reftitle']] = child.get('refuri') + elif child.get('refid') is not None: + urls[child['reftitle']] = '#' + child.get('refid') + + dotcode = graph.generate_dot(name, parts, urls, env=self.builder.env) + render_dot_html(self, node, dotcode, [], 'inheritance', 'inheritance') + raise nodes.SkipNode + + +def latex_visit_inheritance_diagram(self, node): + """ + Output the graph for LaTeX. This will insert a PDF. + """ + graph = node['graph'] + parts = node['parts'] + + graph_hash = get_graph_hash(node) + name = 'inheritance%s' % graph_hash + + dotcode = graph.generate_dot(name, parts, urls, env=self.builder.env, + graph_attrs={'size': '"6.0,6.0"'}) + render_dot_latex(self, node, dotcode, [], 'inheritance') + raise nodes.SkipNode + + +def skip(self, node): + raise nodes.SkipNode + + +def setup(app): + app.setup_extension('sphinx.ext.graphviz') + app.add_node( + inheritance_diagram, + latex=(latex_visit_inheritance_diagram, None), + html=(html_visit_inheritance_diagram, None), + text=(skip, None)) + app.add_directive('inheritance-diagram', InheritanceDiagram) + app.add_config_value('inheritance_graph_attrs', {}, False), + app.add_config_value('inheritance_node_attrs', {}, False), + app.add_config_value('inheritance_edge_attrs', {}, False), -- cgit v1.2.1 From 15daf416c4df699a8e414777fea346d9215afd8d Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Thu, 5 Mar 2009 00:15:15 +0100 Subject: Add Scapy. 
--- EXAMPLES | 1 + 1 file changed, 1 insertion(+) diff --git a/EXAMPLES b/EXAMPLES index 2a5dc6f3..2f7365d2 100644 --- a/EXAMPLES +++ b/EXAMPLES @@ -53,6 +53,7 @@ included, please mail to `the Google group * Reteisi: http://docs.argolinux.org/reteisi/ * Roundup: http://www.roundup-tracker.org/ * Satchmo: http://www.satchmoproject.com/docs/svn/ +* Scapy: http://www.secdev.org/projects/scapy/doc/ * Self: http://selflanguage.org/ * SimPy: http://simpy.sourceforge.net/ * Sphinx: http://sphinx.pocoo.org/ -- cgit v1.2.1 From aaf084a590dd2abcb6486a4a703e799011d09f08 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Thu, 5 Mar 2009 00:17:14 +0100 Subject: Make titles consistent. --- doc/ext/graphviz.rst | 4 ++-- doc/ext/inheritance.rst | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/ext/graphviz.rst b/doc/ext/graphviz.rst index 1d4ed807..d007bf25 100644 --- a/doc/ext/graphviz.rst +++ b/doc/ext/graphviz.rst @@ -1,7 +1,7 @@ .. highlight:: rest -The Graphviz extension -====================== +:mod:`sphinx.ext.graphviz` -- Add Graphviz graphs +================================================= .. module:: sphinx.ext.graphviz :synopsis: Support for Graphviz graphs. diff --git a/doc/ext/inheritance.rst b/doc/ext/inheritance.rst index edec6c8e..fe6d636d 100644 --- a/doc/ext/inheritance.rst +++ b/doc/ext/inheritance.rst @@ -1,7 +1,7 @@ .. highlight:: rest -The inheritance diagram extension -================================= +:mod:`sphinx.ext.inheritance_diagram` -- Include inheritance diagrams +===================================================================== .. module:: sphinx.ext.inheritance_diagram :synopsis: Support for displaying inheritance diagrams via graphviz. -- cgit v1.2.1 From 1ea50bf165f0456df9d2181c366aecca73e96118 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Thu, 5 Mar 2009 09:21:35 +0100 Subject: Use standard ``file:line: warning: message`` format for warning messages. 
--- sphinx/application.py | 11 ++++++----- sphinx/builders/__init__.py | 16 +++++++--------- sphinx/builders/html.py | 2 +- sphinx/builders/latex.py | 9 +++++---- sphinx/builders/linkcheck.py | 6 ++++-- sphinx/builders/text.py | 2 +- sphinx/environment.py | 24 ++++++++++++------------ sphinx/theming.py | 2 +- sphinx/writers/latex.py | 17 +++++++++++++---- tests/test_application.py | 2 +- tests/test_autodoc.py | 1 + tests/test_build.py | 20 ++++++++++---------- tests/test_env.py | 8 ++++---- 13 files changed, 66 insertions(+), 54 deletions(-) diff --git a/sphinx/application.py b/sphinx/application.py index 76c8f3c9..9e450240 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -134,16 +134,17 @@ class Sphinx(object): self.emit('build-finished', None) self.builder.cleanup() - def warn(self, message): + def warn(self, message, location=None, prefix='warning: '): + warntext = location and '%s: %s%s\n' % (location, prefix, message) or \ + '%s%s\n' % (prefix, message) if self.warningiserror: - raise SphinxWarning('WARNING: %s\n' % message) + raise SphinxWarning(warntext) self._warncount += 1 try: - self._warning.write('WARNING: %s\n' % message) + self._warning.write(warntext) except UnicodeEncodeError: encoding = getattr(self._warning, 'encoding', 'ascii') - self._warning.write(('WARNING: %s\n' % message).encode(encoding, - 'replace')) + self._warning.write(warntext.encode(encoding, 'replace')) def info(self, message='', nonl=False): try: diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index 097be078..7b8a7095 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -136,9 +136,9 @@ class Builder(object): if candidate: break else: - self.warn('%s:%s: no matching candidate for image URI %r' % - (node.source, getattr(node, 'lineno', ''), - node['uri'])) + self.warn( + 'no matching candidate for image URI %r' % node['uri'], + '%s:%s' % (node.source, getattr(node, 'line', ''))) continue node['uri'] = candidate else: @@ 
-249,7 +249,7 @@ class Builder(object): updated_docnames = set() # while reading, collect all warnings from docutils warnings = [] - self.env.set_warnfunc(warnings.append) + self.env.set_warnfunc(lambda *args: warnings.append(args)) self.info(bold('updating environment: '), nonl=1) iterator = self.env.update(self.config, self.srcdir, self.doctreedir, self.app) @@ -261,8 +261,7 @@ class Builder(object): # nothing further to do, the environment has already # done the reading for warning in warnings: - if warning.strip(): - self.warn(warning) + self.warn(*warning) self.env.set_warnfunc(self.warn) doccount = len(updated_docnames) @@ -327,14 +326,13 @@ class Builder(object): # write target files warnings = [] - self.env.set_warnfunc(warnings.append) + self.env.set_warnfunc(lambda *args: warnings.append(args)) for docname in self.status_iterator(sorted(docnames), 'writing output... ', darkgreen): doctree = self.env.get_and_resolve_doctree(docname, self) self.write_doc(docname, doctree) for warning in warnings: - if warning.strip(): - self.warn(warning) + self.warn(*warning) self.env.set_warnfunc(self.warn) def prepare_writing(self, docnames): diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index 6c2593ba..365cf5f9 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -657,7 +657,7 @@ class StandaloneHTMLBuilder(Builder): finally: f.close() except (IOError, OSError), err: - self.warn("Error writing file %s: %s" % (outfilename, err)) + self.warn("error writing file %s: %s" % (outfilename, err)) if self.copysource and ctx.get('sourcename'): # copy the source file for the "show source" link source_name = path.join(self.outdir, '_sources', diff --git a/sphinx/builders/latex.py b/sphinx/builders/latex.py index 9fe7b8a5..96d70e3e 100644 --- a/sphinx/builders/latex.py +++ b/sphinx/builders/latex.py @@ -60,8 +60,8 @@ class LaTeXBuilder(Builder): def init_document_data(self): preliminary_document_data = map(list, self.config.latex_documents) if not 
preliminary_document_data: - self.warn('No "latex_documents" config value found; no documents ' - 'will be written.') + self.warn('no "latex_documents" config value found; no documents ' + 'will be written') return # assign subdirs to titles self.titles = [] @@ -121,8 +121,9 @@ class LaTeXBuilder(Builder): includefile, self.env.get_doctree(includefile)) self.docnames.add(includefile) except Exception: - self.warn('%s: toctree contains ref to nonexisting ' - 'file %r' % (docname, includefile)) + self.warn('toctree contains ref to nonexisting ' + 'file %r' % includefile, + self.builder.env.doc2path(docname)) else: sof = addnodes.start_of_file(docname=includefile) sof.children = subtree.children diff --git a/sphinx/builders/linkcheck.py b/sphinx/builders/linkcheck.py index b0fa9ba6..f3962965 100644 --- a/sphinx/builders/linkcheck.py +++ b/sphinx/builders/linkcheck.py @@ -87,7 +87,8 @@ class CheckExternalLinksBuilder(Builder): self.write_entry('broken', docname, lineno, uri + ': ' + s) self.broken[uri] = (r, s) if self.app.quiet: - self.warn('%s:%s: broken link: %s' % (docname, lineno, uri)) + self.warn('broken link: %s' % uri, + '%s:%s' % (self.env.doc2path(docname), lineno)) else: self.info(' - ' + purple('redirected') + ' to ' + s) self.write_entry('redirected', docname, @@ -99,7 +100,8 @@ class CheckExternalLinksBuilder(Builder): self.warn(uri + ' - ' + red('malformed!')) self.write_entry('malformed', docname, lineno, uri) if self.app.quiet: - self.warn('%s:%s: malformed link: %s' % (docname, lineno, uri)) + self.warn('malformed link: %s' % uri, + '%s:%s' % (self.env.doc2path(docname), lineno)) self.app.statuscode = 1 if self.broken: diff --git a/sphinx/builders/text.py b/sphinx/builders/text.py index 93737285..8651778c 100644 --- a/sphinx/builders/text.py +++ b/sphinx/builders/text.py @@ -64,7 +64,7 @@ class TextBuilder(Builder): finally: f.close() except (IOError, OSError), err: - self.warn("Error writing file %s: %s" % (outfilename, err)) + self.warn("error 
writing file %s: %s" % (outfilename, err)) def finish(self): pass diff --git a/sphinx/environment.py b/sphinx/environment.py index c210c395..5840d2e2 100644 --- a/sphinx/environment.py +++ b/sphinx/environment.py @@ -71,12 +71,12 @@ default_substitutions = set([ dummy_reporter = Reporter('', 4, 4) -class RedirStream(object): - def __init__(self, writefunc): - self.writefunc = writefunc +class WarningStream(object): + def __init__(self, warnfunc): + self.warnfunc = warnfunc def write(self, text): if text.strip(): - self.writefunc(text) + self.warnfunc(text, None, '') class NoUri(Exception): @@ -323,15 +323,15 @@ class BuildEnvironment: def set_warnfunc(self, func): self._warnfunc = func - self.settings['warning_stream'] = RedirStream(func) + self.settings['warning_stream'] = WarningStream(func) def warn(self, docname, msg, lineno=None): if docname: if lineno is None: lineno = '' - self._warnfunc('%s:%s: %s' % (self.doc2path(docname), lineno, msg)) + self._warnfunc(msg, '%s:%s' % (self.doc2path(docname), lineno)) else: - self._warnfunc('GLOBAL:: ' + msg) + self._warnfunc(msg) def clear_doc(self, docname): """Remove all traces of a source file in the inventory.""" @@ -688,7 +688,7 @@ class BuildEnvironment: filepath = path.normpath(path.join(docdir, node['reftarget'])) self.dependencies.setdefault(docname, set()).add(filepath) if not os.access(path.join(self.srcdir, filepath), os.R_OK): - self.warn(docname, 'Download file not readable: %s' % filepath, + self.warn(docname, 'download file not readable: %s' % filepath, getattr(node, 'line', None)) continue uniquename = self.dlfiles.add_file(docname, filepath) @@ -707,7 +707,7 @@ class BuildEnvironment: node['candidates'] = candidates = {} imguri = node['uri'] if imguri.find('://') != -1: - self.warn(docname, 'Nonlocal image URI found: %s' % imguri, + self.warn(docname, 'nonlocal image URI found: %s' % imguri, node.line) candidates['?'] = imguri continue @@ -735,7 +735,7 @@ class BuildEnvironment: f.close() except 
(OSError, IOError): self.warn(docname, - 'Image file %s not readable' % filename) + 'image file %s not readable' % filename) if imgtype: candidates['image/' + imgtype] = new_imgpath else: @@ -745,7 +745,7 @@ class BuildEnvironment: for imgpath in candidates.itervalues(): self.dependencies.setdefault(docname, set()).add(imgpath) if not os.access(path.join(self.srcdir, imgpath), os.R_OK): - self.warn(docname, 'Image file not readable: %s' % imgpath, + self.warn(docname, 'image file not readable: %s' % imgpath, node.line) continue self.images.add_file(docname, imgpath) @@ -970,7 +970,7 @@ class BuildEnvironment: f.close() doctree.settings.env = self doctree.reporter = Reporter(self.doc2path(docname), 2, 4, - stream=RedirStream(self._warnfunc)) + stream=WarningStream(self._warnfunc)) return doctree diff --git a/sphinx/theming.py b/sphinx/theming.py index 97b05233..77c3137c 100644 --- a/sphinx/theming.py +++ b/sphinx/theming.py @@ -48,7 +48,7 @@ class Theme(object): tname = theme[:-4] tinfo = zfile except Exception: - builder.warn('File %r on theme path is not a valid ' + builder.warn('file %r on theme path is not a valid ' 'zipfile or contains no theme' % theme) continue else: diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 9954350f..9ec3b00d 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -368,7 +368,10 @@ class LaTeXTranslator(nodes.NodeVisitor): elif self.this_is_the_title: if len(node.children) != 1 and not isinstance(node.children[0], nodes.Text): - self.builder.warn('document title is not a single Text node') + self.builder.warn( + 'document title is not a single Text node', + '%s:%s' % (self.builder.env.doc2path(self.curfilestack[-1]), + node.line or '')) if not self.elements['title']: # text needs to be escaped since it is inserted into # the output literally @@ -394,8 +397,11 @@ class LaTeXTranslator(nodes.NodeVisitor): self.table.caption = self.encode(node.astext()) raise nodes.SkipNode else: - 
self.builder.warn('encountered title node not in section, topic, ' - 'table, admonition or sidebar') + self.builder.warn( + 'encountered title node not in section, topic, table, ' + 'admonition or sidebar', + '%s:%s' % (self.builder.env.doc2path(self.curfilestack[-1]), + node.line or '')) self.body.append('\\textbf{') self.context.append('}\n') self.in_title = 1 @@ -1002,7 +1008,10 @@ class LaTeXTranslator(nodes.NodeVisitor): self.body.append('\\grammartoken{') self.context.append('}') else: - self.builder.warn('unusable reference target found: %s' % uri) + self.builder.warn( + 'unusable reference target found: %s' % uri, + '%s:%s' % (self.builder.env.doc2path(self.curfilestack[-1]), + node.line or '')) self.context.append('') def depart_reference(self, node): self.body.append(self.context.pop()) diff --git a/tests/test_application.py b/tests/test_application.py index 6425e275..4fda0edb 100644 --- a/tests/test_application.py +++ b/tests/test_application.py @@ -53,7 +53,7 @@ def test_output(): old_count = app._warncount app.warn("Bad news!") - assert warnings.getvalue() == "WARNING: Bad news!\n" + assert warnings.getvalue() == "warning: Bad news!\n" assert app._warncount == old_count + 1 finally: app.cleanup() diff --git a/tests/test_autodoc.py b/tests/test_autodoc.py index 25d833a2..a9e030fe 100644 --- a/tests/test_autodoc.py +++ b/tests/test_autodoc.py @@ -36,6 +36,7 @@ def setup_module(): platform = '', deprecated = False, members = [], + member_order = 'alphabetic', ) directive = Struct( diff --git a/tests/test_build.py b/tests/test_build.py index f451b305..00a82b4d 100644 --- a/tests/test_build.py +++ b/tests/test_build.py @@ -38,24 +38,24 @@ html_warnfile = StringIO() latex_warnfile = StringIO() ENV_WARNINGS = """\ -WARNING: %(root)s/images.txt:9: Image file not readable: foo.png -WARNING: %(root)s/images.txt:23: Nonlocal image URI found: \ +%(root)s/images.txt:9: warning: image file not readable: foo.png +%(root)s/images.txt:23: warning: nonlocal image URI 
found: \ http://www.python.org/logo.png -WARNING: %(root)s/includes.txt:: (WARNING/2) Encoding 'utf-8' used for reading \ +%(root)s/includes.txt:: (WARNING/2) Encoding 'utf-8' used for reading \ included file u'wrongenc.inc' seems to be wrong, try giving an :encoding: option -WARNING: %(root)s/includes.txt:56: Download file not readable: nonexisting.png +%(root)s/includes.txt:56: warning: download file not readable: nonexisting.png """ HTML_WARNINGS = ENV_WARNINGS + """\ -WARNING: %(root)s/images.txt:: no matching candidate for image URI u'foo.*' -WARNING: %(root)s/markup.txt:: invalid index entry u'' -WARNING: %(root)s/markup.txt:: invalid pair index entry u'' -WARNING: %(root)s/markup.txt:: invalid pair index entry u'keyword; ' +%(root)s/images.txt:20: warning: no matching candidate for image URI u'foo.*' +%(root)s/markup.txt:: warning: invalid index entry u'' +%(root)s/markup.txt:: warning: invalid pair index entry u'' +%(root)s/markup.txt:: warning: invalid pair index entry u'keyword; ' """ LATEX_WARNINGS = ENV_WARNINGS + """\ -WARNING: None:: no matching candidate for image URI u'foo.*' -WARNING: invalid pair index entry u'' +None:None: warning: no matching candidate for image URI u'foo.*' +warning: invalid pair index entry u'' """ HTML_XPATH = { diff --git a/tests/test_env.py b/tests/test_env.py index 07e2cbe4..0b944c50 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -22,14 +22,14 @@ def setup_module(): global app, env app = TestApp(srcdir='(temp)') env = BuildEnvironment(app.srcdir, app.doctreedir, app.config) - env.set_warnfunc(warnings.append) + env.set_warnfunc(lambda *args: warnings.append(args)) def teardown_module(): app.cleanup() def warning_emitted(file, text): for warning in warnings: - if file+':' in warning and text in warning: + if len(warning) == 2 and file+':' in warning[1] and text in warning[0]: return True return False @@ -46,8 +46,8 @@ def test_first_update(): assert docnames == env.found_docs == set(env.all_docs) def 
test_images(): - assert warning_emitted('images.txt', 'Image file not readable: foo.png') - assert warning_emitted('images.txt', 'Nonlocal image URI found: ' + assert warning_emitted('images.txt', 'image file not readable: foo.png') + assert warning_emitted('images.txt', 'nonlocal image URI found: ' 'http://www.python.org/logo.png') tree = env.get_doctree('images') -- cgit v1.2.1 From 9051b533459712afdd770992bc6935d09c7c0a6d Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Thu, 5 Mar 2009 09:48:55 +0100 Subject: #114: Added an ``abbr`` role to markup abbreviations and acronyms. --- CHANGES | 3 +++ doc/concepts.rst | 2 ++ doc/markup/inline.rst | 10 ++++++++++ sphinx/addnodes.py | 6 +++++- sphinx/roles.py | 13 +++++++++++++ sphinx/writers/html.py | 8 ++++++++ sphinx/writers/latex.py | 13 +++++++++++++ sphinx/writers/text.py | 6 ++++++ tests/root/markup.txt | 1 + 9 files changed, 61 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 3518cb03..9823bb98 100644 --- a/CHANGES +++ b/CHANGES @@ -44,6 +44,9 @@ New features added - #10: Added HTML section numbers, enabled by giving a ``:numbered:`` flag to the ``toctree`` directive. + - #114: Added an ``abbr`` role to markup abbreviations and + acronyms. + - The ``literalinclude`` directive now supports several more options, to include only parts of a file. diff --git a/doc/concepts.rst b/doc/concepts.rst index e6d5fa02..d3c8cf7c 100644 --- a/doc/concepts.rst +++ b/doc/concepts.rst @@ -19,6 +19,8 @@ such a document name. Examples for document names are ``index``, ``library/zipfile``, or ``reference/datamodel/types``. Note that there is no leading slash. +This is a :abbr:`LIFO (last-in, first-out)` and a :abbr:`LIFO (last-in, first-out)`. 
+ The TOC tree ------------ diff --git a/doc/markup/inline.rst b/doc/markup/inline.rst index 97b20da7..69721b32 100644 --- a/doc/markup/inline.rst +++ b/doc/markup/inline.rst @@ -278,6 +278,16 @@ Other semantic markup The following roles don't do anything special except formatting the text in a different style: +.. role:: abbr + + An abbreviation. If the role content contains a parenthesized explanation, + it will be treated specially: it will be shown in a tool-tip in HTML, and + output only once in LaTeX. + + Example: ``:abbr:`LIFO (last-in, first-out)```. + + .. versionadded:: 0.6 + .. role:: command The name of an OS-level command, such as ``rm``. diff --git a/sphinx/addnodes.py b/sphinx/addnodes.py index f0b0b063..63907a0e 100644 --- a/sphinx/addnodes.py +++ b/sphinx/addnodes.py @@ -84,6 +84,9 @@ class highlightlang(nodes.Element): pass # like emphasis, but doesn't apply further text processors, e.g. smartypants class literal_emphasis(nodes.emphasis): pass +# for abbreviations (with explanations) +class abbreviation(nodes.Inline, nodes.TextElement): pass + # glossary class glossary(nodes.Element): pass @@ -109,4 +112,5 @@ nodes._add_node_class_names("""index desc desc_content desc_signature desc_parameter desc_optional download_reference hlist hlistcol centered versionmodified seealso productionlist production toctree pending_xref compact_paragraph highlightlang literal_emphasis - glossary acks module start_of_file tabular_col_spec meta""".split()) + abbreviation glossary acks module start_of_file tabular_col_spec + meta""".split()) diff --git a/sphinx/roles.py b/sphinx/roles.py index 9cc7fcd2..550deb3e 100644 --- a/sphinx/roles.py +++ b/sphinx/roles.py @@ -226,6 +226,18 @@ def emph_literal_role(typ, rawtext, text, lineno, inliner, return [retnode], [] +_abbr_re = re.compile('\((.*)\)$') + +def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): + text = utils.unescape(text) + m = _abbr_re.search(text) + if m is None: + return 
[addnodes.abbreviation(text, text)], [] + abbr = text[:m.start()].strip() + expl = m.group(1) + return [addnodes.abbreviation(abbr, abbr, explanation=expl)], [] + + specific_docroles = { 'data': xfileref_role, 'exc': xfileref_role, @@ -254,6 +266,7 @@ specific_docroles = { 'menuselection': menusel_role, 'file': emph_literal_role, 'samp': emph_literal_role, + 'abbr': abbr_role, } for rolename, func in specific_docroles.iteritems(): diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py index e2f754a8..f600e8bd 100644 --- a/sphinx/writers/html.py +++ b/sphinx/writers/html.py @@ -453,6 +453,14 @@ class HTMLTranslator(BaseTranslator): def depart_literal_emphasis(self, node): return self.depart_emphasis(node) + def visit_abbreviation(self, node): + attrs = {} + if node.hasattr('explanation'): + attrs['title'] = node['explanation'] + self.body.append(self.starttag(node, 'abbr', **attrs)) + def depart_abbreviation(self, node): + self.body.append('') + def depart_title(self, node): close_tag = self.context[-1] if self.add_permalinks and self.builder.add_permalinks and \ diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py index 9ec3b00d..fddc0a79 100644 --- a/sphinx/writers/latex.py +++ b/sphinx/writers/latex.py @@ -216,6 +216,7 @@ class LaTeXTranslator(nodes.NodeVisitor): self.written_ids = set() self.footnotestack = [] self.curfilestack = [] + self.handled_abbrs = set() if self.elements['docclass'] == 'manual': if builder.config.latex_use_parts: self.top_sectionlevel = 0 @@ -1043,6 +1044,18 @@ class LaTeXTranslator(nodes.NodeVisitor): def depart_strong(self, node): self.body.append('}') + def visit_abbreviation(self, node): + abbr = node.astext() + self.body.append(r'\textsc{') + # spell out the explanation once + if node.hasattr('explanation') and abbr not in self.handled_abbrs: + self.context.append('} (%s)' % self.encode(node['explanation'])) + self.handled_abbrs.add(abbr) + else: + self.context.append('}') + def depart_abbreviation(self, node): + 
self.body.append(self.context.pop()) + def visit_title_reference(self, node): self.body.append(r'\emph{') def depart_title_reference(self, node): diff --git a/sphinx/writers/text.py b/sphinx/writers/text.py index 787e7020..b3b565c5 100644 --- a/sphinx/writers/text.py +++ b/sphinx/writers/text.py @@ -645,6 +645,12 @@ class TextTranslator(nodes.NodeVisitor): def depart_strong(self, node): self.add_text('**') + def visit_abbreviation(self, node): + self.add_text('') + def depart_abbreviation(self, node): + if node.hasattr('explanation'): + self.add_text(' (%s)' % node['explanation']) + def visit_title_reference(self, node): self.add_text('*') def depart_title_reference(self, node): diff --git a/tests/root/markup.txt b/tests/root/markup.txt index d799c80d..52d407e2 100644 --- a/tests/root/markup.txt +++ b/tests/root/markup.txt @@ -152,6 +152,7 @@ Option list: try2_stmt: "try" ":" `suite` : "finally" ":" `suite` +Test :abbr:`abbr (abbreviation)` and another :abbr:`abbr (abbreviation)`. Index markup ------------ -- cgit v1.2.1 From 87b02776e25b4baaf28d3dda502b63ab3f1b9902 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Thu, 5 Mar 2009 10:05:27 +0100 Subject: Remove testing text. --- doc/concepts.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/doc/concepts.rst b/doc/concepts.rst index d3c8cf7c..e6d5fa02 100644 --- a/doc/concepts.rst +++ b/doc/concepts.rst @@ -19,8 +19,6 @@ such a document name. Examples for document names are ``index``, ``library/zipfile``, or ``reference/datamodel/types``. Note that there is no leading slash. -This is a :abbr:`LIFO (last-in, first-out)` and a :abbr:`LIFO (last-in, first-out)`. - The TOC tree ------------ -- cgit v1.2.1 From 00351527a48409b70e1f80250151569a91f772b6 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Thu, 5 Mar 2009 10:06:18 +0100 Subject: Make WARNING the default warning prefix. 
--- sphinx/application.py | 2 +- tests/test_application.py | 2 +- tests/test_build.py | 18 +++++++++--------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/sphinx/application.py b/sphinx/application.py index 9e450240..9c467066 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -134,7 +134,7 @@ class Sphinx(object): self.emit('build-finished', None) self.builder.cleanup() - def warn(self, message, location=None, prefix='warning: '): + def warn(self, message, location=None, prefix='WARNING: '): warntext = location and '%s: %s%s\n' % (location, prefix, message) or \ '%s%s\n' % (prefix, message) if self.warningiserror: diff --git a/tests/test_application.py b/tests/test_application.py index 4fda0edb..6425e275 100644 --- a/tests/test_application.py +++ b/tests/test_application.py @@ -53,7 +53,7 @@ def test_output(): old_count = app._warncount app.warn("Bad news!") - assert warnings.getvalue() == "warning: Bad news!\n" + assert warnings.getvalue() == "WARNING: Bad news!\n" assert app._warncount == old_count + 1 finally: app.cleanup() diff --git a/tests/test_build.py b/tests/test_build.py index 00a82b4d..f509a9f4 100644 --- a/tests/test_build.py +++ b/tests/test_build.py @@ -38,24 +38,24 @@ html_warnfile = StringIO() latex_warnfile = StringIO() ENV_WARNINGS = """\ -%(root)s/images.txt:9: warning: image file not readable: foo.png -%(root)s/images.txt:23: warning: nonlocal image URI found: \ +%(root)s/images.txt:9: WARNING: image file not readable: foo.png +%(root)s/images.txt:23: WARNING: nonlocal image URI found: \ http://www.python.org/logo.png %(root)s/includes.txt:: (WARNING/2) Encoding 'utf-8' used for reading \ included file u'wrongenc.inc' seems to be wrong, try giving an :encoding: option -%(root)s/includes.txt:56: warning: download file not readable: nonexisting.png +%(root)s/includes.txt:56: WARNING: download file not readable: nonexisting.png """ HTML_WARNINGS = ENV_WARNINGS + """\ -%(root)s/images.txt:20: warning: no matching 
candidate for image URI u'foo.*' -%(root)s/markup.txt:: warning: invalid index entry u'' -%(root)s/markup.txt:: warning: invalid pair index entry u'' -%(root)s/markup.txt:: warning: invalid pair index entry u'keyword; ' +%(root)s/images.txt:20: WARNING: no matching candidate for image URI u'foo.*' +%(root)s/markup.txt:: WARNING: invalid index entry u'' +%(root)s/markup.txt:: WARNING: invalid pair index entry u'' +%(root)s/markup.txt:: WARNING: invalid pair index entry u'keyword; ' """ LATEX_WARNINGS = ENV_WARNINGS + """\ -None:None: warning: no matching candidate for image URI u'foo.*' -warning: invalid pair index entry u'' +None:None: WARNING: no matching candidate for image URI u'foo.*' +WARNING: invalid pair index entry u'' """ HTML_XPATH = { -- cgit v1.2.1 From 1089e377ecc146b346d86564d9ca34d138dc846b Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Thu, 5 Mar 2009 20:25:39 +0100 Subject: Sanitize the Environment.update() method API. --- sphinx/builders/__init__.py | 26 ++++++++++++++++------- sphinx/environment.py | 51 +++++++++++++++++++++++++-------------------- 2 files changed, 46 insertions(+), 31 deletions(-) diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index 7b8a7095..4fafa87d 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -109,7 +109,7 @@ class Builder(object): """ raise NotImplementedError - def status_iterator(self, iterable, summary, colorfunc=darkgreen): + def status_iterator(self, iterable, length, summary, colorfunc=darkgreen): l = -1 for item in iterable: if l == -1: @@ -120,6 +120,17 @@ class Builder(object): if l == 0: self.info() + ## new version with progress info + #def status_iterator(self, iterable, length, summary, colorfunc=darkgreen): + # l = 0 + # for item in iterable: + # if l == 0: + # self.info(bold(summary)) + # l += 1 + # self.info(' [%3d%%] %s\n' % (100*l/length, colorfunc(item)), + # nonl=1) + # yield item + supported_image_types = [] def post_process_images(self, 
doctree): @@ -251,12 +262,11 @@ class Builder(object): warnings = [] self.env.set_warnfunc(lambda *args: warnings.append(args)) self.info(bold('updating environment: '), nonl=1) - iterator = self.env.update(self.config, self.srcdir, - self.doctreedir, self.app) - # the first item in the iterator is a summary message - self.info(iterator.next()) - for docname in self.status_iterator(iterator, 'reading sources... ', - purple): + msg, length, iterator = self.env.update(self.config, self.srcdir, + self.doctreedir, self.app) + self.info(msg) + for docname in self.status_iterator(iterator, length, + 'reading sources... ', purple): updated_docnames.add(docname) # nothing further to do, the environment has already # done the reading @@ -327,7 +337,7 @@ class Builder(object): # write target files warnings = [] self.env.set_warnfunc(lambda *args: warnings.append(args)) - for docname in self.status_iterator(sorted(docnames), + for docname in self.status_iterator(sorted(docnames), len(docnames), 'writing output... ', darkgreen): doctree = self.env.get_and_resolve_doctree(docname, self) self.write_doc(docname, doctree) diff --git a/sphinx/environment.py b/sphinx/environment.py index 5840d2e2..0884ecca 100644 --- a/sphinx/environment.py +++ b/sphinx/environment.py @@ -453,10 +453,13 @@ class BuildEnvironment: return added, changed, removed def update(self, config, srcdir, doctreedir, app=None): - """(Re-)read all files new or changed since last update. - Yields a summary and then docnames as it processes them. - Store all environment docnames in the canonical format - (ie using SEP as a separator in place of os.path.sep).""" + """ + (Re-)read all files new or changed since last update. Returns a + summary, the total count of documents to reread and an iterator that + yields docnames as it processes them. Store all environment docnames in + the canonical format (ie using SEP as a separator in place of + os.path.sep). 
+ """ config_changed = False if self.config is None: msg = '[new config] ' @@ -482,6 +485,7 @@ class BuildEnvironment: self.srcdir = srcdir self.doctreedir = doctreedir self.find_files(config) + self.config = config added, changed, removed = self.get_outdated_files(config_changed) @@ -492,30 +496,31 @@ class BuildEnvironment: msg += '%s added, %s changed, %s removed' % (len(added), len(changed), len(removed)) - yield msg - self.config = config - self.app = app + def update_generator(): + self.app = app - # clear all files no longer present - for docname in removed: - if app: - app.emit('env-purge-doc', self, docname) - self.clear_doc(docname) + # clear all files no longer present + for docname in removed: + if app: + app.emit('env-purge-doc', self, docname) + self.clear_doc(docname) - # read all new and changed files - to_read = added | changed - for docname in sorted(to_read): - yield docname - self.read_doc(docname, app=app) + # read all new and changed files + to_read = added | changed + for docname in sorted(to_read): + yield docname + self.read_doc(docname, app=app) - if config.master_doc not in self.all_docs: - self.warn(None, 'master file %s not found' % - self.doc2path(config.master_doc)) + if config.master_doc not in self.all_docs: + self.warn(None, 'master file %s not found' % + self.doc2path(config.master_doc)) - self.app = None - if app: - app.emit('env-updated', self) + self.app = None + if app: + app.emit('env-updated', self) + + return msg, len(added | changed), update_generator() def check_dependents(self, already): to_rewrite = self.assign_section_numbers() -- cgit v1.2.1 From 8dd54a55c9a1f1cb1cdde5d0b0ab8e1787c7c087 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sat, 7 Mar 2009 22:54:36 +0100 Subject: Update tutorial for directive classes. 
--- doc/ext/tutorial.rst | 71 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 41 insertions(+), 30 deletions(-) diff --git a/doc/ext/tutorial.rst b/doc/ext/tutorial.rst index 7d93817c..5f24c0fc 100644 --- a/doc/ext/tutorial.rst +++ b/doc/ext/tutorial.rst @@ -109,8 +109,8 @@ new Python module called :file:`todo.py` and add the setup function:: latex=(visit_todo_node, depart_todo_node), text=(visit_todo_node, depart_todo_node)) - app.add_directive('todo', todo_directive, 1, (0, 0, 1)) - app.add_directive('todolist', todolist_directive, 0, (0, 0, 0)) + app.add_directive('todo', TodoDirective) + app.add_directive('todolist', TodolistDirective) app.connect('doctree-resolved', process_todo_nodes) app.connect('env-purge-doc', purge_todos) @@ -132,9 +132,7 @@ the individual calls do is the following: We need to create the two node classes ``todo`` and ``todolist`` later. -* :meth:`~Sphinx.add_directive` adds a new *directive*, given by name, handler - function and two arguments that specify if the directive has content and how - many arguments it accepts. +* :meth:`~Sphinx.add_directive` adds a new *directive*, given by name and class. The handler functions are created later. @@ -168,17 +166,25 @@ docutils classes defined in :mod:`docutils.nodes`. ``todo`` inherits from is just a "general" node. -The Directive Handlers ----------------------- +The Directive Classes +--------------------- -A directive handler is a function with a host of arguments, covered in detail in -the docutils documentation. It must return a list of nodes. +A directive class is a class deriving usually from +``docutils.parsers.rst.Directive``. Since the class-based directive interface +doesn't exist yet in Docutils 0.4, Sphinx has another base class called +``sphinx.util.compat.Directive`` that you can derive your directive from, and it +will work with both Docutils 0.4 and 0.5 upwards. 
The directive interface is +covered in detail in the docutils documentation; the important thing is that the +class has a method ``run`` that returns a list of nodes. The ``todolist`` directive is quite simple:: - def todolist_directive(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - return [todolist('')] + from sphinx.util.compat import Directive + + class TodolistDirective(Directive): + + def run(self): + return [todolist('')] An instance of our ``todolist`` node class is created and returned. The todolist directive has neither content nor arguments that need to be handled. @@ -187,30 +193,35 @@ The ``todo`` directive function looks like this:: from sphinx.util.compat import make_admonition - def todo_directive(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - env = state.document.settings.env + class TodoDirective(Directive): - targetid = "todo-%s" % env.index_num - env.index_num += 1 - targetnode = nodes.target('', '', ids=[targetid]) + # this enables content in the directive + has_content = True - ad = make_admonition(todo, name, [_('Todo')], options, content, lineno, - content_offset, block_text, state, state_machine) + def run(self): + env = self.state.document.settings.env - if not hasattr(env, 'todo_all_todos'): - env.todo_all_todos = [] - env.todo_all_todos.append({ - 'docname': env.docname, - 'lineno': lineno, - 'todo': ad[0].deepcopy(), - 'target': targetnode, - }) + targetid = "todo-%s" % env.index_num + env.index_num += 1 + targetnode = nodes.target('', '', ids=[targetid]) + + ad = make_admonition(todo, self.name, [_('Todo')], self.options, + self.content, self.lineno, self.content_offset, + self.block_text, self.state, self.state_machine) + + if not hasattr(env, 'todo_all_todos'): + env.todo_all_todos = [] + env.todo_all_todos.append({ + 'docname': env.docname, + 'lineno': self.lineno, + 'todo': ad[0].deepcopy(), + 'target': targetnode, + }) - return 
[targetnode] + ad + return [targetnode] + ad Several important things are covered here. First, as you can see, you can refer -to the build environment instance using ``state.document.settings.env``. +to the build environment instance using ``self.state.document.settings.env``. Then, to act as a link target (from the todolist), the todo directive needs to return a target node in addition to the todo node. The target ID (in HTML, this -- cgit v1.2.1 From b03fa8ea963a9b538c1abbde01414964bb2cdbcc Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 8 Mar 2009 10:21:07 +0100 Subject: Use a new progress indicator with percents. --- sphinx/builders/__init__.py | 46 +++++++++++++++++++++++++-------------------- sphinx/util/console.py | 15 ++++++++------- 2 files changed, 34 insertions(+), 27 deletions(-) diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index 4fafa87d..41f63de4 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -18,7 +18,7 @@ from docutils import nodes from sphinx import package_dir, locale from sphinx.util import SEP, relative_uri from sphinx.environment import BuildEnvironment -from sphinx.util.console import bold, purple, darkgreen +from sphinx.util.console import bold, purple, darkgreen, term_width_line # side effect: registers roles and directives from sphinx import roles @@ -109,27 +109,33 @@ class Builder(object): """ raise NotImplementedError - def status_iterator(self, iterable, length, summary, colorfunc=darkgreen): - l = -1 + def old_status_iterator(self, iterable, summary, colorfunc=darkgreen): + l = 0 for item in iterable: - if l == -1: + if l == 0: self.info(bold(summary), nonl=1) - l = 0 + l = 1 self.info(colorfunc(item) + ' ', nonl=1) yield item - if l == 0: + if l == 1: self.info() - ## new version with progress info - #def status_iterator(self, iterable, length, summary, colorfunc=darkgreen): - # l = 0 - # for item in iterable: - # if l == 0: - # self.info(bold(summary)) - # l += 1 - # 
self.info(' [%3d%%] %s\n' % (100*l/length, colorfunc(item)), - # nonl=1) - # yield item + # new version with progress info + def status_iterator(self, iterable, summary, colorfunc=darkgreen, length=0): + if length == 0: + for item in self.old_status_iterator(iterable, summary, colorfunc): + yield item + return + l = 0 + summary = bold(summary) + for item in iterable: + l += 1 + self.info(term_width_line('%s[%3d%%] %s' % + (summary, 100*l/length, + colorfunc(item))), nonl=1) + yield item + if l > 0: + self.info() supported_image_types = [] @@ -265,8 +271,8 @@ class Builder(object): msg, length, iterator = self.env.update(self.config, self.srcdir, self.doctreedir, self.app) self.info(msg) - for docname in self.status_iterator(iterator, length, - 'reading sources... ', purple): + for docname in self.status_iterator(iterator, 'reading sources... ', + purple, length): updated_docnames.add(docname) # nothing further to do, the environment has already # done the reading @@ -337,8 +343,8 @@ class Builder(object): # write target files warnings = [] self.env.set_warnfunc(lambda *args: warnings.append(args)) - for docname in self.status_iterator(sorted(docnames), len(docnames), - 'writing output... ', darkgreen): + for docname in self.status_iterator( + sorted(docnames), 'writing output... 
', darkgreen, len(docnames)): doctree = self.env.get_and_resolve_doctree(docname, self) self.write_doc(docname, doctree) for warning in warnings: diff --git a/sphinx/util/console.py b/sphinx/util/console.py index 724dee10..083fc6f4 100644 --- a/sphinx/util/console.py +++ b/sphinx/util/console.py @@ -16,25 +16,26 @@ codes = {} def get_terminal_width(): """Borrowed from the py lib.""" try: - import os, termios, fcntl, struct - call = fcntl.ioctl(0, termios.TIOCGWINSZ, "\000"*8) - height, width = struct.unpack("hhhh", call)[:2] + import termios, fcntl, struct + call = fcntl.ioctl(0, termios.TIOCGWINSZ, + struct.pack('hhhh', 0, 0, 0, 0)) + height, width = struct.unpack('hhhh', call)[:2] terminal_width = width except (SystemExit, KeyboardInterrupt): raise except: # FALLBACK - terminal_width = int(os.environ.get('COLUMNS', 80))-1 + terminal_width = int(os.environ.get('COLUMNS', 80)) - 1 return terminal_width _tw = get_terminal_width() -def print_and_backspace(text, func): +def term_width_line(text): if not codes: # if no coloring, don't output fancy backspaces - func(text) + return text + '\n' else: - func(text.ljust(_tw) + _tw * "\b") + return text.ljust(_tw) + '\r' def color_terminal(): if 'COLORTERM' in os.environ: -- cgit v1.2.1 From a80c9aa2d4dad5dfd9dff44dd729795ca1aa94ab Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 9 Mar 2009 09:42:05 +0100 Subject: Mark up. --- README | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README b/README index 31eb6842..bb2dea9d 100644 --- a/README +++ b/README @@ -1,3 +1,5 @@ +.. -*- restructuredtext -*- + ================= README for Sphinx ================= -- cgit v1.2.1 From 22fb71059ba1fd72c6eee0a9a2038c2bc34929bd Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 9 Mar 2009 09:47:10 +0100 Subject: Fix node class name. 
--- doc/ext/tutorial.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/ext/tutorial.rst b/doc/ext/tutorial.rst index 5f24c0fc..c44748d2 100644 --- a/doc/ext/tutorial.rst +++ b/doc/ext/tutorial.rst @@ -289,7 +289,7 @@ emitted at the end of phase 3 and allows custom resolving to be done:: def process_todo_nodes(app, doctree, fromdocname): if not app.config.todo_include_todos: - for node in doctree.traverse(todo_node): + for node in doctree.traverse(todo): node.parent.remove(node) # Replace all todolist nodes with a list of the collected todos. -- cgit v1.2.1 From 460f36a9868d789fbefbe3f40e8c82add6c2da95 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 9 Mar 2009 15:04:32 +0100 Subject: Encode graphviz input to utf-8. --- sphinx/ext/graphviz.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py index 084797b7..d51e2ed5 100644 --- a/sphinx/ext/graphviz.py +++ b/sphinx/ext/graphviz.py @@ -115,6 +115,9 @@ def render_dot(self, code, options, format, prefix='graphviz'): self.builder.config.graphviz_dot) self.builder._graphviz_warned_dot = True return None + # graphviz expects UTF-8 by default + if isinstance(code, unicode): + code = code.encode('utf-8') stdout, stderr = p.communicate(code) if p.returncode != 0: raise GraphvizError('dot exited with error:\n[stderr]\n%s\n' -- cgit v1.2.1 From 68a77dd93ffb50c4712bbd8aeb85977b67f63859 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 9 Mar 2009 18:51:55 +0100 Subject: Add PyLit. 
--- EXAMPLES | 1 + 1 file changed, 1 insertion(+) diff --git a/EXAMPLES b/EXAMPLES index 2f7365d2..9eb0b0ce 100644 --- a/EXAMPLES +++ b/EXAMPLES @@ -40,6 +40,7 @@ included, please mail to `the Google group * PyCuda: http://documen.tician.de/pycuda/ * PyEphem: http://rhodesmill.org/pyephem/ * Pyevolve: http://pyevolve.sourceforge.net/ +* PyLit: http://pylit.berlios.de/ * Pylo: http://documen.tician.de/pylo/ * Pylons: http://docs.pylonshq.com/ * PyMOTW: http://www.doughellmann.com/PyMOTW/ -- cgit v1.2.1 From 45deb642412529c9f00ae41ad39fd5a9c6a9c762 Mon Sep 17 00:00:00 2001 From: gbrandl Date: Sat, 14 Mar 2009 21:08:52 +0100 Subject: Work a bit on coding style of autosummary. --- sphinx-autogen.py | 15 +++ sphinx/ext/autosummary/__init__.py | 160 +++++++++++++------------ sphinx/ext/autosummary/generate.py | 172 +++++++++++++++------------ sphinx/ext/autosummary/templates/module | 39 ++++++ sphinx/ext/autosummary/templates/module.html | 39 ------ 5 files changed, 236 insertions(+), 189 deletions(-) create mode 100755 sphinx-autogen.py create mode 100644 sphinx/ext/autosummary/templates/module delete mode 100644 sphinx/ext/autosummary/templates/module.html diff --git a/sphinx-autogen.py b/sphinx-autogen.py new file mode 100755 index 00000000..494f4d85 --- /dev/null +++ b/sphinx-autogen.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" + Sphinx - Python documentation toolchain + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + :copyright: 2007-2009 by Georg Brandl. + :license: BSD. 
+""" + +import sys + +if __name__ == '__main__': + from sphinx.ext.autosummary.generate import main + sys.exit(main(sys.argv)) diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py index f26c4676..8775fe86 100644 --- a/sphinx/ext/autosummary/__init__.py +++ b/sphinx/ext/autosummary/__init__.py @@ -1,80 +1,73 @@ +# -*- coding: utf-8 -*- """ -=========== -autosummary -=========== + sphinx.ext.autosummary + ~~~~~~~~~~~~~~~~~~~~~~ -Sphinx extension that adds an autosummary:: directive, which can be -used to generate function/method/attribute/etc. summary lists, similar -to those output eg. by Epydoc and other API doc generation tools. + Sphinx extension that adds an autosummary:: directive, which can be + used to generate function/method/attribute/etc. summary lists, similar + to those output eg. by Epydoc and other API doc generation tools. -An :autolink: role is also provided. + An :autolink: role is also provided. -autosummary directive ---------------------- + autosummary directive + --------------------- -The autosummary directive has the form:: + The autosummary directive has the form:: - .. autosummary:: - :nosignatures: - :toctree: generated/ - - module.function_1 - module.function_2 - ... + .. autosummary:: + :nosignatures: + :toctree: generated/ -and it generates an output table (containing signatures, optionally) + module.function_1 + module.function_2 + ... - ======================== ============================================= - module.function_1(args) Summary line from the docstring of function_1 - module.function_2(args) Summary line from the docstring - ... 
- ======================== ============================================= + and it generates an output table (containing signatures, optionally) -If the :toctree: option is specified, files matching the function names -are inserted to the toctree with the given prefix: + ======================== ============================================= + module.function_1(args) Summary line from the docstring of function_1 + module.function_2(args) Summary line from the docstring + ... + ======================== ============================================= - generated/module.function_1 - generated/module.function_2 - ... + If the :toctree: option is specified, files matching the function names + are inserted to the toctree with the given prefix: -Note: The file names contain the module:: or currentmodule:: prefixes. + generated/module.function_1 + generated/module.function_2 + ... -.. seealso:: autosummary_generate.py + Note: The file names contain the module:: or currentmodule:: prefixes. + .. seealso:: autosummary_generate.py -autolink role -------------- -The autolink role functions as ``:obj:`` when the name referred can be -resolved to a Python object, and otherwise it becomes simple emphasis. -This can be used as the default role to make links 'smart'. + autolink role + ------------- + The autolink role functions as ``:obj:`` when the name referred can be + resolved to a Python object, and otherwise it becomes simple emphasis. + This can be used as the default role to make links 'smart'. + + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. 
""" -import sys, os, posixpath, re + +import os +import re +import sys +import inspect +import posixpath from docutils.parsers.rst import directives from docutils.statemachine import ViewList from docutils import nodes -import sphinx.addnodes, sphinx.roles, sphinx.builder +from sphinx import addnodes, roles from sphinx.util import patfilter -import inspect - -def setup(app): - app.add_directive('autosummary', autosummary_directive, True, (0, 0, False), - toctree=directives.unchanged, - nosignatures=directives.flag) - app.add_role('autolink', autolink_role) - - app.add_node(autosummary_toc, - html=(autosummary_toc_visit_html, autosummary_toc_depart_noop), - latex=(autosummary_toc_visit_latex, autosummary_toc_depart_noop)) - app.connect('doctree-read', process_autosummary_toc) -#------------------------------------------------------------------------------ -# autosummary_toc node -#------------------------------------------------------------------------------ +# -- autosummary_toc node ------------------------------------------------------ class autosummary_toc(nodes.comment): pass @@ -83,7 +76,6 @@ def process_autosummary_toc(app, doctree): """ Insert items described in autosummary:: to the TOC tree, but do not generate the toctree:: list. 
- """ env = app.builder.env crawled = {} @@ -92,7 +84,7 @@ def process_autosummary_toc(app, doctree): for j, subnode in enumerate(node): try: if (isinstance(subnode, autosummary_toc) - and isinstance(subnode[0], sphinx.addnodes.toctree)): + and isinstance(subnode[0], addnodes.toctree)): env.note_toctree(env.docname, subnode[0]) continue except IndexError: @@ -104,19 +96,18 @@ def process_autosummary_toc(app, doctree): crawl_toc(doctree) def autosummary_toc_visit_html(self, node): - """Hide autosummary toctree list in HTML output""" + """Hide autosummary toctree list in HTML output.""" raise nodes.SkipNode def autosummary_toc_visit_latex(self, node): - """Show autosummary toctree (= put the referenced pages here) in Latex""" + """Show autosummary toctree (= put the referenced pages here) in Latex.""" pass def autosummary_toc_depart_noop(self, node): pass -#------------------------------------------------------------------------------ -# .. autosummary:: -#------------------------------------------------------------------------------ + +# -- .. autosummary:: ---------------------------------------------------------- def autosummary_directive(dirname, arguments, options, content, lineno, content_offset, block_text, state, state_machine): @@ -155,7 +146,7 @@ def autosummary_directive(dirname, arguments, options, content, lineno, line=lineno)) docnames.append(docname) - tocnode = sphinx.addnodes.toctree() + tocnode = addnodes.toctree() tocnode['includefiles'] = docnames tocnode['maxdepth'] = -1 tocnode['glob'] = None @@ -165,6 +156,7 @@ def autosummary_directive(dirname, arguments, options, content, lineno, else: return warnings + [node] + def get_autosummary(names, state, no_signatures=False): """ Generate a proper table node for autosummary:: directive. @@ -175,10 +167,10 @@ def get_autosummary(names, state, no_signatures=False): Names of Python objects to be imported and added to the table. 
document : document Docutils document object - + """ document = state.document - + real_names = {} warnings = [] @@ -209,14 +201,14 @@ def get_autosummary(names, state, no_signatures=False): except ImportError: warnings.append(document.reporter.warning( 'failed to import %s' % name)) - append_row(":obj:`%s`" % name, "") + append_row(':obj:`%s`' % name, '') continue real_names[name] = real_name - title = "" + title = '' qualifier = 'obj' - col1 = ":"+qualifier+":`%s <%s>`" % (name, real_name) + col1 = ':'+qualifier+':`%s <%s>`' % (name, real_name) col2 = title append_row(col1, col2) @@ -240,7 +232,7 @@ def import_by_name(name, prefixes=[None]): The imported object name Name of the imported object (useful if `prefixes` was used) - + """ for prefix in prefixes: try: @@ -254,9 +246,18 @@ def import_by_name(name, prefixes=[None]): raise ImportError def _import_by_name(name): - """Import a Python object given its full name""" + """Import a Python object given its full name.""" try: + # try first interpret `name` as MODNAME.OBJ name_parts = name.split('.') + try: + modname = '.'.join(name_parts[:-1]) + __import__(modname) + return getattr(sys.modules[modname], name_parts[-1]) + except (ImportError, IndexError, AttributeError): + pass + + # ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ... last_j = 0 modname = None for j in reversed(range(1, len(name_parts)+1)): @@ -279,20 +280,19 @@ def _import_by_name(name): except (ValueError, ImportError, AttributeError, KeyError), e: raise ImportError(e) -#------------------------------------------------------------------------------ -# :autolink: (smart default role) -#------------------------------------------------------------------------------ + +# -- :autolink: (smart default role) ------------------------------------------- def autolink_role(typ, rawtext, etext, lineno, inliner, options={}, content=[]): """ Smart linking role. 
- Expands to ":obj:`text`" if `text` is an object that can be imported; - otherwise expands to "*text*". + Expands to ':obj:`text`' if `text` is an object that can be imported; + otherwise expands to '*text*'. """ - r = sphinx.roles.xfileref_role('obj', rawtext, etext, lineno, inliner, - options, content) + r = roles.xfileref_role('obj', rawtext, etext, lineno, inliner, + options, content) pnode = r[0][0] prefixes = [None] @@ -304,3 +304,15 @@ def autolink_role(typ, rawtext, etext, lineno, inliner, r[0][0] = nodes.emphasis(rawtext, content[0].astext(), classes=content['classes']) return r + + +def setup(app): + app.add_directive('autosummary', autosummary_directive, True, (0, 0, False), + toctree=directives.unchanged, + nosignatures=directives.flag) + app.add_role('autolink', autolink_role) + + app.add_node(autosummary_toc, + html=(autosummary_toc_visit_html, autosummary_toc_depart_noop), + latex=(autosummary_toc_visit_latex, autosummary_toc_depart_noop)) + app.connect('doctree-read', process_autosummary_toc) diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py index ce9e4c2d..178a2870 100644 --- a/sphinx/ext/autosummary/generate.py +++ b/sphinx/ext/autosummary/generate.py @@ -1,85 +1,91 @@ +# -*- coding: utf-8 -*- """ -autosummary_generate.py OPTIONS FILES + sphinx.ext.autosummary.generate + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Generate automatic RST source files for items referred to in -autosummary:: directives. + Usable as a library or script to generate automatic RST source files for + items referred to in autosummary:: directives. -Each generated RST file contains a single auto*:: directive which -extracts the docstring of the referred item. + Each generated RST file contains a single auto*:: directive which + extracts the docstring of the referred item. 
-Example Makefile rule:: + Example Makefile rule:: - generate: - sphinx-autogen source/*.rst source/generated + generate: + sphinx-autogen source/*.rst source/generated + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. """ -import glob, re, inspect, os, optparse +import os +import re +import sys +import glob +import inspect + +from jinja2 import Environment, PackageLoader + from sphinx.ext.autosummary import import_by_name +from sphinx.util import ensuredir -from jinja import Environment, PackageLoader -env = Environment(loader=PackageLoader('sphinx.ext.autosummary', 'templates')) +# create our own templating environment, for module template only +env = Environment(loader=PackageLoader('sphinx.ext.autosummary', 'templates')) -def main(): - p = optparse.OptionParser(__doc__.strip()) - options, args = p.parse_args() - - if len(args) <2: - p.error("wrong number of arguments") - print 'generating docs from:', args[:-1] - generate_autosummary_docs(args[:-1], args[-1]) - -def generate_autosummary_docs(source_dir, output_dir): +def generate_autosummary_docs(sources, output_dir=None): # read names = {} - for name, loc in get_documented(source_dir).items(): + for name, loc in get_documented(sources).items(): for (filename, sec_title, keyword, toctree) in loc: if toctree is not None: path = os.path.join(os.path.dirname(filename), toctree) names[name] = os.path.abspath(path) - + # write for name, path in sorted(names.items()): - path = output_dir - - if not os.path.isdir(path): - os.makedirs(path) + if output_dir is not None: + path = output_dir + + ensuredir(path) try: obj, name = import_by_name(name) except ImportError, e: - print "Failed to import '%s': %s" % (name, e) + print >>sys.stderr, 'Failed to import %r: %s' % (name, e) continue fn = os.path.join(path, '%s.rst' % name) - - if os.path.exists(fn): - # skip + # skip it if it exists + if os.path.isfile(fn): continue f = open(fn, 'w') - try: - if 
inspect.ismodule(obj): - tmpl = env.get_template('module.html') - functions = [getattr(obj, item).__name__ for item in dir(obj) if inspect.isfunction(getattr(obj, item))] - classes = [getattr(obj, item).__name__ for item in dir(obj) if inspect.isclass(getattr(obj, item)) and not issubclass(getattr(obj, item), Exception)] - exceptions = [getattr(obj, item).__name__ for item in dir(obj) if inspect.isclass(getattr(obj, item)) and issubclass(getattr(obj, item), Exception)] - rendered = tmpl.render(name=name, - functions=functions, - classes=classes, - exceptions=exceptions, + tmpl = env.get_template('module') + functions = [getattr(obj, item).__name__ + for item in dir(obj) + if inspect.isfunction(getattr(obj, item))] + classes = [getattr(obj, item).__name__ + for item in dir(obj) + if inspect.isclass(getattr(obj, item)) + and not issubclass(getattr(obj, item), Exception)] + exceptions = [getattr(obj, item).__name__ + for item in dir(obj) + if inspect.isclass(getattr(obj, item)) + and issubclass(getattr(obj, item), Exception)] + rendered = tmpl.render(name=name, + functions=functions, + classes=classes, + exceptions=exceptions, len_functions=len(functions), len_classes=len(classes), - len_exceptions=len(exceptions) - - ) + len_exceptions=len(exceptions)) f.write(rendered) else: f.write('%s\n%s\n\n' % (name, '='*len(name))) - + if inspect.isclass(obj): if issubclass(obj, Exception): f.write(format_modulemember(name, 'autoexception')) @@ -96,40 +102,40 @@ def generate_autosummary_docs(source_dir, output_dir): finally: f.close() + def format_modulemember(name, directive): parts = name.split('.') mod, name = '.'.join(parts[:-1]), parts[-1] - return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name) + return '.. currentmodule:: %s\n\n.. %s:: %s\n' % (mod, directive, name) + def format_classmember(name, directive): parts = name.split('.') mod, name = '.'.join(parts[:-2]), '.'.join(parts[-2:]) - return ".. currentmodule:: %s\n\n.. 
%s:: %s\n" % (mod, directive, name) + return '.. currentmodule:: %s\n\n.. %s:: %s\n' % (mod, directive, name) + + +title_underline_re = re.compile('^[-=*_^#]{3,}\s*$') +autodoc_re = re.compile(r'.. auto(function|method|attribute|class|exception' + '|module)::\s*([A-Za-z0-9_.]+)\s*$') +autosummary_re = re.compile(r'^\.\.\s+autosummary::\s*') +module_re = re.compile(r'^\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$') +autosummary_item_re = re.compile(r'^\s+([_a-zA-Z][a-zA-Z0-9_.]*)\s*') +toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$') def get_documented(filenames): """ - Find out what items are documented in source/*.rst - - Returns - ------- - documented : dict of list of (filename, title, keyword, toctree) - Dictionary whose keys are documented names of objects. - The value is a list of locations where the object was documented. - Each location is a tuple of filename, the current section title, - the name of the directive, and the value of the :toctree: argument - (if present) of the directive. + Find out what items are documented in the given filenames. + Returns a dict of list of (filename, title, keyword, toctree) Keys are + documented names of objects. The value is a list of locations where the + object was documented. Each location is a tuple of filename, the current + section title, the name of the directive, and the value of the :toctree: + argument (if present) of the directive. """ - - title_underline_re = re.compile("^[-=*_^#]{3,}\s*$") - autodoc_re = re.compile(".. 
auto(function|method|attribute|class|exception|module)::\s*([A-Za-z0-9_.]+)\s*$") - autosummary_re = re.compile(r'^\.\.\s+autosummary::\s*') - module_re = re.compile(r'^\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$') - autosummary_item_re = re.compile(r'^\s+([_a-zA-Z][a-zA-Z0-9_.]*)\s*') - toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$') - + documented = {} - + for filename in filenames: current_title = [] last_line = None @@ -150,32 +156,34 @@ def get_documented(filenames): continue # skip options m = autosummary_item_re.match(line) - + if m: name = m.group(1).strip() - if current_module and not name.startswith(current_module + '.'): - name = "%s.%s" % (current_module, name) + if current_module and \ + not name.startswith(current_module + '.'): + name = '%s.%s' % (current_module, name) documented.setdefault(name, []).append( (filename, current_title, 'autosummary', toctree)) continue if line.strip() == '': continue in_autosummary = False - + m = autosummary_re.match(line) if m: in_autosummary = True continue - + m = autodoc_re.search(line) if m: name = m.group(2).strip() - if current_module and not name.startswith(current_module + '.'): - name = "%s.%s" % (current_module, name) - if m.group(1) == "module": + if current_module and \ + not name.startswith(current_module + '.'): + name = '%s.%s' % (current_module, name) + if m.group(1) == 'module': current_module = name documented.setdefault(name, []).append( - (filename, current_title, "auto" + m.group(1), None)) + (filename, current_title, 'auto' + m.group(1), None)) continue m = title_underline_re.match(line) @@ -191,5 +199,17 @@ def get_documented(filenames): last_line = line return documented -if __name__ == "__main__": + +def main(args=None): + if args is None: + args = sys.argv[1:] + + if len(args) < 2: + print >>sys.stderr, 'usage: %s sourcefile ... 
outputdir' % sys.argv[0] + + print 'generating docs from:', ', '.join(args[:-1]) + generate_autosummary_docs(args[:-1], args[-1]) + + +if __name__ == '__main__': main() diff --git a/sphinx/ext/autosummary/templates/module b/sphinx/ext/autosummary/templates/module new file mode 100644 index 00000000..34dd8100 --- /dev/null +++ b/sphinx/ext/autosummary/templates/module @@ -0,0 +1,39 @@ +:mod:`{{name}}` +=============================================================================================================================================== + + +.. automodule:: {{name}} + +{% if len_functions > 0 %} +Functions +---------- +{% for item in functions %} +.. autofunction:: {{item}} +{% endfor %} +{% endif %} + +{% if len_classes > 0 %} +Classes +-------- +{% for item in classes %} +.. autoclass:: {{item}} + :show-inheritance: + :members: + :inherited-members: + :undoc-members: + +{% endfor %} +{% endif %} + +{% if len_exceptions > 0 %} +Exceptions +------------ +{% for item in exceptions %} +.. autoclass:: {{item}} + :show-inheritance: + :members: + :inherited-members: + :undoc-members: + +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/sphinx/ext/autosummary/templates/module.html b/sphinx/ext/autosummary/templates/module.html deleted file mode 100644 index 34dd8100..00000000 --- a/sphinx/ext/autosummary/templates/module.html +++ /dev/null @@ -1,39 +0,0 @@ -:mod:`{{name}}` -=============================================================================================================================================== - - -.. automodule:: {{name}} - -{% if len_functions > 0 %} -Functions ----------- -{% for item in functions %} -.. autofunction:: {{item}} -{% endfor %} -{% endif %} - -{% if len_classes > 0 %} -Classes --------- -{% for item in classes %} -.. 
autoclass:: {{item}} - :show-inheritance: - :members: - :inherited-members: - :undoc-members: - -{% endfor %} -{% endif %} - -{% if len_exceptions > 0 %} -Exceptions ------------- -{% for item in exceptions %} -.. autoclass:: {{item}} - :show-inheritance: - :members: - :inherited-members: - :undoc-members: - -{% endfor %} -{% endif %} \ No newline at end of file -- cgit v1.2.1 From 9d1d3c7b014f1078c2d952626c4d596d12c1d07b Mon Sep 17 00:00:00 2001 From: gbrandl Date: Sat, 14 Mar 2009 21:16:20 +0100 Subject: Remove extra autosummary license since Sphinx now uses 2-clause BSD as well. --- AUTHORS | 4 +++- sphinx/ext/autosummary/LICENSE.txt | 31 ------------------------------- 2 files changed, 3 insertions(+), 32 deletions(-) delete mode 100644 sphinx/ext/autosummary/LICENSE.txt diff --git a/AUTHORS b/AUTHORS index 73ec5ed2..259c0949 100644 --- a/AUTHORS +++ b/AUTHORS @@ -14,10 +14,12 @@ Other contributors, listed alphabetically, are: * Dave Kuhlman -- original LaTeX writer * Thomas Lamb -- linkcheck builder * Will Maier -- directory HTML builder +* Christopher Perkins -- autosummary integration * Benjamin Peterson -- unittests * Stefan Seefeld -- toctree improvements * Antonio Valentino -- qthelp builder -* Pauli Virtanen -- autodoc improvements +* Pauli Virtanen -- autodoc improvements, autosummary extension +* Stefan van der Walt -- autosummary extension * Sebastian Wiesner -- image handling, distutils support Many thanks for all contributions! 
diff --git a/sphinx/ext/autosummary/LICENSE.txt b/sphinx/ext/autosummary/LICENSE.txt deleted file mode 100644 index aa1bf333..00000000 --- a/sphinx/ext/autosummary/LICENSE.txt +++ /dev/null @@ -1,31 +0,0 @@ - The files - - __init__.py - - generate.py - - templates/module.html - - have the following license: - -Copyright (C) 2008 Stefan van der Walt , Pauli Virtanen , Christopher Perkins - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. -- cgit v1.2.1 From 0756f1fc6cb5d8c9291ca62c8c0809bff962b5cc Mon Sep 17 00:00:00 2001 From: gbrandl Date: Sat, 14 Mar 2009 21:34:59 +0100 Subject: Document setup_extension(). 
--- doc/ext/appapi.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/ext/appapi.rst b/doc/ext/appapi.rst index fba1d945..6864d6ba 100644 --- a/doc/ext/appapi.rst +++ b/doc/ext/appapi.rst @@ -10,6 +10,11 @@ This function is called at initialization time with one argument, the application object representing the Sphinx process. This application object has the following public API: +.. method:: Sphinx.setup_extension(name) + + Load the extension given by the module *name*. Use this if your extension + needs the features provided by another extension. + .. method:: Sphinx.add_builder(builder) Register a new builder. *builder* must be a class that inherits from -- cgit v1.2.1 From b00608241e1406c19d572c43b70d3018274274ff Mon Sep 17 00:00:00 2001 From: gbrandl Date: Sun, 15 Mar 2009 18:36:00 +0100 Subject: Work on autodoc: move -g command-line argument to config value, make directive up to new API. --- sphinx/application.py | 6 +- sphinx/cmdline.py | 14 --- sphinx/ext/autosummary/__init__.py | 148 +++++++++++++++++--------------- sphinx/ext/autosummary/generate.py | 46 +++++++--- sphinx/ext/autosummary/templates/module | 4 +- 5 files changed, 115 insertions(+), 103 deletions(-) diff --git a/sphinx/application.py b/sphinx/application.py index 9c467066..e8e9fad6 100644 --- a/sphinx/application.py +++ b/sphinx/application.py @@ -82,6 +82,9 @@ class Sphinx(object): self._events = events.copy() + # say hello to the world + self.info(bold('Running Sphinx v%s' % sphinx.__released__)) + # status code for command-line application self.statuscode = 0 @@ -105,9 +108,6 @@ class Sphinx(object): if buildername not in self.builderclasses: raise SphinxError('Builder name %s not registered' % buildername) - self.info(bold('Sphinx v%s, building %s' % (sphinx.__released__, - buildername))) - builderclass = self.builderclasses[buildername] if isinstance(builderclass, tuple): # builtin builder diff --git a/sphinx/cmdline.py b/sphinx/cmdline.py index 79bd9751..813661bb 100644 --- 
a/sphinx/cmdline.py +++ b/sphinx/cmdline.py @@ -43,8 +43,6 @@ new and changed files -C -- use no config file at all, only -D options -D -- override a setting in configuration -A -- pass a value into the templates, for HTML builder - -g -- auto-generate docs with sphinx.ext.autosummary - for autosummary directives in sources found in path -N -- do not do colored output -q -- no output on stdout, just warnings on stderr -Q -- no output at all, not even warnings @@ -145,18 +143,6 @@ def main(argv): except ValueError: pass htmlcontext[key] = val - elif opt == '-g': - # XXX XXX XXX - source_filenames = [path.join(srcdir, f) - for f in os.listdir(srcdir) if f.endswith('.rst')] - if val is None: - print >>sys.stderr, \ - 'Error: you must provide a destination directory ' \ - 'for autodoc generation.' - return 1 - p = path.abspath(val) - from sphinx.ext.autosummary.generate import generate_autosummary_docs - generate_autosummary_docs(source_filenames, p) elif opt == '-N': nocolor() elif opt == '-E': diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py index 8775fe86..4fe64dd0 100644 --- a/sphinx/ext/autosummary/__init__.py +++ b/sphinx/ext/autosummary/__init__.py @@ -58,6 +58,7 @@ import re import sys import inspect import posixpath +from os import path from docutils.parsers.rst import directives from docutils.statemachine import ViewList @@ -65,6 +66,7 @@ from docutils import nodes from sphinx import addnodes, roles from sphinx.util import patfilter +from sphinx.util.compat import Directive # -- autosummary_toc node ------------------------------------------------------ @@ -103,70 +105,75 @@ def autosummary_toc_visit_latex(self, node): """Show autosummary toctree (= put the referenced pages here) in Latex.""" pass -def autosummary_toc_depart_noop(self, node): +def autosummary_noop(self, node): pass # -- .. 
autosummary:: ---------------------------------------------------------- -def autosummary_directive(dirname, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): +class Autosummary(Directive): """ Pretty table containing short signatures and summaries of functions etc. autosummary also generates a (hidden) toctree:: node. - """ - names = [] - names += [x.strip() for x in content if x.strip()] - - table, warnings, real_names = get_autosummary(names, state, - 'nosignatures' in options) - node = table - - env = state.document.settings.env - suffix = env.config.source_suffix - all_docnames = env.found_docs.copy() - dirname = posixpath.dirname(env.docname) - - if 'toctree' in options: - tree_prefix = options['toctree'].strip() - docnames = [] - for name in names: - name = real_names.get(name, name) - - docname = tree_prefix + name - if docname.endswith(suffix): - docname = docname[:-len(suffix)] - docname = posixpath.normpath(posixpath.join(dirname, docname)) - if docname not in env.found_docs: - warnings.append(state.document.reporter.warning( - 'toctree references unknown document %r' % docname, - line=lineno)) - docnames.append(docname) - - tocnode = addnodes.toctree() - tocnode['includefiles'] = docnames - tocnode['maxdepth'] = -1 - tocnode['glob'] = None - - tocnode = autosummary_toc('', '', tocnode) - return warnings + [node] + [tocnode] - else: - return warnings + [node] + required_arguments = 0 + optional_arguments = 0 + final_argument_whitespace = False + has_content = True + option_spec = { + 'toctree': directives.unchanged, + 'nosignatures': directives.flag, + } + + def run(self): + names = [] + names += [x.strip() for x in self.content if x.strip()] + + table, warnings, real_names = get_autosummary( + names, self.state, 'nosignatures' in self.options) + node = table + + env = self.state.document.settings.env + suffix = env.config.source_suffix + all_docnames = env.found_docs.copy() + dirname = 
posixpath.dirname(env.docname) + + if 'toctree' in self.options: + tree_prefix = self.options['toctree'].strip() + docnames = [] + for name in names: + name = real_names.get(name, name) + + docname = posixpath.join(tree_prefix, name) + if docname.endswith(suffix): + docname = docname[:-len(suffix)] + docname = posixpath.normpath(posixpath.join(dirname, docname)) + if docname not in env.found_docs: + warnings.append(self.state.document.reporter.warning( + 'toctree references unknown document %r' % docname, + line=self.lineno)) + docnames.append(docname) + + tocnode = addnodes.toctree() + tocnode['includefiles'] = docnames + tocnode['entries'] = [(None, docname) for docname in docnames] + tocnode['maxdepth'] = -1 + tocnode['glob'] = None + + tocnode = autosummary_toc('', '', tocnode) + return warnings + [node] + [tocnode] + else: + return warnings + [node] def get_autosummary(names, state, no_signatures=False): """ Generate a proper table node for autosummary:: directive. - Parameters - ---------- - names : list of str - Names of Python objects to be imported and added to the table. - document : document - Docutils document object + *names* is a list of names of Python objects to be imported and added to the + table. *document* is the Docutils document object. """ document = state.document @@ -216,23 +223,8 @@ def get_autosummary(names, state, no_signatures=False): def import_by_name(name, prefixes=[None]): """ - Import a Python object that has the given name, under one of the prefixes. - - Parameters - ---------- - name : str - Name of a Python object, eg. 'numpy.ndarray.view' - prefixes : list of (str or None), optional - Prefixes to prepend to the name (None implies no prefix). - The first prefixed name that results to successful import is used. - - Returns - ------- - obj - The imported object - name - Name of the imported object (useful if `prefixes` was used) - + Import a Python object that has the given *name*, under one of the + *prefixes*. 
The first name that succeeds is used. """ for prefix in prefixes: try: @@ -306,13 +298,27 @@ def autolink_role(typ, rawtext, etext, lineno, inliner, return r -def setup(app): - app.add_directive('autosummary', autosummary_directive, True, (0, 0, False), - toctree=directives.unchanged, - nosignatures=directives.flag) - app.add_role('autolink', autolink_role) +def process_generate_options(app): + genfiles = app.config.autosummary_generate + if not genfiles: + return + from sphinx.ext.autosummary.generate import generate_autosummary_docs + ext = app.config.source_suffix + genfiles = [path.join(app.srcdir, genfile + + (not genfile.endswith(ext) and ext or '')) + for genfile in genfiles] + generate_autosummary_docs(genfiles, warn=app.warn, info=app.info) + + +def setup(app): + # I need autodoc + app.setup_extension('sphinx.ext.autodoc') app.add_node(autosummary_toc, - html=(autosummary_toc_visit_html, autosummary_toc_depart_noop), - latex=(autosummary_toc_visit_latex, autosummary_toc_depart_noop)) + html=(autosummary_toc_visit_html, autosummary_noop), + latex=(autosummary_toc_visit_latex, autosummary_noop)) + app.add_directive('autosummary', Autosummary) + app.add_role('autolink', autolink_role) app.connect('doctree-read', process_autosummary_toc) + app.connect('builder-inited', process_generate_options) + app.add_config_value('autosummary_generate', [], True) diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py index 178a2870..b4c2482c 100644 --- a/sphinx/ext/autosummary/generate.py +++ b/sphinx/ext/autosummary/generate.py @@ -20,7 +20,7 @@ import os import re import sys -import glob +import getopt import inspect from jinja2 import Environment, PackageLoader @@ -32,7 +32,17 @@ from sphinx.util import ensuredir env = Environment(loader=PackageLoader('sphinx.ext.autosummary', 'templates')) -def generate_autosummary_docs(sources, output_dir=None): +def _simple_info(msg): + print msg + +def _simple_warn(msg): + print >>sys.stderr, 'WARNING: 
' + msg + +def generate_autosummary_docs(sources, output_dir=None, + warn=_simple_warn, info=_simple_info): + info('generating autosummary for: %s' % ', '.join(sources)) + if output_dir: + info('writing to %s' % output_dir) # read names = {} for name, loc in get_documented(sources).items(): @@ -43,15 +53,13 @@ def generate_autosummary_docs(sources, output_dir=None): # write for name, path in sorted(names.items()): - if output_dir is not None: - path = output_dir - + path = output_dir or path ensuredir(path) try: obj, name = import_by_name(name) except ImportError, e: - print >>sys.stderr, 'Failed to import %r: %s' % (name, e) + warn('failed to import %r: %s' % (name, e)) continue fn = os.path.join(path, '%s.rst' % name) @@ -63,6 +71,7 @@ def generate_autosummary_docs(sources, output_dir=None): try: if inspect.ismodule(obj): + # XXX replace this with autodoc's API? tmpl = env.get_template('module') functions = [getattr(obj, item).__name__ for item in dir(obj) @@ -76,6 +85,7 @@ def generate_autosummary_docs(sources, output_dir=None): if inspect.isclass(getattr(obj, item)) and issubclass(getattr(obj, item), Exception)] rendered = tmpl.render(name=name, + underline='='*len(name), functions=functions, classes=classes, exceptions=exceptions, @@ -177,6 +187,7 @@ def get_documented(filenames): m = autodoc_re.search(line) if m: name = m.group(2).strip() + # XXX look in newer generate.py if current_module and \ not name.startswith(current_module + '.'): name = '%s.%s' % (current_module, name) @@ -200,15 +211,24 @@ def get_documented(filenames): return documented -def main(args=None): - if args is None: - args = sys.argv[1:] +def main(argv): + usage = 'usage: %s [-o output_dir] sourcefile ...' % sys.argv[0] + try: + opts, args = getopt.getopt(argv[1:], 'o:') + except getopt.error: + print >>sys.stderr, usage + return 1 + + output_dir = None + for opt, val in opts: + if opt == '-o': + output_dir = val - if len(args) < 2: - print >>sys.stderr, 'usage: %s sourcefile ... 
outputdir' % sys.argv[0] + if len(args) < 1: + print >>sys.stderr, usage + return 1 - print 'generating docs from:', ', '.join(args[:-1]) - generate_autosummary_docs(args[:-1], args[-1]) + generate_autosummary_docs(args, output_dir) if __name__ == '__main__': diff --git a/sphinx/ext/autosummary/templates/module b/sphinx/ext/autosummary/templates/module index 34dd8100..0cbc8266 100644 --- a/sphinx/ext/autosummary/templates/module +++ b/sphinx/ext/autosummary/templates/module @@ -1,5 +1,5 @@ :mod:`{{name}}` -=============================================================================================================================================== +======{{ underline }}= .. automodule:: {{name}} @@ -36,4 +36,4 @@ Exceptions :undoc-members: {% endfor %} -{% endif %} \ No newline at end of file +{% endif %} -- cgit v1.2.1 From 227fcafb65129ee5565cd932b29de5a667f41f01 Mon Sep 17 00:00:00 2001 From: gbrandl Date: Sun, 15 Mar 2009 18:42:56 +0100 Subject: Improve ImportError message. --- sphinx/ext/autosummary/__init__.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py index 4fe64dd0..01160cd6 100644 --- a/sphinx/ext/autosummary/__init__.py +++ b/sphinx/ext/autosummary/__init__.py @@ -226,6 +226,7 @@ def import_by_name(name, prefixes=[None]): Import a Python object that has the given *name*, under one of the *prefixes*. The first name that succeeds is used. 
""" + tried = [] for prefix in prefixes: try: if prefix: @@ -234,8 +235,8 @@ def import_by_name(name, prefixes=[None]): prefixed_name = name return _import_by_name(prefixed_name), prefixed_name except ImportError: - pass - raise ImportError + tried.append(prefixed_name) + raise ImportError('no module named %s' % ' or '.join(tried)) def _import_by_name(name): """Import a Python object given its full name.""" @@ -270,7 +271,7 @@ def _import_by_name(name): else: return sys.modules[modname] except (ValueError, ImportError, AttributeError, KeyError), e: - raise ImportError(e) + raise ImportError(*e.args) # -- :autolink: (smart default role) ------------------------------------------- -- cgit v1.2.1 From b2013b7124bb47a02e3021a92767741b6805107f Mon Sep 17 00:00:00 2001 From: gbrandl Date: Sun, 15 Mar 2009 19:02:37 +0100 Subject: Add autosummary to tests. --- tests/root/autosummary.txt | 5 +++++ tests/root/conf.py | 5 ++++- tests/root/contents.txt | 1 + 3 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 tests/root/autosummary.txt diff --git a/tests/root/autosummary.txt b/tests/root/autosummary.txt new file mode 100644 index 00000000..d05dffec --- /dev/null +++ b/tests/root/autosummary.txt @@ -0,0 +1,5 @@ +Autosummary test +================ + +.. 
autosummary:: sphinx.application + :toctree: xyz diff --git a/tests/root/conf.py b/tests/root/conf.py index 85e9f468..fd82be7d 100644 --- a/tests/root/conf.py +++ b/tests/root/conf.py @@ -5,7 +5,8 @@ import sys, os sys.path.append(os.path.abspath('.')) extensions = ['ext', 'sphinx.ext.autodoc', 'sphinx.ext.jsmath', - 'sphinx.ext.coverage', 'sphinx.ext.todo'] + 'sphinx.ext.coverage', 'sphinx.ext.todo', + 'sphinx.ext.autosummary'] jsmath_path = 'dummy.js' @@ -50,6 +51,8 @@ value_from_conf_py = 84 coverage_c_path = ['special/*.h'] coverage_c_regexes = {'cfunction': r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)'} +autosummary_generate = ['autosummary'] + # modify tags from conf.py tags.add('confpytag') diff --git a/tests/root/contents.txt b/tests/root/contents.txt index 0e52593e..24d790a5 100644 --- a/tests/root/contents.txt +++ b/tests/root/contents.txt @@ -19,6 +19,7 @@ Contents: desc math autodoc + autosummary Python -- cgit v1.2.1 From 558af5109a10a5351a45e810bfe3b84285bb1b4a Mon Sep 17 00:00:00 2001 From: gbrandl Date: Sun, 15 Mar 2009 19:04:02 +0100 Subject: Fix test_env after update() API change. 
--- tests/test_env.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/test_env.py b/tests/test_env.py index 0b944c50..a06656d6 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -37,8 +37,7 @@ def warning_emitted(file, text): # afford to not run update() in the setup but in its own test def test_first_update(): - it = env.update(app.config, app.srcdir, app.doctreedir, app) - msg = it.next() + msg, num, it = env.update(app.config, app.srcdir, app.doctreedir, app) assert msg.endswith('%d added, 0 changed, 0 removed' % len(env.found_docs)) docnames = set() for docname in it: # the generator does all the work @@ -80,8 +79,7 @@ def test_second_update(): # the contents.txt toctree; otherwise section numbers would shift (root / 'autodoc.txt').unlink() (root / 'new.txt').write_text('New file\n========\n') - it = env.update(app.config, app.srcdir, app.doctreedir, app) - msg = it.next() + msg, num, it = env.update(app.config, app.srcdir, app.doctreedir, app) assert '1 added, 3 changed, 1 removed' in msg docnames = set() for docname in it: -- cgit v1.2.1 From 0da45a6b8fb1ca6d6df260c11d6a734ac306ab9c Mon Sep 17 00:00:00 2001 From: gbrandl Date: Sun, 15 Mar 2009 19:06:57 +0100 Subject: Fix attribute reference error. --- sphinx/builders/latex.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/builders/latex.py b/sphinx/builders/latex.py index 96d70e3e..bc2f1ba5 100644 --- a/sphinx/builders/latex.py +++ b/sphinx/builders/latex.py @@ -123,7 +123,7 @@ class LaTeXBuilder(Builder): except Exception: self.warn('toctree contains ref to nonexisting ' 'file %r' % includefile, - self.builder.env.doc2path(docname)) + self.env.doc2path(docname)) else: sof = addnodes.start_of_file(docname=includefile) sof.children = subtree.children -- cgit v1.2.1 From d1c289ca4035464bb97de15d5251f940febf9d10 Mon Sep 17 00:00:00 2001 From: gbrandl Date: Sun, 15 Mar 2009 19:55:48 +0100 Subject: Fix test. 
--- sphinx/ext/autosummary/__init__.py | 3 ++- tests/root/autosummary.txt | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py index 01160cd6..8fdeefe7 100644 --- a/sphinx/ext/autosummary/__init__.py +++ b/sphinx/ext/autosummary/__init__.py @@ -317,7 +317,8 @@ def setup(app): app.setup_extension('sphinx.ext.autodoc') app.add_node(autosummary_toc, html=(autosummary_toc_visit_html, autosummary_noop), - latex=(autosummary_toc_visit_latex, autosummary_noop)) + latex=(autosummary_toc_visit_latex, autosummary_noop), + text=(autosummary_noop, autosummary_noop)) app.add_directive('autosummary', Autosummary) app.add_role('autolink', autolink_role) app.connect('doctree-read', process_autosummary_toc) diff --git a/tests/root/autosummary.txt b/tests/root/autosummary.txt index d05dffec..e4b75167 100644 --- a/tests/root/autosummary.txt +++ b/tests/root/autosummary.txt @@ -1,5 +1,7 @@ Autosummary test ================ -.. autosummary:: sphinx.application - :toctree: xyz +.. autosummary:: + :toctree: generated + + sphinx.application -- cgit v1.2.1 From a72d904ded8d922732728d97f212b7820f03f5f1 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 15 Mar 2009 23:29:10 +0100 Subject: Note ``autosummary`` extension. --- CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES b/CHANGES index cd0c82fb..a632759d 100644 --- a/CHANGES +++ b/CHANGES @@ -145,6 +145,9 @@ New features added - New ``inheritance_diagram`` extension to embed... inheritance diagrams! + - New ``autosummary`` extension that generates summaries of + modules and automatic documentation of modules. + - Autodoc now has a reusable Python API, which can be used to create custom types of objects to auto-document (e.g. Zope interfaces). See also ``Sphinx.add_autodocumenter()``. 
-- cgit v1.2.1 From 38b67d02693c8ca897ee22a8b68c7413365b3ace Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 15 Mar 2009 23:34:19 +0100 Subject: #123: The ``glossary`` directive now supports a ``:sorted:`` flag that sorts glossary entries alphabetically. --- CHANGES | 3 +++ doc/markup/para.rst | 16 ++++++++++------ sphinx/directives/other.py | 13 +++++++++++-- 3 files changed, 24 insertions(+), 8 deletions(-) diff --git a/CHANGES b/CHANGES index a632759d..74838c61 100644 --- a/CHANGES +++ b/CHANGES @@ -55,6 +55,9 @@ New features added the directive -- this allows you to define your document structure, but place the links yourself. + - #123: The ``glossary`` directive now supports a ``:sorted:`` + flag that sorts glossary entries alphabetically. + - Paths to images, literal include files and download files can now be absolute (like ``/images/foo.png``). They are treated as relative to the top source directory. diff --git a/doc/markup/para.rst b/doc/markup/para.rst index 2ea2fd2d..e8adc75c 100644 --- a/doc/markup/para.rst +++ b/doc/markup/para.rst @@ -203,14 +203,18 @@ Glossary .. glossary:: environment - A structure where information about all documents under the root is saved, - and used for cross-referencing. The environment is pickled after the - parsing stage, so that successive runs only need to read and parse new and - changed documents. + A structure where information about all documents under the root is + saved, and used for cross-referencing. The environment is pickled + after the parsing stage, so that successive runs only need to read + and parse new and changed documents. source directory - The directory which, including its subdirectories, contains all source - files for one Sphinx project. + The directory which, including its subdirectories, contains all + source files for one Sphinx project. + + .. versionadded:: 0.6 + You can now give the glossary directive a ``:sorted:`` flag that will + automatically sort the entries alphabetically. 
Grammar production displays diff --git a/sphinx/directives/other.py b/sphinx/directives/other.py index bc9a54df..80c88f5f 100644 --- a/sphinx/directives/other.py +++ b/sphinx/directives/other.py @@ -414,7 +414,9 @@ class Glossary(Directive): required_arguments = 0 optional_arguments = 0 final_argument_whitespace = False - option_spec = {} + option_spec = { + 'sorted': directives.flag, + } def run(self): env = self.state.document.settings.env @@ -426,8 +428,10 @@ class Glossary(Directive): dls = [child for child in node if isinstance(child, nodes.definition_list)] # now, extract definition terms to enable cross-reference creation + new_dl = nodes.definition_list() + new_dl['classes'].append('glossary') + items = [] for dl in dls: - dl['classes'].append('glossary') for li in dl.children: if not li.children or not isinstance(li[0], nodes.term): continue @@ -443,6 +447,11 @@ class Glossary(Directive): indexnode = addnodes.index() indexnode['entries'] = [('single', termtext, new_id, termtext)] li.insert(0, indexnode) + items.append((termtext, li)) + if 'sorted' in self.options: + items.sort() + new_dl.extend(item[1] for item in items) + node.children = [new_dl] return [node] -- cgit v1.2.1 From 223e814b263cb415899408d48d231f6c62e01a02 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 15 Mar 2009 23:43:09 +0100 Subject: Add link to Glenn's SCons script repo. --- doc/faq.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/faq.rst b/doc/faq.rst index ae12cdad..723601f1 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -26,5 +26,9 @@ How do I... There's a third-party extension providing an `api role`_ which refers to Epydoc's API docs for a given identifier. +... use Sphinx with SCons? + Glenn Hutchings has written a SCons build script to build Sphinx + documentation; it is hosted here: http://bitbucket.org/zondo/sphinx-scons + .. 
_api role: http://git.savannah.gnu.org/cgit/kenozooid.git/tree/doc/extapi.py -- cgit v1.2.1 From def22820159c63af6f604a1554d05a5575356c9b Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 15 Mar 2009 23:52:48 +0100 Subject: Autodoc can now exclude single members from documentation via the ``exclude-members`` option. --- CHANGES | 3 +++ doc/ext/autodoc.rst | 6 ++++++ sphinx/ext/autodoc.py | 14 +++++++++++++- tests/test_autodoc.py | 6 +++++- 4 files changed, 27 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 74838c61..e15d5280 100644 --- a/CHANGES +++ b/CHANGES @@ -162,6 +162,9 @@ New features added - Autodoc can document classes as functions now if explicitly marked with `autofunction`. + - Autodoc can now exclude single members from documentation + via the ``exclude-members`` option. + - Autodoc can now order members either alphabetically (like previously) or by member type; configurable either with the config value ``autodoc_member_order`` or a ``member-order`` diff --git a/doc/ext/autodoc.rst b/doc/ext/autodoc.rst index 17a2766b..ff8d189d 100644 --- a/doc/ext/autodoc.rst +++ b/doc/ext/autodoc.rst @@ -141,6 +141,12 @@ directive. .. versionadded:: 0.6 + * The directives supporting member documentation also have a + ``exclude-members`` option that can be used to exclude single member names + from documentation, if all members are to be documented. + + .. versionadded:: 0.6 + .. note:: In an :dir:`automodule` directive with the ``members`` option set, only diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index 43c4b26a..5c1a0653 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -79,6 +79,12 @@ def members_option(arg): return ALL return [x.strip() for x in arg.split(',')] +def members_set_option(arg): + """Used to convert the :members: option to auto directives.""" + if arg is None: + return ALL + return set(x.strip() for x in arg.split(',')) + def bool_option(arg): """Used to convert flag options to auto directives. 
(Instead of directives.flag(), which returns None.)""" @@ -549,6 +555,11 @@ class Documenter(object): # find out which members are documentable members_check_module, members = self.get_object_members(want_all) + # remove members given by exclude-members + if self.options.exclude_members: + members = [(membername, member) for (membername, member) in members + if membername not in self.options.exclude_members] + # document non-skipped members memberdocumenters = [] for (mname, member, isattr) in self.filter_members(members, want_all): @@ -666,7 +677,7 @@ class ModuleDocumenter(Documenter): 'noindex': bool_option, 'inherited-members': bool_option, 'show-inheritance': bool_option, 'synopsis': identity, 'platform': identity, 'deprecated': bool_option, - 'member-order': identity, + 'member-order': identity, 'exclude-members': members_set_option, } @classmethod @@ -818,6 +829,7 @@ class ClassDocumenter(ModuleLevelDocumenter): 'members': members_option, 'undoc-members': bool_option, 'noindex': bool_option, 'inherited-members': bool_option, 'show-inheritance': bool_option, 'member-order': identity, + 'exclude-members': members_set_option, } @classmethod diff --git a/tests/test_autodoc.py b/tests/test_autodoc.py index a9e030fe..d7e2f4ef 100644 --- a/tests/test_autodoc.py +++ b/tests/test_autodoc.py @@ -37,6 +37,7 @@ def setup_module(): deprecated = False, members = [], member_order = 'alphabetic', + exclude_members = set(), ) directive = Struct( @@ -375,6 +376,7 @@ def test_generate(): assert_processes(should, 'class', 'Class') should.extend([('method', 'test_autodoc.Class.meth')]) options.members = ['meth'] + options.exclude_members = set(['excludemeth']) assert_processes(should, 'class', 'Class') should.extend([('attribute', 'test_autodoc.Class.prop'), ('attribute', 'test_autodoc.Class.attr'), @@ -458,7 +460,9 @@ class Class(Base): def skipmeth(self): """Method that should be skipped.""" - pass + + def excludemeth(self): + """Method that should be excluded.""" # should not 
be documented skipattr = 'foo' -- cgit v1.2.1 From 125f29cfc23baabfe708669d45f50d4d3773edee Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 15 Mar 2009 23:55:05 +0100 Subject: Update MapServer URL. --- EXAMPLES | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/EXAMPLES b/EXAMPLES index 9eb0b0ce..709a6f76 100644 --- a/EXAMPLES +++ b/EXAMPLES @@ -22,7 +22,7 @@ included, please mail to `the Google group * Hedge: http://documen.tician.de/hedge/ * IFM: http://fluffybunny.memebot.com/ifm-docs/index.html * Jinja: http://jinja.pocoo.org/2/documentation/ -* MapServer: http://mapserver.osgeo.org/ +* MapServer: http://mapserver.org/ * Matplotlib: http://matplotlib.sourceforge.net/ * Mayavi: http://code.enthought.com/projects/mayavi/docs/development/html/mayavi * MeshPy: http://documen.tician.de/meshpy/ -- cgit v1.2.1 From 9ac2f1ad4ee44bb7455cc1e979d3f38c84c9960e Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 16 Mar 2009 01:11:33 +0100 Subject: Add Sage. --- EXAMPLES | 1 + 1 file changed, 1 insertion(+) diff --git a/EXAMPLES b/EXAMPLES index 709a6f76..41b5774a 100644 --- a/EXAMPLES +++ b/EXAMPLES @@ -53,6 +53,7 @@ included, please mail to `the Google group * Quex: http://quex.sourceforge.net/ * Reteisi: http://docs.argolinux.org/reteisi/ * Roundup: http://www.roundup-tracker.org/ +* Sage: http://sagemath.org/doc/ * Satchmo: http://www.satchmoproject.com/docs/svn/ * Scapy: http://www.secdev.org/projects/scapy/doc/ * Self: http://selflanguage.org/ -- cgit v1.2.1 From d7fa65d4bbc99358bbf0eea0040ff0168d3cf094 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 16 Mar 2009 01:30:09 +0100 Subject: Add autosummary doc stub. --- doc/ext/autosummary.rst | 9 +++++++++ doc/extensions.rst | 1 + 2 files changed, 10 insertions(+) create mode 100644 doc/ext/autosummary.rst diff --git a/doc/ext/autosummary.rst b/doc/ext/autosummary.rst new file mode 100644 index 00000000..a9255857 --- /dev/null +++ b/doc/ext/autosummary.rst @@ -0,0 +1,9 @@ +.. 
highlight:: rest + +:mod:`sphinx.ext.autosummary` -- Generate autodoc summaries +=========================================================== + +.. module:: sphinx.ext.autosummary + :synopsis: Generate autodoc summaries + +TBW. diff --git a/doc/extensions.rst b/doc/extensions.rst index 21ba0fd8..5eb26c14 100644 --- a/doc/extensions.rst +++ b/doc/extensions.rst @@ -41,6 +41,7 @@ These extensions are built in and can be activated by respective entries in the .. toctree:: ext/autodoc + ext/autosummary ext/doctest ext/intersphinx ext/math -- cgit v1.2.1 From b7ba1a3be7b22d45d6d044c3010bca7fa5701588 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 16 Mar 2009 12:07:23 +0100 Subject: Fix test_env. --- tests/test_env.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_env.py b/tests/test_env.py index a06656d6..94e7af93 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -80,7 +80,7 @@ def test_second_update(): (root / 'autodoc.txt').unlink() (root / 'new.txt').write_text('New file\n========\n') msg, num, it = env.update(app.config, app.srcdir, app.doctreedir, app) - assert '1 added, 3 changed, 1 removed' in msg + assert '1 added, 4 changed, 1 removed' in msg docnames = set() for docname in it: docnames.add(docname) -- cgit v1.2.1 From 76fe43679a3aa2a2e1941ef04140c569478254cb Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 16 Mar 2009 21:15:26 +0100 Subject: Fix for graphviz map files from Sebastian Wiesner. 
--- sphinx/ext/graphviz.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py index d51e2ed5..ea7b206d 100644 --- a/sphinx/ext/graphviz.py +++ b/sphinx/ext/graphviz.py @@ -91,7 +91,7 @@ def render_dot(self, code, options, format, prefix='graphviz'): outfn = path.join(self.builder.outdir, fname) if path.isfile(outfn): - return relfn + return relfn, outfn if hasattr(self.builder, '_graphviz_warned_dot') or \ hasattr(self.builder, '_graphviz_warned_ps2pdf'): @@ -122,12 +122,12 @@ def render_dot(self, code, options, format, prefix='graphviz'): if p.returncode != 0: raise GraphvizError('dot exited with error:\n[stderr]\n%s\n' '[stdout]\n%s' % (stderr, stdout)) - return relfn + return relfn, outfn def render_dot_html(self, node, code, options, prefix='graphviz', imgcls=None): try: - fname = render_dot(self, code, options, 'png', prefix) + fname, outfn = render_dot(self, code, options, 'png', prefix) except GraphvizError, exc: self.builder.warn('dot code %r: ' % code + str(exc)) raise nodes.SkipNode @@ -136,9 +136,11 @@ def render_dot_html(self, node, code, options, prefix='graphviz', imgcls=None): if fname is None: self.body.append(self.encode(code)) else: - mapfile = open(path.join(self.builder.outdir, fname) + '.map', 'rb') - imgmap = mapfile.readlines() - mapfile.close() + mapfile = open(outfn + '.map', 'rb') + try: + imgmap = mapfile.readlines() + finally: + mapfile.close() imgcss = imgcls and 'class="%s"' % imgcls or '' if len(imgmap) == 2: # nothing in image map (the lines are and ) -- cgit v1.2.1 From b1d5b3540860d02654a9992bcb3b8e13c0543a60 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 16 Mar 2009 23:30:17 +0100 Subject: Do proper cleanup of generated files. 
--- tests/test_env.py | 1 + tests/test_i18n.py | 4 ++++ tests/test_theming.py | 4 ++++ 3 files changed, 9 insertions(+) diff --git a/tests/test_env.py b/tests/test_env.py index a06656d6..ba0a916f 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -26,6 +26,7 @@ def setup_module(): def teardown_module(): app.cleanup() + (test_root / 'generated').rmtree() def warning_emitted(file, text): for warning in warnings: diff --git a/tests/test_i18n.py b/tests/test_i18n.py index 94648727..61dbef84 100644 --- a/tests/test_i18n.py +++ b/tests/test_i18n.py @@ -11,6 +11,10 @@ from util import * +def teardown_module(): + (test_root / '_build').rmtree() + (test_root / 'generated').rmtree() + @with_app(confoverrides={'language': 'de'}) def test_i18n(app): diff --git a/tests/test_theming.py b/tests/test_theming.py index 349a9ce4..cf450c02 100644 --- a/tests/test_theming.py +++ b/tests/test_theming.py @@ -16,6 +16,10 @@ from util import * from sphinx.theming import Theme, ThemeError +def teardown_module(): + (test_root / '_build').rmtree() + (test_root / 'generated').rmtree() + @with_app(confoverrides={'html_theme': 'ziptheme', 'html_theme_options.testopt': 'foo'}) -- cgit v1.2.1 From dad7f3eaf3fe59ed30825ee2adf7d0d77da01b02 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 16 Mar 2009 23:46:58 +0100 Subject: Restore 2.4 compatibility and fix removing the generated file properly. 
--- sphinx/ext/autodoc.py | 7 ++++++- tests/test_autodoc.py | 3 ++- tests/test_build.py | 3 +-- tests/test_env.py | 1 - tests/test_i18n.py | 4 ---- tests/test_theming.py | 4 ---- tests/util.py | 2 +- 7 files changed, 10 insertions(+), 14 deletions(-) diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index 5c1a0653..72d50029 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -853,7 +853,12 @@ class ClassDocumenter(ModuleLevelDocumenter): if initmeth is None or initmeth is object.__init__ or not \ (inspect.ismethod(initmeth) or inspect.isfunction(initmeth)): return None - argspec = inspect.getargspec(initmeth) + try: + argspec = inspect.getargspec(initmeth) + except TypeError: + # still not possible: happens e.g. for old-style classes + # with __init__ in C + return None if argspec[0] and argspec[0][0] in ('cls', 'self'): del argspec[0][0] return inspect.formatargspec(*argspec) diff --git a/tests/test_autodoc.py b/tests/test_autodoc.py index d7e2f4ef..8e011438 100644 --- a/tests/test_autodoc.py +++ b/tests/test_autodoc.py @@ -470,9 +470,10 @@ class Class(Base): #: should be documented -- süß attr = 'bar' - @property def prop(self): """Property.""" + # stay 2.4 compatible (docstring!) 
+ prop = property(prop, doc="Property.") docattr = 'baz' """should likewise be documented -- süß""" diff --git a/tests/test_build.py b/tests/test_build.py index 18c78499..7b33dc09 100644 --- a/tests/test_build.py +++ b/tests/test_build.py @@ -31,8 +31,7 @@ from sphinx.writers.latex import LaTeXTranslator def teardown_module(): - (test_root / '_build').rmtree() - (test_root / 'generated').rmtree() + (test_root / '_build').rmtree(True) html_warnfile = StringIO() diff --git a/tests/test_env.py b/tests/test_env.py index ba0a916f..a06656d6 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -26,7 +26,6 @@ def setup_module(): def teardown_module(): app.cleanup() - (test_root / 'generated').rmtree() def warning_emitted(file, text): for warning in warnings: diff --git a/tests/test_i18n.py b/tests/test_i18n.py index 61dbef84..94648727 100644 --- a/tests/test_i18n.py +++ b/tests/test_i18n.py @@ -11,10 +11,6 @@ from util import * -def teardown_module(): - (test_root / '_build').rmtree() - (test_root / 'generated').rmtree() - @with_app(confoverrides={'language': 'de'}) def test_i18n(app): diff --git a/tests/test_theming.py b/tests/test_theming.py index cf450c02..349a9ce4 100644 --- a/tests/test_theming.py +++ b/tests/test_theming.py @@ -16,10 +16,6 @@ from util import * from sphinx.theming import Theme, ThemeError -def teardown_module(): - (test_root / '_build').rmtree() - (test_root / 'generated').rmtree() - @with_app(confoverrides={'html_theme': 'ziptheme', 'html_theme_options.testopt': 'foo'}) diff --git a/tests/util.py b/tests/util.py index 2c9d3176..4bb6a653 100644 --- a/tests/util.py +++ b/tests/util.py @@ -105,7 +105,7 @@ class TestApp(application.Sphinx): application.CONFIG_FILENAME = confname - self.cleanup_trees = [] + self.cleanup_trees = [test_root / 'generated'] if srcdir is None: srcdir = test_root -- cgit v1.2.1 From a1691a3424ae6b91743db7f37cf4fe86f7c93760 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 16 Mar 2009 23:50:34 +0100 Subject: Update 
version info for 0.6b1. --- sphinx/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sphinx/__init__.py b/sphinx/__init__.py index e0d8813f..0e37bf15 100644 --- a/sphinx/__init__.py +++ b/sphinx/__init__.py @@ -13,8 +13,8 @@ import sys from os import path __revision__ = '$Revision$' -__version__ = '0.6' -__released__ = '0.6 (hg)' +__version__ = '0.6b1' +__released__ = '0.6b1' package_dir = path.abspath(path.dirname(__file__)) -- cgit v1.2.1