author     shimizukawa <shimizukawa@gmail.com>   2014-04-29 23:44:12 +0900
committer  shimizukawa <shimizukawa@gmail.com>   2014-04-29 23:44:12 +0900
commit     16720b18c13a747e562c55842e7814d57376506e (patch)
tree       c2c9028bcde9a51b75743a7b00d298da59e86e03
parent     af372f6aceb77cf0586fdb786a674cdfd72c547d (diff)
download   sphinx-16720b18c13a747e562c55842e7814d57376506e.tar.gz
use six-provided text_type() to replace unicode(), to support py2/py3 in one source. refs #1350.
-rw-r--r--  sphinx/builders/html.py  4
-rw-r--r--  sphinx/builders/qthelp.py  3
-rw-r--r--  sphinx/cmdline.py  5
-rw-r--r--  sphinx/config.py  8
-rw-r--r--  sphinx/domains/cpp.py  62
-rw-r--r--  sphinx/environment.py  6
-rw-r--r--  sphinx/ext/autodoc.py  12
-rw-r--r--  sphinx/ext/autosummary/__init__.py  3
-rw-r--r--  sphinx/ext/graphviz.py  3
-rw-r--r--  sphinx/ext/pngmath.py  3
-rw-r--r--  sphinx/ext/viewcode.py  4
-rw-r--r--  sphinx/highlighting.py  7
-rw-r--r--  sphinx/locale/__init__.py  9
-rw-r--r--  sphinx/pycode/__init__.py  6
-rw-r--r--  sphinx/pycode/pgen2/literals.py  5
-rw-r--r--  sphinx/quickstart.py  7
-rw-r--r--  sphinx/search/__init__.py  4
-rw-r--r--  sphinx/util/__init__.py  4
-rw-r--r--  sphinx/util/jsonimpl.py  3
-rw-r--r--  sphinx/util/osutil.py  3
-rw-r--r--  sphinx/util/pycompat.py  2
-rw-r--r--  sphinx/websupport/search/__init__.py  4
-rw-r--r--  sphinx/websupport/search/whooshsearch.py  4
-rw-r--r--  sphinx/writers/latex.py  8
-rw-r--r--  tests/etree13/HTMLTreeBuilder.py  4
-rwxr-xr-x  tests/path.py  9
-rw-r--r--  tests/test_cpp_domain.py  84
-rw-r--r--  tests/test_quickstart.py  3
28 files changed, 151 insertions, 128 deletions
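
The change applies one pattern throughout: calls to the Python 2-only builtin unicode() are replaced with six.text_type, which aliases unicode on Python 2 and str on Python 3, so a single code path works on both interpreters. A minimal sketch of the behaviour being relied on (illustrative only, not part of the commit):

# Illustrative sketch, not part of the commit: six.text_type is unicode
# on Python 2 and str on Python 3, so one spelling covers both.
from __future__ import print_function

import sys

from six import text_type

def stable_digest_input(obj):
    # Same idiom as get_stable_hash() in sphinx/builders/html.py below:
    # build a text representation first, encode explicitly when bytes
    # are required.
    return text_type(obj).encode('utf8')

print(sys.version_info[0], text_type is str, repr(stable_digest_input([1, 2])))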
diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py
index a1daced4..10d21f7a 100644
--- a/sphinx/builders/html.py
+++ b/sphinx/builders/html.py
@@ -18,7 +18,7 @@ from os import path
from hashlib import md5
import six
-from six import iteritems, itervalues
+from six import iteritems, itervalues, text_type
from six.moves import cPickle as pickle
from docutils import nodes
from docutils.io import DocTreeInput, StringOutput
@@ -60,7 +60,7 @@ def get_stable_hash(obj):
return get_stable_hash(list(obj.items()))
elif isinstance(obj, (list, tuple)):
obj = sorted(get_stable_hash(o) for o in obj)
- return md5(unicode(obj).encode('utf8')).hexdigest()
+ return md5(text_type(obj).encode('utf8')).hexdigest()
class StandaloneHTMLBuilder(Builder):
diff --git a/sphinx/builders/qthelp.py b/sphinx/builders/qthelp.py
index 1863fb49..c0fff2a6 100644
--- a/sphinx/builders/qthelp.py
+++ b/sphinx/builders/qthelp.py
@@ -15,6 +15,7 @@ import codecs
import posixpath
from os import path
+from six import text_type
from docutils import nodes
from sphinx import addnodes
@@ -136,7 +137,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
# they are all unicode strings before joining them
new_sections = []
for section in sections:
- if not isinstance(section, unicode):
+ if not isinstance(section, text_type):
new_sections.append(force_decode(section, None))
else:
new_sections.append(section)
diff --git a/sphinx/cmdline.py b/sphinx/cmdline.py
index 377f2385..aa45b8a7 100644
--- a/sphinx/cmdline.py
+++ b/sphinx/cmdline.py
@@ -17,6 +17,7 @@ import traceback
from os import path
import six
+from six import text_type
from docutils.utils import SystemMessage
from sphinx import __version__
@@ -272,10 +273,10 @@ def main(argv):
print(terminal_safe(err.args[0]), file=error)
elif isinstance(err, SphinxError):
print(red('%s:' % err.category), file=error)
- print(terminal_safe(unicode(err)), file=error)
+ print(terminal_safe(text_type(err)), file=error)
elif isinstance(err, UnicodeError):
print(red('Encoding error:'), file=error)
- print(terminal_safe(unicode(err)), file=error)
+ print(terminal_safe(text_type(err)), file=error)
tbpath = save_traceback(app)
print(red('The full traceback has been saved in %s, if you want '
'to report the issue to the developers.' % tbpath),
diff --git a/sphinx/config.py b/sphinx/config.py
index dcfcc624..56e28c03 100644
--- a/sphinx/config.py
+++ b/sphinx/config.py
@@ -14,7 +14,7 @@ import re
from os import path
import six
-from six import iteritems
+from six import iteritems, string_types
from sphinx.errors import ConfigError
from sphinx.locale import l_
@@ -215,7 +215,7 @@ class Config(object):
self.values = Config.config_values.copy()
config = {}
if 'extensions' in overrides:
- if isinstance(overrides['extensions'], (str, unicode)):
+ if isinstance(overrides['extensions'], string_types):
config['extensions'] = overrides.pop('extensions').split(',')
else:
config['extensions'] = overrides.pop('extensions')
@@ -262,7 +262,7 @@ class Config(object):
warn('unknown config value %r in override, ignoring' % valname)
continue
defvalue = self.values[valname][0]
- if isinstance(value, (str, unicode)):
+ if isinstance(value, string_types):
if isinstance(defvalue, dict):
warn('cannot override dictionary config setting %r, '
'ignoring (use %r to set individual elements)' %
@@ -276,7 +276,7 @@ class Config(object):
except ValueError:
warn('invalid number %r for config value %r, ignoring'
% (value, valname))
- elif defvalue is not None and not isinstance(defvalue, (str, unicode)):
+ elif defvalue is not None and not isinstance(defvalue, string_types):
warn('cannot override config setting %r with unsupported type, '
'ignoring' % valname)
else:
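
Note that config.py switches to six.string_types rather than text_type: on Python 2 an override value may arrive as either a byte string or a unicode string, and the isinstance checks have to accept both. A short illustration of the difference (not from the commit):

# Illustrative only: string_types is (str, unicode) on Python 2 and
# (str,) on Python 3; text_type is the unicode text type alone.
from __future__ import print_function

from six import string_types, text_type

def classify(value):
    kind = 'string-like' if isinstance(value, string_types) else 'other'
    return kind, isinstance(value, text_type)

print(classify(b'bytes'))   # ('string-like', False) on Py2, ('other', False) on Py3
print(classify(u'text'))    # ('string-like', True) on both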
diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py
index c380d303..0b9d3b02 100644
--- a/sphinx/domains/cpp.py
+++ b/sphinx/domains/cpp.py
@@ -12,7 +12,7 @@
import re
from copy import deepcopy
-from six import iteritems
+from six import iteritems, text_type
from docutils import nodes
from sphinx import addnodes
@@ -110,7 +110,7 @@ class DefinitionError(Exception):
self.description = description
def __str__(self):
- return unicode(self).encode('utf-8')
+ return text_type(self).encode('utf-8')
def __unicode__(self):
return self.description
@@ -163,7 +163,7 @@ class DefExpr(object):
raise NotImplementedError()
def __str__(self):
- return unicode(self).encode('utf-8')
+ return text_type(self).encode('utf-8')
def __unicode__(self):
raise NotImplementedError()
@@ -197,7 +197,7 @@ class NameDefExpr(PrimaryDefExpr):
return self.name.replace(u' ', u'-')
def __unicode__(self):
- return unicode(self.name)
+ return text_type(self.name)
class PathDefExpr(PrimaryDefExpr):
@@ -222,7 +222,7 @@ class PathDefExpr(PrimaryDefExpr):
return PathDefExpr([prefix] + self.path)
def __unicode__(self):
- return u'::'.join(map(unicode, self.path))
+ return u'::'.join(map(text_type, self.path))
class ArrayTypeSuffixDefExpr(object):
@@ -235,7 +235,7 @@ class ArrayTypeSuffixDefExpr(object):
def __unicode__(self):
return u'[%s]' % (
- self.size_hint is not None and unicode(self.size_hint) or u'',
+ self.size_hint is not None and text_type(self.size_hint) or u'',
)
@@ -254,7 +254,7 @@ class TemplateDefExpr(PrimaryDefExpr):
u'.'.join(x.get_id() for x in self.args))
def __unicode__(self):
- return u'%s<%s>' % (self.typename, u', '.join(map(unicode, self.args)))
+ return u'%s<%s>' % (self.typename, u', '.join(map(text_type, self.args)))
class ConstantTemplateArgExpr(PrimaryDefExpr):
@@ -266,7 +266,7 @@ class ConstantTemplateArgExpr(PrimaryDefExpr):
return self.arg.replace(u' ', u'-')
def __unicode__(self):
- return unicode(self.arg)
+ return text_type(self.arg)
class WrappingDefExpr(DefExpr):
@@ -285,13 +285,13 @@ class ModifierDefExpr(WrappingDefExpr):
self.modifiers = modifiers
def get_id(self):
- pieces = [_id_shortwords.get(unicode(x), unicode(x))
+ pieces = [_id_shortwords.get(text_type(x), text_type(x))
for x in self.modifiers]
pieces.append(self.typename.get_id())
return u'-'.join(pieces)
def __unicode__(self):
- return u' '.join(map(unicode, list(self.modifiers) + [self.typename]))
+ return u' '.join(map(text_type, list(self.modifiers) + [self.typename]))
class PtrDefExpr(WrappingDefExpr):
@@ -369,7 +369,7 @@ class ArgumentDefExpr(DefExpr):
if self.default is not None:
buf.append('=%s' % self.default)
for suffix in self.type_suffixes:
- buf.append(unicode(suffix))
+ buf.append(text_type(suffix))
return u''.join(buf)
@@ -411,12 +411,12 @@ class TypeObjDefExpr(NamedDefExpr):
def __unicode__(self):
buf = self.get_modifiers()
if self.typename is None:
- buf.append(unicode(self.name))
+ buf.append(text_type(self.name))
else:
- buf.extend(map(unicode, (self.typename, self.name)))
+ buf.extend(map(text_type, (self.typename, self.name)))
buf = [u' '.join(buf)]
for suffix in self.type_suffixes:
- buf.append(unicode(suffix))
+ buf.append(text_type(suffix))
return u''.join(buf)
@@ -437,10 +437,10 @@ class MemberObjDefExpr(NamedDefExpr):
def __unicode__(self):
buf = self.get_modifiers()
- buf.extend((unicode(self.typename), unicode(self.name)))
+ buf.extend((text_type(self.typename), text_type(self.name)))
buf = [u' '.join(buf)]
for suffix in self.type_suffixes:
- buf.append(unicode(suffix))
+ buf.append(text_type(suffix))
if self.value is not None:
buf.append(u' = %s' % self.value)
return u''.join(buf)
@@ -481,9 +481,9 @@ class FuncDefExpr(NamedDefExpr):
if self.constexpr:
buf.append(u'constexpr')
if self.rv is not None:
- buf.append(unicode(self.rv))
+ buf.append(text_type(self.rv))
buf.append(u'%s(%s)' % (self.name, u', '.join(
- map(unicode, self.signature))))
+ map(text_type, self.signature))))
if self.const:
buf.append(u'const')
if self.volatile:
@@ -516,7 +516,7 @@ class ClassDefExpr(NamedDefExpr):
def _tostring(self, visibility='public'):
buf = self.get_modifiers(visibility)
- buf.append(unicode(self.name))
+ buf.append(text_type(self.name))
if self.bases:
buf.append(u':')
buf.append(u', '.join(base._tostring('private')
@@ -994,19 +994,19 @@ class CPPObject(ObjectDescription):
def attach_name(self, node, name):
owner, name = name.split_owner()
- varname = unicode(name)
+ varname = text_type(name)
if owner is not None:
- owner = unicode(owner) + '::'
+ owner = text_type(owner) + '::'
node += addnodes.desc_addname(owner, owner)
node += addnodes.desc_name(varname, varname)
def attach_type_suffixes(self, node, suffixes):
for suffix in suffixes:
- node += nodes.Text(unicode(suffix))
+ node += nodes.Text(text_type(suffix))
def attach_type(self, node, type):
# XXX: link to c?
- text = unicode(type)
+ text = text_type(type)
pnode = addnodes.pending_xref(
'', refdomain='cpp', reftype='type',
reftarget=text, modname=None, classname=None)
@@ -1028,7 +1028,7 @@ class CPPObject(ObjectDescription):
def add_target_and_index(self, sigobj, sig, signode):
theid = sigobj.get_id()
- name = unicode(sigobj.name)
+ name = text_type(sigobj.name)
if theid not in self.state.document.ids:
signode['names'].append(theid)
signode['ids'].append(theid)
@@ -1094,8 +1094,8 @@ class CPPClassObject(CPPObject):
signode += nodes.Text(' : ')
for base in cls.bases:
self.attach_modifiers(signode, base, 'private')
- signode += nodes.emphasis(unicode(base.name),
- unicode(base.name))
+ signode += nodes.emphasis(text_type(base.name),
+ text_type(base.name))
signode += nodes.Text(', ')
signode.pop() # remove the trailing comma
@@ -1145,7 +1145,7 @@ class CPPFunctionObject(CPPObject):
def attach_function(self, node, func):
owner, name = func.name.split_owner()
if owner is not None:
- owner = unicode(owner) + '::'
+ owner = text_type(owner) + '::'
node += addnodes.desc_addname(owner, owner)
# cast operator is special. in this case the return value
@@ -1155,7 +1155,7 @@ class CPPFunctionObject(CPPObject):
node += nodes.Text(u' ')
self.attach_type(node, name.typename)
else:
- funcname = unicode(name)
+ funcname = text_type(name)
node += addnodes.desc_name(funcname, funcname)
paramlist = addnodes.desc_parameterlist()
@@ -1164,10 +1164,10 @@ class CPPFunctionObject(CPPObject):
if arg.type is not None:
self.attach_type(param, arg.type)
param += nodes.Text(u' ')
- param += nodes.emphasis(unicode(arg.name), unicode(arg.name))
+ param += nodes.emphasis(text_type(arg.name), text_type(arg.name))
self.attach_type_suffixes(param, arg.type_suffixes)
if arg.default is not None:
- def_ = u'=' + unicode(arg.default)
+ def_ = u'=' + text_type(arg.default)
param += nodes.emphasis(def_, def_)
paramlist += param
@@ -1280,7 +1280,7 @@ class CPPDomain(Domain):
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
def _create_refnode(expr):
- name = unicode(expr)
+ name = text_type(expr)
if name not in self.data['objects']:
return None
obj = self.data['objects'][name]
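
The cpp domain keeps the Python 2 split where __unicode__ holds the text and __str__ encodes it; text_type(self) resolves to __unicode__ only on Python 2. A hypothetical py2/py3-safe way to pair the two methods, shown purely for comparison (this is not what the commit does):

# Hypothetical sketch, not the commit's code: define __str__ per
# interpreter so text_type(obj) and str(obj) both stay consistent.
from six import PY2, text_type

class Expr(object):
    def __init__(self, value):
        self.value = value

    def __unicode__(self):
        return text_type(self.value)

    if PY2:
        def __str__(self):
            # Python 2's __str__ is expected to return bytes.
            return self.__unicode__().encode('utf-8')
    else:
        __str__ = __unicode__

print(Expr(u'T<int>'))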
diff --git a/sphinx/environment.py b/sphinx/environment.py
index abf9a73a..eab36fb2 100644
--- a/sphinx/environment.py
+++ b/sphinx/environment.py
@@ -23,7 +23,7 @@ from glob import glob
from itertools import groupby
import six
-from six import iteritems, itervalues
+from six import iteritems, itervalues, text_type
from six.moves import cPickle as pickle, zip
from docutils import nodes
from docutils.io import FileInput, NullOutput
@@ -594,7 +594,7 @@ class BuildEnvironment:
FileInput.__init__(self_, *args, **kwds)
def decode(self_, data):
- if isinstance(data, unicode):
+ if isinstance(data, text_type):
return data
return data.decode(self_.encoding, 'sphinx')
@@ -1509,7 +1509,7 @@ class BuildEnvironment:
# Force the word to be unicode if it's a ASCII bytestring.
# This will solve problems with unicode normalization later.
# For instance the RFC role will add bytestrings at the moment
- word = unicode(word)
+ word = text_type(word)
entry = dic.get(word)
if not entry:
dic[word] = entry = [[], {}]
diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py
index b5db636c..c9857606 100644
--- a/sphinx/ext/autodoc.py
+++ b/sphinx/ext/autodoc.py
@@ -18,7 +18,7 @@ import traceback
from types import FunctionType, BuiltinFunctionType, MethodType
import six
-from six import iteritems, itervalues
+from six import iteritems, itervalues, text_type
from docutils import nodes
from docutils.utils import assemble_option_dict
from docutils.statemachine import ViewList
@@ -481,7 +481,7 @@ class Documenter(object):
docstring = self.get_attr(self.object, '__doc__', None)
# make sure we have Unicode docstrings, then sanitize and split
# into lines
- if isinstance(docstring, unicode):
+ if isinstance(docstring, text_type):
return [prepare_docstring(docstring, ignore)]
elif isinstance(docstring, str): # this will not trigger on Py3
return [prepare_docstring(force_decode(docstring, encoding),
@@ -505,9 +505,9 @@ class Documenter(object):
# set sourcename and add content from attribute documentation
if self.analyzer:
# prevent encoding errors when the file name is non-ASCII
- if not isinstance(self.analyzer.srcname, unicode):
- filename = unicode(self.analyzer.srcname,
- sys.getfilesystemencoding(), 'replace')
+ if not isinstance(self.analyzer.srcname, text_type):
+ filename = text_type(self.analyzer.srcname,
+ sys.getfilesystemencoding(), 'replace')
else:
filename = self.analyzer.srcname
sourcename = u'%s:docstring of %s' % (filename, self.fullname)
@@ -1129,7 +1129,7 @@ class ClassDocumenter(ModuleLevelDocumenter):
docstrings.append(initdocstring)
doc = []
for docstring in docstrings:
- if not isinstance(docstring, unicode):
+ if not isinstance(docstring, text_type):
docstring = force_decode(docstring, encoding)
doc.append(prepare_docstring(docstring))
return doc
diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py
index 5bee387e..8ba76a58 100644
--- a/sphinx/ext/autosummary/__init__.py
+++ b/sphinx/ext/autosummary/__init__.py
@@ -59,6 +59,7 @@ import sys
import inspect
import posixpath
+from six import text_type
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from docutils import nodes
@@ -116,7 +117,7 @@ def autosummary_table_visit_html(self, node):
par = col1_entry[0]
for j, subnode in enumerate(list(par)):
if isinstance(subnode, nodes.Text):
- new_text = unicode(subnode.astext())
+ new_text = text_type(subnode.astext())
new_text = new_text.replace(u" ", u"\u00a0")
par[j] = nodes.Text(new_text)
except IndexError:
diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py
index abfb6ac6..3fbded34 100644
--- a/sphinx/ext/graphviz.py
+++ b/sphinx/ext/graphviz.py
@@ -20,6 +20,7 @@ try:
except ImportError:
from sha import sha
+from six import text_type
from docutils import nodes
from docutils.parsers.rst import directives
@@ -145,7 +146,7 @@ def render_dot(self, code, options, format, prefix='graphviz'):
ensuredir(path.dirname(outfn))
# graphviz expects UTF-8 by default
- if isinstance(code, unicode):
+ if isinstance(code, text_type):
code = code.encode('utf-8')
dot_args = [self.builder.config.graphviz_dot]
diff --git a/sphinx/ext/pngmath.py b/sphinx/ext/pngmath.py
index 6bfe644c..9bc1e546 100644
--- a/sphinx/ext/pngmath.py
+++ b/sphinx/ext/pngmath.py
@@ -21,6 +21,7 @@ try:
except ImportError:
from sha import sha
+from six import text_type
from docutils import nodes
from sphinx.errors import SphinxError
@@ -191,7 +192,7 @@ def html_visit_math(self, node):
try:
fname, depth = render_math(self, '$'+node['latex']+'$')
except MathExtError as exc:
- msg = unicode(exc)
+ msg = text_type(exc)
sm = nodes.system_message(msg, type='WARNING', level=2,
backrefs=[], source=node['latex'])
sm.walkabout(self)
diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py
index cfc8ebc5..4a62bf6d 100644
--- a/sphinx/ext/viewcode.py
+++ b/sphinx/ext/viewcode.py
@@ -9,7 +9,7 @@
:license: BSD, see LICENSE for details.
"""
-from six import iteritems
+from six import iteritems, text_type
from docutils import nodes
from sphinx import addnodes
@@ -30,7 +30,7 @@ def doctree_read(app, doctree):
except Exception:
env._viewcode_modules[modname] = False
return
- if not isinstance(analyzer.code, unicode):
+ if not isinstance(analyzer.code, text_type):
code = analyzer.code.decode(analyzer.encoding)
else:
code = analyzer.code
diff --git a/sphinx/highlighting.py b/sphinx/highlighting.py
index 6e5ada6f..818705b2 100644
--- a/sphinx/highlighting.py
+++ b/sphinx/highlighting.py
@@ -19,6 +19,7 @@ except ImportError:
parser = None
import six
+from six import text_type
from sphinx.util.pycompat import htmlescape
from sphinx.util.texescape import tex_hl_escape_map_new
@@ -132,7 +133,7 @@ class PygmentsBridge(object):
# lines beginning with "..." are probably placeholders for suite
src = re.sub(r"(?m)^(\s*)" + mark + "(.)", r"\1"+ mark + r"# \2", src)
- if six.PY2 and isinstance(src, unicode):
+ if six.PY2 and isinstance(src, text_type):
# Non-ASCII chars will only occur in string literals
# and comments. If we wanted to give them to the parser
# correctly, we'd have to find out the correct source
@@ -151,7 +152,7 @@ class PygmentsBridge(object):
return True
def highlight_block(self, source, lang, warn=None, force=False, **kwargs):
- if not isinstance(source, unicode):
+ if not isinstance(source, text_type):
source = source.decode()
if not pygments:
return self.unhighlighted(source)
@@ -208,7 +209,7 @@ class PygmentsBridge(object):
if self.dest == 'html':
return hlsource
else:
- if not isinstance(hlsource, unicode): # Py2 / Pygments < 1.6
+ if not isinstance(hlsource, text_type): # Py2 / Pygments < 1.6
hlsource = hlsource.decode()
return hlsource.translate(tex_hl_escape_map_new)
diff --git a/sphinx/locale/__init__.py b/sphinx/locale/__init__.py
index 913533f4..113a5a1b 100644
--- a/sphinx/locale/__init__.py
+++ b/sphinx/locale/__init__.py
@@ -12,6 +12,7 @@
import gettext
import six
+from six import text_type
from six.moves import UserString
@@ -33,7 +34,7 @@ class _TranslationProxy(UserString, object):
def __new__(cls, func, *args):
if not args:
# not called with "function" and "arguments", but a plain string
- return unicode(func)
+ return text_type(func)
return object.__new__(cls)
def __getnewargs__(self):
@@ -64,7 +65,7 @@ class _TranslationProxy(UserString, object):
return bool(self.data)
def __dir__(self):
- return dir(unicode)
+ return dir(text_type)
def __iter__(self):
return iter(self.data)
@@ -76,7 +77,7 @@ class _TranslationProxy(UserString, object):
return str(self.data)
def __unicode__(self):
- return unicode(self.data)
+ return text_type(self.data)
def __add__(self, other):
return self.data + other
@@ -133,7 +134,7 @@ class _TranslationProxy(UserString, object):
def __repr__(self):
try:
- return 'i' + repr(unicode(self.data))
+ return 'i' + repr(text_type(self.data))
except:
return '<%s broken>' % self.__class__.__name__
diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py
index d0bdcad4..2ae0bcda 100644
--- a/sphinx/pycode/__init__.py
+++ b/sphinx/pycode/__init__.py
@@ -14,7 +14,7 @@ import sys
from os import path
import six
-from six import iteritems
+from six import iteritems, text_type
from sphinx import package_dir
from sphinx.errors import PycodeError
@@ -101,7 +101,7 @@ class AttrDocVisitor(nodes.NodeVisitor):
continue # skip over semicolon
if parent[idx].type == sym.NEWLINE:
prefix = parent[idx].get_prefix()
- if not isinstance(prefix, unicode):
+ if not isinstance(prefix, text_type):
prefix = prefix.decode(self.encoding)
docstring = prepare_commentdoc(prefix)
if docstring:
@@ -119,7 +119,7 @@ class AttrDocVisitor(nodes.NodeVisitor):
if not pnode or pnode.type not in (token.INDENT, token.DEDENT):
break
prefix = pnode.get_prefix()
- if not isinstance(prefix, unicode):
+ if not isinstance(prefix, text_type):
prefix = prefix.decode(self.encoding)
docstring = prepare_commentdoc(prefix)
self.add_docstring(node, docstring)
diff --git a/sphinx/pycode/pgen2/literals.py b/sphinx/pycode/pgen2/literals.py
index ce4a0ebc..25e09b62 100644
--- a/sphinx/pycode/pgen2/literals.py
+++ b/sphinx/pycode/pgen2/literals.py
@@ -8,6 +8,9 @@ from __future__ import print_function
import re
+from six import text_type
+
+
simple_escapes = {"a": "\a",
"b": "\b",
"f": "\f",
@@ -67,7 +70,7 @@ uni_escape_re = re.compile(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3}|"
def evalString(s, encoding=None):
regex = escape_re
repl = escape
- if encoding and not isinstance(s, unicode):
+ if encoding and not isinstance(s, text_type):
s = s.decode(encoding)
if s.startswith('u') or s.startswith('U'):
regex = uni_escape_re
diff --git a/sphinx/quickstart.py b/sphinx/quickstart.py
index f2ccb4e5..7117e336 100644
--- a/sphinx/quickstart.py
+++ b/sphinx/quickstart.py
@@ -27,6 +27,7 @@ except ImportError:
pass
import six
+from six import text_type
from six.moves import input
from docutils.utils import column_width
@@ -1013,7 +1014,7 @@ def do_prompt(d, key, text, default=None, validator=nonempty):
x = term_input(prompt).strip()
if default and not x:
x = default
- if not isinstance(x, unicode):
+ if not isinstance(x, text_type):
# for Python 2.x, try to get a Unicode string out of it
if x.decode('ascii', 'replace').encode('ascii', 'replace') != x:
if TERM_ENCODING:
@@ -1239,10 +1240,10 @@ def generate(d, overwrite=True, silent=False):
else:
d['extensions'] = extensions
d['copyright'] = time.strftime('%Y') + ', ' + d['author']
- d['author_texescaped'] = unicode(d['author']).\
+ d['author_texescaped'] = text_type(d['author']).\
translate(texescape.tex_escape_map)
d['project_doc'] = d['project'] + ' Documentation'
- d['project_doc_texescaped'] = unicode(d['project'] + ' Documentation').\
+ d['project_doc_texescaped'] = text_type(d['project'] + ' Documentation').\
translate(texescape.tex_escape_map)
# escape backslashes and single quotes in strings that are put into
diff --git a/sphinx/search/__init__.py b/sphinx/search/__init__.py
index c61dc1c0..ea6fc2c8 100644
--- a/sphinx/search/__init__.py
+++ b/sphinx/search/__init__.py
@@ -13,7 +13,7 @@ from __future__ import with_statement
import re
import six
-from six import iteritems, itervalues
+from six import iteritems, itervalues, text_type
from six.moves import cPickle as pickle
from docutils.nodes import raw, comment, title, Text, NodeVisitor, SkipNode
@@ -291,7 +291,7 @@ class IndexBuilder(object):
if otype:
# use unicode() to fire translation proxies
onames[typeindex] = (domainname, type,
- unicode(domain.get_type_name(otype)))
+ text_type(domain.get_type_name(otype)))
else:
onames[typeindex] = (domainname, type, type)
if anchor == fullname:
diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py
index a90167bf..4a553918 100644
--- a/sphinx/util/__init__.py
+++ b/sphinx/util/__init__.py
@@ -22,7 +22,7 @@ from codecs import open, BOM_UTF8
from collections import deque
import six
-from six import iteritems
+from six import iteritems, text_type
from six.moves import range
import docutils
from docutils.utils import relative_path
@@ -55,7 +55,7 @@ def docname_join(basedocname, docname):
def path_stabilize(filepath):
"normalize path separater and unicode string"
newpath = filepath.replace(os.path.sep, SEP)
- if isinstance(newpath, unicode):
+ if isinstance(newpath, text_type):
newpath = unicodedata.normalize('NFC', newpath)
return newpath
diff --git a/sphinx/util/jsonimpl.py b/sphinx/util/jsonimpl.py
index 6682a376..ac5c54ae 100644
--- a/sphinx/util/jsonimpl.py
+++ b/sphinx/util/jsonimpl.py
@@ -11,6 +11,7 @@
import json
+from six import text_type
from six.moves import UserString
@@ -18,7 +19,7 @@ class SphinxJSONEncoder(json.JSONEncoder):
"""JSONEncoder subclass that forces translation proxies."""
def default(self, obj):
if isinstance(obj, UserString):
- return unicode(obj)
+ return text_type(obj)
return json.JSONEncoder.default(self, obj)
diff --git a/sphinx/util/osutil.py b/sphinx/util/osutil.py
index 00cc1ae3..34b609b2 100644
--- a/sphinx/util/osutil.py
+++ b/sphinx/util/osutil.py
@@ -21,6 +21,7 @@ import gettext
from os import path
import six
+from six import text_type
# Errnos that we need.
EEXIST = getattr(errno, 'EEXIST', 0)
@@ -155,7 +156,7 @@ if six.PY2:
# if a locale is set, the time strings are encoded in the encoding
# given by LC_TIME; if that is available, use it
enc = locale.getlocale(locale.LC_TIME)[1] or 'utf-8'
- return time.strftime(unicode(format).encode(enc), *args).decode(enc)
+ return time.strftime(text_type(format).encode(enc), *args).decode(enc)
else:
ustrftime = time.strftime
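
The osutil.py hunk is one of the few places where text still has to round-trip through bytes: on Python 2, time.strftime() wants a byte-string format in the LC_TIME encoding. The same round trip as a self-contained function (trimmed from the diff above, for illustration):

# Python 2 needs the format encoded with the LC_TIME encoding; Python 3
# takes text directly.
import locale
import time

from six import PY2, text_type

def ustrftime(fmt, *args):
    if PY2:
        enc = locale.getlocale(locale.LC_TIME)[1] or 'utf-8'
        return time.strftime(text_type(fmt).encode(enc), *args).decode(enc)
    return time.strftime(fmt, *args)

print(ustrftime(u'%Y-%m-%d'))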
diff --git a/sphinx/util/pycompat.py b/sphinx/util/pycompat.py
index 8d1b3d06..519afe5f 100644
--- a/sphinx/util/pycompat.py
+++ b/sphinx/util/pycompat.py
@@ -45,7 +45,7 @@ if six.PY3:
lineno, offset = err.context[1]
# try to match ParseError details with SyntaxError details
raise SyntaxError(err.msg, (filepath, lineno, offset, err.value))
- return unicode(tree)
+ return six.text_type(tree)
from html import escape as htmlescape # >= Python 3.2
else:
diff --git a/sphinx/websupport/search/__init__.py b/sphinx/websupport/search/__init__.py
index 45068d29..f2a67b4d 100644
--- a/sphinx/websupport/search/__init__.py
+++ b/sphinx/websupport/search/__init__.py
@@ -11,6 +11,8 @@
import re
+from six import text_type
+
class BaseSearch(object):
def __init__(self, path):
@@ -109,7 +111,7 @@ class BaseSearch(object):
context_end < len(text) and '...' or ''])
try:
- return unicode(context, errors='ignore')
+ return text_type(context, errors='ignore')
except TypeError:
return context
diff --git a/sphinx/websupport/search/whooshsearch.py b/sphinx/websupport/search/whooshsearch.py
index 6d1f9de4..17adf058 100644
--- a/sphinx/websupport/search/whooshsearch.py
+++ b/sphinx/websupport/search/whooshsearch.py
@@ -14,6 +14,8 @@ from whoosh.fields import Schema, ID, TEXT
from whoosh.qparser import QueryParser
from whoosh.analysis import StemmingAnalyzer
+from six import text_type
+
from sphinx.util.osutil import ensuredir
from sphinx.websupport.search import BaseSearch
@@ -43,7 +45,7 @@ class WhooshSearch(BaseSearch):
self.index_writer.commit()
def add_document(self, pagename, title, text):
- self.index_writer.add_document(path=unicode(pagename),
+ self.index_writer.add_document(path=text_type(pagename),
title=title,
text=text)
diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py
index 05e83d08..53aff908 100644
--- a/sphinx/writers/latex.py
+++ b/sphinx/writers/latex.py
@@ -16,7 +16,7 @@ import re
import sys
from os import path
-from six import itervalues
+from six import itervalues, text_type
from docutils import nodes, writers
from docutils.writers.latex2e import Babel
@@ -307,7 +307,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
return '\\autopageref*{%s}' % self.idescape(id)
def idescape(self, id):
- return unicode(id).translate(tex_replace_map).\
+ return text_type(id).translate(tex_replace_map).\
encode('ascii', 'backslashreplace').decode('ascii').\
replace('\\', '_')
@@ -320,7 +320,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
if i > 0:
ret.append('\\indexspace\n')
ret.append('\\bigletter{%s}\n' %
- unicode(letter).translate(tex_escape_map))
+ text_type(letter).translate(tex_escape_map))
for entry in entries:
if not entry[3]:
continue
@@ -1513,7 +1513,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
# text handling
def encode(self, text):
- text = unicode(text).translate(tex_escape_map)
+ text = text_type(text).translate(tex_escape_map)
if self.literal_whitespace:
# Insert a blank before the newline, to avoid
# ! LaTeX Error: There's no line here to end.
diff --git a/tests/etree13/HTMLTreeBuilder.py b/tests/etree13/HTMLTreeBuilder.py
index 3e3a7d92..cf332c75 100644
--- a/tests/etree13/HTMLTreeBuilder.py
+++ b/tests/etree13/HTMLTreeBuilder.py
@@ -54,6 +54,8 @@ import htmlentitydefs
import re, string, sys
import mimetools, StringIO
+from six import text_type
+
from . import ElementTree
AUTOCLOSE = "p", "li", "tr", "th", "td", "head", "body"
@@ -199,7 +201,7 @@ class HTMLTreeBuilder(HTMLParser):
def handle_data(self, data):
if isinstance(data, type('')) and is_not_ascii(data):
# convert to unicode, but only if necessary
- data = unicode(data, self.encoding, "ignore")
+ data = text_type(data, self.encoding, "ignore")
self.__builder.data(data)
##
diff --git a/tests/path.py b/tests/path.py
index 860e65e1..ddfd49b7 100755
--- a/tests/path.py
+++ b/tests/path.py
@@ -13,12 +13,13 @@ import shutil
from codecs import open
import six
+from six import text_type
FILESYSTEMENCODING = sys.getfilesystemencoding() or sys.getdefaultencoding()
-class path(unicode):
+class path(text_type):
"""
Represents a path which behaves like a string.
"""
@@ -26,8 +27,8 @@ class path(unicode):
def __new__(cls, s, encoding=FILESYSTEMENCODING, errors='strict'):
if isinstance(s, str):
s = s.decode(encoding, errors)
- return unicode.__new__(cls, s)
- return unicode.__new__(cls, s)
+ return text_type.__new__(cls, s)
+ return text_type.__new__(cls, s)
@property
def parent(self):
@@ -195,4 +196,4 @@ class path(unicode):
__div__ = __truediv__ = joinpath
def __repr__(self):
- return '%s(%s)' % (self.__class__.__name__, unicode.__repr__(self))
+ return '%s(%s)' % (self.__class__.__name__, text_type.__repr__(self))
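
tests/path.py now subclasses text_type directly, so the helper is a unicode subclass on Python 2 and a str subclass on Python 3 without further changes. A minimal hypothetical version of that idea (not the actual test helper):

# Hypothetical: a path class that inherits from the native text type on
# both interpreters, as tests/path.py does with six.text_type.
from __future__ import print_function

from six import text_type

class Path(text_type):
    def joinpath(self, *parts):
        return Path('/'.join((self,) + parts))

p = Path(u'/tmp').joinpath(u'build', u'html')
print(p, isinstance(p, text_type))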
diff --git a/tests/test_cpp_domain.py b/tests/test_cpp_domain.py
index 8e1cb22b..1fec2ba2 100644
--- a/tests/test_cpp_domain.py
+++ b/tests/test_cpp_domain.py
@@ -9,6 +9,8 @@
:license: BSD, see LICENSE for details.
"""
+from six import text_type
+
from util import raises
from sphinx.domains.cpp import DefinitionParser, DefinitionError
@@ -20,100 +22,100 @@ def parse(name, string):
def test_type_definitions():
rv = parse('member_object', ' const std::string & name = 42')
- assert unicode(rv) == 'const std::string& name = 42'
+ assert text_type(rv) == 'const std::string& name = 42'
rv = parse('member_object', ' const std::string & name leftover')
- assert unicode(rv) == 'const std::string& name'
+ assert text_type(rv) == 'const std::string& name'
rv = parse('member_object', ' const std::string & name [n] leftover')
- assert unicode(rv) == 'const std::string& name[n]'
+ assert text_type(rv) == 'const std::string& name[n]'
rv = parse('member_object', 'const std::vector< unsigned int, long> &name')
- assert unicode(rv) == 'const std::vector<unsigned int, long>& name'
+ assert text_type(rv) == 'const std::vector<unsigned int, long>& name'
x = 'std::vector<std::pair<std::string, int>>& module::test(register ' \
'foo, bar, std::string baz="foobar, blah, bleh") const = 0'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'module::myclass::operator std::vector<std::string>()'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'explicit module::myclass::foo::foo()'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'int printf(const char* fmt, ...)'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'int foo(const unsigned int j)'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'int foo(const unsigned int const j)'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'int foo(const int* const ptr)'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'std::vector<std::pair<std::string, long long>> module::blah'
- assert unicode(parse('type_object', x)) == x
+ assert text_type(parse('type_object', x)) == x
- assert unicode(parse('type_object', 'long long int foo')) == 'long long foo'
+ assert text_type(parse('type_object', 'long long int foo')) == 'long long foo'
x = 'void operator()(const boost::array<VertexID, 2>& v) const'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'void operator()(const boost::array<VertexID, 2, "foo, bar">& v) const'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'MyClass::MyClass(MyClass::MyClass&&)'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'constexpr int get_value()'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'static constexpr int get_value()'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'int get_value() const noexcept'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'int get_value() const noexcept = delete'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'MyClass::MyClass(MyClass::MyClass&&) = default'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'MyClass::a_virtual_function() const override'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'MyClass::a_member_function() volatile'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'MyClass::a_member_function() const volatile'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'MyClass::a_member_function() &&'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'MyClass::a_member_function() &'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'MyClass::a_member_function() const &'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'int main(int argc, char* argv[][])'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'std::vector<std::pair<std::string, int>>& module::test(register ' \
'foo, bar[n], std::string baz="foobar, blah, bleh") const = 0'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'module::myclass foo[n]'
- assert unicode(parse('member_object', x)) == x
+ assert text_type(parse('member_object', x)) == x
x = 'int foo(Foo f=Foo(double(), std::make_pair(int(2), double(3.4))))'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'int foo(A a=x(a))'
- assert unicode(parse('function', x)) == x
+ assert text_type(parse('function', x)) == x
x = 'int foo(B b=x(a)'
raises(DefinitionError, parse, 'function', x)
@@ -127,31 +129,31 @@ def test_type_definitions():
def test_bases():
x = 'A'
- assert unicode(parse('class', x)) == x
+ assert text_type(parse('class', x)) == x
x = 'A : B'
- assert unicode(parse('class', x)) == x
+ assert text_type(parse('class', x)) == x
x = 'A : private B'
- assert unicode(parse('class', x)) == 'A : B'
+ assert text_type(parse('class', x)) == 'A : B'
x = 'A : public B'
- assert unicode(parse('class', x)) == x
+ assert text_type(parse('class', x)) == x
x = 'A : B, C'
- assert unicode(parse('class', x)) == x
+ assert text_type(parse('class', x)) == x
x = 'A : B, protected C, D'
- assert unicode(parse('class', x)) == x
+ assert text_type(parse('class', x)) == x
def test_operators():
x = parse('function', 'void operator new [ ] ()')
- assert unicode(x) == 'void operator new[]()'
+ assert text_type(x) == 'void operator new[]()'
x = parse('function', 'void operator delete ()')
- assert unicode(x) == 'void operator delete()'
+ assert text_type(x) == 'void operator delete()'
for op in '*-+=/%!':
x = parse('function', 'void operator %s ()' % op)
- assert unicode(x) == 'void operator%s()' % op
+ assert text_type(x) == 'void operator%s()' % op
diff --git a/tests/test_quickstart.py b/tests/test_quickstart.py
index 6c1ffcf8..31f6fa97 100644
--- a/tests/test_quickstart.py
+++ b/tests/test_quickstart.py
@@ -13,6 +13,7 @@ import sys
import time
import six
+from six import text_type
from util import raises, with_tempdir, SkipTest
@@ -39,7 +40,7 @@ def mock_input(answers, needanswer=False):
prompt = str(prompt) # Python2.x raw_input emulation
# `raw_input` encode `prompt` by default encoding to print.
else:
- prompt = unicode(prompt) # Python3.x input emulation
+ prompt = text_type(prompt) # Python3.x input emulation
# `input` decode prompt by default encoding before print.
for question in answers:
if prompt.startswith(qs.PROMPT_PREFIX + question):