Diffstat (limited to 'sphinx/pycode/pgen2')
-rw-r--r--  sphinx/pycode/pgen2/driver.py    |  2
-rw-r--r--  sphinx/pycode/pgen2/grammar.py   | 13
-rw-r--r--  sphinx/pycode/pgen2/literals.py  |  8
-rw-r--r--  sphinx/pycode/pgen2/parse.c      | 89
-rw-r--r--  sphinx/pycode/pgen2/pgen.py      | 45
-rwxr-xr-x  sphinx/pycode/pgen2/token.py     |  2
-rw-r--r--  sphinx/pycode/pgen2/tokenize.py  | 25
7 files changed, 58 insertions, 126 deletions
diff --git a/sphinx/pycode/pgen2/driver.py b/sphinx/pycode/pgen2/driver.py
index 422671db..c531edb3 100644
--- a/sphinx/pycode/pgen2/driver.py
+++ b/sphinx/pycode/pgen2/driver.py
@@ -131,7 +131,7 @@ def load_grammar(gt="Grammar.txt", gp=None,
logger.info("Writing grammar tables to %s", gp)
try:
g.dump(gp)
- except IOError, e:
+ except IOError as e:
logger.info("Writing failed:"+str(e))
else:
g = grammar.Grammar()
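The only change in driver.py is the exception syntax: "except IOError, e:" is Python 2-only and a SyntaxError under Python 3, while "except IOError as e:" parses on Python 2.6+ and 3.x alike. A minimal sketch of the pattern, with dump_grammar as an illustrative stand-in for the surrounding load_grammar logic:

    import logging

    logger = logging.getLogger(__name__)

    def dump_grammar(g, gp):
        # "except ExcType as name" parses on Python 2.6+ and Python 3;
        # the comma form replaced above is a SyntaxError on Python 3.
        try:
            g.dump(gp)
        except IOError as e:
            logger.info("Writing failed: %s", e)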
diff --git a/sphinx/pycode/pgen2/grammar.py b/sphinx/pycode/pgen2/grammar.py
index 01d84346..91874fa2 100644
--- a/sphinx/pycode/pgen2/grammar.py
+++ b/sphinx/pycode/pgen2/grammar.py
@@ -11,6 +11,7 @@ token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.
"""
+from __future__ import print_function
# Python imports
import pickle
@@ -100,17 +101,17 @@ class Grammar(object):
def report(self):
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
- print "s2n"
+ print("s2n")
pprint(self.symbol2number)
- print "n2s"
+ print("n2s")
pprint(self.number2symbol)
- print "states"
+ print("states")
pprint(self.states)
- print "dfas"
+ print("dfas")
pprint(self.dfas)
- print "labels"
+ print("labels")
pprint(self.labels)
- print "start", self.start
+ print("start", self.start)
# Map from operator to number (since tokenize doesn't do this)
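grammar.py switches every print statement to the print() function; the "from __future__ import print_function" import makes that spelling native under Python 2 as well, so one source file runs on both interpreters. A minimal sketch of the effect (the name/table/start arguments here are illustrative):

    from __future__ import print_function

    from pprint import pprint

    def report(name, table, start):
        # Without the __future__ import, Python 2 would parse
        # print("start", start) as a print *statement* applied to a
        # 2-tuple and emit ('start', 0); with it, both interpreter
        # versions call the same print() function.
        print(name)
        pprint(table)
        print("start", start)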
diff --git a/sphinx/pycode/pgen2/literals.py b/sphinx/pycode/pgen2/literals.py
index d4893702..25e09b62 100644
--- a/sphinx/pycode/pgen2/literals.py
+++ b/sphinx/pycode/pgen2/literals.py
@@ -4,9 +4,13 @@
# Extended to handle raw and unicode literals by Georg Brandl.
"""Safely evaluate Python string literals without using eval()."""
+from __future__ import print_function
import re
+from six import text_type
+
+
simple_escapes = {"a": "\a",
"b": "\b",
"f": "\f",
@@ -66,7 +70,7 @@ uni_escape_re = re.compile(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3}|"
def evalString(s, encoding=None):
regex = escape_re
repl = escape
- if encoding and not isinstance(s, unicode):
+ if encoding and not isinstance(s, text_type):
s = s.decode(encoding)
if s.startswith('u') or s.startswith('U'):
regex = uni_escape_re
@@ -89,7 +93,7 @@ def test():
s = repr(c)
e = evalString(s)
if e != c:
- print i, c, s, e
+ print(i, c, s, e)
if __name__ == "__main__":
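literals.py replaces the bare unicode builtin, which no longer exists on Python 3, with six.text_type (an alias for unicode on 2 and str on 3). A minimal sketch of the decode-if-bytes idiom used in evalString, assuming UTF-8 as an illustrative default encoding:

    from six import text_type

    def ensure_text(s, encoding="utf-8"):
        # On Python 2, text_type is unicode, so byte strings (str) get
        # decoded; on Python 3, text_type is str, so only bytes objects
        # fail the isinstance check and are decoded.
        if not isinstance(s, text_type):
            s = s.decode(encoding)
        return s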
diff --git a/sphinx/pycode/pgen2/parse.c b/sphinx/pycode/pgen2/parse.c
index e09f5058..96fa6c8b 100644
--- a/sphinx/pycode/pgen2/parse.c
+++ b/sphinx/pycode/pgen2/parse.c
@@ -353,95 +353,6 @@ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/
-#if PY_VERSION_HEX < 0x02050000
-#ifndef PyAnySet_CheckExact
-
-#define PyAnySet_CheckExact(ob) \
- ((ob)->ob_type == &PySet_Type || \
- (ob)->ob_type == &PyFrozenSet_Type)
-
-#define PySet_New(iterable) \
- PyObject_CallFunctionObjArgs((PyObject *)&PySet_Type, (iterable), NULL)
-
-#define Pyx_PyFrozenSet_New(iterable) \
- PyObject_CallFunctionObjArgs((PyObject *)&PyFrozenSet_Type, (iterable), NULL)
-
-#define PySet_Size(anyset) \
- PyObject_Size((anyset))
-
-#define PySet_Contains(anyset, key) \
- PySequence_Contains((anyset), (key))
-
-#define PySet_Pop(set) \
- PyObject_CallMethod(set, (char *)"pop", NULL)
-
-static INLINE int PySet_Clear(PyObject *set) {
- PyObject *ret = PyObject_CallMethod(set, (char *)"clear", NULL);
- if (!ret) return -1;
- Py_DECREF(ret); return 0;
-}
-
-static INLINE int PySet_Discard(PyObject *set, PyObject *key) {
- PyObject *ret = PyObject_CallMethod(set, (char *)"discard", (char *)"O", key);
- if (!ret) return -1;
- Py_DECREF(ret); return 0;
-}
-
-static INLINE int PySet_Add(PyObject *set, PyObject *key) {
- PyObject *ret = PyObject_CallMethod(set, (char *)"add", (char *)"O", key);
- if (!ret) return -1;
- Py_DECREF(ret); return 0;
-}
-
-#endif /* PyAnySet_CheckExact (<= Py2.4) */
-
-#if PY_VERSION_HEX < 0x02040000
-#ifndef Py_SETOBJECT_H
-#define Py_SETOBJECT_H
-
-static PyTypeObject *__Pyx_PySet_Type = NULL;
-static PyTypeObject *__Pyx_PyFrozenSet_Type = NULL;
-
-#define PySet_Type (*__Pyx_PySet_Type)
-#define PyFrozenSet_Type (*__Pyx_PyFrozenSet_Type)
-
-#define PyAnySet_Check(ob) \
- (PyAnySet_CheckExact(ob) || \
- PyType_IsSubtype((ob)->ob_type, &PySet_Type) || \
- PyType_IsSubtype((ob)->ob_type, &PyFrozenSet_Type))
-
-#define PyFrozenSet_CheckExact(ob) ((ob)->ob_type == &PyFrozenSet_Type)
-
-static int __Pyx_Py23SetsImport(void) {
- PyObject *sets=0, *Set=0, *ImmutableSet=0;
-
- sets = PyImport_ImportModule((char *)"sets");
- if (!sets) goto bad;
- Set = PyObject_GetAttrString(sets, (char *)"Set");
- if (!Set) goto bad;
- ImmutableSet = PyObject_GetAttrString(sets, (char *)"ImmutableSet");
- if (!ImmutableSet) goto bad;
- Py_DECREF(sets);
-
- __Pyx_PySet_Type = (PyTypeObject*) Set;
- __Pyx_PyFrozenSet_Type = (PyTypeObject*) ImmutableSet;
-
- return 0;
-
- bad:
- Py_XDECREF(sets);
- Py_XDECREF(Set);
- Py_XDECREF(ImmutableSet);
- return -1;
-}
-
-#else
-static int __Pyx_Py23SetsImport(void) { return 0; }
-#endif /* !Py_SETOBJECT_H */
-#endif /* < Py2.4 */
-#endif /* < Py2.5 */
-
-
static INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
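parse.c is Cython-generated output, and the deleted block appears to be the compatibility shim older Cython emitted for interpreters before Python 2.5 (falling back to the sets module before the built-in set C API existed). Regenerating the file with a Cython that no longer targets those interpreters drops the whole block; no hand-written logic changes here.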
diff --git a/sphinx/pycode/pgen2/pgen.py b/sphinx/pycode/pgen2/pgen.py
index 0a04447d..e199ed8a 100644
--- a/sphinx/pycode/pgen2/pgen.py
+++ b/sphinx/pycode/pgen2/pgen.py
@@ -1,7 +1,12 @@
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
+from __future__ import print_function
+
+from six import iteritems
+
# Pgen imports
+
from sphinx.pycode.pgen2 import grammar, token, tokenize
class PgenGrammar(grammar.Grammar):
@@ -26,7 +31,7 @@ class ParserGenerator(object):
def make_grammar(self):
c = PgenGrammar()
- names = self.dfas.keys()
+ names = list(self.dfas.keys())
names.sort()
names.remove(self.startsymbol)
names.insert(0, self.startsymbol)
@@ -39,7 +44,7 @@ class ParserGenerator(object):
states = []
for state in dfa:
arcs = []
- for label, next in state.arcs.iteritems():
+ for label, next in iteritems(state.arcs):
arcs.append((self.make_label(c, label), dfa.index(next)))
if state.isfinal:
arcs.append((0, dfa.index(state)))
@@ -105,7 +110,7 @@ class ParserGenerator(object):
return ilabel
def addfirstsets(self):
- names = self.dfas.keys()
+ names = list(self.dfas.keys())
names.sort()
for name in names:
if name not in self.first:
@@ -118,7 +123,7 @@ class ParserGenerator(object):
state = dfa[0]
totalset = {}
overlapcheck = {}
- for label, next in state.arcs.iteritems():
+ for label, next in iteritems(state.arcs):
if label in self.dfas:
if label in self.first:
fset = self.first[label]
@@ -133,7 +138,7 @@ class ParserGenerator(object):
totalset[label] = 1
overlapcheck[label] = {label: 1}
inverse = {}
- for label, itsfirst in overlapcheck.iteritems():
+ for label, itsfirst in iteritems(overlapcheck):
for symbol in itsfirst:
if symbol in inverse:
raise ValueError("rule %s is ambiguous; %s is in the"
@@ -192,7 +197,7 @@ class ParserGenerator(object):
for label, next in nfastate.arcs:
if label is not None:
addclosure(next, arcs.setdefault(label, {}))
- for label, nfaset in arcs.iteritems():
+ for label, nfaset in iteritems(arcs):
for st in states:
if st.nfaset == nfaset:
break
@@ -203,10 +208,10 @@ class ParserGenerator(object):
return states # List of DFAState instances; first one is start
def dump_nfa(self, name, start, finish):
- print "Dump of NFA for", name
+ print("Dump of NFA for", name)
todo = [start]
for i, state in enumerate(todo):
- print " State", i, state is finish and "(final)" or ""
+ print(" State", i, state is finish and "(final)" or "")
for label, next in state.arcs:
if next in todo:
j = todo.index(next)
@@ -214,16 +219,16 @@ class ParserGenerator(object):
j = len(todo)
todo.append(next)
if label is None:
- print " -> %d" % j
+ print(" -> %d" % j)
else:
- print " %s -> %d" % (label, j)
+ print(" %s -> %d" % (label, j))
def dump_dfa(self, name, dfa):
- print "Dump of DFA for", name
+ print("Dump of DFA for", name)
for i, state in enumerate(dfa):
- print " State", i, state.isfinal and "(final)" or ""
- for label, next in state.arcs.iteritems():
- print " %s -> %d" % (label, dfa.index(next))
+ print(" State", i, state.isfinal and "(final)" or "")
+ for label, next in iteritems(state.arcs):
+ print(" %s -> %d" % (label, dfa.index(next)))
def simplify_dfa(self, dfa):
# This is not theoretically optimal, but works well enough.
@@ -319,9 +324,9 @@ class ParserGenerator(object):
return value
def gettoken(self):
- tup = self.generator.next()
+ tup = next(self.generator)
while tup[0] in (tokenize.COMMENT, tokenize.NL):
- tup = self.generator.next()
+ tup = next(self.generator)
self.type, self.value, self.begin, self.end, self.line = tup
#print token.tok_name[self.type], repr(self.value)
@@ -330,7 +335,7 @@ class ParserGenerator(object):
try:
msg = msg % args
except:
- msg = " ".join([msg] + map(str, args))
+ msg = " ".join([msg] + [str(x) for x in args])
raise SyntaxError(msg, (self.filename, self.end[0],
self.end[1], self.line))
@@ -348,7 +353,7 @@ class DFAState(object):
def __init__(self, nfaset, final):
assert isinstance(nfaset, dict)
- assert isinstance(iter(nfaset).next(), NFAState)
+ assert isinstance(next(iter(nfaset)), NFAState)
assert isinstance(final, NFAState)
self.nfaset = nfaset
self.isfinal = final in nfaset
@@ -361,7 +366,7 @@ class DFAState(object):
self.arcs[label] = next
def unifystate(self, old, new):
- for label, next in self.arcs.iteritems():
+ for label, next in iteritems(self.arcs):
if next is old:
self.arcs[label] = new
@@ -374,7 +379,7 @@ class DFAState(object):
# would invoke this method recursively, with cycles...
if len(self.arcs) != len(other.arcs):
return False
- for label, next in self.arcs.iteritems():
+ for label, next in iteritems(self.arcs):
if next is not other.arcs.get(label):
return False
return True
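pgen.py collects the bulk of the porting idioms: dict.iteritems() and the .next() method were removed in Python 3, dict.keys() became a view that cannot be sorted in place, and map() became lazy. six.iteritems() and the builtin next() cover the first two; wrapping in list(...) or using a list comprehension handles the rest. A minimal sketch of the combined pattern, with hypothetical dfas, tokens, and skip inputs:

    from six import iteritems

    def sorted_names(dfas, startsymbol):
        # list(...) copies the keys so .sort() works on Python 3,
        # where dict.keys() is an unsortable view.
        names = list(dfas.keys())
        names.sort()
        names.remove(startsymbol)
        names.insert(0, startsymbol)
        return names

    def gettoken(tokens, skip=()):
        # next(it) is the portable spelling; it.next() is Python 2-only.
        tup = next(tokens)
        while tup[0] in skip:
            tup = next(tokens)
        return tup

    def arc_pairs(state_arcs):
        # iteritems() is the lazy items() of Python 2 and the plain
        # items() view on Python 3.
        return [(label, nxt) for label, nxt in iteritems(state_arcs)]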
diff --git a/sphinx/pycode/pgen2/token.py b/sphinx/pycode/pgen2/token.py
index 56a40ce7..55bf5e8d 100755
--- a/sphinx/pycode/pgen2/token.py
+++ b/sphinx/pycode/pgen2/token.py
@@ -68,7 +68,7 @@ NT_OFFSET = 256
#--end constants--
tok_name = {}
-for _name, _value in globals().items():
+for _name, _value in list(globals().items()):
if type(_value) is type(0):
tok_name[_value] = _name
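The token.py change looks cosmetic but is not: the loop binds _name and _value, which inserts two new entries into the very globals() dict being iterated, and on Python 3 a dict view raises "RuntimeError: dictionary changed size during iteration" when that happens. Snapshotting with list(...) sidesteps it. A minimal reproduction of the fixed loop:

    NAME = 1
    OP = 52

    tok_name = {}
    # Iterating a list snapshot is safe even though assigning the loop
    # variables _name and _value adds new keys to globals(); iterating
    # globals().items() directly raises RuntimeError on Python 3.
    for _name, _value in list(globals().items()):
        if type(_value) is type(0):
            tok_name[_value] = _name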
diff --git a/sphinx/pycode/pgen2/tokenize.py b/sphinx/pycode/pgen2/tokenize.py
index 7ad9f012..d6253505 100644
--- a/sphinx/pycode/pgen2/tokenize.py
+++ b/sphinx/pycode/pgen2/tokenize.py
@@ -23,13 +23,17 @@ Older entry points
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
-each time a new token is found."""
+each time a new token is found.
+"""
+
+from __future__ import print_function
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
+from six import PY3
from sphinx.pycode.pgen2.token import *
from sphinx.pycode.pgen2 import token
@@ -81,6 +85,9 @@ Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
+if PY3:
+ Ellipsis_ = r'\.{3}'
+ Special = group(Ellipsis_, Special)
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
@@ -94,8 +101,9 @@ ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
-tokenprog, pseudoprog, single3prog, double3prog = map(
- re.compile, (Token, PseudoToken, Single3, Double3))
+tokenprog, pseudoprog, single3prog, double3prog = [
+ re.compile(x) for x in (Token, PseudoToken, Single3, Double3)
+]
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
@@ -146,8 +154,8 @@ class StopTokenizing(Exception): pass
def printtoken(type, token, scell, ecell, line): # for testing
srow, scol = scell
erow, ecol = ecell
- print "%d,%d-%d,%d:\t%s\t%s" % \
- (srow, scol, erow, ecol, tok_name[type], repr(token))
+ print("%d,%d-%d,%d:\t%s\t%s" %
+ (srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
"""
@@ -352,8 +360,9 @@ def generate_tokens(readline):
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
- if initial in numchars or \
- (initial == '.' and token != '.'): # ordinary number
+ if initial in numchars or (
+ initial == '.' and token not in ('.', '...')
+ ): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
@@ -389,6 +398,8 @@ def generate_tokens(readline):
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
+ elif token in ('...',): # ordinary name
+ yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
yield (NL, token, spos, (lnum, pos), line)
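The tokenize.py changes teach the regex-driven tokenizer about the Python 3 Ellipsis literal: when running under Python 3 (six.PY3), "..." is added to the Special alternatives, the number branch stops misreading "." and "..." as the start of a float, and a matched "..." is emitted as a NAME token. A minimal sketch of the gated regex construction, with group() written the same way as the module's helper:

    import re

    from six import PY3

    def group(*choices):
        # Same shape as the helper in tokenize.py: an alternation group.
        return '(' + '|'.join(choices) + ')'

    Special = group(r'\r?\n', r'[:;.,`@]')
    if PY3:
        # '...' must come before the lone '.' inside Special, or the
        # tokenizer would split an Ellipsis into three OP tokens.
        Special = group(r'\.{3}', Special)

    if PY3:
        m = re.match(Special, '...')
        print(m.group())  # prints '...': one token, not three dots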