-rw-r--r--  COPYING.txt | 4
-rw-r--r--  Cython/Build/Dependencies.py | 50
-rw-r--r--  Cython/Build/Inline.py | 6
-rw-r--r--  Cython/Build/Tests/TestInline.py | 2
-rw-r--r--  Cython/CodeWriter.py | 54
-rw-r--r--  Cython/Compiler/AnalysedTreeTransforms.py | 2
-rw-r--r--  Cython/Compiler/Annotate.py | 36
-rw-r--r--  Cython/Compiler/AutoDocTransforms.py | 4
-rw-r--r--  Cython/Compiler/Buffer.py | 56
-rw-r--r--  Cython/Compiler/Builtin.py | 8
-rw-r--r--  Cython/Compiler/CmdLine.py | 8
-rw-r--r--  Cython/Compiler/Code.py | 128
-rw-r--r--  Cython/Compiler/ControlFlow.py | 46
-rw-r--r--  Cython/Compiler/CythonScope.py | 12
-rw-r--r--  Cython/Compiler/Errors.py | 14
-rwxr-xr-x  Cython/Compiler/ExprNodes.py | 968
-rw-r--r--  Cython/Compiler/Interpreter.py | 4
-rw-r--r--  Cython/Compiler/Lexicon.py | 54
-rw-r--r--  Cython/Compiler/Main.py | 54
-rw-r--r--  Cython/Compiler/ModuleNode.py | 174
-rw-r--r--  Cython/Compiler/Nodes.py | 586
-rw-r--r--  Cython/Compiler/Optimize.py | 38
-rw-r--r--  Cython/Compiler/Options.py | 38
-rw-r--r--  Cython/Compiler/ParseTreeTransforms.py | 162
-rw-r--r--  Cython/Compiler/Parsing.py | 104
-rwxr-xr-x  Cython/Compiler/PyrexTypes.py | 360
-rw-r--r--  Cython/Compiler/Scanning.py | 52
-rw-r--r--  Cython/Compiler/Symtab.py | 262
-rw-r--r--  Cython/Compiler/Tests/TestBuffer.py | 8
-rw-r--r--  Cython/Compiler/Tests/TestDecorators.py | 2
-rw-r--r--  Cython/Compiler/Tests/TestParseTreeTransforms.py | 28
-rw-r--r--  Cython/Compiler/Tests/TestTreeFragment.py | 6
-rw-r--r--  Cython/Compiler/TreeFragment.py | 26
-rw-r--r--  Cython/Compiler/TreePath.py | 4
-rw-r--r--  Cython/Compiler/TypeInference.py | 44
-rw-r--r--  Cython/Compiler/TypeSlots.py | 106
-rw-r--r--  Cython/Compiler/UtilNodes.py | 26
-rw-r--r--  Cython/Compiler/Visitor.py | 28
-rw-r--r--  Cython/Debugger/Cygdb.py | 22
-rw-r--r--  Cython/Debugger/DebugWriter.py | 14
-rw-r--r--  Cython/Debugger/Tests/TestLibCython.py | 70
-rw-r--r--  Cython/Debugger/Tests/cfuncs.c | 2
-rw-r--r--  Cython/Debugger/Tests/test_libcython_in_gdb.py | 148
-rw-r--r--  Cython/Debugger/Tests/test_libpython_in_gdb.py | 60
-rw-r--r--  Cython/Debugger/libcython.py | 374
-rw-r--r--  Cython/Debugger/libpython.py | 314
-rw-r--r--  Cython/Distutils/__init__.py | 2
-rw-r--r--  Cython/Distutils/build_ext.py | 32
-rw-r--r--  Cython/Includes/cpython/__init__.pxd | 10
-rw-r--r--  Cython/Includes/cpython/bool.pxd | 6
-rw-r--r--  Cython/Includes/cpython/buffer.pxd | 4
-rw-r--r--  Cython/Includes/cpython/bytes.pxd | 16
-rw-r--r--  Cython/Includes/cpython/complex.pxd | 10
-rw-r--r--  Cython/Includes/cpython/dict.pxd | 12
-rw-r--r--  Cython/Includes/cpython/exc.pxd | 6
-rw-r--r--  Cython/Includes/cpython/float.pxd | 8
-rw-r--r--  Cython/Includes/cpython/function.pxd | 8
-rw-r--r--  Cython/Includes/cpython/instance.pxd | 6
-rw-r--r--  Cython/Includes/cpython/int.pxd | 6
-rw-r--r--  Cython/Includes/cpython/iterator.pxd | 2
-rw-r--r--  Cython/Includes/cpython/list.pxd | 10
-rw-r--r--  Cython/Includes/cpython/long.pxd | 18
-rw-r--r--  Cython/Includes/cpython/mem.pxd | 2
-rw-r--r--  Cython/Includes/cpython/method.pxd | 12
-rw-r--r--  Cython/Includes/cpython/module.pxd | 8
-rw-r--r--  Cython/Includes/cpython/number.pxd | 12
-rw-r--r--  Cython/Includes/cpython/object.pxd | 8
-rw-r--r--  Cython/Includes/cpython/pycapsule.pxd | 2
-rw-r--r--  Cython/Includes/cpython/ref.pxd | 2
-rw-r--r--  Cython/Includes/cpython/sequence.pxd | 4
-rw-r--r--  Cython/Includes/cpython/set.pxd | 10
-rw-r--r--  Cython/Includes/cpython/string.pxd | 16
-rw-r--r--  Cython/Includes/cpython/tuple.pxd | 8
-rw-r--r--  Cython/Includes/cpython/type.pxd | 6
-rw-r--r--  Cython/Includes/cpython/unicode.pxd | 28
-rw-r--r--  Cython/Includes/libc/signal.pxd | 2
-rw-r--r--  Cython/Includes/libc/stdio.pxd | 2
-rw-r--r--  Cython/Includes/libc/stdlib.pxd | 4
-rw-r--r--  Cython/Includes/libc/string.pxd | 2
-rw-r--r--  Cython/Includes/numpy.pxd | 38
-rw-r--r--  Cython/Includes/posix/fcntl.pxd | 2
-rw-r--r--  Cython/Includes/posix/unistd.pxd | 2
-rw-r--r--  Cython/Plex/Actions.py | 6
-rw-r--r--  Cython/Plex/DFA.py | 6
-rw-r--r--  Cython/Plex/Errors.py | 2
-rw-r--r--  Cython/Plex/Lexicons.py | 2
-rw-r--r--  Cython/Plex/Machines.py | 28
-rw-r--r--  Cython/Plex/Regexps.py | 38
-rw-r--r--  Cython/Plex/Scanners.pxd | 2
-rw-r--r--  Cython/Plex/Scanners.py | 6
-rw-r--r--  Cython/Plex/Traditional.py | 24
-rw-r--r--  Cython/Plex/Transitions.py | 58
-rw-r--r--  Cython/Shadow.py | 26
-rw-r--r--  Cython/StringIOTree.py | 2
-rw-r--r--  Cython/TestUtils.py | 20
-rw-r--r--  Cython/Tests/TestCodeWriter.py | 10
-rw-r--r--  Cython/Tests/TestStringIOTree.py | 20
-rw-r--r--  Cython/Tests/xmlrunner.py | 108
-rw-r--r--  Cython/Utils.py | 4
-rw-r--r--  Cython/__init__.py | 2
-rw-r--r--  INSTALL.txt | 4
-rw-r--r--  ToDo.txt | 2
-rw-r--r--  USAGE.txt | 2
-rw-r--r--  tests/broken/b_extimpinherit.pyx | 2
-rw-r--r--  tests/broken/cdefemptysue.pyx | 6
-rw-r--r--  tests/broken/cdefexternblock.pyx | 6
-rw-r--r--  tests/broken/externsue.pyx | 4
-rw-r--r--  tests/broken/getattr.pyx | 2
-rw-r--r--  tests/broken/r_excval.pyx | 2
-rw-r--r--  tests/broken/r_extcmethod.pyx | 2
-rw-r--r--  tests/broken/r_extimpinherit.pyx | 2
-rw-r--r--  tests/broken/r_extinherit.pyx | 4
-rw-r--r--  tests/broken/r_extmember.pyx | 4
-rw-r--r--  tests/broken/r_extnumeric2.pyx | 6
-rw-r--r--  tests/broken/r_extproperty.pyx | 8
-rw-r--r--  tests/bugs.txt | 4
-rw-r--r--  tests/compile/a_capi.pyx | 2
-rw-r--r--  tests/compile/c_directives.pyx | 4
-rw-r--r--  tests/compile/cargdef.pyx | 6
-rw-r--r--  tests/compile/cassign.pyx | 4
-rw-r--r--  tests/compile/cast_ctypedef_array_T518.pyx | 8
-rw-r--r--  tests/compile/cenum.pyx | 2
-rw-r--r--  tests/compile/docstrings.pyx | 2
-rw-r--r--  tests/compile/extcmethcall.pyx | 2
-rw-r--r--  tests/compile/extcoerce.pyx | 2
-rw-r--r--  tests/compile/extdelattr.pyx | 2
-rw-r--r--  tests/compile/extdelitem.pyx | 2
-rw-r--r--  tests/compile/extdelslice.pyx | 2
-rw-r--r--  tests/compile/extgetattr.pyx | 2
-rw-r--r--  tests/compile/extinheritdel.pyx | 2
-rw-r--r--  tests/compile/extinheritset.pyx | 2
-rw-r--r--  tests/compile/extpropertyall.pyx | 6
-rw-r--r--  tests/compile/extsetattr.pyx | 2
-rw-r--r--  tests/compile/extsetitem.pyx | 2
-rw-r--r--  tests/compile/extsetslice.pyx | 2
-rw-r--r--  tests/compile/for.pyx | 14
-rw-r--r--  tests/compile/fromimport.pyx | 2
-rw-r--r--  tests/compile/gencall.pyx | 2
-rw-r--r--  tests/compile/huss2.pyx | 2
-rw-r--r--  tests/compile/ia_cdefblock.pyx | 8
-rw-r--r--  tests/compile/import.pyx | 2
-rw-r--r--  tests/compile/index.pyx | 2
-rw-r--r--  tests/compile/jiba3.pyx | 2
-rw-r--r--  tests/compile/libc_signal.pyx | 2
-rw-r--r--  tests/compile/point.h | 2
-rw-r--r--  tests/compile/pylong.pyx | 8
-rw-r--r--  tests/compile/tryexcept.pyx | 22
-rw-r--r--  tests/compile/tryfinally.pyx | 8
-rw-r--r--  tests/compile/types_and_names.pxd | 6
-rw-r--r--  tests/compile/types_and_names.pyx | 4
-rw-r--r--  tests/compile/while.pyx | 10
-rw-r--r--  tests/errors/e_ass.pyx | 2
-rw-r--r--  tests/errors/e_badtypeuse.pyx | 2
-rw-r--r--  tests/errors/e_bufaccess.pyx | 2
-rw-r--r--  tests/errors/e_cenum.pyx | 2
-rw-r--r--  tests/errors/e_ctypedefornot.pyx | 2
-rw-r--r--  tests/errors/e_slice.pyx | 4
-rw-r--r--  tests/errors/e_tempcast.pyx | 2
-rw-r--r--  tests/errors/encoding.pyx | 2
-rw-r--r--  tests/errors/nogil.pyx | 2
-rw-r--r--  tests/run/__getattribute_subclasses__.pyx | 10
-rw-r--r--  tests/run/autotestdict.pyx | 2
-rw-r--r--  tests/run/autotestdict_all.pyx | 2
-rw-r--r--  tests/run/autotestdict_cdef.pyx | 2
-rw-r--r--  tests/run/bufaccess.pyx | 80
-rw-r--r--  tests/run/buffmt.pyx | 30
-rw-r--r--  tests/run/call_crash.pyx | 6
-rw-r--r--  tests/run/callargs.pyx | 4
-rw-r--r--  tests/run/cdefassign.pyx | 2
-rw-r--r--  tests/run/cdefoptargs.pyx | 2
-rw-r--r--  tests/run/closure_decorators_T478.pyx | 8
-rw-r--r--  tests/run/complex_numbers_T305.pyx | 2
-rw-r--r--  tests/run/cpp_classes.pyx | 8
-rw-r--r--  tests/run/cpp_exceptions.pyx | 2
-rw-r--r--  tests/run/cpp_namespaces_helper.h | 20
-rw-r--r--  tests/run/cpp_nested_templates.pyx | 6
-rw-r--r--  tests/run/cpp_operators.pyx | 4
-rw-r--r--  tests/run/cpp_operators_helper.h | 6
-rw-r--r--  tests/run/cpp_stl.pyx | 2
-rw-r--r--  tests/run/cpp_templates.pyx | 2
-rw-r--r--  tests/run/ctruthtests.pyx | 2
-rw-r--r--  tests/run/cython_includes.pyx | 2
-rw-r--r--  tests/run/dict_getitem.pyx | 2
-rw-r--r--  tests/run/exceptionrefcount.pyx | 2
-rw-r--r--  tests/run/extcmethod.pyx | 2
-rw-r--r--  tests/run/extern_builtins_T258.pyx | 2
-rw-r--r--  tests/run/extpropertyref.pyx | 2
-rw-r--r--  tests/run/exttype.pyx | 4
-rw-r--r--  tests/run/for_decrement.pyx | 4
-rw-r--r--  tests/run/function_as_method_T494.pyx | 2
-rw-r--r--  tests/run/function_binding_T494.pyx | 2
-rw-r--r--  tests/run/hash_T326.pyx | 2
-rw-r--r--  tests/run/if.pyx | 2
-rw-r--r--  tests/run/ifelseexpr_T267.pyx | 2
-rw-r--r--  tests/run/importfrom.pyx | 2
-rw-r--r--  tests/run/index.pyx | 2
-rw-r--r--  tests/run/inplace.pyx | 8
-rw-r--r--  tests/run/int_literals.pyx | 4
-rw-r--r--  tests/run/knuth_man_or_boy_test.pyx | 2
-rw-r--r--  tests/run/large_consts_T237.pyx | 2
-rw-r--r--  tests/run/list_pop.pyx | 16
-rw-r--r--  tests/run/literals.pyx | 4
-rw-r--r--  tests/run/modbody.pyx | 2
-rw-r--r--  tests/run/moduletryexcept.pyx | 4
-rw-r--r--  tests/run/numpy_common.pxi | 2
-rw-r--r--  tests/run/numpy_test.pyx | 28
-rw-r--r--  tests/run/pinard5.pyx | 2
-rw-r--r--  tests/run/pure.pyx | 4
-rw-r--r--  tests/run/pure_py.py | 4
-rw-r--r--  tests/run/r_extcomplex2.pyx | 6
-rw-r--r--  tests/run/r_forloop.pyx | 2
-rw-r--r--  tests/run/r_huss3.pyx | 2
-rw-r--r--  tests/run/r_pythonapi.pyx | 2
-rw-r--r--  tests/run/r_spamtype.pyx | 8
-rw-r--r--  tests/run/shapes.h | 8
-rw-r--r--  tests/run/simpcall.pyx | 4
-rw-r--r--  tests/run/slice3.pyx | 2
-rw-r--r--  tests/run/slice_ptr.pyx | 2
-rw-r--r--  tests/run/special_methods_T561.pyx | 10
-rw-r--r--  tests/run/special_methods_T561_py2.pyx | 2
-rw-r--r--  tests/run/strfunction.pyx | 2
-rw-r--r--  tests/run/strliterals.pyx | 2
-rw-r--r--  tests/run/struct_conversion.pyx | 6
-rw-r--r--  tests/run/subop.pyx | 2
-rw-r--r--  tests/run/temps_corner1.pyx | 2
-rw-r--r--  tests/run/type_inference.pyx | 6
-rw-r--r--  tests/run/typedfieldbug_T303.pyx | 2
-rw-r--r--  tests/run/typeof.pyx | 2
-rw-r--r--  tests/run/unicodemethods.pyx | 36
-rw-r--r--  tests/run/unsignedbehaviour_T184.pyx | 2
-rw-r--r--  tests/run/withstat.pyx | 8
-rw-r--r--  tests/wrappers/cpp_overload_wrapper_lib.cpp | 2
-rw-r--r--  tests/wrappers/cpp_overload_wrapper_lib.h | 4
-rw-r--r--  tests/wrappers/cpp_references.pyx | 2
-rw-r--r--  tests/wrappers/cppwrap_lib.h | 4
235 files changed, 2969 insertions, 2969 deletions
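
Every hunk in the diff below removes only trailing whitespace: each removed line reappears unchanged except for the whitespace at its end, which is why the insertion and deletion counts above are identical. As an illustration only, here is a minimal sketch of the kind of script that produces such a cleanup; the script, its name and its file handling are assumptions, not part of this commit:

    # strip_ws.py -- hypothetical helper, shown only to illustrate the change.
    import sys

    def strip_trailing_whitespace(path):
        # Drop whitespace at the end of every line and rewrite the file
        # only if something actually changed (assumes text files with
        # Unix newlines).
        with open(path, "r") as f:
            lines = f.readlines()
        cleaned = [line.rstrip() + "\n" for line in lines]
        if cleaned != lines:
            with open(path, "w") as f:
                f.writelines(cleaned)

    if __name__ == "__main__":
        for path in sys.argv[1:]:
            strip_trailing_whitespace(path)

Usage would be along the lines of: python strip_ws.py COPYING.txt Cython/Build/Dependencies.py ... Note that rstrip() also removes the newline, which is added back, so a file lacking a final newline would gain one.
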
diff --git a/COPYING.txt b/COPYING.txt
index 314694850..898b2b74a 100644
--- a/COPYING.txt
+++ b/COPYING.txt
@@ -5,9 +5,9 @@ redistribute, modify and distribute modified versions."
------------------
Cython, which derives from Pyrex, is licensed under the Python
-Software Foundation License. More precisely, all modifications
+Software Foundation License. More precisely, all modifications
made to go from Pyrex to Cython are so licensed.
See LICENSE.txt for more details.
-
+
diff --git a/Cython/Build/Dependencies.py b/Cython/Build/Dependencies.py
index e4cd4fa80..05c5035ff 100644
--- a/Cython/Build/Dependencies.py
+++ b/Cython/Build/Dependencies.py
@@ -36,7 +36,7 @@ def parse_list(s):
return literals[literal[1:-1]]
else:
return literal
-
+
return [unquote(item) for item in s.split(delimiter)]
transitive_str = object()
@@ -70,7 +70,7 @@ def line_iter(source):
start = end+1
class DistutilsInfo(object):
-
+
def __init__(self, source=None, exn=None):
self.values = {}
if source is not None:
@@ -97,7 +97,7 @@ class DistutilsInfo(object):
value = getattr(exn, key, None)
if value:
self.values[key] = value
-
+
def merge(self, other):
if other is None:
return self
@@ -114,7 +114,7 @@ class DistutilsInfo(object):
else:
self.values[key] = value
return self
-
+
def subs(self, aliases):
if aliases is None:
return self
@@ -140,9 +140,9 @@ class DistutilsInfo(object):
def strip_string_literals(code, prefix='__Pyx_L'):
"""
- Normalizes every string literal to be of the form '__Pyx_Lxxx',
+ Normalizes every string literal to be of the form '__Pyx_Lxxx',
returning the normalized code and a mapping of labels to
- string literals.
+ string literals.
"""
new_code = []
literals = {}
@@ -156,7 +156,7 @@ def strip_string_literals(code, prefix='__Pyx_L'):
double_q = code.find('"', q)
q = min(single_q, double_q)
if q == -1: q = max(single_q, double_q)
-
+
# We're done.
if q == -1 and hash_mark == -1:
new_code.append(code[start:])
@@ -181,7 +181,7 @@ def strip_string_literals(code, prefix='__Pyx_L'):
start = q
else:
q += 1
-
+
# Process comment.
elif -1 != hash_mark and (hash_mark < q or q == -1):
end = code.find('\n', hash_mark)
@@ -212,7 +212,7 @@ def strip_string_literals(code, prefix='__Pyx_L'):
new_code.append(code[start:end])
start = q
q += len(in_quote)
-
+
return "".join(new_code), literals
@@ -245,16 +245,16 @@ def parse_dependencies(source_filename):
class DependencyTree(object):
-
+
def __init__(self, context):
self.context = context
self._transitive_cache = {}
-
+
#@cached_method
def parse_dependencies(self, source_filename):
return parse_dependencies(source_filename)
parse_dependencies = cached_method(parse_dependencies)
-
+
#@cached_method
def cimports_and_externs(self, filename):
cimports, includes, externs = self.parse_dependencies(filename)[:3]
@@ -272,10 +272,10 @@ class DependencyTree(object):
print("Unable to locate '%s' referenced from '%s'" % (filename, include))
return tuple(cimports), tuple(externs)
cimports_and_externs = cached_method(cimports_and_externs)
-
+
def cimports(self, filename):
return self.cimports_and_externs(filename)[0]
-
+
#@cached_method
def package(self, filename):
dir = os.path.dirname(filename)
@@ -284,13 +284,13 @@ class DependencyTree(object):
else:
return ()
package = cached_method(package)
-
+
#@cached_method
def fully_qualifeid_name(self, filename):
module = os.path.splitext(os.path.basename(filename))[0]
return '.'.join(self.package(filename) + (module,))
fully_qualifeid_name = cached_method(fully_qualifeid_name)
-
+
def find_pxd(self, module, filename=None):
if module[0] == '.':
raise NotImplementedError("New relative imports.")
@@ -301,7 +301,7 @@ class DependencyTree(object):
return pxd
return self.context.find_pxd_file(module, None)
find_pxd = cached_method(find_pxd)
-
+
#@cached_method
def cimported_files(self, filename):
if filename[-4:] == '.pyx' and os.path.exists(filename[:-4] + '.pxd'):
@@ -316,33 +316,33 @@ class DependencyTree(object):
print("\n\t".join(b))
return tuple(self_pxd + filter(None, [self.find_pxd(m, filename) for m in self.cimports(filename)]))
cimported_files = cached_method(cimported_files)
-
+
def immediate_dependencies(self, filename):
all = list(self.cimported_files(filename))
for extern in sum(self.cimports_and_externs(filename), ()):
all.append(os.path.normpath(os.path.join(os.path.dirname(filename), extern)))
return tuple(all)
-
+
#@cached_method
def timestamp(self, filename):
return os.path.getmtime(filename)
timestamp = cached_method(timestamp)
-
+
def extract_timestamp(self, filename):
# TODO: .h files from extern blocks
return self.timestamp(filename), filename
-
+
def newest_dependency(self, filename):
return self.transitive_merge(filename, self.extract_timestamp, max)
-
+
def distutils_info0(self, filename):
return self.parse_dependencies(filename)[3]
-
+
def distutils_info(self, filename, aliases=None, base=None):
return (self.transitive_merge(filename, self.distutils_info0, DistutilsInfo.merge)
.subs(aliases)
.merge(base))
-
+
def transitive_merge(self, node, extract, merge):
try:
seen = self._transitive_cache[extract, merge]
@@ -350,7 +350,7 @@ class DependencyTree(object):
seen = self._transitive_cache[extract, merge] = {}
return self.transitive_merge_helper(
node, extract, merge, seen, {}, self.cimported_files)[0]
-
+
def transitive_merge_helper(self, node, extract, merge, seen, stack, outgoing):
if node in seen:
return seen[node], None
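
The strip_string_literals() hunks above touch a helper whose docstring and final return statement describe its contract: it takes source code plus a label prefix and returns the normalized code together with a mapping from labels back to the original string literals. A hedged usage sketch based only on that contract (the exact label text in the output is not asserted here):

    from Cython.Build.Dependencies import strip_string_literals

    source = "x = 'some literal'"
    normalized, literals = strip_string_literals(source, prefix='__Pyx_L')
    # 'normalized' carries a prefix-based label in place of the literal;
    # 'literals' maps each label back to the original text, so the two
    # values together are enough to reconstruct the input.
    for label, text in literals.items():
        print(label, repr(text))
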
diff --git a/Cython/Build/Inline.py b/Cython/Build/Inline.py
index e72e50953..eec5401ea 100644
--- a/Cython/Build/Inline.py
+++ b/Cython/Build/Inline.py
@@ -88,7 +88,7 @@ def safe_type(arg, context=None):
return '%s.%s' % (base_type.__module__, base_type.__name__)
return 'object'
-def cython_inline(code,
+def cython_inline(code,
get_type=unsafe_type,
lib_dir=os.path.expanduser('~/.cython/inline'),
cython_include_dirs=['.'],
@@ -252,14 +252,14 @@ def get_body(source):
else:
return source[ix+1:]
-# Lots to be done here... It would be especially cool if compiled functions
+# Lots to be done here... It would be especially cool if compiled functions
# could invoke each other quickly.
class RuntimeCompiledFunction(object):
def __init__(self, f):
self._f = f
self._body = get_body(inspect.getsource(f))
-
+
def __call__(self, *args, **kwds):
all = getcallargs(self._f, *args, **kwds)
return cython_inline(self._body, locals=self._f.func_globals, globals=self._f.func_globals, **all)
diff --git a/Cython/Build/Tests/TestInline.py b/Cython/Build/Tests/TestInline.py
index 4a6376934..b9ffade83 100644
--- a/Cython/Build/Tests/TestInline.py
+++ b/Cython/Build/Tests/TestInline.py
@@ -32,7 +32,7 @@ class TestInline(CythonTest):
self.assertEquals(inline("return global_value + 1", **test_kwds), global_value + 1)
if has_numpy:
-
+
def test_numpy(self):
import numpy
a = numpy.ndarray((10, 20))
diff --git a/Cython/CodeWriter.py b/Cython/CodeWriter.py
index 0be388172..da6a03c7f 100644
--- a/Cython/CodeWriter.py
+++ b/Cython/CodeWriter.py
@@ -14,14 +14,14 @@ class LinesResult(object):
def __init__(self):
self.lines = []
self.s = u""
-
+
def put(self, s):
self.s += s
-
+
def newline(self):
self.lines.append(self.s)
self.s = u""
-
+
def putline(self, s):
self.put(s)
self.newline()
@@ -29,7 +29,7 @@ class LinesResult(object):
class CodeWriter(TreeVisitor):
indent_string = u" "
-
+
def __init__(self, result = None):
super(CodeWriter, self).__init__()
if result is None:
@@ -38,22 +38,22 @@ class CodeWriter(TreeVisitor):
self.numindents = 0
self.tempnames = {}
self.tempblockindex = 0
-
+
def write(self, tree):
self.visit(tree)
-
+
def indent(self):
self.numindents += 1
-
+
def dedent(self):
self.numindents -= 1
-
+
def startline(self, s = u""):
self.result.put(self.indent_string * self.numindents + s)
-
+
def put(self, s):
self.result.put(s)
-
+
def endline(self, s = u""):
self.result.putline(s)
@@ -70,13 +70,13 @@ class CodeWriter(TreeVisitor):
self.visit(item.default)
self.put(u", ")
self.visit(items[-1])
-
+
def visit_Node(self, node):
raise AssertionError("Node not handled by serializer: %r" % node)
-
+
def visit_ModuleNode(self, node):
self.visitchildren(node)
-
+
def visit_StatListNode(self, node):
self.visitchildren(node)
@@ -87,7 +87,7 @@ class CodeWriter(TreeVisitor):
self.indent()
self.visit(node.body)
self.dedent()
-
+
def visit_CArgDeclNode(self, node):
if node.base_type.name is not None:
self.visit(node.base_type)
@@ -96,10 +96,10 @@ class CodeWriter(TreeVisitor):
if node.default is not None:
self.put(u" = ")
self.visit(node.default)
-
+
def visit_CNameDeclaratorNode(self, node):
self.put(node.name)
-
+
def visit_CSimpleBaseTypeNode(self, node):
# See Parsing.p_sign_and_longness
if node.is_basic_c_type:
@@ -108,16 +108,16 @@ class CodeWriter(TreeVisitor):
self.put("short " * -node.longness)
elif node.longness > 0:
self.put("long " * node.longness)
-
+
self.put(node.name)
-
+
def visit_SingleAssignmentNode(self, node):
self.startline()
self.visit(node.lhs)
self.put(u" = ")
self.visit(node.rhs)
self.endline()
-
+
def visit_CascadedAssignmentNode(self, node):
self.startline()
for lhs in node.lhs_list:
@@ -125,10 +125,10 @@ class CodeWriter(TreeVisitor):
self.put(u" = ")
self.visit(node.rhs)
self.endline()
-
+
def visit_NameNode(self, node):
self.put(node.name)
-
+
def visit_IntNode(self, node):
self.put(node.value)
@@ -164,7 +164,7 @@ class CodeWriter(TreeVisitor):
def visit_PassStatNode(self, node):
self.startline(u"pass")
self.endline()
-
+
def visit_PrintStatNode(self, node):
self.startline(u"print ")
self.comma_separated_list(node.arg_tuple.args)
@@ -176,7 +176,7 @@ class CodeWriter(TreeVisitor):
self.visit(node.operand1)
self.put(u" %s " % node.operator)
self.visit(node.operand2)
-
+
def visit_CVarDefNode(self, node):
self.startline(u"cdef ")
self.visit(node.base_type)
@@ -201,7 +201,7 @@ class CodeWriter(TreeVisitor):
def visit_SequenceNode(self, node):
self.comma_separated_list(node.args) # Might need to discover whether we need () around tuples...hmm...
-
+
def visit_SimpleCallNode(self, node):
self.visit(node.function)
self.put(u"(")
@@ -224,14 +224,14 @@ class CodeWriter(TreeVisitor):
self.startline()
self.visit(node.expr)
self.endline()
-
+
def visit_InPlaceAssignmentNode(self, node):
self.startline()
self.visit(node.lhs)
self.put(u" %s= " % node.operator)
self.visit(node.rhs)
self.endline()
-
+
def visit_WithStatNode(self, node):
self.startline()
self.put(u"with ")
@@ -243,7 +243,7 @@ class CodeWriter(TreeVisitor):
self.indent()
self.visit(node.body)
self.dedent()
-
+
def visit_AttributeNode(self, node):
self.visit(node.obj)
self.put(u".%s" % node.attribute)
diff --git a/Cython/Compiler/AnalysedTreeTransforms.py b/Cython/Compiler/AnalysedTreeTransforms.py
index 79a0485e2..de527e00b 100644
--- a/Cython/Compiler/AnalysedTreeTransforms.py
+++ b/Cython/Compiler/AnalysedTreeTransforms.py
@@ -11,7 +11,7 @@ import Symtab
class AutoTestDictTransform(ScopeTrackingTransform):
# Handles autotestdict directive
- blacklist = ['__cinit__', '__dealloc__', '__richcmp__',
+ blacklist = ['__cinit__', '__dealloc__', '__richcmp__',
'__nonzero__', '__bool__',
'__len__', '__contains__']
diff --git a/Cython/Compiler/Annotate.py b/Cython/Compiler/Annotate.py
index 425d956e8..fe38378b3 100644
--- a/Cython/Compiler/Annotate.py
+++ b/Cython/Compiler/Annotate.py
@@ -12,9 +12,9 @@ from Cython import Utils
# need one-characters subsitutions (for now) so offsets aren't off
special_chars = [(u'<', u'\xF0', u'&lt;'),
- (u'>', u'\xF1', u'&gt;'),
+ (u'>', u'\xF1', u'&gt;'),
(u'&', u'\xF2', u'&amp;')]
-
+
line_pos_comment = re.compile(r'/\*.*?<<<<<<<<<<<<<<.*?\*/\n*', re.DOTALL)
class AnnotationCCodeWriter(CCodeWriter):
@@ -32,14 +32,14 @@ class AnnotationCCodeWriter(CCodeWriter):
self.annotations = create_from.annotations
self.code = create_from.code
self.last_pos = create_from.last_pos
-
+
def create_new(self, create_from, buffer, copy_formatting):
return AnnotationCCodeWriter(create_from, buffer, copy_formatting)
def write(self, s):
CCodeWriter.write(self, s)
self.annotation_buffer.write(s)
-
+
def mark_pos(self, pos):
if pos is not None:
CCodeWriter.mark_pos(self, pos)
@@ -52,7 +52,7 @@ class AnnotationCCodeWriter(CCodeWriter):
def annotate(self, pos, item):
self.annotations.append((pos, item))
-
+
def save_annotation(self, source_filename, target_filename):
self.mark_pos(None)
f = Utils.open_source_file(source_filename)
@@ -74,7 +74,7 @@ class AnnotationCCodeWriter(CCodeWriter):
all.append(((source_filename, pos[1], pos[2]+size), end))
else:
all.append((pos, start+end))
-
+
all.sort()
all.reverse()
for pos, item in all:
@@ -83,7 +83,7 @@ class AnnotationCCodeWriter(CCodeWriter):
col += 1
line = lines[line_no]
lines[line_no] = line[:col] + item + line[col:]
-
+
html_filename = os.path.splitext(target_filename)[0] + ".html"
f = codecs.open(html_filename, "w", encoding="UTF-8")
f.write(u'<html>\n')
@@ -130,14 +130,14 @@ function toggleDiv(id) {
c_file = Utils.decode_filename(os.path.basename(target_filename))
f.write(u'<p>Raw output: <a href="%s">%s</a>\n' % (c_file, c_file))
k = 0
-
+
py_c_api = re.compile(u'(Py[A-Z][a-z]+_[A-Z][a-z][A-Za-z_]+)\(')
py_marco_api = re.compile(u'(Py[A-Z][a-z]+_[A-Z][A-Z_]+)\(')
pyx_c_api = re.compile(u'(__Pyx_[A-Z][a-z_][A-Za-z_]+)\(')
pyx_macro_api = re.compile(u'(__Pyx_[A-Z][A-Z_]+)\(')
error_goto = re.compile(ur'((; *if .*)? \{__pyx_filename = .*goto __pyx_L\w+;\})')
refnanny = re.compile(u'(__Pyx_X?(GOT|GIVE)REF|__Pyx_RefNanny[A-Za-z]+)')
-
+
code_source_file = self.code[source_filename]
for line in lines:
@@ -146,18 +146,18 @@ function toggleDiv(id) {
code = code_source_file[k]
except KeyError:
code = ''
-
+
code = code.replace('<', '<code><</code>')
-
+
code, py_c_api_calls = py_c_api.subn(ur"<span class='py_c_api'>\1</span>(", code)
code, pyx_c_api_calls = pyx_c_api.subn(ur"<span class='pyx_c_api'>\1</span>(", code)
code, py_macro_api_calls = py_marco_api.subn(ur"<span class='py_macro_api'>\1</span>(", code)
code, pyx_macro_api_calls = pyx_macro_api.subn(ur"<span class='pyx_macro_api'>\1</span>(", code)
code, refnanny_calls = refnanny.subn(ur"<span class='refnanny'>\1</span>", code)
code, error_goto_calls = error_goto.subn(ur"<span class='error_goto'>\1</span>", code)
-
+
code = code.replace(u"<span class='error_goto'>;", u";<span class='error_goto'>")
-
+
score = 5*py_c_api_calls + 2*pyx_c_api_calls + py_macro_api_calls + pyx_macro_api_calls - refnanny_calls
color = u"FFFF%02x" % int(255/(1+score/10.0))
f.write(u"<pre class='line' style='background-color: #%s' onclick='toggleDiv(\"line%s\")'>" % (color, k))
@@ -166,13 +166,13 @@ function toggleDiv(id) {
for c, cc, html in special_chars:
line = line.replace(cc, html)
f.write(line.rstrip())
-
+
f.write(u'</pre>\n')
code = re.sub(line_pos_comment, '', code) # inline annotations are redundant
f.write(u"<pre id='line%s' class='code' style='background-color: #%s'>%s</pre>" % (k, color, code))
f.write(u'</body></html>\n')
f.close()
-
+
# TODO: make this cleaner
def escape(raw_string):
@@ -184,15 +184,15 @@ def escape(raw_string):
class AnnotationItem(object):
-
+
def __init__(self, style, text, tag="", size=0):
self.style = style
self.text = text
self.tag = tag
self.size = size
-
+
def start(self):
return u"<span class='tag %s' title='%s'>%s" % (self.style, self.text, self.tag)
-
+
def end(self):
return self.size, u"</span>"
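
The annotation hunk above scores each generated C line by weighting Python C-API and Cython utility calls, then turns the score into a background colour. A small worked check of that arithmetic, with arbitrarily chosen call counts rather than values from any real run:

    # Reproduces the two expressions from the Annotate.py hunk above.
    py_c_api_calls, pyx_c_api_calls = 1, 2        # hypothetical counts
    py_macro_api_calls, pyx_macro_api_calls = 3, 0
    refnanny_calls = 1
    score = (5*py_c_api_calls + 2*pyx_c_api_calls + py_macro_api_calls
             + pyx_macro_api_calls - refnanny_calls)
    color = "FFFF%02x" % int(255/(1+score/10.0))
    print(score, color)    # 11 FFFF79

Higher scores shrink the blue component of the FFFFxx colour, so lines that lean heavily on the Python C-API show up as a stronger yellow in the generated HTML.
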
diff --git a/Cython/Compiler/AutoDocTransforms.py b/Cython/Compiler/AutoDocTransforms.py
index 13859eac6..0fcdd0e96 100644
--- a/Cython/Compiler/AutoDocTransforms.py
+++ b/Cython/Compiler/AutoDocTransforms.py
@@ -101,7 +101,7 @@ class EmbedSignature(CythonTransform):
return node
else:
return super(EmbedSignature, self).__call__(node)
-
+
def visit_ClassDefNode(self, node):
oldname = self.class_name
oldclass = self.class_node
@@ -120,7 +120,7 @@ class EmbedSignature(CythonTransform):
def visit_DefNode(self, node):
if not self.current_directives['embedsignature']:
return node
-
+
is_constructor = False
hide_self = False
if node.entry.is_special:
diff --git a/Cython/Compiler/Buffer.py b/Cython/Compiler/Buffer.py
index a7f4c3d3b..3584234cf 100644
--- a/Cython/Compiler/Buffer.py
+++ b/Cython/Compiler/Buffer.py
@@ -57,12 +57,12 @@ class IntroduceBufferAuxiliaryVars(CythonTransform):
if isinstance(node, ModuleNode) and len(bufvars) > 0:
- # for now...note that pos is wrong
+ # for now...note that pos is wrong
raise CompileError(node.pos, "Buffer vars not allowed in module scope")
for entry in bufvars:
if entry.type.dtype.is_ptr:
raise CompileError(node.pos, "Buffers with pointer types not yet supported.")
-
+
name = entry.name
buftype = entry.type
if buftype.ndim > self.max_ndim:
@@ -84,10 +84,10 @@ class IntroduceBufferAuxiliaryVars(CythonTransform):
if entry.is_arg:
result.used = True
return result
-
+
stridevars = [var(Naming.bufstride_prefix, i, "0") for i in range(entry.type.ndim)]
- shapevars = [var(Naming.bufshape_prefix, i, "0") for i in range(entry.type.ndim)]
+ shapevars = [var(Naming.bufshape_prefix, i, "0") for i in range(entry.type.ndim)]
mode = entry.type.mode
if mode == 'full':
suboffsetvars = [var(Naming.bufsuboffset_prefix, i, "-1") for i in range(entry.type.ndim)]
@@ -95,7 +95,7 @@ class IntroduceBufferAuxiliaryVars(CythonTransform):
suboffsetvars = None
entry.buffer_aux = Symtab.BufferAux(bufinfo, stridevars, shapevars, suboffsetvars)
-
+
scope.buffer_entries = bufvars
self.scope = scope
@@ -138,9 +138,9 @@ def analyse_buffer_options(globalpos, env, posargs, dictargs, defaults=None, nee
"""
if defaults is None:
defaults = buffer_defaults
-
+
posargs, dictargs = Interpreter.interpret_compiletime_options(posargs, dictargs, type_env=env, type_args = (0,'dtype'))
-
+
if len(posargs) > buffer_positional_options_count:
raise CompileError(posargs[-1][1], ERR_BUF_TOO_MANY)
@@ -187,7 +187,7 @@ def analyse_buffer_options(globalpos, env, posargs, dictargs, defaults=None, nee
assert_bool('cast')
return options
-
+
#
# Code generation
@@ -209,7 +209,7 @@ def get_flags(buffer_aux, buffer_type):
assert False
if buffer_aux.writable_needed: flags += "| PyBUF_WRITABLE"
return flags
-
+
def used_buffer_aux_vars(entry):
buffer_aux = entry.buffer_aux
buffer_aux.buffer_info_var.used = True
@@ -258,10 +258,10 @@ def get_getbuffer_call(code, obj_cname, buffer_aux, buffer_type):
bufstruct = buffer_aux.buffer_info_var.cname
dtype_typeinfo = get_type_information_cname(code, buffer_type.dtype)
-
+
return ("__Pyx_GetBufferAndValidate(&%(bufstruct)s, "
"(PyObject*)%(obj_cname)s, &%(dtype_typeinfo)s, %(flags)s, %(ndim)d, "
- "%(cast)d, __pyx_stack)" % locals())
+ "%(cast)d, __pyx_stack)" % locals())
def put_assign_to_buffer(lhs_cname, rhs_cname, buffer_aux, buffer_type,
is_initialized, pos, code):
@@ -272,7 +272,7 @@ def put_assign_to_buffer(lhs_cname, rhs_cname, buffer_aux, buffer_type,
However, the assignment operation may throw an exception so that the reassignment
never happens.
-
+
Depending on the circumstances there are two possible outcomes:
- Old buffer released, new acquired, rhs assigned to lhs
- Old buffer released, new acquired which fails, reaqcuire old lhs buffer
@@ -285,7 +285,7 @@ def put_assign_to_buffer(lhs_cname, rhs_cname, buffer_aux, buffer_type,
code.putln("{") # Set up necesarry stack for getbuffer
code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % buffer_type.dtype.struct_nesting_depth())
-
+
getbuffer = get_getbuffer_call(code, "%s", buffer_aux, buffer_type) # fill in object below
if is_initialized:
@@ -370,7 +370,7 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, pos,
code.putln("%s = %d;" % (tmp_cname, dim))
code.put("} else ")
# check bounds in positive direction
- if signed != 0:
+ if signed != 0:
cast = ""
else:
cast = "(size_t)"
@@ -389,7 +389,7 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, pos,
bufaux.shapevars):
if signed != 0:
code.putln("if (%s < 0) %s += %s;" % (cname, cname, shape.cname))
-
+
# Create buffer lookup and return it
# This is done via utility macros/inline functions, which vary
# according to the access mode used.
@@ -418,7 +418,7 @@ def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives, pos,
for i, s in zip(index_cnames, bufaux.stridevars):
params.append(i)
params.append(s.cname)
-
+
# Make sure the utility code is available
if funcname not in code.globalstate.utility_codes:
code.globalstate.utility_codes.add(funcname)
@@ -458,7 +458,7 @@ def buf_lookup_full_code(proto, defin, name, nd):
char* ptr = (char*)buf;
""") % (name, funcargs) + "".join([dedent("""\
ptr += s%d * i%d;
- if (o%d >= 0) ptr = *((char**)ptr) + o%d;
+ if (o%d >= 0) ptr = *((char**)ptr) + o%d;
""") % (i, i, i, i) for i in range(nd)]
) + "\nreturn ptr;\n}")
@@ -563,7 +563,7 @@ def use_py2_buffer_functions(env):
#endif
""")
-
+
env.use_utility_code(UtilityCode(
proto = dedent("""\
#if PY_MAJOR_VERSION < 3
@@ -613,9 +613,9 @@ def get_type_information_cname(code, dtype, maxdepth=None):
if name not in code.globalstate.utility_codes:
code.globalstate.utility_codes.add(name)
typecode = code.globalstate['typeinfo']
-
+
complex_possible = dtype.is_struct_or_union and dtype.can_be_complex()
-
+
declcode = dtype.declaration_code("")
if dtype.is_simple_buffer_dtype():
structinfo_name = "NULL"
@@ -634,7 +634,7 @@ def get_type_information_cname(code, dtype, maxdepth=None):
typecode.putln("};", safe=True)
else:
assert False
-
+
rep = str(dtype)
if dtype.is_int:
if dtype.signed == 0:
@@ -851,7 +851,7 @@ static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
- }
+ }
}
}
@@ -895,7 +895,7 @@ static size_t __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
- }
+ }
}
}
@@ -932,7 +932,7 @@ static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
-
+
if (ctx->packmode == '@' || ctx->packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
@@ -955,7 +955,7 @@ static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
ctx->head->parent_offset = parent_offset;
continue;
}
-
+
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
@@ -969,7 +969,7 @@ static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
}
ctx->fmt_offset += size;
-
+
--ctx->enc_count; /* Consume from buffer string */
/* Done checking, move to next field, pushing or popping struct stack if needed */
@@ -1002,7 +1002,7 @@ static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
- return 0;
+ return 0;
}
static int __Pyx_BufFmt_FirstPack(__Pyx_BufFmt_Context* ctx) {
@@ -1124,7 +1124,7 @@ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const cha
return NULL;
}
}
-
+
}
}
}
diff --git a/Cython/Compiler/Builtin.py b/Cython/Compiler/Builtin.py
index 196a2094a..296f1cd3d 100644
--- a/Cython/Compiler/Builtin.py
+++ b/Cython/Compiler/Builtin.py
@@ -438,14 +438,14 @@ builtin_types_table = [
("type", "PyType_Type", []),
# This conflicts with the C++ bool type, and unfortunately
-# C++ is too liberal about PyObject* <-> bool conversions,
+# C++ is too liberal about PyObject* <-> bool conversions,
# resulting in unintuitive runtime behavior and segfaults.
# ("bool", "PyBool_Type", []),
("int", "PyInt_Type", []),
("long", "PyLong_Type", []),
("float", "PyFloat_Type", []),
-
+
("complex", "PyComplex_Type", [BuiltinAttribute('cval', field_type_name = 'Py_complex'),
BuiltinAttribute('real', 'cval.real', field_type = PyrexTypes.c_double_type),
BuiltinAttribute('imag', 'cval.imag', field_type = PyrexTypes.c_double_type),
@@ -474,7 +474,7 @@ builtin_types_table = [
]),
# ("file", "PyFile_Type", []), # not in Py3
- ("set", "PySet_Type", [BuiltinMethod("clear", "T", "i", "PySet_Clear"),
+ ("set", "PySet_Type", [BuiltinMethod("clear", "T", "i", "PySet_Clear"),
BuiltinMethod("discard", "TO", "i", "PySet_Discard"),
BuiltinMethod("add", "TO", "i", "PySet_Add"),
BuiltinMethod("pop", "T", "O", "PySet_Pop")]),
@@ -490,7 +490,7 @@ types_that_construct_their_instance = (
# 'file', # only in Py2.x
)
-
+
builtin_structs_table = [
('Py_buffer', 'Py_buffer',
[("buf", PyrexTypes.c_void_ptr_type),
diff --git a/Cython/Compiler/CmdLine.py b/Cython/Compiler/CmdLine.py
index 05ec91252..061610f0b 100644
--- a/Cython/Compiler/CmdLine.py
+++ b/Cython/Compiler/CmdLine.py
@@ -24,9 +24,9 @@ Options:
-v, --verbose Be verbose, print file names on multiple compilation
-p, --embed-positions If specified, the positions in Cython files of each
function definition is embedded in its docstring.
- --cleanup <level> Release interned objects on python exit, for memory debugging.
- Level indicates aggressiveness, default 0 releases nothing.
- -w, --working <directory> Sets the working directory for Cython (the directory modules
+ --cleanup <level> Release interned objects on python exit, for memory debugging.
+ Level indicates aggressiveness, default 0 releases nothing.
+ -w, --working <directory> Sets the working directory for Cython (the directory modules
are searched from)
--gdb Output debug information for cygdb
@@ -65,7 +65,7 @@ def parse_command_line(args):
return args.pop(0)
else:
bad_usage()
-
+
def get_param(option):
tail = option[2:]
if tail:
diff --git a/Cython/Compiler/Code.py b/Cython/Compiler/Code.py
index c3ed37186..5134dcf27 100644
--- a/Cython/Compiler/Code.py
+++ b/Cython/Compiler/Code.py
@@ -29,7 +29,7 @@ class UtilityCode(object):
# See GlobalState.put_utility_code.
#
# hashes/equals by instance
-
+
def __init__(self, proto=None, impl=None, init=None, cleanup=None, requires=None,
proto_block='utility_code_proto'):
# proto_block: Which code block to dump prototype in. See GlobalState.
@@ -84,8 +84,8 @@ class UtilityCode(object):
writer.put(self.cleanup)
else:
self.cleanup(writer, output.module_pos)
-
-
+
+
class FunctionState(object):
# return_label string function return point label
@@ -101,7 +101,7 @@ class FunctionState(object):
def __init__(self, owner, names_taken=cython.set()):
self.names_taken = names_taken
self.owner = owner
-
+
self.error_label = None
self.label_counter = 0
self.labels_used = cython.set()
@@ -127,28 +127,28 @@ class FunctionState(object):
if name is not None:
label += '_' + name
return label
-
+
def new_error_label(self):
old_err_lbl = self.error_label
self.error_label = self.new_label('error')
return old_err_lbl
-
+
def get_loop_labels(self):
return (
self.continue_label,
self.break_label)
-
+
def set_loop_labels(self, labels):
(self.continue_label,
self.break_label) = labels
-
+
def new_loop_labels(self):
old_labels = self.get_loop_labels()
self.set_loop_labels(
- (self.new_label("continue"),
+ (self.new_label("continue"),
self.new_label("break")))
return old_labels
-
+
def get_all_labels(self):
return (
self.continue_label,
@@ -172,10 +172,10 @@ class FunctionState(object):
new_labels.append(old_label)
self.set_all_labels(new_labels)
return old_labels
-
+
def use_label(self, lbl):
self.labels_used.add(lbl)
-
+
def label_used(self, lbl):
return lbl in self.labels_used
@@ -402,7 +402,7 @@ class GlobalState(object):
# parts {string:CCodeWriter}
-
+
# interned_strings
# consts
# interned_nums
@@ -438,7 +438,7 @@ class GlobalState(object):
'utility_code_def',
'end'
]
-
+
def __init__(self, writer, emit_linenums=False):
self.filename_table = {}
@@ -556,7 +556,7 @@ class GlobalState(object):
w = self.parts['cleanup_module']
w.putln("}")
w.exit_cfunc_scope()
-
+
def put_pyobject_decl(self, entry):
self['global_var'].putln("static PyObject *%s;" % entry.cname)
@@ -764,7 +764,7 @@ class GlobalState(object):
# The functions below are there in a transition phase only
# and will be deprecated. They are called from Nodes.BlockNode.
# The copy&paste duplication is intentional in order to be able
- # to see quickly how BlockNode worked, until this is replaced.
+ # to see quickly how BlockNode worked, until this is replaced.
def should_declare(self, cname, entry):
if cname in self.declared_cnames:
@@ -813,7 +813,7 @@ class GlobalState(object):
#
# Utility code state
#
-
+
def use_utility_code(self, utility_code):
"""
Adds code to the C file. utility_code should
@@ -859,10 +859,10 @@ class CCodeWriter(object):
- filename_table, filename_list, input_file_contents: All codewriters
coming from the same root share the same instances simultaneously.
"""
-
+
# f file output file
# buffer StringIOTree
-
+
# level int indentation level
# bol bool beginning of line?
# marker string comment to emit before next line
@@ -876,7 +876,7 @@ class CCodeWriter(object):
# about the current class one is in
globalstate = None
-
+
def __init__(self, create_from=None, buffer=None, copy_formatting=False, emit_linenums=None):
if buffer is None: buffer = StringIOTree()
self.buffer = buffer
@@ -884,7 +884,7 @@ class CCodeWriter(object):
self.last_marker_line = 0
self.source_desc = ""
self.pyclass_stack = []
-
+
self.funcstate = None
self.level = 0
self.call_level = 0
@@ -916,13 +916,13 @@ class CCodeWriter(object):
return self.buffer.getvalue()
def write(self, s):
- # also put invalid markers (lineno 0), to indicate that those lines
+ # also put invalid markers (lineno 0), to indicate that those lines
# have no Cython source code correspondence
if self.marker is None:
cython_lineno = self.last_marker_line
else:
cython_lineno = self.marker[0]
-
+
self.buffer.markers.extend([cython_lineno] * s.count('\n'))
self.buffer.write(s)
@@ -971,7 +971,7 @@ class CCodeWriter(object):
def enter_cfunc_scope(self):
self.funcstate = FunctionState(self)
-
+
def exit_cfunc_scope(self):
self.funcstate = None
@@ -1008,7 +1008,7 @@ class CCodeWriter(object):
self.emit_marker()
if self.emit_linenums and self.last_marker_line != 0:
self.write('\n#line %s "%s"\n' % (self.last_marker_line, self.source_desc))
-
+
if code:
if safe:
self.put_safe(code)
@@ -1016,7 +1016,7 @@ class CCodeWriter(object):
self.put(code)
self.write("\n");
self.bol = 1
-
+
def emit_marker(self):
self.write("\n");
self.indent()
@@ -1054,18 +1054,18 @@ class CCodeWriter(object):
def increase_indent(self):
self.level = self.level + 1
-
+
def decrease_indent(self):
self.level = self.level - 1
-
+
def begin_block(self):
self.putln("{")
self.increase_indent()
-
+
def end_block(self):
self.decrease_indent()
self.putln("}")
-
+
def indent(self):
self.write(" " * self.level)
@@ -1089,21 +1089,21 @@ class CCodeWriter(object):
self.marker = (line, marker)
if self.emit_linenums:
self.source_desc = source_desc.get_escaped_description()
-
+
def put_label(self, lbl):
if lbl in self.funcstate.labels_used:
self.putln("%s:;" % lbl)
-
+
def put_goto(self, lbl):
self.funcstate.use_label(lbl)
self.putln("goto %s;" % lbl)
-
+
def put_var_declarations(self, entries, static = 0, dll_linkage = None,
definition = True):
for entry in entries:
if not entry.in_cinclude:
self.put_var_declaration(entry, static, dll_linkage, definition)
-
+
def put_var_declaration(self, entry, static = 0, dll_linkage = None,
definition = True):
#print "Code.put_var_declaration:", entry.name, "definition =", definition ###
@@ -1146,7 +1146,7 @@ class CCodeWriter(object):
def put_h_guard(self, guard):
self.putln("#ifndef %s" % guard)
self.putln("#define %s" % guard)
-
+
def unlikely(self, cond):
if Options.gcc_branch_hints:
return 'unlikely(%s)' % cond
@@ -1162,17 +1162,17 @@ class CCodeWriter(object):
return "(PyObject *)" + entry.cname
else:
return entry.cname
-
+
def as_pyobject(self, cname, type):
from PyrexTypes import py_object_type, typecast
return typecast(py_object_type, type, cname)
-
+
def put_gotref(self, cname):
self.putln("__Pyx_GOTREF(%s);" % cname)
-
+
def put_giveref(self, cname):
self.putln("__Pyx_GIVEREF(%s);" % cname)
-
+
def put_xgiveref(self, cname):
self.putln("__Pyx_XGIVEREF(%s);" % cname)
@@ -1184,7 +1184,7 @@ class CCodeWriter(object):
self.putln("__Pyx_INCREF(%s);" % self.as_pyobject(cname, type))
else:
self.putln("Py_INCREF(%s);" % self.as_pyobject(cname, type))
-
+
def put_decref(self, cname, type, nanny=True):
if nanny:
self.putln("__Pyx_DECREF(%s);" % self.as_pyobject(cname, type))
@@ -1194,7 +1194,7 @@ class CCodeWriter(object):
def put_var_gotref(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_GOTREF(%s);" % self.entry_as_pyobject(entry))
-
+
def put_var_giveref(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_GIVEREF(%s);" % self.entry_as_pyobject(entry))
@@ -1210,7 +1210,7 @@ class CCodeWriter(object):
def put_var_incref(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_INCREF(%s);" % self.entry_as_pyobject(entry))
-
+
def put_decref_clear(self, cname, type, nanny=True):
from PyrexTypes import py_object_type, typecast
if nanny:
@@ -1219,13 +1219,13 @@ class CCodeWriter(object):
else:
self.putln("Py_DECREF(%s); %s = 0;" % (
typecast(py_object_type, type, cname), cname))
-
+
def put_xdecref(self, cname, type, nanny=True):
if nanny:
self.putln("__Pyx_XDECREF(%s);" % self.as_pyobject(cname, type))
else:
self.putln("Py_XDECREF(%s);" % self.as_pyobject(cname, type))
-
+
def put_xdecref_clear(self, cname, type, nanny=True):
if nanny:
self.putln("__Pyx_XDECREF(%s); %s = 0;" % (
@@ -1240,21 +1240,21 @@ class CCodeWriter(object):
self.putln("__Pyx_XDECREF(%s);" % self.entry_as_pyobject(entry))
else:
self.putln("__Pyx_DECREF(%s);" % self.entry_as_pyobject(entry))
-
+
def put_var_decref_clear(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_DECREF(%s); %s = 0;" % (
self.entry_as_pyobject(entry), entry.cname))
-
+
def put_var_xdecref(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_XDECREF(%s);" % self.entry_as_pyobject(entry))
-
+
def put_var_xdecref_clear(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_XDECREF(%s); %s = 0;" % (
self.entry_as_pyobject(entry), entry.cname))
-
+
def put_var_decrefs(self, entries, used_only = 0):
for entry in entries:
if not used_only or entry.used:
@@ -1262,15 +1262,15 @@ class CCodeWriter(object):
self.put_var_xdecref(entry)
else:
self.put_var_decref(entry)
-
+
def put_var_xdecrefs(self, entries):
for entry in entries:
self.put_var_xdecref(entry)
-
+
def put_var_xdecrefs_clear(self, entries):
for entry in entries:
self.put_var_xdecref_clear(entry)
-
+
def put_init_to_py_none(self, cname, type, nanny=True):
from PyrexTypes import py_object_type, typecast
py_none = typecast(type, py_object_type, "Py_None")
@@ -1278,7 +1278,7 @@ class CCodeWriter(object):
self.putln("%s = %s; __Pyx_INCREF(Py_None);" % (cname, py_none))
else:
self.putln("%s = %s; Py_INCREF(Py_None);" % (cname, py_none))
-
+
def put_init_var_to_py_none(self, entry, template = "%s", nanny=True):
code = template % entry.cname
#if entry.type.is_extension_type:
@@ -1306,7 +1306,7 @@ class CCodeWriter(object):
method_flags += [method_coexist]
self.putln(
'{__Pyx_NAMESTR("%s"), (PyCFunction)%s, %s, __Pyx_DOCSTR(%s)}%s' % (
- entry.name,
+ entry.name,
entry.func_cname,
"|".join(method_flags),
doc_code,
@@ -1330,7 +1330,7 @@ class CCodeWriter(object):
Naming.lineno_cname,
pos[1],
cinfo)
-
+
def error_goto(self, pos):
lbl = self.funcstate.error_label
self.funcstate.use_label(lbl)
@@ -1340,16 +1340,16 @@ class CCodeWriter(object):
def error_goto_if(self, cond, pos):
return "if (%s) %s" % (self.unlikely(cond), self.error_goto(pos))
-
+
def error_goto_if_null(self, cname, pos):
return self.error_goto_if("!%s" % cname, pos)
-
+
def error_goto_if_neg(self, cname, pos):
return self.error_goto_if("%s < 0" % cname, pos)
-
+
def error_goto_if_PyErr(self, pos):
return self.error_goto_if("PyErr_Occurred()", pos)
-
+
def lookup_filename(self, filename):
return self.globalstate.lookup_filename(filename)
@@ -1361,13 +1361,13 @@ class CCodeWriter(object):
def put_trace_declarations(self):
self.putln('__Pyx_TraceDeclarations');
-
+
def put_trace_call(self, name, pos):
self.putln('__Pyx_TraceCall("%s", %s[%s], %s);' % (name, Naming.filetable_cname, self.lookup_filename(pos[0]), pos[1]));
-
+
def put_trace_exception(self):
self.putln("__Pyx_TraceException();")
-
+
def put_trace_return(self, retvalue_cname):
self.putln("__Pyx_TraceReturn(%s);" % retvalue_cname)
@@ -1379,13 +1379,13 @@ class PyrexCodeWriter(object):
def __init__(self, outfile_name):
self.f = Utils.open_new_file(outfile_name)
self.level = 0
-
+
def putln(self, code):
self.f.write("%s%s\n" % (" " * self.level, code))
-
+
def indent(self):
self.level += 1
-
+
def dedent(self):
self.level -= 1
diff --git a/Cython/Compiler/ControlFlow.py b/Cython/Compiler/ControlFlow.py
index f63d52caa..cadcdf5ad 100644
--- a/Cython/Compiler/ControlFlow.py
+++ b/Cython/Compiler/ControlFlow.py
@@ -1,17 +1,17 @@
import bisect, sys
-# This module keeps track of arbitrary "states" at any point of the code.
+# This module keeps track of arbitrary "states" at any point of the code.
# A state is considered known if every path to the given point agrees on
-# its state, otherwise it is None (i.e. unknown).
+# its state, otherwise it is None (i.e. unknown).
-# It might be useful to be able to "freeze" the set of states by pushing
+# It might be useful to be able to "freeze" the set of states by pushing
# all state changes to the tips of the trees for fast reading. Perhaps this
-# could be done on get_state, clearing the cache on set_state (assuming
-# incoming is immutable).
+# could be done on get_state, clearing the cache on set_state (assuming
+# incoming is immutable).
-# This module still needs a lot of work, and probably should totally be
-# redesigned. It doesn't take return, raise, continue, or break into
-# account.
+# This module still needs a lot of work, and probably should totally be
+# redesigned. It doesn't take return, raise, continue, or break into
+# account.
from Cython.Compiler.Scanning import StringSourceDescriptor
try:
@@ -31,26 +31,26 @@ class ControlFlow(object):
self.parent = parent
self.tip = {}
self.end_pos = _END_POS
-
+
def start_branch(self, pos):
self.end_pos = pos
branch_point = BranchingControlFlow(pos, self)
if self.parent is not None:
self.parent.branches[-1] = branch_point
return branch_point.branches[0]
-
+
def next_branch(self, pos):
self.end_pos = pos
return self.parent.new_branch(pos)
-
+
def finish_branch(self, pos):
self.end_pos = pos
self.parent.end_pos = pos
return LinearControlFlow(pos, self.parent)
-
+
def get_state(self, item, pos=_END_POS):
return self.get_pos_state(item, pos)[1]
-
+
def get_pos_state(self, item, pos=_END_POS):
# do some caching
if pos > self.end_pos:
@@ -86,14 +86,14 @@ class ControlFlow(object):
if item in current.tip:
del current.tip[item]
current._set_state_local(pos, item, state)
-
-
+
+
class LinearControlFlow(ControlFlow):
def __init__(self, start_pos=(), incoming=None, parent=None):
ControlFlow.__init__(self, start_pos, incoming, parent)
self.events = {}
-
+
def _set_state_local(self, pos, item, state):
if item in self.events:
event_list = self.events[item]
@@ -111,10 +111,10 @@ class LinearControlFlow(ControlFlow):
return None
def to_string(self, indent='', limit=None):
-
+
if len(self.events) == 0:
s = indent + "[no state changes]"
-
+
else:
all = []
for item, event_list in self.events.items():
@@ -126,21 +126,21 @@ class LinearControlFlow(ControlFlow):
if self.incoming is not limit and self.incoming is not None:
s = "%s\n%s" % (self.incoming.to_string(indent, limit=limit), s)
return s
-
-
+
+
class BranchingControlFlow(ControlFlow):
-
+
def __init__(self, start_pos, incoming, parent=None):
ControlFlow.__init__(self, start_pos, incoming, parent)
self.branches = [LinearControlFlow(start_pos, incoming, parent=self)]
self.branch_starts = [start_pos]
-
+
def _set_state_local(self, pos, item, state):
for branch_pos, branch in zip(self.branch_starts[::-1], self.branches[::-1]):
if pos >= branch_pos:
branch._set_state_local(pos, item, state)
return
-
+
def _get_pos_state_local(self, item, pos, stop_at=None):
if pos < self.end_pos:
for branch_pos, branch in zip(self.branch_starts[::-1], self.branches[::-1]):
diff --git a/Cython/Compiler/CythonScope.py b/Cython/Compiler/CythonScope.py
index 54dc38666..deffaffb4 100644
--- a/Cython/Compiler/CythonScope.py
+++ b/Cython/Compiler/CythonScope.py
@@ -15,9 +15,9 @@ class CythonScope(ModuleScope):
pos=None,
defining = 1,
cname='<error>')
-
+
def lookup_type(self, name):
- # This function should go away when types are all first-level objects.
+ # This function should go away when types are all first-level objects.
type = parse_basic_type(name)
if type:
return type
@@ -32,12 +32,12 @@ def create_utility_scope(context):
utility_scope = ModuleScope(u'utility', None, context)
# These are used to optimize isinstance in FinalOptimizePhase
- type_object = utility_scope.declare_typedef('PyTypeObject',
- base_type = c_void_type,
+ type_object = utility_scope.declare_typedef('PyTypeObject',
+ base_type = c_void_type,
pos = None,
cname = 'PyTypeObject')
type_object.is_void = True
-
+
utility_scope.declare_cfunction(
'PyObject_TypeCheck',
CFuncType(c_bint_type, [CFuncTypeArg("o", py_object_type, None),
@@ -45,5 +45,5 @@ def create_utility_scope(context):
pos = None,
defining = 1,
cname = 'PyObject_TypeCheck')
-
+
return utility_scope
diff --git a/Cython/Compiler/Errors.py b/Cython/Compiler/Errors.py
index 0e9954b86..401125bbe 100644
--- a/Cython/Compiler/Errors.py
+++ b/Cython/Compiler/Errors.py
@@ -44,7 +44,7 @@ def format_error(message, position):
return message
class CompileError(PyrexError):
-
+
def __init__(self, position = None, message = u""):
self.position = position
self.message_only = message
@@ -54,7 +54,7 @@ class CompileError(PyrexError):
Exception.__init__(self, format_error(message, position))
class CompileWarning(PyrexWarning):
-
+
def __init__(self, position = None, message = ""):
self.position = position
# Deprecated and withdrawn in 2.6:
@@ -63,7 +63,7 @@ class CompileWarning(PyrexWarning):
class InternalError(Exception):
# If this is ever raised, there is a bug in the compiler.
-
+
def __init__(self, message):
self.message_only = message
Exception.__init__(self, u"Internal compiler error: %s"
@@ -71,7 +71,7 @@ class InternalError(Exception):
class AbortError(Exception):
# Throw this to stop the compilation immediately.
-
+
def __init__(self, message):
self.message_only = message
Exception.__init__(self, u"Abort error: %s" % message)
@@ -98,7 +98,7 @@ class CompilerCrash(CompileError):
CompileError.__init__(self, pos, message)
class NoElementTreeInstalledException(PyrexError):
- """raised when the user enabled options.gdb_debug but no ElementTree
+ """raised when the user enabled options.gdb_debug but no ElementTree
implementation was found
"""
@@ -155,7 +155,7 @@ def error(position, message):
#print "Errors.error:", repr(position), repr(message) ###
if position is None:
raise InternalError(message)
- err = CompileError(position, message)
+ err = CompileError(position, message)
if debug_exception_on_error: raise Exception(err) # debug
report_error(err)
return err
@@ -198,7 +198,7 @@ def warn_once(position, message, level=0):
return warn
-# These functions can be used to momentarily suppress errors.
+# These functions can be used to momentarily suppress errors.
error_stack = []
diff --git a/Cython/Compiler/ExprNodes.py b/Cython/Compiler/ExprNodes.py
index 0e89b311f..6eb7aa6a9 100755
--- a/Cython/Compiler/ExprNodes.py
+++ b/Cython/Compiler/ExprNodes.py
@@ -70,14 +70,14 @@ class ExprNode(Node):
# result_code string Code fragment
# result_ctype string C type of result_code if different from type
# is_temp boolean Result is in a temporary variable
- # is_sequence_constructor
+ # is_sequence_constructor
# boolean Is a list or tuple constructor expression
# is_starred boolean Is a starred expression (e.g. '*a')
# saved_subexpr_nodes
# [ExprNode or [ExprNode or None] or None]
# Cached result of subexpr_nodes()
# use_managed_ref boolean use ref-counted temps/assignments/etc.
-
+
result_ctype = None
type = None
temp_code = None
@@ -110,8 +110,8 @@ class ExprNode(Node):
# 'subexprs' class attribute of ExprNodes, which should
# contain a list of the names of attributes which can
# hold sub-nodes or sequences of sub-nodes.
- #
- # The framework makes use of a number of abstract methods.
+ #
+ # The framework makes use of a number of abstract methods.
# Their responsibilities are as follows.
#
# Declaration Analysis phase
@@ -134,7 +134,7 @@ class ExprNode(Node):
#
# analyse_target_types
# Called during the Analyse Types phase to analyse
- # the LHS of an assignment or argument of a del
+ # the LHS of an assignment or argument of a del
# statement. Similar responsibilities to analyse_types.
#
# target_code
@@ -145,9 +145,9 @@ class ExprNode(Node):
# check_const
# - Check that this node and its subnodes form a
# legal constant expression. If so, do nothing,
- # otherwise call not_const.
+ # otherwise call not_const.
#
- # The default implementation of check_const
+ # The default implementation of check_const
# assumes that the expression is not constant.
#
# check_const_addr
@@ -156,7 +156,7 @@ class ExprNode(Node):
# constant. Otherwise, call addr_not_const.
#
# The default implementation of calc_const_addr
- # assumes that the expression is not a constant
+ # assumes that the expression is not a constant
# lvalue.
#
# Code Generation phase
@@ -177,8 +177,8 @@ class ExprNode(Node):
# sub-expressions.
#
# calculate_result_code
- # - Should return a C code fragment evaluating to the
- # result. This is only called when the result is not
+ # - Should return a C code fragment evaluating to the
+ # result. This is only called when the result is not
# a temporary.
#
# generate_assignment_code
@@ -196,10 +196,10 @@ class ExprNode(Node):
# - Call generate_disposal_code on all sub-expressions.
#
#
-
+
is_sequence_constructor = 0
is_attribute = 0
-
+
saved_subexpr_nodes = None
is_temp = 0
is_target = 0
@@ -215,16 +215,16 @@ class ExprNode(Node):
return self.subexprs
_get_child_attrs = __get_child_attrs
child_attrs = property(fget=_get_child_attrs)
-
+
def not_implemented(self, method_name):
print_call_chain(method_name, "not implemented") ###
raise InternalError(
"%s.%s not implemented" %
(self.__class__.__name__, method_name))
-
+
def is_lvalue(self):
return 0
-
+
def is_ephemeral(self):
# An ephemeral node is one whose result is in
# a Python temporary and we suspect there are no
@@ -245,21 +245,21 @@ class ExprNode(Node):
else:
nodes.append(item)
return nodes
-
+
def result(self):
if self.is_temp:
return self.temp_code
else:
return self.calculate_result_code()
-
+
def result_as(self, type = None):
# Return the result code cast to the specified C type.
return typecast(type, self.ctype(), self.result())
-
+
def py_result(self):
# Return the result code cast to PyObject *.
return self.result_as(py_object_type)
-
+
def ctype(self):
# Return the native C type of the result (i.e. the
# C type of the result_code expression).
@@ -295,18 +295,18 @@ class ExprNode(Node):
def compile_time_value(self, denv):
# Return value of compile-time expression, or report error.
error(self.pos, "Invalid compile-time expression")
-
+
def compile_time_value_error(self, e):
error(self.pos, "Error in compile-time expression: %s: %s" % (
e.__class__.__name__, e))
-
+
# ------------- Declaration Analysis ----------------
-
+
def analyse_target_declaration(self, env):
error(self.pos, "Cannot assign to or delete this")
-
+
# ------------- Expression Analysis ----------------
-
+
def analyse_const_expression(self, env):
# Called during the analyse_declarations phase of a
# constant expression. Analyses the expression's type,
@@ -314,25 +314,25 @@ class ExprNode(Node):
# and determines its value.
self.analyse_types(env)
return self.check_const()
-
+
def analyse_expressions(self, env):
# Convenience routine performing both the Type
- # Analysis and Temp Allocation phases for a whole
+ # Analysis and Temp Allocation phases for a whole
# expression.
self.analyse_types(env)
-
+
def analyse_target_expression(self, env, rhs):
# Convenience routine performing both the Type
# Analysis and Temp Allocation phases for the LHS of
# an assignment.
self.analyse_target_types(env)
-
+
def analyse_boolean_expression(self, env):
# Analyse expression and coerce to a boolean.
self.analyse_types(env)
bool = self.coerce_to_boolean(env)
return bool
-
+
def analyse_temp_boolean_expression(self, env):
# Analyse boolean expression and coerce result into
# a temporary. This is used when a branch is to be
@@ -345,17 +345,17 @@ class ExprNode(Node):
return self.coerce_to_boolean(env).coerce_to_simple(env)
# --------------- Type Inference -----------------
-
+
def type_dependencies(self, env):
# Returns the list of entries whose types must be determined
        # before the type of self can be inferred.
if hasattr(self, 'type') and self.type is not None:
return ()
return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ())
-
+
def infer_type(self, env):
- # Attempt to deduce the type of self.
- # Differs from analyse_types as it avoids unnecessary
+ # Attempt to deduce the type of self.
+ # Differs from analyse_types as it avoids unnecessary
# analysis of subexpressions, but can assume everything
# in self.type_dependencies() has been resolved.
if hasattr(self, 'type') and self.type is not None:
@@ -364,27 +364,27 @@ class ExprNode(Node):
return self.entry.type
else:
self.not_implemented("infer_type")
-
+
# --------------- Type Analysis ------------------
-
+
def analyse_as_module(self, env):
# If this node can be interpreted as a reference to a
# cimported module, return its scope, else None.
return None
-
+
def analyse_as_type(self, env):
# If this node can be interpreted as a reference to a
# type, return that type, else None.
return None
-
+
def analyse_as_extension_type(self, env):
# If this node can be interpreted as a reference to an
# extension type, return its type, else None.
return None
-
+
def analyse_types(self, env):
self.not_implemented("analyse_types")
-
+
def analyse_target_types(self, env):
self.analyse_types(env)
@@ -402,33 +402,33 @@ class ExprNode(Node):
def check_const(self):
self.not_const()
return False
-
+
def not_const(self):
error(self.pos, "Not allowed in a constant expression")
-
+
def check_const_addr(self):
self.addr_not_const()
return False
-
+
def addr_not_const(self):
error(self.pos, "Address is not constant")
# ----------------- Result Allocation -----------------
-
+
def result_in_temp(self):
# Return true if result is in a temporary owned by
# this node or one of its subexpressions. Overridden
# by certain nodes which can share the result of
# a subnode.
return self.is_temp
-
+
def target_code(self):
# Return code fragment for use as LHS of a C assignment.
return self.calculate_result_code()
-
+
def calculate_result_code(self):
self.not_implemented("calculate_result_code")
-
+
# def release_target_temp(self, env):
# # Release temporaries used by LHS of an assignment.
# self.release_subexpr_temps(env)
@@ -458,16 +458,16 @@ class ExprNode(Node):
self.temp_code = None
# ---------------- Code Generation -----------------
-
+
def make_owned_reference(self, code):
# If result is a pyobject, make sure we own
# a reference to it.
if self.type.is_pyobject and not self.result_in_temp():
code.put_incref(self.result(), self.ctype())
-
+
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
-
+
# Generate code to evaluate this node and
# its sub-expressions, and dispose of any
# temporary results of its sub-expressions.
@@ -486,10 +486,10 @@ class ExprNode(Node):
def generate_subexpr_evaluation_code(self, code):
for node in self.subexpr_nodes():
node.generate_evaluation_code(code)
-
+
def generate_result_code(self, code):
self.not_implemented("generate_result_code")
-
+
def generate_disposal_code(self, code):
if self.is_temp:
if self.type.is_pyobject:
@@ -503,7 +503,7 @@ class ExprNode(Node):
# of all sub-expressions.
for node in self.subexpr_nodes():
node.generate_disposal_code(code)
-
+
def generate_post_assignment_code(self, code):
if self.is_temp:
if self.type.is_pyobject:
@@ -513,10 +513,10 @@ class ExprNode(Node):
def generate_assignment_code(self, rhs, code):
# Stub method for nodes which are not legal as
- # the LHS of an assignment. An error will have
+ # the LHS of an assignment. An error will have
# been reported earlier.
pass
-
+
def generate_deletion_code(self, code):
# Stub method for nodes that are not legal as
# the argument of a del statement. An error
@@ -529,7 +529,7 @@ class ExprNode(Node):
self.release_temp_result(code)
else:
self.free_subexpr_temps(code)
-
+
def free_subexpr_temps(self, code):
for sub in self.subexpr_nodes():
sub.free_temps(code)
@@ -538,13 +538,13 @@ class ExprNode(Node):
pass
# ---------------- Annotation ---------------------
-
+
def annotate(self, code):
for node in self.subexpr_nodes():
node.annotate(code)
-
+
# ----------------- Coercion ----------------------
-
+
def coerce_to(self, dst_type, env):
# Coerce the result so that it can be assigned to
# something of type dst_type. If processing is necessary,
@@ -571,7 +571,7 @@ class ExprNode(Node):
if dst_type.is_reference:
dst_type = dst_type.ref_base_type
-
+
if dst_type.is_pyobject:
if not src.type.is_pyobject:
if dst_type is bytes_type and src.type.is_int:
@@ -583,7 +583,7 @@ class ExprNode(Node):
src = PyTypeTestNode(src, dst_type, env)
elif src.type.is_pyobject:
src = CoerceFromPyTypeNode(dst_type, src, env)
- elif (dst_type.is_complex
+ elif (dst_type.is_complex
and src_type != dst_type
and dst_type.assignable_from(src_type)):
src = CoerceToComplexNode(src, dst_type, env)
@@ -628,31 +628,31 @@ class ExprNode(Node):
return CoerceToBooleanNode(self, env)
else:
if not (type.is_int or type.is_enum or type.is_error):
- error(self.pos,
+ error(self.pos,
"Type '%s' not acceptable as a boolean" % type)
return self
-
+
def coerce_to_integer(self, env):
# If not already some C integer type, coerce to longint.
if self.type.is_int:
return self
else:
return self.coerce_to(PyrexTypes.c_long_type, env)
-
+
def coerce_to_temp(self, env):
# Ensure that the result is in a temporary.
if self.result_in_temp():
return self
else:
return CoerceToTempNode(self, env)
-
+
def coerce_to_simple(self, env):
# Ensure that the result is simple (see is_simple).
if self.is_simple():
return self
else:
return self.coerce_to_temp(env)
-
+
def is_simple(self):
# A node is simple if its result is something that can
# be referred to without performing any operations, e.g.
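
The coercion helpers above all follow the same shape: return self when no work is needed, otherwise wrap the node in a coercion node. A toy model of that cascade (the class and the tuple wrapper are invented here):

    class ToyNode:
        def __init__(self, simple=False, in_temp=False):
            self.simple, self.in_temp = simple, in_temp

        def is_simple(self):
            return self.simple

        def result_in_temp(self):
            return self.in_temp

    def coerce_to_temp(node):
        # Already a temporary: nothing to do; otherwise wrap it.
        return node if node.result_in_temp() else ("CoerceToTempNode", node)

    def coerce_to_simple(node):
        # "Simple" results are usable as-is; everything else goes via a temp.
        return node if node.is_simple() else coerce_to_temp(node)

    n = ToyNode(simple=True)
    assert coerce_to_simple(n) is n
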
@@ -682,7 +682,7 @@ class ExprNode(Node):
class AtomicExprNode(ExprNode):
# Abstract base class for expression nodes which have
# no sub-expressions.
-
+
subexprs = []
# Override to optimize -- we know we have no children
@@ -693,10 +693,10 @@ class AtomicExprNode(ExprNode):
class PyConstNode(AtomicExprNode):
# Abstract base class for constant Python values.
-
+
is_literal = 1
type = py_object_type
-
+
def is_simple(self):
return 1
@@ -705,7 +705,7 @@ class PyConstNode(AtomicExprNode):
def analyse_types(self, env):
pass
-
+
def calculate_result_code(self):
return self.value
@@ -715,11 +715,11 @@ class PyConstNode(AtomicExprNode):
class NoneNode(PyConstNode):
# The constant value None
-
+
value = "Py_None"
constant_result = None
-
+
nogil_check = None
def compile_time_value(self, denv):
@@ -731,7 +731,7 @@ class NoneNode(PyConstNode):
class EllipsisNode(PyConstNode):
# '...' in a subscript list.
-
+
value = "Py_Ellipsis"
constant_result = Ellipsis
@@ -744,7 +744,7 @@ class ConstNode(AtomicExprNode):
# Abstract base type for literal constant nodes.
#
# value string C code fragment
-
+
is_literal = 1
nogil_check = None
@@ -756,10 +756,10 @@ class ConstNode(AtomicExprNode):
def analyse_types(self, env):
pass # Types are held in class variables
-
+
def check_const(self):
return True
-
+
def get_constant_c_result_code(self):
return self.calculate_result_code()
@@ -779,7 +779,7 @@ class BoolNode(ConstNode):
def compile_time_value(self, denv):
return self.value
-
+
def calculate_result_code(self):
return str(int(self.value))
@@ -798,10 +798,10 @@ class CharNode(ConstNode):
def calculate_constant_result(self):
self.constant_result = ord(self.value)
-
+
def compile_time_value(self, denv):
return ord(self.value)
-
+
def calculate_result_code(self):
return "'%s'" % StringEncoding.escape_char(self.value)
@@ -891,7 +891,7 @@ class IntNode(ConstNode):
self.result_code = code.get_py_num(plain_integer_string, self.longness)
else:
self.result_code = self.get_constant_c_result_code()
-
+
def get_constant_c_result_code(self):
return self.value_as_c_integer_string() + self.unsigned + self.longness
@@ -928,7 +928,7 @@ class FloatNode(ConstNode):
def compile_time_value(self, denv):
return float(self.value)
-
+
def calculate_result_code(self):
strval = self.value
assert isinstance(strval, (str, unicode))
@@ -955,7 +955,7 @@ class BytesNode(ConstNode):
def analyse_as_type(self, env):
type = PyrexTypes.parse_basic_type(self.value)
- if type is not None:
+ if type is not None:
return type
from TreeFragment import TreeFragment
pos = (self.pos[0], self.pos[1], self.pos[2]-7)
@@ -1019,7 +1019,7 @@ class BytesNode(ConstNode):
def get_constant_c_result_code(self):
return None # FIXME
-
+
def calculate_result_code(self):
return self.result_code
@@ -1077,7 +1077,7 @@ class UnicodeNode(PyConstNode):
def calculate_result_code(self):
return self.result_code
-
+
def compile_time_value(self, env):
return self.value
@@ -1126,7 +1126,7 @@ class StringNode(PyConstNode):
def calculate_result_code(self):
return self.result_code
-
+
def compile_time_value(self, env):
return self.value
@@ -1146,10 +1146,10 @@ class LongNode(AtomicExprNode):
def calculate_constant_result(self):
self.constant_result = Utils.str_to_number(self.value)
-
+
def compile_time_value(self, denv):
return Utils.str_to_number(self.value)
-
+
def analyse_types(self, env):
self.is_temp = 1
@@ -1171,15 +1171,15 @@ class ImagNode(AtomicExprNode):
# Imaginary number literal
#
# value float imaginary part
-
+
type = PyrexTypes.c_double_complex_type
def calculate_constant_result(self):
self.constant_result = complex(0.0, self.value)
-
+
def compile_time_value(self, denv):
return complex(0.0, self.value)
-
+
def analyse_types(self, env):
self.type.create_declaration_utility_code(env)
@@ -1214,16 +1214,16 @@ class ImagNode(AtomicExprNode):
float(self.value),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
class NewExprNode(AtomicExprNode):
# C++ new statement
#
# cppclass node c++ class to create
-
+
type = None
-
+
def infer_type(self, env):
type = self.cppclass.analyse_as_type(env)
if type is None or not type.is_cpp_class:
@@ -1241,7 +1241,7 @@ class NewExprNode(AtomicExprNode):
self.entry = constructor
self.type = constructor.type
return self.type
-
+
def analyse_types(self, env):
if self.type is None:
self.infer_type(env)
@@ -1251,7 +1251,7 @@ class NewExprNode(AtomicExprNode):
def generate_result_code(self, code):
pass
-
+
def calculate_result_code(self):
return "new " + self.class_type.declaration_code("")
@@ -1262,7 +1262,7 @@ class NameNode(AtomicExprNode):
# name string Python name of the variable
# entry Entry Symbol table entry
# type_entry Entry For extension type names, the original type entry
-
+
is_name = True
is_cython_module = False
cython_attribute = None
@@ -1275,12 +1275,12 @@ class NameNode(AtomicExprNode):
node = NameNode(pos)
node.analyse_types(env, entry=entry)
return node
-
+
def as_cython_attribute(self):
return self.cython_attribute
-
+
create_analysed_rvalue = staticmethod(create_analysed_rvalue)
-
+
def type_dependencies(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
@@ -1288,7 +1288,7 @@ class NameNode(AtomicExprNode):
return (self.entry,)
else:
return ()
-
+
def infer_type(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
@@ -1301,7 +1301,7 @@ class NameNode(AtomicExprNode):
return type_type
else:
return self.entry.type
-
+
def compile_time_value(self, denv):
try:
return denv.lookup(self.name)
@@ -1312,7 +1312,7 @@ class NameNode(AtomicExprNode):
if not self.entry or self.entry.type.is_pyobject:
return None
return self.entry.cname
-
+
def coerce_to(self, dst_type, env):
# If coercing to a generic pyobject and this is a builtin
# C function with a Python equivalent, manufacture a NameNode
@@ -1330,7 +1330,7 @@ class NameNode(AtomicExprNode):
node.analyse_rvalue_entry(env)
return node
return super(NameNode, self).coerce_to(dst_type, env)
-
+
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module.
# Returns the module scope, or None.
@@ -1340,7 +1340,7 @@ class NameNode(AtomicExprNode):
if entry and entry.as_module:
return entry.as_module
return None
-
+
def analyse_as_type(self, env):
if self.cython_attribute:
type = PyrexTypes.parse_basic_type(self.cython_attribute)
@@ -1355,7 +1355,7 @@ class NameNode(AtomicExprNode):
return entry.type
else:
return None
-
+
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type.
# Returns the extension type, or None.
@@ -1366,7 +1366,7 @@ class NameNode(AtomicExprNode):
return entry.type
else:
return None
-
+
def analyse_target_declaration(self, env):
if not self.entry:
self.entry = env.lookup_here(self.name)
@@ -1382,7 +1382,7 @@ class NameNode(AtomicExprNode):
env.control_flow.set_state(self.pos, (self.name, 'source'), 'assignment')
if self.entry.is_declared_generic:
self.result_ctype = py_object_type
-
+
def analyse_types(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
@@ -1400,7 +1400,7 @@ class NameNode(AtomicExprNode):
if entry.utility_code:
env.use_utility_code(entry.utility_code)
self.analyse_rvalue_entry(env)
-
+
def analyse_target_types(self, env):
self.analyse_entry(env)
if not self.is_lvalue():
@@ -1411,7 +1411,7 @@ class NameNode(AtomicExprNode):
if self.entry.type.is_buffer:
import Buffer
Buffer.used_buffer_aux_vars(self.entry)
-
+
def analyse_rvalue_entry(self, env):
#print "NameNode.analyse_rvalue_entry:", self.name ###
#print "Entry:", self.entry.__dict__ ###
@@ -1452,29 +1452,29 @@ class NameNode(AtomicExprNode):
entry = self.entry
if entry.is_type and entry.type.is_extension_type:
self.type_entry = entry
- if not (entry.is_const or entry.is_variable
+ if not (entry.is_const or entry.is_variable
or entry.is_builtin or entry.is_cfunction
or entry.is_cpp_class):
if self.entry.as_variable:
self.entry = self.entry.as_variable
else:
- error(self.pos,
+ error(self.pos,
"'%s' is not a constant, variable or function identifier" % self.name)
def is_simple(self):
# If it's not a C variable, it'll be in a temp.
return 1
-
+
def calculate_target_results(self, env):
pass
-
+
def check_const(self):
entry = self.entry
if entry is not None and not (entry.is_const or entry.is_cfunction or entry.is_builtin):
self.not_const()
return False
return True
-
+
def check_const_addr(self):
entry = self.entry
if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin):
@@ -1486,18 +1486,18 @@ class NameNode(AtomicExprNode):
return self.entry.is_variable and \
not self.entry.type.is_array and \
not self.entry.is_readonly
-
+
def is_ephemeral(self):
# Name nodes are never ephemeral, even if the
# result is in a temporary.
return 0
-
+
def calculate_result_code(self):
entry = self.entry
if not entry:
return "<error>" # There was an error earlier
return entry.cname
-
+
def generate_result_code(self, code):
assert hasattr(self, 'entry')
entry = self.entry
@@ -1519,7 +1519,7 @@ class NameNode(AtomicExprNode):
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
elif entry.is_pyglobal or entry.is_builtin:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
@@ -1531,11 +1531,11 @@ class NameNode(AtomicExprNode):
code.putln(
'%s = __Pyx_GetName(%s, %s); %s' % (
self.result(),
- namespace,
+ namespace,
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
elif entry.is_local and False:
# control flow not good enough yet
assigned = entry.scope.control_flow.get_state((entry.name, 'initialized'), self.pos)
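
A few lines up, module globals and builtins are fetched with a __Pyx_GetName lookup followed by an error check. Purely as a formatting illustration (the error-handling text below is an approximation of what error_goto_if_null produces):

    def emit_getname(result, namespace, interned_name):
        # Rough shape of the generated C: a lookup, then a NULL check that
        # jumps to the function's error label.
        return ('%s = __Pyx_GetName(%s, %s); '
                'if (unlikely(!%s)) goto __pyx_L1_error;') % (
                    result, namespace, interned_name, result)

    print(emit_getname("__pyx_t_1", "__pyx_m", "__pyx_n_s__foo"))
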
@@ -1555,7 +1555,7 @@ class NameNode(AtomicExprNode):
if (self.entry.type.is_ptr and isinstance(rhs, ListNode)
and not self.lhs_of_first_assignment):
error(self.pos, "Literal list must be assigned to pointer at time of declaration")
-
+
        # is_pyglobal seems to be True for module-level globals only.
# We use this to access class->tp_dict if necessary.
if entry.is_pyglobal:
@@ -1651,11 +1651,11 @@ class NameNode(AtomicExprNode):
Buffer.put_assign_to_buffer(self.result(), rhstmp, buffer_aux, self.entry.type,
is_initialized=not self.lhs_of_first_assignment,
pos=self.pos, code=code)
-
+
if not pretty_rhs:
code.putln("%s = 0;" % rhstmp)
code.funcstate.release_temp(rhstmp)
-
+
def generate_deletion_code(self, code):
if self.entry is None:
return # There was an error earlier
@@ -1669,11 +1669,11 @@ class NameNode(AtomicExprNode):
namespace,
self.entry.name))
else:
- code.put_error_if_neg(self.pos,
+ code.put_error_if_neg(self.pos,
'__Pyx_DelAttrString(%s, "%s")' % (
Naming.module_cname,
self.entry.name))
-
+
def annotate(self, code):
if hasattr(self, 'is_called') and self.is_called:
pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1)
@@ -1681,16 +1681,16 @@ class NameNode(AtomicExprNode):
code.annotate(pos, AnnotationItem('py_call', 'python function', size=len(self.name)))
else:
code.annotate(pos, AnnotationItem('c_call', 'c function', size=len(self.name)))
-
+
class BackquoteNode(ExprNode):
# `expr`
#
# arg ExprNode
-
+
type = py_object_type
-
+
subexprs = ['arg']
-
+
def analyse_types(self, env):
self.arg.analyse_types(env)
self.arg = self.arg.coerce_to_pyobject(env)
@@ -1708,21 +1708,21 @@ class BackquoteNode(ExprNode):
self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
class ImportNode(ExprNode):
# Used as part of import statement implementation.
- # Implements result =
+ # Implements result =
# __import__(module_name, globals(), None, name_list)
#
# module_name StringNode dotted name of module
# name_list ListNode or None list of names to be imported
-
+
type = py_object_type
-
+
subexprs = ['module_name', 'name_list']
-
+
def analyse_types(self, env):
self.module_name.analyse_types(env)
self.module_name = self.module_name.coerce_to_pyobject(env)
@@ -1757,11 +1757,11 @@ class IteratorNode(ExprNode):
# Implements result = iter(sequence)
#
# sequence ExprNode
-
+
type = py_object_type
-
+
subexprs = ['sequence']
-
+
def analyse_types(self, env):
self.sequence.analyse_types(env)
if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
@@ -1822,16 +1822,16 @@ class NextNode(AtomicExprNode):
# The iterator is not owned by this node.
#
# iterator ExprNode
-
+
type = py_object_type
-
+
def __init__(self, iterator, env):
self.pos = iterator.pos
self.iterator = iterator
if iterator.type.is_ptr or iterator.type.is_array:
self.type = iterator.type.base_type
self.is_temp = 1
-
+
def generate_result_code(self, code):
sequence_type = self.iterator.sequence.type
if sequence_type is list_type:
@@ -1884,15 +1884,15 @@ class ExcValueNode(AtomicExprNode):
# Node created during analyse_types phase
# of an ExceptClauseNode to fetch the current
# exception value.
-
+
type = py_object_type
-
+
def __init__(self, pos, env):
ExprNode.__init__(self, pos)
def set_var(self, var):
self.var = var
-
+
def calculate_result_code(self):
return self.var
@@ -1913,17 +1913,17 @@ class TempNode(ExprNode):
# the regular cycle.
subexprs = []
-
+
def __init__(self, pos, type, env):
ExprNode.__init__(self, pos)
self.type = type
if type.is_pyobject:
self.result_ctype = py_object_type
self.is_temp = 1
-
+
def analyse_types(self, env):
return self.type
-
+
def generate_result_code(self, code):
pass
@@ -1944,19 +1944,19 @@ class TempNode(ExprNode):
# Do not participate in normal temp alloc/dealloc:
def allocate_temp_result(self, code):
pass
-
+
def release_temp_result(self, code):
pass
class PyTempNode(TempNode):
# TempNode holding a Python value.
-
+
def __init__(self, pos, env):
TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
class RawCNameExprNode(ExprNode):
subexprs = []
-
+
def __init__(self, pos, type=None):
self.pos = pos
self.type = type
@@ -1991,7 +1991,7 @@ class IndexNode(ExprNode):
# indices is used on buffer access, index on non-buffer access.
# The former contains a clean list of index parameters, the
# latter whatever Python object is needed for index access.
-
+
subexprs = ['base', 'index', 'indices']
indices = None
@@ -2010,13 +2010,13 @@ class IndexNode(ExprNode):
return base[index]
except Exception, e:
self.compile_time_value_error(e)
-
+
def is_ephemeral(self):
return self.base.is_ephemeral()
-
+
def analyse_target_declaration(self, env):
pass
-
+
def analyse_as_type(self, env):
base_type = self.base.analyse_as_type(env)
if base_type and not base_type.is_pyobject:
@@ -2027,23 +2027,23 @@ class IndexNode(ExprNode):
template_values = [self.index]
import Nodes
type_node = Nodes.TemplatedTypeNode(
- pos = self.pos,
- positional_args = template_values,
+ pos = self.pos,
+ positional_args = template_values,
keyword_args = None)
return type_node.analyse(env, base_type = base_type)
else:
return PyrexTypes.CArrayType(base_type, int(self.index.compile_time_value(env)))
return None
-
+
def type_dependencies(self, env):
return self.base.type_dependencies(env)
-
+
def infer_type(self, env):
base_type = self.base.infer_type(env)
if isinstance(self.index, SliceNode):
# slicing!
if base_type.is_string:
- # sliced C strings must coerce to Python
+ # sliced C strings must coerce to Python
return bytes_type
elif base_type in (unicode_type, bytes_type, str_type, list_type, tuple_type):
# slicing these returns the same type
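
The infer_type() branch above amounts to a small lookup rule for slicing. An illustrative standalone version (the string type names stand in for the real PyrexTypes objects):

    def infer_slice_result_type(base_type):
        if base_type == "c string":
            # Sliced C strings must coerce to a Python bytes object.
            return "bytes"
        if base_type in ("unicode", "bytes", "str", "list", "tuple"):
            # Slicing these builtin types returns the same type.
            return base_type
        return "object"

    assert infer_slice_result_type("c string") == "bytes"
    assert infer_slice_result_type("list") == "list"
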
@@ -2080,10 +2080,10 @@ class IndexNode(ExprNode):
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
-
+
def analyse_types(self, env):
self.analyse_base_and_index_types(env, getting = 1)
-
+
def analyse_target_types(self, env):
self.analyse_base_and_index_types(env, setting = 1)
@@ -2103,7 +2103,7 @@ class IndexNode(ExprNode):
# error messages
self.type = PyrexTypes.error_type
return
-
+
is_slice = isinstance(self.index, SliceNode)
# Potentially overflowing index value.
if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value):
@@ -2235,7 +2235,7 @@ class IndexNode(ExprNode):
def check_const_addr(self):
return self.base.check_const_addr() and self.index.check_const()
-
+
def is_lvalue(self):
return 1
@@ -2253,7 +2253,7 @@ class IndexNode(ExprNode):
else:
return "(%s[%s])" % (
self.base.result(), self.index.result())
-
+
def extra_index_params(self):
if self.index.type.is_int:
if self.original_index_type.signed:
@@ -2271,7 +2271,7 @@ class IndexNode(ExprNode):
else:
for i in self.indices:
i.generate_evaluation_code(code)
-
+
def generate_subexpr_disposal_code(self, code):
self.base.generate_disposal_code(code)
if not self.indices:
@@ -2339,7 +2339,7 @@ class IndexNode(ExprNode):
self.extra_index_params(),
self.result(),
code.error_goto(self.pos)))
-
+
def generate_setitem_code(self, value_code, code):
if self.index.type.is_int:
function = "__Pyx_SetItemInt"
@@ -2350,12 +2350,12 @@ class IndexNode(ExprNode):
if self.base.type is dict_type:
function = "PyDict_SetItem"
            # It would seem that we could specialize lists/tuples, but that
- # shouldn't happen here.
- # Both PyList_SetItem PyTuple_SetItem and a Py_ssize_t as input,
- # not a PyObject*, and bad conversion here would give the wrong
- # exception. Also, tuples are supposed to be immutable, and raise
- # TypeErrors when trying to set their entries (PyTuple_SetItem
- # is for creating new tuples from).
+            # shouldn't happen here.
+            # Both PyList_SetItem and PyTuple_SetItem take a Py_ssize_t index,
+            # not a PyObject*, and a bad conversion here would raise the wrong
+            # exception. Also, tuples are supposed to be immutable and raise
+            # TypeErrors when trying to set their entries (PyTuple_SetItem is
+            # only meant for filling in newly created tuples).
else:
function = "PyObject_SetItem"
code.putln(
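
generate_setitem_code() above picks the most specific C-API call available. Reduced to the decision alone (the helper names are the ones visible in the diff):

    def choose_setitem_function(index_is_c_int, base_is_dict):
        if index_is_c_int:
            return "__Pyx_SetItemInt"      # utility wrapper for integer indices
        if base_is_dict:
            return "PyDict_SetItem"        # dict base: call the dict API directly
        return "PyObject_SetItem"          # generic fallback

    assert choose_setitem_function(False, True) == "PyDict_SetItem"
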
@@ -2385,7 +2385,7 @@ class IndexNode(ExprNode):
code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
code.put_giveref("*%s" % ptr)
code.funcstate.release_temp(ptr)
- else:
+ else:
# Simple case
code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
@@ -2403,7 +2403,7 @@ class IndexNode(ExprNode):
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
-
+
def generate_deletion_code(self, code):
self.generate_subexpr_evaluation_code(code)
#if self.type.is_pyobject:
@@ -2455,7 +2455,7 @@ class SliceIndexNode(ExprNode):
# base ExprNode
# start ExprNode or None
# stop ExprNode or None
-
+
subexprs = ['base', 'start', 'stop']
def infer_type(self, env):
@@ -2485,10 +2485,10 @@ class SliceIndexNode(ExprNode):
return base[start:stop]
except Exception, e:
self.compile_time_value_error(e)
-
+
def analyse_target_declaration(self, env):
pass
-
+
def analyse_target_types(self, env):
self.analyse_types(env)
# when assigning, we must accept any Python type
@@ -2558,11 +2558,11 @@ class SliceIndexNode(ExprNode):
self.stop_code(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
def generate_assignment_code(self, rhs, code):
self.generate_subexpr_evaluation_code(code)
if self.type.is_pyobject:
- code.put_error_if_neg(self.pos,
+ code.put_error_if_neg(self.pos,
"__Pyx_PySequence_SetSlice(%s, %s, %s, %s)" % (
self.base.py_result(),
self.start_code(),
@@ -2653,13 +2653,13 @@ class SliceIndexNode(ExprNode):
target_size, check))
code.putln(code.error_goto(self.pos))
code.putln("}")
-
+
def start_code(self):
if self.start:
return self.start.result()
else:
return "0"
-
+
def stop_code(self):
if self.stop:
return self.stop.result()
@@ -2667,11 +2667,11 @@ class SliceIndexNode(ExprNode):
return self.base.type.size
else:
return "PY_SSIZE_T_MAX"
-
+
def calculate_result_code(self):
# self.result() is not used, but this method must exist
return "<unused>"
-
+
class SliceNode(ExprNode):
# start:stop:step in subscript list
@@ -2679,7 +2679,7 @@ class SliceNode(ExprNode):
# start ExprNode
# stop ExprNode
# step ExprNode
-
+
type = py_object_type
is_temp = 1
@@ -2705,7 +2705,7 @@ class SliceNode(ExprNode):
self.compile_time_value_error(e)
subexprs = ['start', 'stop', 'step']
-
+
def analyse_types(self, env):
self.start.analyse_types(env)
self.stop.analyse_types(env)
@@ -2720,8 +2720,8 @@ class SliceNode(ExprNode):
code.putln(
"%s = PySlice_New(%s, %s, %s); %s" % (
self.result(),
- self.start.py_result(),
- self.stop.py_result(),
+ self.start.py_result(),
+ self.stop.py_result(),
self.step.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
@@ -2760,7 +2760,7 @@ class CallNode(ExprNode):
self.function.set_cname(type.declaration_code(""))
self.analyse_c_function_call(env)
return True
-
+
def is_lvalue(self):
return self.type.is_reference
@@ -2785,9 +2785,9 @@ class SimpleCallNode(CallNode):
# wrapper_call bool used internally
# has_optional_args bool used internally
# nogil bool used internally
-
+
subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']
-
+
self = None
coerced_self = None
arg_tuple = None
@@ -2795,7 +2795,7 @@ class SimpleCallNode(CallNode):
has_optional_args = False
nogil = False
analysed = False
-
+
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
args = [arg.compile_time_value(denv) for arg in self.args]
@@ -2803,12 +2803,12 @@ class SimpleCallNode(CallNode):
return function(*args)
except Exception, e:
self.compile_time_value_error(e)
-
+
def type_dependencies(self, env):
        # TODO: Update when Danilo's C++ code is merged in, to handle
        # the case of function overloading.
return self.function.type_dependencies(env)
-
+
def infer_type(self, env):
function = self.function
func_type = function.infer_type(env)
@@ -2903,7 +2903,7 @@ class SimpleCallNode(CallNode):
# Insert coerced 'self' argument into argument list.
self.args.insert(0, self.coerced_self)
self.analyse_c_function_call(env)
-
+
def function_type(self):
# Return the type of the function being called, coercing a function
# pointer to a function if necessary.
@@ -2911,7 +2911,7 @@ class SimpleCallNode(CallNode):
if func_type.is_ptr:
func_type = func_type.base_type
return func_type
-
+
def analyse_c_function_call(self, env):
if self.function.type is error_type:
self.type = error_type
@@ -2955,7 +2955,7 @@ class SimpleCallNode(CallNode):
self.args[i] = self.args[i].coerce_to(formal_type, env)
for i in range(max_nargs, actual_nargs):
if self.args[i].type.is_pyobject:
- error(self.args[i].pos,
+ error(self.args[i].pos,
"Python object cannot be passed as a varargs parameter")
# Calc result type and code fragment
if isinstance(self.function, NewExprNode):
@@ -2981,7 +2981,7 @@ class SimpleCallNode(CallNode):
def calculate_result_code(self):
return self.c_call_code()
-
+
def c_call_code(self):
func_type = self.function_type()
if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
@@ -2995,23 +2995,23 @@ class SimpleCallNode(CallNode):
for formal_arg, actual_arg in args[:expected_nargs]:
arg_code = actual_arg.result_as(formal_arg.type)
arg_list_code.append(arg_code)
-
+
if func_type.is_overridable:
arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))
-
+
if func_type.optional_arg_count:
if expected_nargs == actual_nargs:
optional_args = 'NULL'
else:
optional_args = "&%s" % self.opt_arg_struct
arg_list_code.append(optional_args)
-
+
for actual_arg in self.args[len(formal_args):]:
arg_list_code.append(actual_arg.result())
result = "%s(%s)" % (self.function.result(),
', '.join(arg_list_code))
return result
-
+
def generate_result_code(self, code):
func_type = self.function_type()
if func_type.is_pyobject:
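
c_call_code() above assembles several kinds of arguments in a fixed order. The sketch below mirrors that order; the parameter names are invented and the NULL-vs-struct choice for optional arguments is simplified away.

    def build_c_call(func_code, formal_arg_codes, vararg_codes=(),
                     overridable=False, opt_arg_struct=None):
        args = list(formal_arg_codes)
        if overridable:
            args.append("0")                       # "skip Python dispatch" flag
        if opt_arg_struct is not None:
            args.append("&%s" % opt_arg_struct)    # pointer to optional-args struct
        args.extend(vararg_codes)                  # trailing varargs, if any
        return "%s(%s)" % (func_code, ", ".join(args))

    assert build_c_call("__pyx_f_mod_spam", ["1", "2.0"]) == "__pyx_f_mod_spam(1, 2.0)"
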
@@ -3050,7 +3050,7 @@ class SimpleCallNode(CallNode):
if exc_check:
if self.nogil:
exc_checks.append("__Pyx_ErrOccurredWithGIL()")
- else:
+ else:
exc_checks.append("PyErr_Occurred()")
if self.is_temp or exc_checks:
rhs = self.c_call_code()
@@ -3140,9 +3140,9 @@ class GeneralCallNode(CallNode):
# positional_args ExprNode Tuple of positional arguments
# keyword_args ExprNode or None Dict of keyword arguments
# starstar_arg ExprNode or None Dict of extra keyword args
-
+
type = py_object_type
-
+
subexprs = ['function', 'positional_args', 'keyword_args', 'starstar_arg']
nogil_check = Node.gil_error
@@ -3157,7 +3157,7 @@ class GeneralCallNode(CallNode):
return function(*positional_args, **keyword_args)
except Exception, e:
self.compile_time_value_error(e)
-
+
def explicit_args_kwds(self):
if self.starstar_arg or not isinstance(self.positional_args, TupleNode):
raise CompileError(self.pos,
@@ -3196,14 +3196,14 @@ class GeneralCallNode(CallNode):
else:
self.type = py_object_type
self.is_temp = 1
-
+
def generate_result_code(self, code):
if self.type.is_error: return
kwargs_call_function = "PyEval_CallObjectWithKeywords"
if self.keyword_args and self.starstar_arg:
- code.put_error_if_neg(self.pos,
+ code.put_error_if_neg(self.pos,
"PyDict_Update(%s, %s)" % (
- self.keyword_args.py_result(),
+ self.keyword_args.py_result(),
self.starstar_arg.py_result()))
keyword_code = self.keyword_args.py_result()
elif self.keyword_args:
@@ -3239,12 +3239,12 @@ class AsTupleNode(ExprNode):
# the * argument of a function call.
#
# arg ExprNode
-
+
subexprs = ['arg']
def calculate_constant_result(self):
self.constant_result = tuple(self.base.constant_result)
-
+
def compile_time_value(self, denv):
arg = self.arg.compile_time_value(denv)
try:
@@ -3271,7 +3271,7 @@ class AsTupleNode(ExprNode):
self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
class AttributeNode(ExprNode):
# obj.attribute
@@ -3287,10 +3287,10 @@ class AttributeNode(ExprNode):
# member string C name of struct member
# is_called boolean Function call is being done on result
# entry Entry Symbol table entry of attribute
-
+
is_attribute = 1
subexprs = ['obj']
-
+
type = PyrexTypes.error_type
entry = None
is_called = 0
@@ -3312,7 +3312,7 @@ class AttributeNode(ExprNode):
# must be a cpdef function
self.is_temp = 1
self.entry = entry.as_variable
- self.analyse_as_python_attribute(env)
+ self.analyse_as_python_attribute(env)
return self
return ExprNode.coerce_to(self, dst_type, env)
@@ -3333,10 +3333,10 @@ class AttributeNode(ExprNode):
return getattr(obj, attr)
except Exception, e:
self.compile_time_value_error(e)
-
+
def type_dependencies(self, env):
return self.obj.type_dependencies(env)
-
+
def infer_type(self, env):
if self.analyse_as_cimported_attribute(env, 0):
return self.entry.type
@@ -3348,17 +3348,17 @@ class AttributeNode(ExprNode):
def analyse_target_declaration(self, env):
pass
-
+
def analyse_target_types(self, env):
self.analyse_types(env, target = 1)
-
+
def analyse_types(self, env, target = 0):
if self.analyse_as_cimported_attribute(env, target):
return
if not target and self.analyse_as_unbound_cmethod(env):
return
self.analyse_as_ordinary_attribute(env, target)
-
+
def analyse_as_cimported_attribute(self, env, target):
# Try to interpret this as a reference to an imported
# C const, type, var or function. If successful, mutates
@@ -3373,7 +3373,7 @@ class AttributeNode(ExprNode):
self.mutate_into_name_node(env, entry, target)
return 1
return 0
-
+
def analyse_as_unbound_cmethod(self, env):
# Try to interpret this as a reference to an unbound
# C method of an extension type. If successful, mutates
@@ -3394,7 +3394,7 @@ class AttributeNode(ExprNode):
self.mutate_into_name_node(env, ubcm_entry, None)
return 1
return 0
-
+
def analyse_as_type(self, env):
module_scope = self.obj.analyse_as_module(env)
if module_scope:
@@ -3404,7 +3404,7 @@ class AttributeNode(ExprNode):
if base_type and hasattr(base_type, 'scope'):
return base_type.scope.lookup_type(self.attribute)
return None
-
+
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type
# in a cimported module. Returns the extension type, or None.
@@ -3414,7 +3414,7 @@ class AttributeNode(ExprNode):
if entry and entry.is_type and entry.type.is_extension_type:
return entry.type
return None
-
+
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module
# in another cimported module. Returns the module scope, or None.
@@ -3424,7 +3424,7 @@ class AttributeNode(ExprNode):
if entry and entry.as_module:
return entry.as_module
return None
-
+
def mutate_into_name_node(self, env, entry, target):
# Mutate this node into a NameNode and complete the
# analyse_types phase.
@@ -3437,7 +3437,7 @@ class AttributeNode(ExprNode):
NameNode.analyse_target_types(self, env)
else:
NameNode.analyse_rvalue_entry(self, env)
-
+
def analyse_as_ordinary_attribute(self, env, target):
self.obj.analyse_types(env)
self.analyse_attribute(env)
@@ -3479,8 +3479,8 @@ class AttributeNode(ExprNode):
if entry and entry.is_member:
entry = None
else:
- error(self.pos,
- "Cannot select attribute of incomplete type '%s'"
+ error(self.pos,
+ "Cannot select attribute of incomplete type '%s'"
% obj_type)
self.type = PyrexTypes.error_type
return
@@ -3499,7 +3499,7 @@ class AttributeNode(ExprNode):
# method of an extension type, so we treat it like a Python
# attribute.
pass
- # If we get here, the base object is not a struct/union/extension
+ # If we get here, the base object is not a struct/union/extension
# type, or it is an extension type and the attribute is either not
# declared or is declared as a Python method. Treat it as a Python
# attribute reference.
@@ -3536,13 +3536,13 @@ class AttributeNode(ExprNode):
return 1
else:
return NameNode.is_lvalue(self)
-
+
def is_ephemeral(self):
if self.obj:
return self.obj.is_ephemeral()
else:
return NameNode.is_ephemeral(self)
-
+
def calculate_result_code(self):
#print "AttributeNode.calculate_result_code:", self.member ###
#print "...obj node =", self.obj, "code", self.obj.result() ###
@@ -3553,7 +3553,7 @@ class AttributeNode(ExprNode):
if self.entry and self.entry.is_cmethod:
if obj.type.is_extension_type:
return "((struct %s *)%s%s%s)->%s" % (
- obj.type.vtabstruct_cname, obj_code, self.op,
+ obj.type.vtabstruct_cname, obj_code, self.op,
obj.type.vtabslot_cname, self.member)
else:
return self.member
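
When a C method is reached through the extension type's vtable, calculate_result_code() above renders the access as a cast-and-dereference chain. As a string-formatting illustration (the example identifiers are hypothetical):

    def vtab_method_ref(vtabstruct, obj_code, op, vtabslot, member):
        # e.g. ((struct __pyx_vtabstruct_Spam *)__pyx_v_self->__pyx_vtab)->eggs
        return "((struct %s *)%s%s%s)->%s" % (vtabstruct, obj_code, op, vtabslot, member)

    print(vtab_method_ref("__pyx_vtabstruct_Spam", "__pyx_v_self", "->",
                          "__pyx_vtab", "eggs"))
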
@@ -3564,7 +3564,7 @@ class AttributeNode(ExprNode):
# accessing a field of a builtin type, need to cast better than result_as() does
obj_code = obj.type.cast_code(obj.result(), to_object_struct = True)
return "%s%s%s" % (obj_code, self.op, self.member)
-
+
def generate_result_code(self, code):
interned_attr_cname = code.intern_identifier(self.attribute)
if self.is_py_attr:
@@ -3582,12 +3582,12 @@ class AttributeNode(ExprNode):
and self.needs_none_check
and code.globalstate.directives['nonecheck']):
self.put_nonecheck(code)
-
+
def generate_assignment_code(self, rhs, code):
interned_attr_cname = code.intern_identifier(self.attribute)
self.obj.generate_evaluation_code(code)
if self.is_py_attr:
- code.put_error_if_neg(self.pos,
+ code.put_error_if_neg(self.pos,
'PyObject_SetAttr(%s, %s, %s)' % (
self.obj.py_result(),
interned_attr_cname,
@@ -3620,7 +3620,7 @@ class AttributeNode(ExprNode):
rhs.free_temps(code)
self.obj.generate_disposal_code(code)
self.obj.free_temps(code)
-
+
def generate_deletion_code(self, code):
interned_attr_cname = code.intern_identifier(self.attribute)
self.obj.generate_evaluation_code(code)
@@ -3634,7 +3634,7 @@ class AttributeNode(ExprNode):
error(self.pos, "Cannot delete C attribute of extension type")
self.obj.generate_disposal_code(code)
self.obj.free_temps(code)
-
+
def annotate(self, code):
if self.is_py_attr:
code.annotate(self.pos, AnnotationItem('py_attr', 'python attribute', size=len(self.attribute)))
@@ -3707,9 +3707,9 @@ class SequenceNode(ExprNode):
# iterator ExprNode
# unpacked_items [ExprNode] or None
# coerced_unpacked_items [ExprNode] or None
-
+
subexprs = ['args']
-
+
is_sequence_constructor = 1
unpacked_items = None
@@ -3766,7 +3766,7 @@ class SequenceNode(ExprNode):
def generate_result_code(self, code):
self.generate_operation_code(code)
-
+
def generate_assignment_code(self, rhs, code):
if self.starred_assignment:
self.generate_starred_assignment_code(rhs, code)
@@ -3788,8 +3788,8 @@ class SequenceNode(ExprNode):
tuple_check = "PyTuple_CheckExact(%s)"
code.putln(
"if (%s && likely(PyTuple_GET_SIZE(%s) == %s)) {" % (
- tuple_check % rhs.py_result(),
- rhs.py_result(),
+ tuple_check % rhs.py_result(),
+ rhs.py_result(),
len(self.args)))
code.putln("PyObject* tuple = %s;" % rhs.py_result())
for item in self.unpacked_items:
@@ -3808,7 +3808,7 @@ class SequenceNode(ExprNode):
for i in range(len(self.args)):
self.args[i].generate_assignment_code(
self.coerced_unpacked_items[i], code)
-
+
code.putln("} else {")
if rhs.type is tuple_type:
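
The generated C above guards the fast unpacking path with an exact-tuple-and-length test before falling back to generic iteration. In Python terms the guard is simply:

    def can_unpack_directly(rhs, n_targets):
        # Mirror of the C-level check: exact tuple type and matching length.
        return type(rhs) is tuple and len(rhs) == n_targets

    assert can_unpack_directly((1, 2, 3), 3)
    assert not can_unpack_directly([1, 2, 3], 3)
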
@@ -3934,7 +3934,7 @@ class SequenceNode(ExprNode):
class TupleNode(SequenceNode):
# Tuple constructor.
-
+
type = tuple_type
gil_message = "Constructing Python tuple"
@@ -3968,7 +3968,7 @@ class TupleNode(SequenceNode):
return tuple(values)
except Exception, e:
self.compile_time_value_error(e)
-
+
def generate_operation_code(self, code):
if len(self.args) == 0:
# result_code is Naming.empty_tuple
@@ -3998,7 +3998,7 @@ class TupleNode(SequenceNode):
code.put_giveref(arg.py_result())
if self.is_literal:
code.put_giveref(self.py_result())
-
+
def generate_subexpr_disposal_code(self, code):
# We call generate_post_assignment_code here instead
# of generate_disposal_code, because values were stored
@@ -4011,7 +4011,7 @@ class TupleNode(SequenceNode):
class ListNode(SequenceNode):
# List constructor.
-
+
# obj_conversion_errors [PyrexError] used internally
    # original_args         [ExprNode]       used internally
@@ -4019,10 +4019,10 @@ class ListNode(SequenceNode):
type = list_type
gil_message = "Constructing Python list"
-
+
def type_dependencies(self, env):
return ()
-
+
def infer_type(self, env):
        # TODO: Infer non-object list arrays.
return list_type
@@ -4037,7 +4037,7 @@ class ListNode(SequenceNode):
SequenceNode.analyse_types(self, env)
self.obj_conversion_errors = held_errors()
release_errors(ignore=True)
-
+
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject:
for err in self.obj_conversion_errors:
@@ -4068,11 +4068,11 @@ class ListNode(SequenceNode):
self.type = error_type
error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
return self
-
+
def release_temp(self, env):
if self.type.is_array:
- # To be valid C++, we must allocate the memory on the stack
- # manually and be sure not to reuse it for something else.
+ # To be valid C++, we must allocate the memory on the stack
+ # manually and be sure not to reuse it for something else.
pass
else:
SequenceNode.release_temp(self, env)
@@ -4245,7 +4245,7 @@ class ComprehensionNode(ScopedExprNode):
def calculate_result_code(self):
return self.target.result()
-
+
def generate_result_code(self, code):
self.generate_operation_code(code)
@@ -4263,7 +4263,7 @@ class ComprehensionAppendNode(Node):
child_attrs = ['expr']
type = PyrexTypes.c_int_type
-
+
def analyse_expressions(self, env):
self.expr.analyse_expressions(env)
if not self.expr.type.is_pyobject:
@@ -4407,7 +4407,7 @@ class SetNode(ExprNode):
subexprs = ['args']
gil_message = "Constructing Python set"
-
+
def analyse_types(self, env):
for i in range(len(self.args)):
arg = self.args[i]
@@ -4454,7 +4454,7 @@ class DictNode(ExprNode):
# key_value_pairs [DictItemNode]
#
# obj_conversion_errors [PyrexError] used internally
-
+
subexprs = ['key_value_pairs']
is_temp = 1
type = dict_type
@@ -4464,7 +4464,7 @@ class DictNode(ExprNode):
def calculate_constant_result(self):
self.constant_result = dict([
item.constant_result for item in self.key_value_pairs])
-
+
def compile_time_value(self, denv):
pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
for item in self.key_value_pairs]
@@ -4472,10 +4472,10 @@ class DictNode(ExprNode):
return dict(pairs)
except Exception, e:
self.compile_time_value_error(e)
-
+
def type_dependencies(self, env):
return ()
-
+
def infer_type(self, env):
        # TODO: Infer struct constructors.
return dict_type
@@ -4489,7 +4489,7 @@ class DictNode(ExprNode):
def may_be_none(self):
return False
-
+
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject:
self.release_errors()
@@ -4521,7 +4521,7 @@ class DictNode(ExprNode):
self.type = error_type
error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
return self
-
+
def release_errors(self):
for err in self.obj_conversion_errors:
report_error(err)
@@ -4544,7 +4544,7 @@ class DictNode(ExprNode):
for item in self.key_value_pairs:
item.generate_evaluation_code(code)
if self.type.is_pyobject:
- code.put_error_if_neg(self.pos,
+ code.put_error_if_neg(self.pos,
"PyDict_SetItem(%s, %s, %s)" % (
self.result(),
item.key.py_result(),
@@ -4556,11 +4556,11 @@ class DictNode(ExprNode):
item.value.result()))
item.generate_disposal_code(code)
item.free_temps(code)
-
+
def annotate(self, code):
for item in self.key_value_pairs:
item.annotate(code)
-
+
class DictItemNode(ExprNode):
# Represents a single item in a DictNode
#
@@ -4573,13 +4573,13 @@ class DictItemNode(ExprNode):
def calculate_constant_result(self):
self.constant_result = (
self.key.constant_result, self.value.constant_result)
-
+
def analyse_types(self, env):
self.key.analyse_types(env)
self.value.analyse_types(env)
self.key = self.key.coerce_to_pyobject(env)
self.value = self.value.coerce_to_pyobject(env)
-
+
def generate_evaluation_code(self, code):
self.key.generate_evaluation_code(code)
self.value.generate_evaluation_code(code)
@@ -4591,7 +4591,7 @@ class DictItemNode(ExprNode):
def free_temps(self, code):
self.key.free_temps(code)
self.value.free_temps(code)
-
+
def __iter__(self):
return iter([self.key, self.value])
@@ -4613,7 +4613,7 @@ class ClassNode(ExprNode, ModuleNameMixin):
# dict ExprNode Class dict (not owned by this node)
# doc ExprNode or None Doc string
# module_name EncodedString Name of defining module
-
+
subexprs = ['bases', 'doc']
def analyse_types(self, env):
@@ -4636,7 +4636,7 @@ class ClassNode(ExprNode, ModuleNameMixin):
cname = code.intern_identifier(self.name)
if self.doc:
- code.put_error_if_neg(self.pos,
+ code.put_error_if_neg(self.pos,
'PyDict_SetItemString(%s, "__doc__", %s)' % (
self.dict.py_result(),
self.doc.py_result()))
@@ -4804,9 +4804,9 @@ class BoundMethodNode(ExprNode):
#
# function ExprNode Function object
# self_object ExprNode self object
-
+
subexprs = ['function']
-
+
def analyse_types(self, env):
self.function.analyse_types(env)
self.type = py_object_type
@@ -4830,12 +4830,12 @@ class UnboundMethodNode(ExprNode):
# object from a class and a function.
#
# function ExprNode Function object
-
+
type = py_object_type
is_temp = 1
-
+
subexprs = ['function']
-
+
def analyse_types(self, env):
self.function.analyse_types(env)
@@ -4868,10 +4868,10 @@ class PyCFunctionNode(ExprNode, ModuleNameMixin):
subexprs = []
self_object = None
binding = False
-
+
type = py_object_type
is_temp = 1
-
+
def analyse_types(self, env):
if self.binding:
env.use_utility_code(binding_cfunc_utility_code)
@@ -4881,7 +4881,7 @@ class PyCFunctionNode(ExprNode, ModuleNameMixin):
def may_be_none(self):
return False
-
+
gil_message = "Constructing Python function"
def self_result_code(self):
@@ -4990,14 +4990,14 @@ class UnopNode(ExprNode):
# - Check operand type and coerce if needed.
# - Determine result type and result code fragment.
# - Allocate temporary for result if needed.
-
+
subexprs = ['operand']
infix = True
def calculate_constant_result(self):
func = compile_time_unary_operators[self.operator]
self.constant_result = func(self.operand.constant_result)
-
+
def compile_time_value(self, denv):
func = compile_time_unary_operators.get(self.operator)
if not func:
@@ -5009,7 +5009,7 @@ class UnopNode(ExprNode):
return func(operand)
except Exception, e:
self.compile_time_value_error(e)
-
+
def infer_type(self, env):
operand_type = self.operand.infer_type(env)
if operand_type.is_pyobject:
@@ -5027,10 +5027,10 @@ class UnopNode(ExprNode):
self.analyse_cpp_operation(env)
else:
self.analyse_c_operation(env)
-
+
def check_const(self):
return self.operand.check_const()
-
+
def is_py_operation(self):
return self.operand.type.is_pyobject
@@ -5041,24 +5041,24 @@ class UnopNode(ExprNode):
def is_cpp_operation(self):
type = self.operand.type
return type.is_cpp_class
-
+
def coerce_operand_to_pyobject(self, env):
self.operand = self.operand.coerce_to_pyobject(env)
-
+
def generate_result_code(self, code):
if self.operand.type.is_pyobject:
self.generate_py_operation_code(code)
-
+
def generate_py_operation_code(self, code):
function = self.py_operation_function()
code.putln(
"%s = %s(%s); %s" % (
- self.result(),
- function,
+ self.result(),
+ function,
self.operand.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
def type_error(self):
if not self.operand.type.is_error:
error(self.pos, "Invalid operand type for '%s' (%s)" %
@@ -5085,11 +5085,11 @@ class NotNode(ExprNode):
# 'not' operator
#
# operand ExprNode
-
+
type = PyrexTypes.c_bint_type
subexprs = ['operand']
-
+
def calculate_constant_result(self):
self.constant_result = not self.operand.constant_result
@@ -5102,29 +5102,29 @@ class NotNode(ExprNode):
def infer_type(self, env):
return PyrexTypes.c_bint_type
-
+
def analyse_types(self, env):
self.operand.analyse_types(env)
self.operand = self.operand.coerce_to_boolean(env)
-
+
def calculate_result_code(self):
return "(!%s)" % self.operand.result()
-
+
def generate_result_code(self, code):
pass
class UnaryPlusNode(UnopNode):
# unary '+' operator
-
+
operator = '+'
-
+
def analyse_c_operation(self, env):
self.type = self.operand.type
-
+
def py_operation_function(self):
return "PyNumber_Positive"
-
+
def calculate_result_code(self):
if self.is_cpp_operation():
return "(+%s)" % self.operand.result()
@@ -5134,9 +5134,9 @@ class UnaryPlusNode(UnopNode):
class UnaryMinusNode(UnopNode):
# unary '-' operator
-
+
operator = '-'
-
+
def analyse_c_operation(self, env):
if self.operand.type.is_numeric:
self.type = self.operand.type
@@ -5144,10 +5144,10 @@ class UnaryMinusNode(UnopNode):
self.type_error()
if self.type.is_complex:
self.infix = False
-
+
def py_operation_function(self):
return "PyNumber_Negative"
-
+
def calculate_result_code(self):
if self.infix:
return "(-%s)" % self.operand.result()
@@ -5170,7 +5170,7 @@ class TildeNode(UnopNode):
def py_operation_function(self):
return "PyNumber_Invert"
-
+
def calculate_result_code(self):
return "(~%s)" % self.operand.result()
@@ -5184,7 +5184,7 @@ class DereferenceNode(CUnopNode):
# unary * operator
operator = '*'
-
+
def analyse_c_operation(self, env):
if self.operand.type.is_ptr:
self.type = self.operand.type.base_type
@@ -5197,7 +5197,7 @@ class DereferenceNode(CUnopNode):
class DecrementIncrementNode(CUnopNode):
# unary ++/-- operator
-
+
def analyse_c_operation(self, env):
if self.operand.type.is_ptr or self.operand.type.is_numeric:
self.type = self.operand.type
@@ -5218,9 +5218,9 @@ class AmpersandNode(ExprNode):
# The C address-of operator.
#
# operand ExprNode
-
+
subexprs = ['operand']
-
+
def infer_type(self, env):
return PyrexTypes.c_ptr_type(self.operand.infer_type(env))
@@ -5234,21 +5234,21 @@ class AmpersandNode(ExprNode):
self.error("Cannot take address of Python variable")
return
self.type = PyrexTypes.c_ptr_type(argtype)
-
+
def check_const(self):
return self.operand.check_const_addr()
-
+
def error(self, mess):
error(self.pos, mess)
self.type = PyrexTypes.error_type
self.result_code = "<error>"
-
+
def calculate_result_code(self):
return "(&%s)" % self.operand.result()
def generate_result_code(self, code):
pass
-
+
unop_node_classes = {
"+": UnaryPlusNode,
@@ -5257,14 +5257,14 @@ unop_node_classes = {
}
def unop_node(pos, operator, operand):
- # Construct unnop node of appropriate class for
+    # Construct unop node of appropriate class for
# given operator.
if isinstance(operand, IntNode) and operator == '-':
return IntNode(pos = operand.pos, value = str(-Utils.str_to_number(operand.value)))
elif isinstance(operand, UnopNode) and operand.operator == operator:
warning(pos, "Python has no increment/decrement operator: %s%sx = %s(%sx) = x" % ((operator,)*4), 5)
- return unop_node_classes[operator](pos,
- operator = operator,
+ return unop_node_classes[operator](pos,
+ operator = operator,
operand = operand)
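
The special case above folds a unary minus applied to an integer literal into a new literal at compile time. The same transformation in isolation (int(..., 0) stands in for Utils.str_to_number, which also handles literal suffixes):

    def fold_unary_minus(literal_text):
        # Negate the literal's numeric value and re-render it as source text.
        return str(-int(literal_text, 0))

    assert fold_unary_minus("42") == "-42"
    assert fold_unary_minus("0x10") == "-16"
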
@@ -5277,19 +5277,19 @@ class TypecastNode(ExprNode):
#
# If used from a transform, one can if wanted specify the attribute
# "type" directly and leave base_type and declarator to None
-
+
subexprs = ['operand']
base_type = declarator = type = None
-
+
def type_dependencies(self, env):
return ()
-
+
def infer_type(self, env):
if self.type is None:
base_type = self.base_type.analyse(env)
_, self.type = self.declarator.analyse(base_type, env)
return self.type
-
+
def analyse_types(self, env):
if self.type is None:
base_type = self.base_type.analyse(env)
@@ -5317,7 +5317,7 @@ class TypecastNode(ExprNode):
if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
error(self.pos, "Python objects cannot be cast from pointers of primitive types")
else:
- # Should this be an error?
+ # Should this be an error?
warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.operand.type, self.type))
self.operand = self.operand.coerce_to_simple(env)
elif from_py and not to_py:
@@ -5345,7 +5345,7 @@ class TypecastNode(ExprNode):
# we usually do not know the result of a type cast at code
# generation time
pass
-
+
def calculate_result_code(self):
if self.type.is_complex:
operand_result = self.operand.result()
@@ -5358,15 +5358,15 @@ class TypecastNode(ExprNode):
return "%s(%s, %s)" % (
self.type.from_parts,
real_part,
- imag_part)
+ imag_part)
else:
return self.type.cast_code(self.operand.result())
-
+
def get_constant_c_result_code(self):
operand_result = self.operand.get_constant_c_result_code()
if operand_result:
return self.type.cast_code(operand_result)
-
+
def result_as(self, type):
if self.type.is_pyobject and not self.is_temp:
# Optimise away some unnecessary casting
@@ -5385,7 +5385,7 @@ class TypecastNode(ExprNode):
class SizeofNode(ExprNode):
# Abstract base class for sizeof(x) expression nodes.
-
+
type = PyrexTypes.c_size_t_type
def check_const(self):
@@ -5400,10 +5400,10 @@ class SizeofTypeNode(SizeofNode):
#
# base_type CBaseTypeNode
# declarator CDeclaratorNode
-
+
subexprs = []
arg_type = None
-
+
def analyse_types(self, env):
# we may have incorrectly interpreted a dotted name as a type rather than an attribute
# this could be better handled by more uniformly treating types as runtime-available objects
@@ -5424,7 +5424,7 @@ class SizeofTypeNode(SizeofNode):
_, arg_type = self.declarator.analyse(base_type, env)
self.arg_type = arg_type
self.check_type()
-
+
def check_type(self):
arg_type = self.arg_type
if arg_type.is_pyobject and not arg_type.is_extension_type:
@@ -5433,7 +5433,7 @@ class SizeofTypeNode(SizeofNode):
error(self.pos, "Cannot take sizeof void")
elif not arg_type.is_complete():
error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)
-
+
def calculate_result_code(self):
if self.arg_type.is_extension_type:
# the size of the pointer is boring
@@ -5442,15 +5442,15 @@ class SizeofTypeNode(SizeofNode):
else:
arg_code = self.arg_type.declaration_code("")
return "(sizeof(%s))" % arg_code
-
+
class SizeofVarNode(SizeofNode):
# C sizeof function applied to a variable
#
# operand ExprNode
-
+
subexprs = ['operand']
-
+
def analyse_types(self, env):
# We may actually be looking at a type rather than a variable...
# If we are, traditional analysis would fail...
@@ -5461,10 +5461,10 @@ class SizeofVarNode(SizeofNode):
self.check_type()
else:
self.operand.analyse_types(env)
-
+
def calculate_result_code(self):
return "(sizeof(%s))" % self.operand.result()
-
+
def generate_result_code(self, code):
pass
@@ -5473,12 +5473,12 @@ class TypeofNode(ExprNode):
#
# operand ExprNode
# literal StringNode # internal
-
+
literal = None
type = py_object_type
-
+
subexprs = ['literal'] # 'operand' will be ignored after type analysis!
-
+
def analyse_types(self, env):
self.operand.analyse_types(env)
self.literal = StringNode(
@@ -5491,7 +5491,7 @@ class TypeofNode(ExprNode):
def generate_evaluation_code(self, code):
self.literal.generate_evaluation_code(code)
-
+
def calculate_result_code(self):
return self.literal.calculate_result_code()
@@ -5549,7 +5549,7 @@ class BinopNode(ExprNode):
# - Check operand types and coerce if needed.
# - Determine result type and result code fragment.
# - Allocate temporary for result if needed.
-
+
subexprs = ['operand1', 'operand2']
inplace = False
@@ -5567,16 +5567,16 @@ class BinopNode(ExprNode):
return func(operand1, operand2)
except Exception, e:
self.compile_time_value_error(e)
-
+
def infer_type(self, env):
return self.result_type(self.operand1.infer_type(env),
self.operand2.infer_type(env))
-
+
def analyse_types(self, env):
self.operand1.analyse_types(env)
self.operand2.analyse_types(env)
self.analyse_operation(env)
-
+
def analyse_operation(self, env):
if self.is_py_operation():
self.coerce_operands_to_pyobjects(env)
@@ -5588,17 +5588,17 @@ class BinopNode(ExprNode):
self.analyse_cpp_operation(env)
else:
self.analyse_c_operation(env)
-
+
def is_py_operation(self):
return self.is_py_operation_types(self.operand1.type, self.operand2.type)
-
+
def is_py_operation_types(self, type1, type2):
return type1.is_pyobject or type2.is_pyobject
def is_cpp_operation(self):
return (self.operand1.type.is_cpp_class
or self.operand2.type.is_cpp_class)
-
+
def analyse_cpp_operation(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
@@ -5615,7 +5615,7 @@ class BinopNode(ExprNode):
self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.type = func_type.return_type
-
+
def result_type(self, type1, type2):
if self.is_py_operation_types(type1, type2):
if type2.is_string:
@@ -5649,14 +5649,14 @@ class BinopNode(ExprNode):
def nogil_check(self, env):
if self.is_py_operation():
self.gil_error()
-
+
def coerce_operands_to_pyobjects(self, env):
self.operand1 = self.operand1.coerce_to_pyobject(env)
self.operand2 = self.operand2.coerce_to_pyobject(env)
-
+
def check_const(self):
return self.operand1.check_const() and self.operand2.check_const()
-
+
def generate_result_code(self, code):
#print "BinopNode.generate_result_code:", self.operand1, self.operand2 ###
if self.operand1.type.is_pyobject:
@@ -5667,37 +5667,37 @@ class BinopNode(ExprNode):
extra_args = ""
code.putln(
"%s = %s(%s, %s%s); %s" % (
- self.result(),
- function,
+ self.result(),
+ function,
self.operand1.py_result(),
self.operand2.py_result(),
extra_args,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
-
+
def type_error(self):
if not (self.operand1.type.is_error
or self.operand2.type.is_error):
error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
- (self.operator, self.operand1.type,
+ (self.operator, self.operand1.type,
self.operand2.type))
self.type = PyrexTypes.error_type
class CBinopNode(BinopNode):
-
+
def analyse_types(self, env):
BinopNode.analyse_types(self, env)
if self.is_py_operation():
self.type = PyrexTypes.error_type
-
+
    def py_operation_function(self):
return ""
-
+
def calculate_result_code(self):
return "(%s %s %s)" % (
- self.operand1.result(),
- self.operator,
+ self.operand1.result(),
+ self.operator,
self.operand2.result())
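
BinopNode above decides between a Python-object operation and a plain C expression: is_py_operation_types() checks whether either operand is a Python object, generate_result_code() calls a C-API function in the Python case, and CBinopNode.calculate_result_code() emits an infix C expression otherwise. A minimal sketch of that dispatch, assuming '+' maps to PyNumber_Add on the Python-object path; the helper name emit_binop is invented for illustration and is not the compiler's code.

# Illustrative sketch only, not the ExprNodes.py implementation.
def emit_binop(op, lhs, rhs, lhs_is_pyobject=False, rhs_is_pyobject=False):
    """Return a C expression string for `lhs <op> rhs`."""
    if lhs_is_pyobject or rhs_is_pyobject:
        # Python-object path: call into the C-API (result must be NULL-checked).
        py_functions = {'+': 'PyNumber_Add', '-': 'PyNumber_Subtract'}
        return "%s(%s, %s)" % (py_functions[op], lhs, rhs)
    # C path: emit a plain infix expression, as CBinopNode.calculate_result_code() does.
    return "(%s %s %s)" % (lhs, op, rhs)

print(emit_binop('+', 'a', 'b'))                          # (a + b)
print(emit_binop('+', 'o1', 'o2', lhs_is_pyobject=True))  # PyNumber_Add(o1, o2)
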
@@ -5708,9 +5708,9 @@ def c_binop_constructor(operator):
class NumBinopNode(BinopNode):
# Binary operation taking numeric arguments.
-
+
infix = True
-
+
def analyse_c_operation(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
@@ -5723,7 +5723,7 @@ class NumBinopNode(BinopNode):
if not self.infix or (type1.is_numeric and type2.is_numeric):
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
-
+
def compute_c_result_type(self, type1, type2):
if self.c_types_okay(type1, type2):
widest_type = PyrexTypes.widest_numeric_type(type1, type2)
@@ -5742,7 +5742,7 @@ class NumBinopNode(BinopNode):
return "(%s %s %s)" % (value1, self.operator, value2)
else:
return None
-
+
def c_types_okay(self, type1, type2):
#print "NumBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_numeric or type1.is_enum) \
@@ -5751,8 +5751,8 @@ class NumBinopNode(BinopNode):
def calculate_result_code(self):
if self.infix:
return "(%s %s %s)" % (
- self.operand1.result(),
- self.operator,
+ self.operand1.result(),
+ self.operator,
self.operand2.result())
else:
func = self.type.binary_op(self.operator)
@@ -5762,12 +5762,12 @@ class NumBinopNode(BinopNode):
func,
self.operand1.result(),
self.operand2.result())
-
+
def is_py_operation_types(self, type1, type2):
return (type1 is PyrexTypes.c_py_unicode_type or
type2 is PyrexTypes.c_py_unicode_type or
BinopNode.is_py_operation_types(self, type1, type2))
-
+
def py_operation_function(self):
fuction = self.py_functions[self.operator]
if self.inplace:
@@ -5791,16 +5791,16 @@ class NumBinopNode(BinopNode):
class IntBinopNode(NumBinopNode):
# Binary operation taking integer arguments.
-
+
def c_types_okay(self, type1, type2):
#print "IntBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_int or type1.is_enum) \
and (type2.is_int or type2.is_enum)
-
+
class AddNode(NumBinopNode):
# '+' operator.
-
+
def is_py_operation_types(self, type1, type2):
if type1.is_string and type2.is_string:
return 1
@@ -5820,7 +5820,7 @@ class AddNode(NumBinopNode):
class SubNode(NumBinopNode):
# '-' operator.
-
+
def compute_c_result_type(self, type1, type2):
if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
return type1
@@ -5833,7 +5833,7 @@ class SubNode(NumBinopNode):
class MulNode(NumBinopNode):
# '*' operator.
-
+
def is_py_operation_types(self, type1, type2):
if (type1.is_string and type2.is_int) \
or (type2.is_string and type1.is_int):
@@ -5844,7 +5844,7 @@ class MulNode(NumBinopNode):
class DivNode(NumBinopNode):
# '/' or '//' operator.
-
+
cdivision = None
truedivision = None # == "unknown" if operator == '/'
ctruedivision = False
@@ -5914,14 +5914,14 @@ class DivNode(NumBinopNode):
def generate_evaluation_code(self, code):
if not self.type.is_pyobject and not self.type.is_complex:
if self.cdivision is None:
- self.cdivision = (code.globalstate.directives['cdivision']
+ self.cdivision = (code.globalstate.directives['cdivision']
or not self.type.signed
or self.type.is_float)
if not self.cdivision:
code.globalstate.use_utility_code(div_int_utility_code.specialize(self.type))
NumBinopNode.generate_evaluation_code(self, code)
self.generate_div_warning_code(code)
-
+
def generate_div_warning_code(self, code):
if not self.type.is_pyobject:
if self.zerodivision_check:
@@ -5936,7 +5936,7 @@ class DivNode(NumBinopNode):
if self.type.is_int and self.type.signed and self.operator != '%':
code.globalstate.use_utility_code(division_overflow_test_code)
code.putln("else if (sizeof(%s) == sizeof(long) && unlikely(%s == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
- self.type.declaration_code(''),
+ self.type.declaration_code(''),
self.operand2.result(),
self.operand1.result()))
code.putln('PyErr_Format(PyExc_OverflowError, "value too large to perform division");')
@@ -5951,7 +5951,7 @@ class DivNode(NumBinopNode):
code.put("if (__Pyx_cdivision_warning()) ")
code.put_goto(code.error_label)
code.putln("}")
-
+
def calculate_result_code(self):
if self.type.is_complex:
return NumBinopNode.calculate_result_code(self)
@@ -5971,7 +5971,7 @@ class DivNode(NumBinopNode):
else:
return "__Pyx_div_%s(%s, %s)" % (
self.type.specialization_name(),
- self.operand1.result(),
+ self.operand1.result(),
self.operand2.result())
@@ -5988,7 +5988,7 @@ class ModNode(DivNode):
return "integer division or modulo by zero"
else:
return "float divmod()"
-
+
def generate_evaluation_code(self, code):
if not self.type.is_pyobject:
if self.cdivision is None:
@@ -6001,27 +6001,27 @@ class ModNode(DivNode):
mod_float_utility_code.specialize(self.type, math_h_modifier=self.type.math_h_modifier))
NumBinopNode.generate_evaluation_code(self, code)
self.generate_div_warning_code(code)
-
+
def calculate_result_code(self):
if self.cdivision:
if self.type.is_float:
return "fmod%s(%s, %s)" % (
self.type.math_h_modifier,
- self.operand1.result(),
+ self.operand1.result(),
self.operand2.result())
else:
return "(%s %% %s)" % (
- self.operand1.result(),
+ self.operand1.result(),
self.operand2.result())
else:
return "__Pyx_mod_%s(%s, %s)" % (
self.type.specialization_name(),
- self.operand1.result(),
+ self.operand1.result(),
self.operand2.result())
class PowNode(NumBinopNode):
# '**' operator.
-
+
def analyse_c_operation(self, env):
NumBinopNode.analyse_c_operation(self, env)
if self.type.is_complex:
@@ -6037,7 +6037,7 @@ class PowNode(NumBinopNode):
else:
self.pow_func = "__Pyx_pow_%s" % self.type.declaration_code('').replace(' ', '_')
env.use_utility_code(
- int_pow_utility_code.specialize(func_name=self.pow_func,
+ int_pow_utility_code.specialize(func_name=self.pow_func,
type=self.type.declaration_code('')))
def calculate_result_code(self):
@@ -6048,8 +6048,8 @@ class PowNode(NumBinopNode):
else:
return self.type.cast_code(operand.result())
return "%s(%s, %s)" % (
- self.pow_func,
- typecast(self.operand1),
+ self.pow_func,
+ typecast(self.operand1),
typecast(self.operand2))
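
PowNode above specializes int_pow_utility_code into a __Pyx_pow_<type>() helper for non-complex C operands. The helper body is not part of this diff; the sketch below assumes the usual square-and-multiply scheme an integer power helper would use, written in plain Python purely for illustration.

def pow_int(base, exp):
    # Binary (square-and-multiply) exponentiation; exp is assumed non-negative.
    result = 1
    while exp > 0:
        if exp & 1:
            result *= base
        base *= base
        exp >>= 1
    return result

assert pow_int(3, 7) == 3 ** 7
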
@@ -6065,9 +6065,9 @@ class BoolBinopNode(ExprNode):
# operator string
# operand1 ExprNode
# operand2 ExprNode
-
+
subexprs = ['operand1', 'operand2']
-
+
def infer_type(self, env):
type1 = self.operand1.infer_type(env)
type2 = self.operand2.infer_type(env)
@@ -6088,7 +6088,7 @@ class BoolBinopNode(ExprNode):
self.constant_result = \
self.operand1.constant_result or \
self.operand2.constant_result
-
+
def compile_time_value(self, denv):
if self.operator == 'and':
return self.operand1.compile_time_value(denv) \
@@ -6096,7 +6096,7 @@ class BoolBinopNode(ExprNode):
else:
return self.operand1.compile_time_value(denv) \
or self.operand2.compile_time_value(denv)
-
+
def coerce_to_boolean(self, env):
return BoolBinopNode(
self.pos,
@@ -6112,7 +6112,7 @@ class BoolBinopNode(ExprNode):
self.type = PyrexTypes.independent_spanning_type(self.operand1.type, self.operand2.type)
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
-
+
# For what we're about to do, it's vital that
# both operands be temp nodes.
self.operand1 = self.operand1.coerce_to_simple(env)
@@ -6123,7 +6123,7 @@ class BoolBinopNode(ExprNode):
def check_const(self):
return self.operand1.check_const() and self.operand2.check_const()
-
+
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.operand1.generate_evaluation_code(code)
@@ -6151,7 +6151,7 @@ class BoolBinopNode(ExprNode):
self.operand1.generate_post_assignment_code(code)
self.operand1.free_temps(code)
code.putln("}")
-
+
def generate_operand1_test(self, code):
# Generate code to test the truth of the first operand.
if self.type.is_pyobject:
@@ -6173,15 +6173,15 @@ class CondExprNode(ExprNode):
# test ExprNode
# true_val ExprNode
# false_val ExprNode
-
+
true_val = None
false_val = None
-
+
subexprs = ['test', 'true_val', 'false_val']
-
+
def type_dependencies(self, env):
return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
-
+
def infer_type(self, env):
return PyrexTypes.independent_spanning_type(self.true_val.infer_type(env),
self.false_val.infer_type(env))
@@ -6204,22 +6204,22 @@ class CondExprNode(ExprNode):
self.is_temp = 1
if self.type == PyrexTypes.error_type:
self.type_error()
-
+
def type_error(self):
if not (self.true_val.type.is_error or self.false_val.type.is_error):
            error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
(self.true_val.type, self.false_val.type))
self.type = PyrexTypes.error_type
-
+
def check_const(self):
- return (self.test.check_const()
+ return (self.test.check_const()
and self.true_val.check_const()
and self.false_val.check_const())
-
+
def generate_evaluation_code(self, code):
# Because subexprs may not be evaluated we can use a more optimal
# subexpr allocation strategy than the default, so override evaluation_code.
-
+
code.mark_pos(self.pos)
self.allocate_temp_result(code)
self.test.generate_evaluation_code(code)
@@ -6424,7 +6424,7 @@ class CmpNode(object):
and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
(self.operand1.type is PyrexTypes.c_py_unicode_type
and self.operand2.type is unicode_type))
-
+
def is_ptr_contains(self):
if self.operator in ('in', 'not_in'):
container_type = self.operand2.type
@@ -6441,15 +6441,15 @@ class CmpNode(object):
return True
return False
- def generate_operation_code(self, code, result_code,
+ def generate_operation_code(self, code, result_code,
            operand1, op, operand2):
if self.type.is_pyobject:
coerce_result = "__Pyx_PyBool_FromLong"
else:
coerce_result = ""
- if 'not' in op:
+ if 'not' in op:
negation = "!"
- else:
+ else:
negation = ""
if self.special_bool_cmp_function:
if operand1.type.is_pyobject:
@@ -6489,30 +6489,30 @@ class CmpNode(object):
coerce_result,
negation,
method,
- operand2.py_result(),
- operand1.py_result(),
+ operand2.py_result(),
+ operand1.py_result(),
got_ref,
error_clause(result_code, self.pos)))
elif (operand1.type.is_pyobject
and op not in ('is', 'is_not')):
code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s" % (
- result_code,
- operand1.py_result(),
- operand2.py_result(),
+ result_code,
+ operand1.py_result(),
+ operand2.py_result(),
richcmp_constants[op],
code.error_goto_if_null(result_code, self.pos)))
code.put_gotref(result_code)
elif operand1.type.is_complex:
- if op == "!=":
+ if op == "!=":
negation = "!"
- else:
+ else:
negation = ""
code.putln("%s = %s(%s%s(%s, %s));" % (
- result_code,
+ result_code,
coerce_result,
negation,
- operand1.type.unary_op('eq'),
- operand1.result(),
+ operand1.type.unary_op('eq'),
+ operand1.result(),
operand2.result()))
else:
type1 = operand1.type
@@ -6527,10 +6527,10 @@ class CmpNode(object):
code1 = operand1.result_as(common_type)
code2 = operand2.result_as(common_type)
code.putln("%s = %s(%s %s %s);" % (
- result_code,
- coerce_result,
- code1,
- self.c_operator(op),
+ result_code,
+ coerce_result,
+ code1,
+ self.c_operator(op),
code2))
def c_operator(self, op):
@@ -6540,7 +6540,7 @@ class CmpNode(object):
return "!="
else:
return op
-
+
contains_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE long __Pyx_NegateNonNeg(long b) { return unlikely(b < 0) ? b : !b; }
@@ -6630,14 +6630,14 @@ class PrimaryCmpNode(ExprNode, CmpNode):
# operand1 ExprNode
# operand2 ExprNode
# cascade CascadedCmpNode
-
+
# We don't use the subexprs mechanism, because
# things here are too complicated for it to handle.
# Instead, we override all the framework methods
# which use it.
-
+
child_attrs = ['operand1', 'operand2', 'cascade']
-
+
cascade = None
def infer_type(self, env):
@@ -6649,7 +6649,7 @@ class PrimaryCmpNode(ExprNode, CmpNode):
def calculate_constant_result(self):
self.calculate_cascaded_constant_result(self.operand1.constant_result)
-
+
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
return self.cascaded_compile_time_value(operand1, denv)
@@ -6719,7 +6719,7 @@ class PrimaryCmpNode(ExprNode, CmpNode):
cdr = cdr.cascade
if self.is_pycmp or self.cascade:
self.is_temp = 1
-
+
def analyse_cpp_comparison(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
@@ -6739,11 +6739,11 @@ class PrimaryCmpNode(ExprNode, CmpNode):
self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.type = func_type.return_type
-
+
def has_python_operands(self):
return (self.operand1.type.is_pyobject
or self.operand2.type.is_pyobject)
-
+
def check_const(self):
if self.cascade:
self.not_const()
@@ -6759,8 +6759,8 @@ class PrimaryCmpNode(ExprNode, CmpNode):
negation = ""
return "(%s%s(%s, %s))" % (
negation,
- self.operand1.type.binary_op('=='),
- self.operand1.result(),
+ self.operand1.type.binary_op('=='),
+ self.operand1.result(),
self.operand2.result())
elif self.is_c_string_contains():
if self.operand2.type is bytes_type:
@@ -6774,7 +6774,7 @@ class PrimaryCmpNode(ExprNode, CmpNode):
return "(%s%s(%s, %s))" % (
negation,
method,
- self.operand2.result(),
+ self.operand2.result(),
self.operand1.result())
else:
return "(%s %s %s)" % (
@@ -6787,7 +6787,7 @@ class PrimaryCmpNode(ExprNode, CmpNode):
self.operand2.generate_evaluation_code(code)
if self.is_temp:
self.allocate_temp_result(code)
- self.generate_operation_code(code, self.result(),
+ self.generate_operation_code(code, self.result(),
self.operand1, self.operator, self.operand2)
if self.cascade:
self.cascade.generate_evaluation_code(code,
@@ -6802,13 +6802,13 @@ class PrimaryCmpNode(ExprNode, CmpNode):
# so only need to dispose of the two main operands.
self.operand1.generate_disposal_code(code)
self.operand2.generate_disposal_code(code)
-
+
def free_subexpr_temps(self, code):
# If this is called, it is a non-cascaded cmp,
# so only need to dispose of the two main operands.
self.operand1.free_temps(code)
self.operand2.free_temps(code)
-
+
def annotate(self, code):
self.operand1.annotate(code)
self.operand2.annotate(code)
@@ -6817,9 +6817,9 @@ class PrimaryCmpNode(ExprNode, CmpNode):
class CascadedCmpNode(Node, CmpNode):
- # A CascadedCmpNode is not a complete expression node. It
- # hangs off the side of another comparison node, shares
- # its left operand with that node, and shares its result
+ # A CascadedCmpNode is not a complete expression node. It
+ # hangs off the side of another comparison node, shares
+ # its left operand with that node, and shares its result
# with the PrimaryCmpNode at the head of the chain.
#
# operator string
@@ -6849,7 +6849,7 @@ class CascadedCmpNode(Node, CmpNode):
def has_python_operands(self):
return self.operand2.type.is_pyobject
-
+
def coerce_operands_to_pyobjects(self, env):
self.operand2 = self.operand2.coerce_to_pyobject(env)
if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
@@ -6862,7 +6862,7 @@ class CascadedCmpNode(Node, CmpNode):
#self.operand2 = self.operand2.coerce_to_temp(env) #CTT
self.operand2 = self.operand2.coerce_to_simple(env)
self.cascade.coerce_cascaded_operands_to_temp(env)
-
+
def generate_evaluation_code(self, code, result, operand1):
if self.type.is_pyobject:
code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
@@ -6870,7 +6870,7 @@ class CascadedCmpNode(Node, CmpNode):
else:
code.putln("if (%s) {" % result)
self.operand2.generate_evaluation_code(code)
- self.generate_operation_code(code, result,
+ self.generate_operation_code(code, result,
operand1, self.operator, self.operand2)
if self.cascade:
self.cascade.generate_evaluation_code(
@@ -6904,11 +6904,11 @@ binop_node_classes = {
}
def binop_node(pos, operator, operand1, operand2, inplace=False):
- # Construct binop node of appropriate class for
+ # Construct binop node of appropriate class for
# given operator.
- return binop_node_classes[operator](pos,
- operator = operator,
- operand1 = operand1,
+ return binop_node_classes[operator](pos,
+ operator = operator,
+ operand1 = operand1,
operand2 = operand2,
inplace = inplace)
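
binop_node() above is a plain dispatch-table factory: it looks the operator up in binop_node_classes and instantiates the matching node class with both operands. A stripped-down model of the same pattern; the _Binop base and its constructor are simplified stand-ins, not the real node classes.

class _Binop(object):
    # Stand-in for BinopNode: just records the constructor arguments.
    def __init__(self, pos, operator, operand1, operand2, inplace=False):
        self.pos, self.operator = pos, operator
        self.operand1, self.operand2, self.inplace = operand1, operand2, inplace

class AddNode(_Binop): pass
class SubNode(_Binop): pass

binop_node_classes = {'+': AddNode, '-': SubNode}

def binop_node(pos, operator, operand1, operand2, inplace=False):
    return binop_node_classes[operator](pos, operator=operator,
        operand1=operand1, operand2=operand2, inplace=inplace)

node = binop_node(('mod.pyx', 1, 0), '+', 'a', 'b')
assert isinstance(node, AddNode) and node.operator == '+'
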
@@ -6927,10 +6927,10 @@ class CoercionNode(ExprNode):
# Abstract base class for coercion nodes.
#
# arg ExprNode node being coerced
-
+
subexprs = ['arg']
constant_result = not_a_constant
-
+
def __init__(self, arg):
self.pos = arg.pos
self.arg = arg
@@ -6940,7 +6940,7 @@ class CoercionNode(ExprNode):
def calculate_constant_result(self):
# constant folding can break type coercion, so this is disabled
pass
-
+
def annotate(self, code):
self.arg.annotate(code)
if self.arg.type != self.type:
@@ -6950,14 +6950,14 @@ class CoercionNode(ExprNode):
class CastNode(CoercionNode):
# Wrap a node in a C type cast.
-
+
def __init__(self, arg, new_type):
CoercionNode.__init__(self, arg)
self.type = new_type
def may_be_none(self):
return self.arg.may_be_none()
-
+
def calculate_result_code(self):
return self.arg.result_as(self.type)
@@ -6981,7 +6981,7 @@ class PyTypeTestNode(CoercionNode):
nogil_check = Node.gil_error
gil_message = "Python type test"
-
+
def analyse_types(self, env):
pass
@@ -6989,10 +6989,10 @@ class PyTypeTestNode(CoercionNode):
if self.notnone:
return False
return self.arg.may_be_none()
-
+
def result_in_temp(self):
return self.arg.result_in_temp()
-
+
def is_ephemeral(self):
return self.arg.is_ephemeral()
@@ -7002,7 +7002,7 @@ class PyTypeTestNode(CoercionNode):
def calculate_result_code(self):
return self.arg.result()
-
+
def generate_result_code(self, code):
if self.type.typeobj_is_available():
if not self.type.is_builtin_type:
@@ -7014,7 +7014,7 @@ class PyTypeTestNode(CoercionNode):
else:
error(self.pos, "Cannot test type of extern C class "
"without type object name specification")
-
+
def generate_post_assignment_code(self, code):
self.arg.generate_post_assignment_code(code)
@@ -7045,7 +7045,7 @@ class NoneCheckNode(CoercionNode):
def calculate_result_code(self):
return self.arg.result()
-
+
def generate_result_code(self, code):
code.putln(
"if (unlikely(%s == Py_None)) {" % self.arg.result())
@@ -7066,7 +7066,7 @@ class NoneCheckNode(CoercionNode):
class CoerceToPyTypeNode(CoercionNode):
# This node is used to convert a C data type
# to a Python object.
-
+
type = py_object_type
is_temp = 1
@@ -7100,7 +7100,7 @@ class CoerceToPyTypeNode(CoercionNode):
return self.arg.coerce_to_temp(env)
else:
return CoerceToBooleanNode(self, env)
-
+
def coerce_to_integer(self, env):
# If not already some C integer type, coerce to longint.
if self.arg.type.is_int:
@@ -7115,9 +7115,9 @@ class CoerceToPyTypeNode(CoercionNode):
def generate_result_code(self, code):
function = self.arg.type.to_py_function
code.putln('%s = %s(%s); %s' % (
- self.result(),
- function,
- self.arg.result(),
+ self.result(),
+ function,
+ self.arg.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
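
CoerceToPyTypeNode.generate_result_code() above emits a single C statement: call the argument type's to_py_function, jump to the error label if the result is NULL, then take a reference. A hedged sketch of the line it produces; PyInt_FromLong and the label name are examples only, and error_goto_if_null expands to more than the bare goto shown here.

def coerce_to_py_line(result, to_py_function, arg_result, error_label="__pyx_L1_error"):
    # Simplified model of the statement generated above.
    return "%s = %s(%s); if (unlikely(!%s)) goto %s;" % (
        result, to_py_function, arg_result, result, error_label)

print(coerce_to_py_line("__pyx_t_1", "PyInt_FromLong", "__pyx_v_x"))
# __pyx_t_1 = PyInt_FromLong(__pyx_v_x); if (unlikely(!__pyx_t_1)) goto __pyx_L1_error;
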
@@ -7176,7 +7176,7 @@ class CoerceFromPyTypeNode(CoercionNode):
if self.type.is_string and self.arg.is_ephemeral():
error(arg.pos,
"Obtaining char * from temporary Python value")
-
+
def analyse_types(self, env):
# The arg is always already analysed
pass
@@ -7188,7 +7188,7 @@ class CoerceFromPyTypeNode(CoercionNode):
if self.type.is_enum:
rhs = typecast(self.type, c_long_type, rhs)
code.putln('%s = %s; %s' % (
- self.result(),
+ self.result(),
rhs,
code.error_goto_if(self.type.error_condition(self.result()), self.pos)))
if self.type.is_pyobject:
@@ -7198,7 +7198,7 @@ class CoerceFromPyTypeNode(CoercionNode):
class CoerceToBooleanNode(CoercionNode):
# This node is used when a result needs to be used
# in a boolean context.
-
+
type = PyrexTypes.c_bint_type
_special_builtins = {
@@ -7218,13 +7218,13 @@ class CoerceToBooleanNode(CoercionNode):
self.gil_error()
gil_message = "Truth-testing Python object"
-
+
def check_const(self):
if self.is_temp:
self.not_const()
return False
return self.arg.check_const()
-
+
def calculate_result_code(self):
return "(%s != 0)" % self.arg.result()
@@ -7241,8 +7241,8 @@ class CoerceToBooleanNode(CoercionNode):
else:
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
- self.result(),
- self.arg.py_result(),
+ self.result(),
+ self.arg.py_result(),
code.error_goto_if_neg(self.result(), self.pos)))
class CoerceToComplexNode(CoercionNode):
@@ -7265,7 +7265,7 @@ class CoerceToComplexNode(CoercionNode):
self.type.from_parts,
real_part,
imag_part)
-
+
def generate_result_code(self, code):
pass
@@ -7287,7 +7287,7 @@ class CoerceToTempNode(CoercionNode):
def analyse_types(self, env):
# The arg is always already analysed
pass
-
+
def coerce_to_boolean(self, env):
self.arg = self.arg.coerce_to_boolean(env)
if self.arg.is_simple():
@@ -7310,12 +7310,12 @@ class CloneNode(CoercionNode):
# to be used multiple times. The argument node's result must
# be in a temporary. This node "borrows" the result from the
# argument node, and does not generate any evaluation or
- # disposal code for it. The original owner of the argument
+ # disposal code for it. The original owner of the argument
# node is responsible for doing those things.
-
+
subexprs = [] # Arg is not considered a subexpr
nogil_check = None
-
+
def __init__(self, arg):
CoercionNode.__init__(self, arg)
if hasattr(arg, 'type'):
@@ -7323,13 +7323,13 @@ class CloneNode(CoercionNode):
self.result_ctype = arg.result_ctype
if hasattr(arg, 'entry'):
self.entry = arg.entry
-
+
def result(self):
return self.arg.result()
-
+
def type_dependencies(self, env):
return self.arg.type_dependencies(env)
-
+
def infer_type(self, env):
return self.arg.infer_type(env)
@@ -7339,27 +7339,27 @@ class CloneNode(CoercionNode):
self.is_temp = 1
if hasattr(self.arg, 'entry'):
self.entry = self.arg.entry
-
+
def generate_evaluation_code(self, code):
pass
def generate_result_code(self, code):
pass
-
+
def generate_disposal_code(self, code):
pass
-
+
def free_temps(self, code):
pass
class ModuleRefNode(ExprNode):
    # Simply returns the module object
-
+
type = py_object_type
is_temp = False
subexprs = []
-
+
def analyse_types(self, env):
pass
@@ -7374,11 +7374,11 @@ class ModuleRefNode(ExprNode):
class DocstringRefNode(ExprNode):
# Extracts the docstring of the body element
-
+
subexprs = ['body']
type = py_object_type
is_temp = True
-
+
def __init__(self, pos, body):
ExprNode.__init__(self, pos)
assert body.type.is_pyobject
@@ -7802,7 +7802,7 @@ static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#endif
-""",
+""",
requires = [raise_noneindex_error_utility_code])
#------------------------------------------------------------------------------------
@@ -8001,7 +8001,7 @@ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
tuple_unpacking_error_code = UtilityCode(
proto = """
static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/
-""",
+""",
impl = """
static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) {
if (t == Py_None) {
@@ -8012,7 +8012,7 @@ static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) {
__Pyx_RaiseTooManyValuesError(index);
}
}
-""",
+""",
requires = [raise_none_iter_error_utility_code,
raise_need_more_values_to_unpack,
raise_too_many_values_to_unpack]
@@ -8075,7 +8075,7 @@ static PyObject* __Pyx_PyEval_CallObjectWithKeywords(PyObject *callable, PyObjec
return result; /* may be NULL */
}
}
-""",
+""",
)
@@ -8154,9 +8154,9 @@ static int __Pyx_cdivision_warning(void); /* proto */
""",
impl="""
static int __Pyx_cdivision_warning(void) {
- return PyErr_WarnExplicit(PyExc_RuntimeWarning,
+ return PyErr_WarnExplicit(PyExc_RuntimeWarning,
"division with oppositely signed operands, C and Python semantics differ",
- %(FILENAME)s,
+ %(FILENAME)s,
%(LINENO)s,
__Pyx_MODULE_NAME,
NULL);
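
The helpers above (__Pyx_PyDict_GetItem, __Pyx_UnpackTupleError, __Pyx_cdivision_warning and friends) are all declared through the same UtilityCode(proto=..., impl=..., requires=[...]) pattern: the proto goes into the generated header section, the impl is emitted once on first use, and requires pulls in dependencies. A sketch of declaring one; the helper itself and the import path are assumptions for illustration.

from Cython.Compiler.Code import UtilityCode   # assumed import path

example_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_ExampleHelper(int x); /*proto*/
""",
impl = """
static CYTHON_INLINE int __Pyx_ExampleHelper(int x) {
    return x + 1;   /* placeholder body */
}
""",
requires = [])
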
diff --git a/Cython/Compiler/Interpreter.py b/Cython/Compiler/Interpreter.py
index 298041d01..83cb184f9 100644
--- a/Cython/Compiler/Interpreter.py
+++ b/Cython/Compiler/Interpreter.py
@@ -14,7 +14,7 @@ from Errors import CompileError
class EmptyScope(object):
def lookup(self, name):
return None
-
+
empty_scope = EmptyScope()
def interpret_compiletime_options(optlist, optdict, type_env=None, type_args=()):
@@ -45,7 +45,7 @@ def interpret_compiletime_options(optlist, optdict, type_env=None, type_args=())
raise CompileError(node.pos, "Type not allowed here.")
else:
return (node.compile_time_value(empty_scope), node.pos)
-
+
if optlist:
optlist = [interpret(x, ix) for ix, x in enumerate(optlist)]
if optdict:
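
interpret_compiletime_options() above evaluates each positional and keyword option node at compile time against an EmptyScope and keeps the result together with its source position. A small model of that calling convention; FakeConstNode is a stand-in with just enough surface, not a real parse-tree node.

class FakeConstNode(object):
    def __init__(self, value, pos):
        self._value, self.pos = value, pos
    def compile_time_value(self, scope):
        return self._value

optlist = [FakeConstNode(4, ('directive', 1, 0))]
# Mirrors the comprehension above: each node becomes a (value, pos) pair.
optlist = [(node.compile_time_value(None), node.pos) for node in optlist]
assert optlist == [(4, ('directive', 1, 0))]
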
diff --git a/Cython/Compiler/Lexicon.py b/Cython/Compiler/Lexicon.py
index f31e5be53..ad736df13 100644
--- a/Cython/Compiler/Lexicon.py
+++ b/Cython/Compiler/Lexicon.py
@@ -19,12 +19,12 @@ def make_lexicon():
octdigit = Any("01234567")
hexdigit = Any("0123456789ABCDEFabcdef")
indentation = Bol + Rep(Any(" \t"))
-
+
decimal = Rep1(digit)
dot = Str(".")
exponent = Any("Ee") + Opt(Any("+-")) + decimal
decimal_fract = (decimal + dot + Opt(decimal)) | (dot + decimal)
-
+
name = letter + Rep(letter | digit)
intconst = decimal | (Str("0") + ((Any("Xx") + Rep1(hexdigit)) |
(Any("Oo") + Rep1(octdigit)) |
@@ -33,33 +33,33 @@ def make_lexicon():
intliteral = intconst + intsuffix
fltconst = (decimal_fract + Opt(exponent)) | (decimal + exponent)
imagconst = (intconst | fltconst) + Any("jJ")
-
+
sq_string = (
- Str("'") +
- Rep(AnyBut("\\\n'") | (Str("\\") + AnyChar)) +
+ Str("'") +
+ Rep(AnyBut("\\\n'") | (Str("\\") + AnyChar)) +
Str("'")
)
-
+
dq_string = (
- Str('"') +
- Rep(AnyBut('\\\n"') | (Str("\\") + AnyChar)) +
+ Str('"') +
+ Rep(AnyBut('\\\n"') | (Str("\\") + AnyChar)) +
Str('"')
)
-
+
non_sq = AnyBut("'") | (Str('\\') + AnyChar)
tsq_string = (
Str("'''")
- + Rep(non_sq | (Str("'") + non_sq) | (Str("''") + non_sq))
+ + Rep(non_sq | (Str("'") + non_sq) | (Str("''") + non_sq))
+ Str("'''")
)
-
+
non_dq = AnyBut('"') | (Str('\\') + AnyChar)
tdq_string = (
Str('"""')
- + Rep(non_dq | (Str('"') + non_dq) | (Str('""') + non_dq))
+ + Rep(non_dq | (Str('"') + non_dq) | (Str('""') + non_dq))
+ Str('"""')
)
-
+
beginstring = Opt(Any(string_prefixes)) + Opt(Any(raw_prefixes)) + (Str("'") | Str('"') | Str("'''") | Str('"""'))
two_oct = octdigit + octdigit
three_oct = octdigit + octdigit + octdigit
@@ -68,21 +68,21 @@ def make_lexicon():
escapeseq = Str("\\") + (two_oct | three_oct |
Str('u') + four_hex | Str('x') + two_hex |
Str('U') + four_hex + four_hex | AnyChar)
-
+
deco = Str("@")
bra = Any("([{")
ket = Any(")]}")
punct = Any(":,;+-*/|&<>=.%`~^?")
diphthong = Str("==", "<>", "!=", "<=", ">=", "<<", ">>", "**", "//",
- "+=", "-=", "*=", "/=", "%=", "|=", "^=", "&=",
+ "+=", "-=", "*=", "/=", "%=", "|=", "^=", "&=",
"<<=", ">>=", "**=", "//=", "->")
spaces = Rep1(Any(" \t\f"))
escaped_newline = Str("\\\n")
lineterm = Eol + Opt(Str("\n"))
- comment = Str("#") + Rep(AnyBut("\n"))
-
+ comment = Str("#") + Rep(AnyBut("\n"))
+
return Lexicon([
(name, IDENT),
(intliteral, 'INT'),
@@ -90,25 +90,25 @@ def make_lexicon():
(imagconst, 'IMAG'),
(deco, 'DECORATOR'),
(punct | diphthong, TEXT),
-
+
(bra, Method('open_bracket_action')),
(ket, Method('close_bracket_action')),
(lineterm, Method('newline_action')),
-
+
#(stringlit, 'STRING'),
(beginstring, Method('begin_string_action')),
-
+
(comment, IGNORE),
(spaces, IGNORE),
(escaped_newline, IGNORE),
-
+
State('INDENT', [
(comment + lineterm, Method('commentline')),
(Opt(spaces) + Opt(comment) + lineterm, IGNORE),
(indentation, Method('indentation_action')),
(Eof, Method('eof_action'))
]),
-
+
State('SQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut("'\"\n\\")), 'CHARS'),
@@ -117,7 +117,7 @@ def make_lexicon():
(Str("'"), Method('end_string_action')),
(Eof, 'EOF')
]),
-
+
State('DQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut('"\n\\')), 'CHARS'),
@@ -126,7 +126,7 @@ def make_lexicon():
(Str('"'), Method('end_string_action')),
(Eof, 'EOF')
]),
-
+
State('TSQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut("'\"\n\\")), 'CHARS'),
@@ -135,7 +135,7 @@ def make_lexicon():
(Str("'''"), Method('end_string_action')),
(Eof, 'EOF')
]),
-
+
State('TDQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut('"\'\n\\')), 'CHARS'),
@@ -144,10 +144,10 @@ def make_lexicon():
(Str('"""'), Method('end_string_action')),
(Eof, 'EOF')
]),
-
+
(Eof, Method('eof_action'))
],
-
+
# FIXME: Plex 1.9 needs different args here from Plex 1.1.4
#debug_flags = scanner_debug_flags,
#debug_file = scanner_dump_file
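
The string patterns in make_lexicon() above are built from Plex combinators (Str, Rep, AnyBut, AnyChar) rather than regular expressions, but they describe the same languages. As a rough cross-check, my own translation of the single-quoted pattern into a Python regex (not part of the lexer) would be:

import re

# Plex:  Str("'") + Rep(AnyBut("\\\n'") | (Str("\\") + AnyChar)) + Str("'")
sq_string = re.compile(r"'(?:[^\\\n']|\\.)*'", re.DOTALL)

assert sq_string.match(r"'abc'")
assert sq_string.match(r"'a\'b'")        # escaped quote inside the literal
assert not sq_string.match("'unterminated")
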
diff --git a/Cython/Compiler/Main.py b/Cython/Compiler/Main.py
index 13b0b49ea..3088db344 100644
--- a/Cython/Compiler/Main.py
+++ b/Cython/Compiler/Main.py
@@ -68,7 +68,7 @@ class Context(object):
# include_directories [string]
# future_directives [object]
# language_level int currently 2 or 3 for Python 2/3
-
+
def __init__(self, include_directories, compiler_directives, cpp=False, language_level=2):
#self.modules = {"__builtin__" : BuiltinScope()}
import Builtin, CythonScope
@@ -86,7 +86,7 @@ class Context(object):
self.include_directories = include_directories + [standard_include_path]
self.set_language_level(language_level)
-
+
self.gdb_debug_outputwriter = None
def set_language_level(self, level):
@@ -120,12 +120,12 @@ class Context(object):
else:
_check_c_declarations = check_c_declarations
_specific_post_parse = None
-
+
if py and not pxd:
_align_function_definitions = AlignFunctionDefinitions(self)
else:
_align_function_definitions = None
-
+
return [
NormalizeTree(self),
PostParse(self),
@@ -190,7 +190,7 @@ class Context(object):
debug_transform = [DebugTransform(self, options, result)]
else:
debug_transform = []
-
+
return list(itertools.chain(
[create_parse(self)],
self.create_pipeline(pxd=False, py=py),
@@ -214,7 +214,7 @@ class Context(object):
return [parse_pxd] + self.create_pipeline(pxd=True) + [
ExtractPxdCode(self),
]
-
+
def create_py_pipeline(self, options, result):
return self.create_pyx_pipeline(options, result, py=True)
@@ -223,7 +223,7 @@ class Context(object):
pipeline = self.create_pxd_pipeline(scope, module_name)
result = self.run_pipeline(pipeline, source_desc)
return result
-
+
def nonfatal_error(self, exc):
return Errors.report_error(exc)
@@ -253,7 +253,7 @@ class Context(object):
error = err
return (error, data)
- def find_module(self, module_name,
+ def find_module(self, module_name,
relative_to = None, pos = None, need_pxd = 1):
# Finds and returns the module scope corresponding to
# the given relative or absolute module name. If this
@@ -320,7 +320,7 @@ class Context(object):
except CompileError:
pass
return scope
-
+
def find_pxd_file(self, qualified_name, pos):
# Search include path for the .pxd file corresponding to the
# given fully-qualified module name.
@@ -355,7 +355,7 @@ class Context(object):
# Search include path for the .pyx file corresponding to the
# given fully-qualified module name, as for find_pxd_file().
return self.search_include_directories(qualified_name, ".pyx", pos)
-
+
def find_include_file(self, filename, pos):
# Search list of include directories for filename.
# Reports an error and returns None if not found.
@@ -364,7 +364,7 @@ class Context(object):
if not path:
error(pos, "'%s' not found" % filename)
return path
-
+
def search_include_directories(self, qualified_name, suffix, pos,
include=False):
# Search the list of include directories for the given
@@ -445,15 +445,15 @@ class Context(object):
if dep_path and Utils.file_newer_than(dep_path, c_time):
return 1
return 0
-
+
def find_cimported_module_names(self, source_path):
return [ name for kind, name in self.read_dependency_file(source_path)
if kind == "cimport" ]
def is_package_dir(self, dir_path):
# Return true if the given directory is a package directory.
- for filename in ("__init__.py",
- "__init__.pyx",
+ for filename in ("__init__.py",
+ "__init__.pyx",
"__init__.pxd"):
path = os.path.join(dir_path, filename)
if Utils.path_exists(path):
@@ -479,7 +479,7 @@ class Context(object):
# Find a top-level module, creating a new one if needed.
scope = self.lookup_submodule(name)
if not scope:
- scope = ModuleScope(name,
+ scope = ModuleScope(name,
parent_module = None, context = self)
self.modules[name] = scope
return scope
@@ -590,7 +590,7 @@ def run_pipeline(source, options, full_module_name = None):
# Set up result object
result = create_default_resultobj(source, options)
-
+
# Get pipeline
if source_desc.filename.endswith(".py"):
pipeline = context.create_py_pipeline(options, result)
@@ -601,7 +601,7 @@ def run_pipeline(source, options, full_module_name = None):
err, enddata = context.run_pipeline(pipeline, source)
context.teardown_errors(err, options, result)
return result
-
+
#------------------------------------------------------------------------
#
@@ -622,7 +622,7 @@ class CompilationSource(object):
class CompilationOptions(object):
"""
Options to the Cython compiler:
-
+
show_version boolean Display version number
use_listing_file boolean Generate a .lis file
errors_to_stderr boolean Echo errors to stderr when using .lis
@@ -637,10 +637,10 @@ class CompilationOptions(object):
compiler_directives dict Overrides for pragma options (see Options.py)
evaluate_tree_assertions boolean Test support: evaluate parse tree assertions
language_level integer The Python language level: 2 or 3
-
+
cplus boolean Compile as c++ code
"""
-
+
def __init__(self, defaults = None, **kw):
self.include_path = []
if defaults:
@@ -659,7 +659,7 @@ class CompilationOptions(object):
class CompilationResult(object):
"""
Results from the Cython compiler:
-
+
c_file string or None The generated C source file
h_file string or None The generated C header file
i_file string or None The generated .pxi file
@@ -670,7 +670,7 @@ class CompilationResult(object):
num_errors integer Number of compilation errors
compilation_source CompilationSource
"""
-
+
def __init__(self):
self.c_file = None
self.h_file = None
@@ -687,10 +687,10 @@ class CompilationResultSet(dict):
Results from compiling multiple Pyrex source files. A mapping
from source file paths to CompilationResult instances. Also
has the following attributes:
-
+
num_errors integer Total number of compilation errors
"""
-
+
num_errors = 0
def add(self, source, result):
@@ -701,7 +701,7 @@ class CompilationResultSet(dict):
def compile_single(source, options, full_module_name = None):
"""
compile_single(source, options, full_module_name)
-
+
Compile the given Pyrex implementation file and return a CompilationResult.
Always compiles a single file; does not perform timestamp checking or
recursion.
@@ -712,7 +712,7 @@ def compile_single(source, options, full_module_name = None):
def compile_multiple(sources, options):
"""
compile_multiple(sources, options)
-
+
Compiles the given sequence of Pyrex implementation files and returns
a CompilationResultSet. Performs timestamp checking and/or recursion
if these are specified in the options.
@@ -750,7 +750,7 @@ def compile_multiple(sources, options):
def compile(source, options = None, full_module_name = None, **kwds):
"""
compile(source [, options], [, <option> = <value>]...)
-
+
    Compile one or more Pyrex implementation files, with optional timestamp
    checking and recursing on dependencies. The source argument may be a string
    or a sequence of strings. If it is a string and no recursion or timestamp
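
Per the docstrings above, compile() is the front door: a single source string goes through compile_single() and returns a CompilationResult, while a sequence goes through compile_multiple() with optional timestamp checking and recursion. A minimal, hedged usage sketch; "example.pyx" is a placeholder, and only attributes named in the CompilationResult docstring are read.

from Cython.Compiler.Main import compile

result = compile("example.pyx", full_module_name="example")
if result.num_errors:
    raise SystemExit("cython reported %d error(s)" % result.num_errors)
print("generated: %s" % result.c_file)
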
diff --git a/Cython/Compiler/ModuleNode.py b/Cython/Compiler/ModuleNode.py
index 027b5f8d5..2caf78120 100644
--- a/Cython/Compiler/ModuleNode.py
+++ b/Cython/Compiler/ModuleNode.py
@@ -51,7 +51,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
child_attrs = ["body"]
directives = None
-
+
def analyse_declarations(self, env):
if Options.embed_pos_in_docstring:
env.doc = EncodedString(u'File: %s (starting at line %s)' % Nodes.relative_position(self.pos))
@@ -62,7 +62,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
env.doc = self.doc
env.directives = self.directives
self.body.analyse_declarations(env)
-
+
def process_implementation(self, options, result):
env = self.scope
env.return_type = PyrexTypes.c_void_type
@@ -73,14 +73,14 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
self.generate_c_code(env, options, result)
self.generate_h_code(env, options, result)
self.generate_api_code(env, result)
-
+
def has_imported_c_functions(self):
for module in self.referenced_modules:
for entry in module.cfunc_entries:
if entry.defined_in_pxd:
return 1
return 0
-
+
def generate_dep_file(self, env, result):
modules = self.referenced_modules
if len(modules) > 1 or env.included_files:
@@ -138,21 +138,21 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
h_code.putln("PyMODINIT_FUNC init%s(void);" % env.module_name)
h_code.putln("")
h_code.putln("#endif")
-
+
h_code.copyto(open_new_file(result.h_file))
-
+
def generate_public_declaration(self, entry, h_code, i_code):
h_code.putln("%s %s;" % (
Naming.extern_c_macro,
entry.type.declaration_code(
entry.cname, dll_linkage = "DL_IMPORT")))
if i_code:
- i_code.putln("cdef extern %s" %
+ i_code.putln("cdef extern %s" %
entry.type.declaration_code(entry.cname, pyrex = 1))
-
+
def api_name(self, env):
return env.qualified_name.replace(".", "__")
-
+
def generate_api_code(self, env, result):
api_funcs = []
public_extension_types = []
@@ -221,14 +221,14 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
h_code.putln("}")
h_code.putln("")
h_code.putln("#endif")
-
+
h_code.copyto(open_new_file(result.api_file))
-
+
def generate_cclass_header_code(self, type, h_code):
h_code.putln("%s DL_IMPORT(PyTypeObject) %s;" % (
Naming.extern_c_macro,
type.typeobj_cname))
-
+
def generate_cclass_include_code(self, type, i_code):
i_code.putln("cdef extern class %s.%s:" % (
type.module_name, type.name))
@@ -236,12 +236,12 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
var_entries = type.scope.var_entries
if var_entries:
for entry in var_entries:
- i_code.putln("cdef %s" %
+ i_code.putln("cdef %s" %
entry.type.declaration_code(entry.cname, pyrex = 1))
else:
i_code.putln("pass")
i_code.dedent()
-
+
def generate_c_code(self, env, options, result):
modules = self.referenced_modules
@@ -254,7 +254,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
globalstate = Code.GlobalState(rootwriter, emit_linenums)
globalstate.initialize_main_c_code()
h_code = globalstate['h_code']
-
+
self.generate_module_preamble(env, modules, h_code)
globalstate.module_pos = self.pos
@@ -287,14 +287,14 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
if Options.embed:
self.generate_main_method(env, globalstate['main_method'])
self.generate_filename_table(globalstate['filename_table'])
-
+
self.generate_declarations_for_modules(env, modules, globalstate)
h_code.write('\n')
for utilcode in env.utility_code_list:
globalstate.use_utility_code(utilcode)
globalstate.finalize_main_c_code()
-
+
f = open_new_file(result.c_file)
rootwriter.copyto(f)
if options.gdb_debug:
@@ -304,16 +304,16 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
if Options.annotate or options.annotate:
self.annotate(rootwriter)
rootwriter.save_annotation(result.main_source_file, result.c_file)
-
+
def _serialize_lineno_map(self, env, ccodewriter):
tb = env.context.gdb_debug_outputwriter
markers = ccodewriter.buffer.allmarkers()
-
+
d = {}
- for c_lineno, cython_lineno in enumerate(markers):
+ for c_lineno, cython_lineno in enumerate(markers):
if cython_lineno > 0:
d.setdefault(cython_lineno, []).append(c_lineno + 1)
-
+
tb.start('LineNumberMapping')
for cython_lineno, c_linenos in sorted(d.iteritems()):
attrs = {
@@ -324,7 +324,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
tb.end('LineNumber')
tb.end('LineNumberMapping')
tb.serialize()
-
+
def find_referenced_modules(self, env, module_list, modules_seen):
if env not in modules_seen:
modules_seen[env] = 1
@@ -378,7 +378,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
if type.is_extension_type and not entry.in_cinclude:
type = entry.type
vtabslot_dict[type.objstruct_cname] = entry
-
+
def vtabstruct_cname(entry_type):
return entry_type.vtabstruct_cname
vtab_list = self.sort_types_by_inheritance(
@@ -447,7 +447,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln(" #error Python headers needed to compile C extensions, please install development version of Python.")
code.putln("#else")
code.globalstate["end"].putln("#endif /* Py_PYTHON_H */")
-
+
code.put("""
#include <stddef.h> /* For offsetof */
#ifndef offsetof
@@ -714,7 +714,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln('#include %s' % byte_decoded_filenname)
else:
code.putln('#include "%s"' % byte_decoded_filenname)
-
+
def generate_filename_table(self, code):
code.putln("")
code.putln("static const char *%s[] = {" % Naming.filetable_cname)
@@ -747,7 +747,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
self.generate_enum_definition(entry, code)
elif type.is_extension_type:
self.generate_objstruct_definition(type, code)
-
+
def generate_gcc33_hack(self, env, code):
# Workaround for spurious warning generation in gcc 3.3
code.putln("")
@@ -761,7 +761,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
tail = name
code.putln("typedef struct %s __pyx_gcc33_%s;" % (
name, tail))
-
+
def generate_typedef(self, entry, code):
base_type = entry.type.typedef_base_type
if base_type.is_numeric:
@@ -779,7 +779,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
header = "%s %s {" % (kind, name)
footer = "};"
return header, footer
-
+
def generate_struct_union_definition(self, entry, code):
code.mark_pos(entry.pos)
type = entry.type
@@ -848,7 +848,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
value_code += ","
code.putln(value_code)
code.putln(footer)
-
+
def generate_typeobject_predeclaration(self, entry, code):
code.putln("")
name = entry.type.typeobj_cname
@@ -865,7 +865,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
# ??? Do we really need the rest of this? ???
#else:
# code.putln("staticforward PyTypeObject %s;" % name)
-
+
def generate_exttype_vtable_struct(self, entry, code):
code.mark_pos(entry.pos)
# Generate struct declaration for an extension type's vtable.
@@ -886,7 +886,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
"%s;" % method_entry.type.declaration_code("(*%s)" % method_entry.name))
code.putln(
"};")
-
+
def generate_exttype_vtabptr_declaration(self, entry, code):
code.mark_pos(entry.pos)
# Generate declaration of pointer to an extension type's vtable.
@@ -895,7 +895,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("static struct %s *%s;" % (
type.vtabstruct_cname,
type.vtabptr_cname))
-
+
def generate_objstruct_definition(self, type, code):
code.mark_pos(type.pos)
# Generate object struct definition for an
@@ -934,11 +934,11 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("")
for entry in env.c_class_entries:
if definition or entry.defined_in_pxd:
- code.putln("static PyTypeObject *%s = 0;" %
+ code.putln("static PyTypeObject *%s = 0;" %
entry.type.typeptr_cname)
- code.put_var_declarations(env.var_entries, static = 1,
+ code.put_var_declarations(env.var_entries, static = 1,
dll_linkage = "DL_EXPORT", definition = definition)
-
+
def generate_cfunction_predeclarations(self, env, code, definition):
for entry in env.cfunc_entries:
if entry.inline_func_in_pxd or (not entry.in_cinclude and (definition
@@ -950,7 +950,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
type = entry.type
if not definition and entry.defined_in_pxd:
type = CPtrType(type)
- header = type.declaration_code(entry.cname,
+ header = type.declaration_code(entry.cname,
dll_linkage = dll_linkage)
if entry.visibility == 'private':
storage_class = "static "
@@ -967,7 +967,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
storage_class,
modifiers,
header))
-
+
def generate_typeobj_definitions(self, env, code):
full_module_name = env.qualified_name
for entry in env.c_class_entries:
@@ -1006,7 +1006,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
self.generate_method_table(scope, code)
self.generate_getset_table(scope, code)
self.generate_typeobj_definition(full_module_name, entry, code)
-
+
def generate_exttype_vtable(self, scope, code):
# Generate the definition of an extension type's vtable.
type = scope.parent_type
@@ -1014,14 +1014,14 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("static struct %s %s;" % (
type.vtabstruct_cname,
type.vtable_cname))
-
+
def generate_self_cast(self, scope, code):
type = scope.parent_type
code.putln(
"%s = (%s)o;" % (
type.declaration_code("p"),
type.declaration_code("")))
-
+
def generate_new_function(self, scope, code):
tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
slot_func = scope.mangle_internal("tp_new")
@@ -1081,7 +1081,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
else:
cinit_args = "o, a, k"
code.putln(
- "if (%s(%s) < 0) {" %
+ "if (%s(%s) < 0) {" %
(entry.func_cname, cinit_args))
code.put_decref_clear("o", py_object_type, nanny=False);
code.putln(
@@ -1090,7 +1090,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
"return o;")
code.putln(
"}")
-
+
def generate_dealloc_function(self, scope, code):
tp_slot = TypeSlots.ConstructorSlot("tp_dealloc", '__dealloc__')
slot_func = scope.mangle_internal("tp_dealloc")
@@ -1124,7 +1124,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
"(*Py_TYPE(o)->tp_free)(o);")
code.putln(
"}")
-
+
def generate_usr_dealloc_call(self, scope, code):
entry = scope.lookup_here("__dealloc__")
if entry:
@@ -1137,7 +1137,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln(
"++Py_REFCNT(o);")
code.putln(
- "%s(o);" %
+ "%s(o);" %
entry.func_cname)
code.putln(
"if (PyErr_Occurred()) PyErr_WriteUnraisable(o);")
@@ -1147,7 +1147,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
"PyErr_Restore(etype, eval, etb);")
code.putln(
"}")
-
+
def generate_traverse_function(self, scope, code):
tp_slot = TypeSlots.GCDependentSlot("tp_traverse")
slot_func = scope.mangle_internal("tp_traverse")
@@ -1185,7 +1185,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
if entry.type.is_extension_type:
var_code = "((PyObject*)%s)" % var_code
code.putln(
- "e = (*v)(%s, a); if (e) return e;"
+ "e = (*v)(%s, a); if (e) return e;"
% var_code)
code.putln(
"}")
@@ -1193,7 +1193,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
"return 0;")
code.putln(
"}")
-
+
def generate_clear_function(self, scope, code):
tp_slot = TypeSlots.GCDependentSlot("tp_clear")
slot_func = scope.mangle_internal("tp_clear")
@@ -1227,7 +1227,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
"return 0;")
code.putln(
"}")
-
+
def generate_getitem_int_function(self, scope, code):
# This function is put into the sq_item slot when
# a __getitem__ method is present. It converts its
@@ -1295,7 +1295,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
"}")
code.putln(
"}")
-
+
def generate_guarded_basetype_call(
self, base_type, substructure, slot, args, code):
if base_type:
@@ -1407,7 +1407,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
"return v;")
code.putln(
"}")
-
+
def generate_setattro_function(self, scope, code):
# Setting and deleting an attribute are both done through
# the setattro method, so we dispatch to user's __setattr__
@@ -1447,7 +1447,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
"}")
code.putln(
"}")
-
+
def generate_descr_get_function(self, scope, code):
# The __get__ function of a descriptor object can be
# called with NULL for the second or third arguments
@@ -1475,7 +1475,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
"return r;")
code.putln(
"}")
-
+
def generate_descr_set_function(self, scope, code):
# Setting and deleting are both done through the __set__
# method of a descriptor, so we dispatch to user's __set__
@@ -1516,10 +1516,10 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln(
"return -1;")
code.putln(
- "}")
+ "}")
code.putln(
"}")
-
+
def generate_property_accessors(self, cclass_scope, code):
for entry in cclass_scope.property_entries:
property_scope = entry.scope
@@ -1527,7 +1527,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
self.generate_property_get_function(entry, code)
if property_scope.defines_any(["__set__", "__del__"]):
self.generate_property_set_function(entry, code)
-
+
def generate_property_get_function(self, property_entry, code):
property_scope = property_entry.scope
property_entry.getter_cname = property_scope.parent_scope.mangle(
@@ -1542,7 +1542,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
get_entry.func_cname)
code.putln(
"}")
-
+
def generate_property_set_function(self, property_entry, code):
property_scope = property_entry.scope
property_entry.setter_cname = property_scope.parent_scope.mangle(
@@ -1613,11 +1613,11 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
slot.generate(scope, code)
code.putln(
"};")
-
+
def generate_method_table(self, env, code):
code.putln("")
code.putln(
- "static PyMethodDef %s[] = {" %
+ "static PyMethodDef %s[] = {" %
env.method_table_cname)
for entry in env.pyfunc_entries:
code.put_pymethoddef(entry, ",")
@@ -1625,7 +1625,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
"{0, 0, 0, 0}")
code.putln(
"};")
-
+
def generate_getset_table(self, env, code):
if env.property_entries:
code.putln("")
@@ -1681,7 +1681,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("Py_INCREF(o);")
code.put_decref(entry.cname, entry.type, nanny=False)
code.putln("%s = %s;" % (
- entry.cname,
+ entry.cname,
PyrexTypes.typecast(entry.type, py_object_type, "o")))
elif entry.type.from_py_function:
rhs = "%s(o)" % entry.type.from_py_function
@@ -1797,7 +1797,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("/*--- Execution code ---*/")
code.mark_pos(None)
-
+
self.body.generate_execution_code(code)
if Options.generate_cleanup_code:
@@ -1835,7 +1835,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
if not Options.generate_cleanup_code:
return
code.globalstate.use_utility_code(register_cleanup_utility_code)
- code.putln('static PyObject *%s(CYTHON_UNUSED PyObject *self, CYTHON_UNUSED PyObject *unused) {' %
+ code.putln('static PyObject *%s(CYTHON_UNUSED PyObject *self, CYTHON_UNUSED PyObject *unused) {' %
Naming.cleanup_cname)
if Options.generate_cleanup_code >= 2:
code.putln("/*--- Global cleanup code ---*/")
@@ -1910,9 +1910,9 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("#if PY_MAJOR_VERSION < 3")
code.putln(
'%s = Py_InitModule4(__Pyx_NAMESTR("%s"), %s, %s, 0, PYTHON_API_VERSION);' % (
- env.module_cname,
- env.module_name,
- env.method_table_cname,
+ env.module_cname,
+ env.module_name,
+ env.method_table_cname,
doc))
code.putln("#else")
code.putln(
@@ -1944,7 +1944,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
if Options.pre_import is not None:
code.putln(
'%s = PyImport_AddModule(__Pyx_NAMESTR("%s"));' % (
- Naming.preimport_cname,
+ Naming.preimport_cname,
Options.pre_import))
code.putln(
"if (!%s) %s;" % (
@@ -1968,9 +1968,9 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln('if (__Pyx_ExportFunction("%s", (void (*)(void))%s, "%s") < 0) %s' % (
entry.name,
entry.cname,
- signature,
+ signature,
code.error_goto(self.pos)))
-
+
def generate_type_import_code_for_module(self, module, env, code):
# Generate type import code for all exported extension types in
# an imported module.
@@ -1978,7 +1978,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
for entry in module.c_class_entries:
if entry.defined_in_pxd:
self.generate_type_import_code(env, entry.type, entry.pos, code)
-
+
def generate_c_function_import_code_for_module(self, module, env, code):
# Generate import code for all exported C functions in a cimported module.
entries = []
@@ -2004,7 +2004,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
entry.type.signature_string(),
code.error_goto(self.pos)))
code.putln("Py_DECREF(%s); %s = 0;" % (temp, temp))
-
+
def generate_type_init_code(self, env, code):
# Generate type import code for extern extension types
# and type ready code for non-extern ones.
@@ -2022,11 +2022,11 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
if base_type and base_type.module_name != env.qualified_name \
and not base_type.is_builtin_type:
self.generate_type_import_code(env, base_type, self.pos, code)
-
+
def use_type_import_utility_code(self, env):
env.use_utility_code(type_import_utility_code)
env.use_utility_code(import_module_utility_code)
-
+
def generate_type_import_code(self, env, type, pos, code):
# If not already done, generate code to import the typeobject of an
# extension type defined in another module, and extract its C method
@@ -2094,9 +2094,9 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
"if (PyType_Ready(&%s) < 0) %s" % (
typeobj_cname,
code.error_goto(entry.pos)))
- # Fix special method docstrings. This is a bit of a hack, but
+ # Fix special method docstrings. This is a bit of a hack, but
# unless we let PyType_Ready create the slot wrappers we have
- # a significant performance hit. (See trac #561.)
+ # a significant performance hit. (See trac #561.)
for func in entry.type.scope.pyfunc_entries:
if func.is_special and Options.docstrings and func.wrapperbase_cname:
code.putln("{");
@@ -2149,7 +2149,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
weakref_entry.cname))
else:
error(weakref_entry.pos, "__weakref__ slot must be of type 'object'")
-
+
def generate_exttype_vtable_init_code(self, entry, code):
# Generate code to initialise the C method table of an
# extension type.
@@ -2178,7 +2178,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
meth_entry.cname,
cast,
meth_entry.func_cname))
-
+
def generate_typeptr_assignment_code(self, entry, code):
# Generate code to initialise the typeptr of an extension
# type defined in this module to point to its type object.
@@ -2187,7 +2187,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln(
"%s = &%s;" % (
type.typeptr_cname, type.typeobj_cname))
-
+
#------------------------------------------------------------------------------------
#
# Runtime support code
@@ -2270,13 +2270,13 @@ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class
if (!result)
goto bad;
if (!PyType_Check(result)) {
- PyErr_Format(PyExc_TypeError,
+ PyErr_Format(PyExc_TypeError,
"%s.%s is not a type object",
module_name, class_name);
goto bad;
}
if (!strict && ((PyTypeObject *)result)->tp_basicsize > size) {
- PyOS_snprintf(warning, sizeof(warning),
+ PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility",
module_name, class_name);
#if PY_VERSION_HEX < 0x02050000
@@ -2286,7 +2286,7 @@ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class
#endif
}
else if (((PyTypeObject *)result)->tp_basicsize != size) {
- PyErr_Format(PyExc_ValueError,
+ PyErr_Format(PyExc_ValueError,
"%s.%s has the wrong size, try recompiling",
module_name, class_name);
goto bad;
@@ -2417,8 +2417,8 @@ static PyMethodDef cleanup_def = {__Pyx_NAMESTR("__cleanup"), (PyCFunction)&%(mo
""" % {'module_cleanup': Naming.cleanup_cname},
impl = """
static int __Pyx_RegisterCleanup(void) {
- /* Don't use Py_AtExit because that has a 32-call limit
- * and is called after python finalization.
+ /* Don't use Py_AtExit because that has a 32-call limit
+ * and is called after python finalization.
*/
PyObject *cleanup_func = 0;
@@ -2427,7 +2427,7 @@ static int __Pyx_RegisterCleanup(void) {
PyObject *args = 0;
PyObject *res = 0;
int ret = -1;
-
+
cleanup_func = PyCFunction_New(&cleanup_def, 0);
args = PyTuple_New(1);
if (!cleanup_func || !args)
@@ -2536,11 +2536,11 @@ static int %(IMPORT_STAR)s(PyObject* m) {
#endif
PyObject *name;
PyObject *item;
-
+
locals = PyDict_New(); if (!locals) goto bad;
if (__Pyx_import_all_from(locals, m) < 0) goto bad;
list = PyDict_Items(locals); if (!list) goto bad;
-
+
for(i=0; i<PyList_GET_SIZE(list); i++) {
name = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 0);
item = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 1);
@@ -2557,7 +2557,7 @@ static int %(IMPORT_STAR)s(PyObject* m) {
#endif
}
ret = 0;
-
+
bad:
Py_XDECREF(locals);
Py_XDECREF(list);
@@ -2568,7 +2568,7 @@ bad:
}
""" % {'IMPORT_STAR' : Naming.import_star,
'IMPORT_STAR_SET' : Naming.import_star_set }
-
+
refnanny_utility_code = UtilityCode(proto="""
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
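
Looking back at _serialize_lineno_map() earlier in this file's diff: it inverts the per-line markers (the Cython source line recorded for each generated C line) into a mapping from Cython line to the list of C lines it produced, which the gdb debug writer serializes as LineNumberMapping. A worked example of that inversion, with invented inputs.

# markers[i] is the Cython source line recorded for C line i+1 (0 = no mapping).
markers = [0, 3, 3, 0, 7, 7, 7]

d = {}
for c_lineno, cython_lineno in enumerate(markers):
    if cython_lineno > 0:
        d.setdefault(cython_lineno, []).append(c_lineno + 1)

assert d == {3: [2, 3], 7: [5, 6, 7]}
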
diff --git a/Cython/Compiler/Nodes.py b/Cython/Compiler/Nodes.py
index 53062f75d..ff130a487 100644
--- a/Cython/Compiler/Nodes.py
+++ b/Cython/Compiler/Nodes.py
@@ -37,7 +37,7 @@ def relative_position(pos):
don't want to have to regenerate and compile all the source code
whenever the Python install directory moves (which could happen,
e.g., when distributing binaries.)
-
+
INPUT:
a position tuple -- (absolute filename, line number, column position)
@@ -49,7 +49,7 @@ def relative_position(pos):
"""
global absolute_path_length
if absolute_path_length==0:
- absolute_path_length = len(os.path.abspath(os.getcwd()))
+ absolute_path_length = len(os.path.abspath(os.getcwd()))
return (pos[0].get_filenametable_entry()[absolute_path_length+1:], pos[1])
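A standalone sketch of the prefix-stripping idea used here (toy helper, not the compiler's own API):

    import os

    def make_relative(abs_filename):
        # strip the current working directory (plus the path separator)
        # from the front of an absolute filename
        prefix_len = len(os.path.abspath(os.getcwd()))
        return abs_filename[prefix_len + 1:]

    # with cwd == /home/user/project:
    #   make_relative("/home/user/project/src/mod.pyx")  ->  "src/mod.pyx"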
def embed_position(pos, docstring):
@@ -89,8 +89,8 @@ def write_func_call(func):
node, code = args[:2]
marker = ' /* %s -> %s.%s %s */' % (
' ' * code.call_level,
- node.__class__.__name__,
- func.__name__,
+ node.__class__.__name__,
+ func.__name__,
node.pos[1:])
pristine = code.buffer.stream.tell()
code.putln(marker)
@@ -110,7 +110,7 @@ def write_func_call(func):
class VerboseCodeWriter(type):
# Set this as a metaclass to trace function calls in code.
- # This slows down code generation and makes much larger files.
+ # This slows down code generation and makes much larger files.
def __new__(cls, name, bases, attrs):
attrs = dict(attrs)
for mname, m in attrs.items():
@@ -126,7 +126,7 @@ class Node(object):
if DebugFlags.debug_trace_code_generation:
__metaclass__ = VerboseCodeWriter
-
+
is_name = 0
is_literal = 0
temps = None
@@ -135,20 +135,20 @@ class Node(object):
# containing nodes considered "children" in the tree. Each such attribute
# can either contain a single node or a list of nodes. See Visitor.py.
child_attrs = None
-
+
def __init__(self, pos, **kw):
self.pos = pos
self.__dict__.update(kw)
-
+
gil_message = "Operation"
nogil_check = None
def gil_error(self, env=None):
error(self.pos, "%s not allowed without gil" % self.gil_message)
-
+
cpp_message = "Operation"
-
+
def cpp_check(self, env):
if not env.is_cpp():
self.cpp_error()
@@ -168,8 +168,8 @@ class Node(object):
if isinstance(value, list):
setattr(result, attrname, [x for x in value])
return result
-
-
+
+
#
# There are 4 phases of parse tree processing, applied in order to
# all the statements in a given scope-block:
@@ -186,7 +186,7 @@ class Node(object):
# (2) analyse_expressions
# Determine the result types of expressions and fill in the
# 'type' attribute of each ExprNode. Insert coercion nodes into the
- # tree where needed to convert to and from Python objects.
+ # tree where needed to convert to and from Python objects.
# Allocate temporary locals for intermediate results. Fill
# in the 'result_code' attribute of each ExprNode with a C code
# fragment.
@@ -196,26 +196,26 @@ class Node(object):
# Recursively applies the 3 processing phases to the bodies of
# functions.
#
-
+
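A toy driver illustrating the phase ordering described above (stand-in class and function names; the real traversal is driven by the compiler pipeline):

    class ToyStatNode:
        def analyse_control_flow(self, env):
            print("control flow analysis")
        def analyse_declarations(self, env):
            print("symbol table entries created")
        def analyse_expressions(self, env):
            print("expression types and coercions resolved")
        def generate_code(self, code):
            print("C code emitted")

    def process_block(node, env=None, code=None):
        # the phases are applied in this fixed order to each scope-block
        node.analyse_control_flow(env)
        node.analyse_declarations(env)
        node.analyse_expressions(env)
        node.generate_code(code)

    process_block(ToyStatNode())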
def analyse_control_flow(self, env):
pass
-
+
def analyse_declarations(self, env):
pass
-
+
def analyse_expressions(self, env):
raise InternalError("analyse_expressions not implemented for %s" % \
self.__class__.__name__)
-
+
def generate_code(self, code):
raise InternalError("generate_code not implemented for %s" % \
self.__class__.__name__)
-
+
def annotate(self, code):
# mro does the wrong thing
if isinstance(self, BlockNode):
self.body.annotate(code)
-
+
def end_pos(self):
try:
return self._end_pos
@@ -245,7 +245,7 @@ class Node(object):
if id(self) in encountered:
return "<%s (0x%x) -- already output>" % (self.__class__.__name__, id(self))
encountered.add(id(self))
-
+
def dump_child(x, level):
if isinstance(x, Node):
return x.dump(level, filter_out, cutoff-1, encountered)
@@ -253,8 +253,8 @@ class Node(object):
return "[%s]" % ", ".join([dump_child(item, level) for item in x])
else:
return repr(x)
-
-
+
+
attrs = [(key, value) for key, value in self.__dict__.items() if key not in filter_out]
if len(attrs) == 0:
return "<%s (0x%x)>" % (self.__class__.__name__, id(self))
@@ -286,7 +286,7 @@ class CompilerDirectivesNode(Node):
env.directives = self.directives
self.body.analyse_declarations(env)
env.directives = old
-
+
def analyse_expressions(self, env):
old = env.directives
env.directives = self.directives
@@ -300,19 +300,19 @@ class CompilerDirectivesNode(Node):
self.body.generate_function_definitions(env, code)
env.directives = env_old
code.globalstate.directives = code_old
-
+
def generate_execution_code(self, code):
old = code.globalstate.directives
code.globalstate.directives = self.directives
self.body.generate_execution_code(code)
code.globalstate.directives = old
-
+
def annotate(self, code):
old = code.globalstate.directives
code.globalstate.directives = self.directives
self.body.annotate(code)
code.globalstate.directives = old
-
+
class BlockNode(object):
# Mixin class for nodes representing a declaration block.
@@ -328,14 +328,14 @@ class BlockNode(object):
class StatListNode(Node):
# stats a list of StatNode
-
+
child_attrs = ["stats"]
def create_analysed(pos, env, *args, **kw):
node = StatListNode(pos, *args, **kw)
return node # No node-specific analysis necessary
create_analysed = staticmethod(create_analysed)
-
+
def analyse_control_flow(self, env):
for stat in self.stats:
stat.analyse_control_flow(env)
@@ -344,27 +344,27 @@ class StatListNode(Node):
#print "StatListNode.analyse_declarations" ###
for stat in self.stats:
stat.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
#print "StatListNode.analyse_expressions" ###
for stat in self.stats:
stat.analyse_expressions(env)
-
+
def generate_function_definitions(self, env, code):
#print "StatListNode.generate_function_definitions" ###
for stat in self.stats:
stat.generate_function_definitions(env, code)
-
+
def generate_execution_code(self, code):
#print "StatListNode.generate_execution_code" ###
for stat in self.stats:
code.mark_pos(stat.pos)
stat.generate_execution_code(code)
-
+
def annotate(self, code):
for stat in self.stats:
stat.annotate(code)
-
+
class StatNode(Node):
#
@@ -378,10 +378,10 @@ class StatNode(Node):
# (2) generate_execution_code
# Emit C code for executable statements.
#
-
+
def generate_function_definitions(self, env, code):
pass
-
+
def generate_execution_code(self, code):
raise InternalError("generate_execution_code not implemented for %s" % \
self.__class__.__name__)
@@ -390,9 +390,9 @@ class StatNode(Node):
class CDefExternNode(StatNode):
# include_file string or None
# body StatNode
-
+
child_attrs = ["body"]
-
+
def analyse_declarations(self, env):
if self.include_file:
env.add_include_file(self.include_file)
@@ -400,16 +400,16 @@ class CDefExternNode(StatNode):
env.in_cinclude = 1
self.body.analyse_declarations(env)
env.in_cinclude = old_cinclude_flag
-
+
def analyse_expressions(self, env):
pass
-
+
def generate_execution_code(self, code):
pass
def annotate(self, code):
self.body.annotate(code)
-
+
class CDeclaratorNode(Node):
# Part of a C declaration.
@@ -418,11 +418,11 @@ class CDeclaratorNode(Node):
#
# analyse
# Returns (name, type) pair where name is the
- # CNameDeclaratorNode of the name being declared
+ # CNameDeclaratorNode of the name being declared
# and type is the type it is being declared as.
#
# calling_convention string Calling convention of CFuncDeclaratorNode
- # for which this is a base
+ # for which this is a base
child_attrs = []
@@ -433,14 +433,14 @@ class CNameDeclaratorNode(CDeclaratorNode):
# name string The Pyrex name being declared
# cname string or None C name, if specified
# default ExprNode or None the value assigned on declaration
-
+
child_attrs = ['default']
-
+
default = None
-
+
def analyse(self, base_type, env, nonempty = 0):
if nonempty and self.name == '':
- # May have mistaken the name for the type.
+ # May have mistaken the name for the type.
if base_type.is_ptr or base_type.is_array or base_type.is_buffer:
error(self.pos, "Missing argument name")
elif base_type.is_void:
@@ -450,10 +450,10 @@ class CNameDeclaratorNode(CDeclaratorNode):
base_type = py_object_type
self.type = base_type
return self, base_type
-
+
class CPtrDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
-
+
child_attrs = ["base"]
def analyse(self, base_type, env, nonempty = 0):
@@ -480,7 +480,7 @@ class CArrayDeclaratorNode(CDeclaratorNode):
# dimension ExprNode
child_attrs = ["base", "dimension"]
-
+
def analyse(self, base_type, env, nonempty = 0):
if base_type.is_cpp_class:
from ExprNodes import TupleNode
@@ -529,7 +529,7 @@ class CFuncDeclaratorNode(CDeclaratorNode):
# exception_check boolean True if PyErr_Occurred check needed
# nogil boolean Can be called without gil
# with_gil boolean Acquire gil around function body
-
+
child_attrs = ["base", "args", "exception_value"]
overridable = 0
@@ -544,7 +544,7 @@ class CFuncDeclaratorNode(CDeclaratorNode):
is_self_arg = (i == 0 and env.is_c_class_scope))
name = name_declarator.name
if name_declarator.cname:
- error(self.pos,
+ error(self.pos,
"Function argument cannot have C name specification")
# Turn *[] argument into **
if type.is_array:
@@ -558,7 +558,7 @@ class CFuncDeclaratorNode(CDeclaratorNode):
self.optional_arg_count += 1
elif self.optional_arg_count:
error(self.pos, "Non-default argument follows default argument")
-
+
if self.optional_arg_count:
scope = StructOrUnionScope()
arg_count_member = '%sn' % Naming.pyrex_prefix
@@ -574,7 +574,7 @@ class CFuncDeclaratorNode(CDeclaratorNode):
cname = struct_cname)
self.op_args_struct.defined_in_pxd = 1
self.op_args_struct.used = 1
-
+
exc_val = None
exc_check = 0
if self.exception_check == '+':
@@ -614,7 +614,7 @@ class CFuncDeclaratorNode(CDeclaratorNode):
error(self.pos,
"Function cannot return a function")
func_type = PyrexTypes.CFuncType(
- return_type, func_type_args, self.has_varargs,
+ return_type, func_type_args, self.has_varargs,
optional_arg_count = self.optional_arg_count,
exception_value = exc_val, exception_check = exc_check,
calling_convention = self.base.calling_convention,
@@ -673,11 +673,11 @@ class CArgDeclNode(Node):
base_type = self.base_type.analyse(env, could_be_name = could_be_name)
if hasattr(self.base_type, 'arg_name') and self.base_type.arg_name:
self.declarator.name = self.base_type.arg_name
- # The parser is unable to resolve the ambiguity of [] as part of the
- # type (e.g. in buffers) or empty declarator (as with arrays).
+ # The parser is unable to resolve the ambiguity of [] as part of the
+ # type (e.g. in buffers) or empty declarator (as with arrays).
# This only arises for empty multi-dimensional arrays.
- if (base_type.is_array
- and isinstance(self.base_type, TemplatedTypeNode)
+ if (base_type.is_array
+ and isinstance(self.base_type, TemplatedTypeNode)
and isinstance(self.declarator, CArrayDeclaratorNode)):
declarator = self.declarator
while isinstance(declarator.base, CArrayDeclaratorNode):
@@ -710,17 +710,17 @@ class CBaseTypeNode(Node):
#
# analyse
# Returns the type.
-
+
pass
-
+
def analyse_as_type(self, env):
return self.analyse(env)
-
+
class CAnalysedBaseTypeNode(Node):
# type type
-
+
child_attrs = []
-
+
def analyse(self, env, could_be_name = False):
return self.type
@@ -736,7 +736,7 @@ class CSimpleBaseTypeNode(CBaseTypeNode):
child_attrs = []
arg_name = None # in case the argument name was interpreted as a type
-
+
def analyse(self, env, could_be_name = False):
# Return type descriptor.
#print "CSimpleBaseTypeNode.analyse: is_self_arg =", self.is_self_arg ###
@@ -800,7 +800,7 @@ class CSimpleBaseTypeNode(CBaseTypeNode):
return PyrexTypes.error_type
class CNestedBaseTypeNode(CBaseTypeNode):
- # For C++ classes that live inside other C++ classes.
+ # For C++ classes that live inside other C++ classes.
# name string
# base_type CBaseTypeNode
@@ -836,12 +836,12 @@ class TemplatedTypeNode(CBaseTypeNode):
dtype_node = None
name = None
-
+
def analyse(self, env, could_be_name = False, base_type = None):
if base_type is None:
base_type = self.base_type_node.analyse(env)
if base_type.is_error: return base_type
-
+
if base_type.is_cpp_class:
# Templated class
if self.keyword_args and self.keyword_args.key_value_pairs:
@@ -856,7 +856,7 @@ class TemplatedTypeNode(CBaseTypeNode):
return error_type
template_types.append(type)
self.type = base_type.specialize_here(self.pos, template_types)
-
+
elif base_type.is_pyobject:
# Buffer
import Buffer
@@ -874,7 +874,7 @@ class TemplatedTypeNode(CBaseTypeNode):
for name, value in options.items() ])
self.type = PyrexTypes.BufferType(base_type, **options)
-
+
else:
# Array
empty_declarator = CNameDeclaratorNode(self.pos, name="", cname=None)
@@ -882,23 +882,23 @@ class TemplatedTypeNode(CBaseTypeNode):
error(self.pos, "invalid array declaration")
self.type = PyrexTypes.error_type
else:
- # It would be nice to merge this class with CArrayDeclaratorNode,
+ # It would be nice to merge this class with CArrayDeclaratorNode,
# but arrays are part of the declaration, not the type...
if not self.positional_args:
dimension = None
else:
dimension = self.positional_args[0]
- self.array_declarator = CArrayDeclaratorNode(self.pos,
- base = empty_declarator,
+ self.array_declarator = CArrayDeclaratorNode(self.pos,
+ base = empty_declarator,
dimension = dimension)
self.type = self.array_declarator.analyse(base_type, env)[1]
-
+
return self.type
class CComplexBaseTypeNode(CBaseTypeNode):
# base_type CBaseTypeNode
# declarator CDeclaratorNode
-
+
child_attrs = ["base_type", "declarator"]
def analyse(self, env, could_be_name = False):
@@ -916,14 +916,14 @@ class CVarDefNode(StatNode):
# in_pxd boolean
# api boolean
- # decorators [cython.locals(...)] or None
+ # decorators [cython.locals(...)] or None
# directive_locals { string : NameNode } locals defined by cython.locals(...)
child_attrs = ["base_type", "declarators"]
-
+
decorators = None
directive_locals = {}
-
+
def analyse_declarations(self, env, dest_scope = None):
if not dest_scope:
dest_scope = env
@@ -939,7 +939,7 @@ class CVarDefNode(StatNode):
else:
need_property = False
visibility = self.visibility
-
+
for declarator in self.declarators:
name_declarator, type = declarator.analyse(base_type, env)
if not type.is_complete():
@@ -964,12 +964,12 @@ class CVarDefNode(StatNode):
if self.directive_locals:
error(self.pos, "Decorators can only be followed by functions")
if self.in_pxd and self.visibility != 'extern':
- error(self.pos,
+ error(self.pos,
"Only 'extern' C variable declaration allowed in .pxd file")
entry = dest_scope.declare_var(name, type, declarator.pos,
cname = cname, visibility = visibility, is_cdef = 1)
entry.needs_property = need_property
-
+
class CStructOrUnionDefNode(StatNode):
# name string
@@ -981,7 +981,7 @@ class CStructOrUnionDefNode(StatNode):
# attributes [CVarDefNode] or None
# entry Entry
# packed boolean
-
+
child_attrs = ["attributes"]
def analyse_declarations(self, env):
@@ -1012,7 +1012,7 @@ class CStructOrUnionDefNode(StatNode):
if type == self.entry.type:
need_typedef_indirection = True
if need_typedef_indirection:
- # C can't handle typedef structs that refer to themselves.
+ # C can't handle typedef structs that refer to themselves.
struct_entry = self.entry
self.entry = env.declare_typedef(
self.name, struct_entry.type, self.pos,
@@ -1021,10 +1021,10 @@ class CStructOrUnionDefNode(StatNode):
# FIXME: this might be considered a hack ;-)
struct_entry.cname = struct_entry.type.cname = \
'_' + self.entry.type.typedef_cname
-
+
def analyse_expressions(self, env):
pass
-
+
def generate_execution_code(self, code):
pass
@@ -1075,9 +1075,9 @@ class CEnumDefNode(StatNode):
# visibility "public" or "private"
# in_pxd boolean
# entry Entry
-
+
child_attrs = ["items"]
-
+
def analyse_declarations(self, env):
self.entry = env.declare_enum(self.name, self.pos,
cname = self.cname, typedef_flag = self.typedef_flag,
@@ -1101,8 +1101,8 @@ class CEnumDefNode(StatNode):
code.error_goto_if_null(temp, item.pos)))
code.put_gotref(temp)
code.putln('if (__Pyx_SetAttrString(%s, "%s", %s) < 0) %s' % (
- Naming.module_cname,
- item.name,
+ Naming.module_cname,
+ item.name,
temp,
code.error_goto(item.pos)))
code.put_decref_clear(temp, PyrexTypes.py_object_type)
@@ -1113,7 +1113,7 @@ class CEnumDefItemNode(StatNode):
# name string
# cname string or None
# value ExprNode or None
-
+
child_attrs = ["value"]
def analyse_declarations(self, env, enum_entry):
@@ -1122,7 +1122,7 @@ class CEnumDefItemNode(StatNode):
if not self.value.type.is_int:
self.value = self.value.coerce_to(PyrexTypes.c_int_type, env)
self.value.analyse_const_expression(env)
- entry = env.declare_const(self.name, enum_entry.type,
+ entry = env.declare_const(self.name, enum_entry.type,
self.value, self.pos, cname = self.cname,
visibility = enum_entry.visibility)
enum_entry.enum_values.append(entry)
@@ -1135,7 +1135,7 @@ class CTypeDefNode(StatNode):
# in_pxd boolean
child_attrs = ["base_type", "declarator"]
-
+
def analyse_declarations(self, env):
base = self.base_type.analyse(env)
name_declarator, type = self.declarator.analyse(base, env)
@@ -1145,7 +1145,7 @@ class CTypeDefNode(StatNode):
cname = cname, visibility = self.visibility)
if self.in_pxd and not env.in_cinclude:
entry.defined_in_pxd = 1
-
+
def analyse_expressions(self, env):
pass
def generate_execution_code(self, code):
@@ -1161,13 +1161,13 @@ class FuncDefNode(StatNode, BlockNode):
# needs_closure boolean Whether or not this function has inner functions/classes/yield
# needs_outer_scope boolean Whether or not this function requires outer scope
# directive_locals { string : NameNode } locals defined by cython.locals(...)
-
+
py_func = None
assmt = None
needs_closure = False
needs_outer_scope = False
modifiers = []
-
+
def analyse_default_values(self, env):
genv = env.global_scope()
default_seen = 0
@@ -1188,7 +1188,7 @@ class FuncDefNode(StatNode, BlockNode):
def need_gil_acquisition(self, lenv):
return 0
-
+
def create_local_scope(self, env):
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
@@ -1208,7 +1208,7 @@ class FuncDefNode(StatNode, BlockNode):
self.local_scope = lenv
lenv.directives = env.directives
return lenv
-
+
def generate_function_definitions(self, env, code):
import Buffer
@@ -1241,7 +1241,7 @@ class FuncDefNode(StatNode, BlockNode):
if (self.entry.name == '__long__' and
not self.entry.scope.lookup_here('__int__')):
preprocessor_guard = None
-
+
profile = code.globalstate.directives['profile']
if profile:
if lenv.nogil:
@@ -1251,7 +1251,7 @@ class FuncDefNode(StatNode, BlockNode):
# Generate C code for header and body of function
code.enter_cfunc_scope()
code.return_from_error_cleanup_label = code.new_label()
-
+
# ----- Top-level constants used by this function
code.mark_pos(self.pos)
self.generate_cached_builtins_decls(lenv, code)
@@ -1263,7 +1263,7 @@ class FuncDefNode(StatNode, BlockNode):
with_pymethdef = self.needs_assignment_synthesis(env, code)
if self.py_func:
- self.py_func.generate_function_header(code,
+ self.py_func.generate_function_header(code,
with_pymethdef = with_pymethdef,
proto_only=True)
self.generate_function_header(code,
@@ -1291,7 +1291,7 @@ class FuncDefNode(StatNode, BlockNode):
if self.return_type.is_pyobject:
init = " = NULL"
code.putln(
- "%s%s;" %
+ "%s%s;" %
(self.return_type.declaration_code(Naming.retval_cname),
init))
tempvardecl_code = code.insertion_point()
@@ -1318,7 +1318,7 @@ class FuncDefNode(StatNode, BlockNode):
code.putln("%s = (%s)%s->tp_new(%s, %s, NULL);" % (
Naming.cur_scope_cname,
lenv.scope_class.type.declaration_code(''),
- lenv.scope_class.type.typeptr_cname,
+ lenv.scope_class.type.typeptr_cname,
lenv.scope_class.type.typeptr_cname,
Naming.empty_tuple))
code.putln("if (unlikely(!%s)) {" % Naming.cur_scope_cname)
@@ -1349,13 +1349,13 @@ class FuncDefNode(StatNode, BlockNode):
code.put_trace_call(self.entry.name, self.pos)
# ----- Fetch arguments
self.generate_argument_parsing_code(env, code)
- # If an argument is assigned to in the body, we must
+ # If an argument is assigned to in the body, we must
# incref it to properly keep track of refcounts.
for entry in lenv.arg_entries:
if entry.type.is_pyobject:
if entry.assignments and not entry.in_closure:
code.put_var_incref(entry)
- # ----- Initialise local variables
+ # ----- Initialise local variables
for entry in lenv.var_entries:
if entry.type.is_pyobject and entry.init_to_none and entry.used:
code.put_init_var_to_py_none(entry)
@@ -1402,7 +1402,7 @@ class FuncDefNode(StatNode, BlockNode):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln("{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;")
code.putln("__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);")
- for entry in lenv.buffer_entries:
+ for entry in lenv.buffer_entries:
Buffer.put_release_buffer_code(code, entry)
#code.putln("%s = 0;" % entry.cname)
code.putln("__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}")
@@ -1410,7 +1410,7 @@ class FuncDefNode(StatNode, BlockNode):
err_val = self.error_value()
exc_check = self.caller_will_check_exceptions()
if err_val is not None or exc_check:
- # TODO: Fix exception tracing (though currently unused by cProfile).
+ # TODO: Fix exception tracing (though currently unused by cProfile).
# code.globalstate.use_utility_code(get_exception_tuple_utility_code)
# code.put_trace_exception()
code.putln('__Pyx_AddTraceback("%s");' % self.entry.qualified_name)
@@ -1418,7 +1418,7 @@ class FuncDefNode(StatNode, BlockNode):
warning(self.entry.pos, "Unraisable exception in function '%s'." \
% self.entry.qualified_name, 0)
code.putln(
- '__Pyx_WriteUnraisable("%s");' %
+ '__Pyx_WriteUnraisable("%s");' %
self.entry.qualified_name)
env.use_utility_code(unraisable_exception_utility_code)
env.use_utility_code(restore_exception_utility_code)
@@ -1451,7 +1451,7 @@ class FuncDefNode(StatNode, BlockNode):
for entry in lenv.var_entries:
if lenv.control_flow.get_state((entry.name, 'initialized')) is not True:
entry.xdecref_cleanup = 1
-
+
for entry in lenv.var_entries:
if entry.type.is_pyobject:
if entry.used and not entry.in_closure:
@@ -1467,7 +1467,7 @@ class FuncDefNode(StatNode, BlockNode):
code.put_var_decref(entry)
if self.needs_closure:
code.put_decref(Naming.cur_scope_cname, lenv.scope_class.type)
-
+
# ----- Return
# This code is duplicated in ModuleNode.generate_module_init_func
if not lenv.nogil:
@@ -1480,7 +1480,7 @@ class FuncDefNode(StatNode, BlockNode):
if self.entry.is_special and self.entry.name == "__hash__":
# Returning -1 for __hash__ is supposed to signal an error
- # We do as Python instances and coerce -1 into -2.
+ # We do the same as Python instances and coerce -1 into -2.
code.putln("if (unlikely(%s == -1) && !PyErr_Occurred()) %s = -2;" % (
Naming.retval_cname, Naming.retval_cname))
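The same coercion is visible from plain Python, because -1 is reserved as the error return value at the C level:

    class AlwaysMinusOne:
        def __hash__(self):
            return -1

    print(hash(AlwaysMinusOne()))   # prints -2, never -1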
@@ -1491,7 +1491,7 @@ class FuncDefNode(StatNode, BlockNode):
code.put_trace_return("Py_None")
if not lenv.nogil:
code.put_finish_refcount_context()
-
+
if acquire_gil:
code.putln("#ifdef WITH_THREAD")
code.putln("PyGILState_Release(_save);")
@@ -1499,7 +1499,7 @@ class FuncDefNode(StatNode, BlockNode):
if not self.return_type.is_void:
code.putln("return %s;" % Naming.retval_cname)
-
+
code.putln("}")
if preprocessor_guard:
@@ -1520,7 +1520,7 @@ class FuncDefNode(StatNode, BlockNode):
error(arg.pos,
"Argument type '%s' is incomplete" % arg.type)
return env.declare_arg(arg.name, arg.type, arg.pos)
-
+
def generate_arg_type_test(self, arg, code):
# Generate type test for one argument.
if arg.type.typeobj_is_available():
@@ -1529,7 +1529,7 @@ class FuncDefNode(StatNode, BlockNode):
arg_code = "((PyObject *)%s)" % arg.entry.cname
code.putln(
'if (unlikely(!__Pyx_ArgTypeTest(%s, %s, %d, "%s", %s))) %s' % (
- arg_code,
+ arg_code,
typeptr_cname,
arg.accept_none,
arg.name,
@@ -1547,7 +1547,7 @@ class FuncDefNode(StatNode, BlockNode):
arg.name,
code.error_goto(arg.pos)))
code.putln('}')
-
+
def generate_wrapper_functions(self, code):
pass
@@ -1580,7 +1580,7 @@ class FuncDefNode(StatNode, BlockNode):
# Python 3.0 betas have a bug in memoryview which makes it call
# getbuffer with a NULL parameter. For now we work around this;
# the following line should be removed when this bug is fixed.
- code.putln("if (%s == NULL) return 0;" % info)
+ code.putln("if (%s == NULL) return 0;" % info)
code.putln("%s->obj = Py_None; __Pyx_INCREF(Py_None);" % info)
code.put_giveref("%s->obj" % info) # Do not refnanny object within structs
@@ -1613,7 +1613,7 @@ class CFuncDefNode(FuncDefNode):
# py_func wrapper for calling from Python
# overridable whether or not this is a cpdef function
# inline_in_pxd whether this is an inline function in a pxd file
-
+
child_attrs = ["base_type", "declarator", "body", "py_func"]
inline_in_pxd = False
@@ -1622,14 +1622,14 @@ class CFuncDefNode(FuncDefNode):
def unqualified_name(self):
return self.entry.name
-
+
def analyse_declarations(self, env):
self.directive_locals.update(env.directives['locals'])
base_type = self.base_type.analyse(env)
# The 2 here is because we need both function and argument names.
name_declarator, type = self.declarator.analyse(base_type, env, nonempty = 2 * (self.body is not None))
if not type.is_cfunction:
- error(self.pos,
+ error(self.pos,
"Suite attached to non-function declaration")
# Remember the actual type according to the function header
# written here, because the type in the symbol table entry
@@ -1648,22 +1648,22 @@ class CFuncDefNode(FuncDefNode):
name = name_declarator.name
cname = name_declarator.cname
self.entry = env.declare_cfunction(
- name, type, self.pos,
+ name, type, self.pos,
cname = cname, visibility = self.visibility,
defining = self.body is not None,
api = self.api, modifiers = self.modifiers)
self.entry.inline_func_in_pxd = self.inline_in_pxd
self.return_type = type.return_type
-
+
if self.overridable and not env.is_module_scope:
if len(self.args) < 1 or not self.args[0].type.is_pyobject:
# An error will be produced in the cdef function
self.overridable = False
-
+
if self.overridable:
import ExprNodes
py_func_body = self.call_self_node(is_module_scope = env.is_module_scope)
- self.py_func = DefNode(pos = self.pos,
+ self.py_func = DefNode(pos = self.pos,
name = self.entry.name,
args = self.args,
star_arg = None,
@@ -1680,7 +1680,7 @@ class CFuncDefNode(FuncDefNode):
self.override = OverrideCheckNode(self.pos, py_func = self.py_func)
self.body = StatListNode(self.pos, stats=[self.override, self.body])
self.create_local_scope(env)
-
+
def call_self_node(self, omit_optional_args=0, is_module_scope=0):
import ExprNodes
args = self.type.args
@@ -1695,7 +1695,7 @@ class CFuncDefNode(FuncDefNode):
skip_dispatch = not is_module_scope or Options.lookup_module_cpdef
c_call = ExprNodes.SimpleCallNode(self.pos, function=cfunc, args=[ExprNodes.NameNode(self.pos, name=n) for n in arg_names[1-is_module_scope:]], wrapper_call=skip_dispatch)
return ReturnStatNode(pos=self.pos, return_type=PyrexTypes.py_object_type, value=c_call)
-
+
def declare_arguments(self, env):
for arg in self.type.args:
if not arg.name:
@@ -1761,7 +1761,7 @@ class CFuncDefNode(FuncDefNode):
self.modifiers[self.modifiers.index('inline')] = 'cython_inline'
code.putln("%s%s %s {" % (
storage_class,
- ' '.join(self.modifiers).upper(), # macro forms
+ ' '.join(self.modifiers).upper(), # macro forms
header))
def generate_argument_declarations(self, env, code):
@@ -1773,7 +1773,7 @@ class CFuncDefNode(FuncDefNode):
def generate_keyword_list(self, code):
pass
-
+
def generate_argument_parsing_code(self, env, code):
i = 0
if self.type.optional_arg_count:
@@ -1789,10 +1789,10 @@ class CFuncDefNode(FuncDefNode):
for _ in range(self.type.optional_arg_count):
code.putln('}')
code.putln('}')
-
+
def generate_argument_conversion_code(self, code):
pass
-
+
def generate_argument_type_tests(self, code):
# Generate type tests for args whose type in a parent
# class is a supertype of the declared type.
@@ -1808,13 +1808,13 @@ class CFuncDefNode(FuncDefNode):
else:
#return None
return self.entry.type.exception_value
-
+
def caller_will_check_exceptions(self):
return self.entry.type.exception_check
-
+
def generate_wrapper_functions(self, code):
# If the C signature of a function has changed, we need to generate
- # wrappers to put in the slots here.
+ # wrappers to put in the slots here.
k = 0
entry = self.entry
func_type = entry.type
@@ -1823,10 +1823,10 @@ class CFuncDefNode(FuncDefNode):
entry = entry.prev_entry
entry.func_cname = "%s%swrap_%s" % (self.entry.func_cname, Naming.pyrex_prefix, k)
code.putln()
- self.generate_function_header(code,
+ self.generate_function_header(code,
0,
- with_dispatch = entry.type.is_overridable,
- with_opt_args = entry.type.optional_arg_count,
+ with_dispatch = entry.type.is_overridable,
+ with_opt_args = entry.type.optional_arg_count,
cname = entry.func_cname)
if not self.return_type.is_void:
code.put('return ')
@@ -1842,7 +1842,7 @@ class CFuncDefNode(FuncDefNode):
arglist.append('NULL')
code.putln('%s(%s);' % (self.entry.func_cname, ', '.join(arglist)))
code.putln('}')
-
+
class PyArgDeclNode(Node):
# Argument which must be a Python object (used
@@ -1881,7 +1881,7 @@ class DefNode(FuncDefNode):
# when the def statement is inside a Python class definition.
#
# assmt AssignmentNode Function construction/assignment
-
+
child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators"]
lambda_name = None
@@ -1909,7 +1909,7 @@ class DefNode(FuncDefNode):
self.num_kwonly_args = k
self.num_required_kw_args = rk
self.num_required_args = r
-
+
def as_cfunction(self, cfunc=None, scope=None):
if self.star_arg:
error(self.star_arg.pos, "cdef function cannot have star argument")
@@ -1950,7 +1950,7 @@ class DefNode(FuncDefNode):
exception_value = None
else:
exception_value = ExprNodes.ConstNode(self.pos, value=cfunc_type.exception_value, type=cfunc_type.return_type)
- declarator = CFuncDeclaratorNode(self.pos,
+ declarator = CFuncDeclaratorNode(self.pos,
base = CNameDeclaratorNode(self.pos, name=self.name, cname=None),
args = self.args,
has_varargs = False,
@@ -1958,7 +1958,7 @@ class DefNode(FuncDefNode):
exception_value = exception_value,
with_gil = cfunc_type.with_gil,
nogil = cfunc_type.nogil)
- return CFuncDefNode(self.pos,
+ return CFuncDefNode(self.pos,
modifiers = [],
base_type = CAnalysedBaseTypeNode(self.pos, type=cfunc_type.return_type),
declarator = declarator,
@@ -1971,7 +1971,7 @@ class DefNode(FuncDefNode):
visibility = 'private',
api = False,
directive_locals = getattr(cfunc, 'directive_locals', {}))
-
+
def analyse_declarations(self, env):
self.is_classmethod = self.is_staticmethod = False
if self.decorators:
@@ -2017,7 +2017,7 @@ class DefNode(FuncDefNode):
other_type = type_node.analyse_as_type(env)
if other_type is None:
error(type_node.pos, "Not a type")
- elif (type is not PyrexTypes.py_object_type
+ elif (type is not PyrexTypes.py_object_type
and not type.same_as(other_type)):
error(arg.base_type.pos, "Signature does not agree with previous declaration")
error(type_node.pos, "Previous declaration here")
@@ -2141,7 +2141,7 @@ class DefNode(FuncDefNode):
def signature_has_generic_args(self):
return self.entry.signature.has_generic_args
-
+
def declare_pyfunction(self, env):
#print "DefNode.declare_pyfunction:", self.name, "in", env ###
name = self.name
@@ -2216,7 +2216,7 @@ class DefNode(FuncDefNode):
entry.xdecref_cleanup = 1
arg.entry = entry
env.control_flow.set_state((), (arg.name, 'initialized'), True)
-
+
def analyse_expressions(self, env):
self.local_scope.directives = env.directives
self.analyse_default_values(env)
@@ -2257,7 +2257,7 @@ class DefNode(FuncDefNode):
rhs = rhs)
self.assmt.analyse_declarations(env)
self.assmt.analyse_expressions(env)
-
+
def generate_function_header(self, code, with_pymethdef, proto_only=0):
arg_code_list = []
sig = self.entry.signature
@@ -2300,7 +2300,7 @@ class DefNode(FuncDefNode):
"struct wrapperbase %s;" % self.entry.wrapperbase_cname)
if with_pymethdef:
code.put(
- "static PyMethodDef %s = " %
+ "static PyMethodDef %s = " %
self.entry.pymethdef_cname)
code.put_pymethoddef(self.entry, ";", allow_skip=False)
code.putln("%s {" % header)
@@ -2422,11 +2422,11 @@ class DefNode(FuncDefNode):
code.error_goto_if(arg.type.error_condition(arg.entry.cname), arg.pos)))
else:
error(arg.pos, "Cannot convert Python object argument to type '%s'" % arg.type)
-
+
def generate_arg_xdecref(self, arg, code):
if arg:
code.put_var_xdecref(arg.entry)
-
+
def generate_arg_decref(self, arg, code):
if arg:
code.put_var_decref(arg.entry)
@@ -2838,7 +2838,7 @@ class DefNode(FuncDefNode):
error(arg.pos,
"Cannot convert 1 argument from '%s' to '%s'" %
(old_type, new_type))
-
+
def generate_arg_conversion_from_pyobject(self, arg, code):
new_type = arg.type
func = new_type.from_py_function
@@ -2849,14 +2849,14 @@ class DefNode(FuncDefNode):
if new_type.is_enum:
rhs = PyrexTypes.typecast(new_type, PyrexTypes.c_long_type, rhs)
code.putln("%s = %s; %s" % (
- lhs,
+ lhs,
rhs,
code.error_goto_if(new_type.error_condition(arg.entry.cname), arg.pos)))
else:
- error(arg.pos,
- "Cannot convert Python object argument to type '%s'"
+ error(arg.pos,
+ "Cannot convert Python object argument to type '%s'"
% new_type)
-
+
def generate_arg_conversion_to_pyobject(self, arg, code):
old_type = arg.hdr_type
func = old_type.to_py_function
@@ -2884,22 +2884,22 @@ class DefNode(FuncDefNode):
def error_value(self):
return self.entry.signature.error_value
-
+
def caller_will_check_exceptions(self):
return 1
-
+
class OverrideCheckNode(StatNode):
# A Node for dispatching to the def method if it
- # is overriden.
+ # is overridden.
#
# py_func
#
# args
# func_temp
# body
-
+
child_attrs = ['body']
-
+
body = None
def analyse_expressions(self, env):
@@ -2912,11 +2912,11 @@ class OverrideCheckNode(StatNode):
self.func_node = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
call_tuple = ExprNodes.TupleNode(self.pos, args=[ExprNodes.NameNode(self.pos, name=arg.name) for arg in self.args[first_arg:]])
call_node = ExprNodes.SimpleCallNode(self.pos,
- function=self.func_node,
+ function=self.func_node,
args=[ExprNodes.NameNode(self.pos, name=arg.name) for arg in self.args[first_arg:]])
self.body = ReturnStatNode(self.pos, value=call_node)
self.body.analyse_expressions(env)
-
+
def generate_execution_code(self, code):
interned_attr_cname = code.intern_identifier(self.py_func.entry.name)
# Check to see if we are an extension type
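A pure-Python toy of the dispatch idea, assuming a made-up class layout (the actual check is performed in the generated C):

    class Base:
        def _c_impl(self):              # stands in for the fast cdef implementation
            return "C path"
        def method(self):               # the def wrapper installed alongside it
            return self._c_impl()
        def call_method(self):
            # dispatch to the Python-level method only if a subclass overrode it
            if type(self).method is not Base.method:
                return self.method()
            return self._c_impl()

    class Sub(Base):
        def method(self):
            return "Python override"

    print(Base().call_method())   # -> C path
    print(Sub().call_method())    # -> Python override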
@@ -2970,7 +2970,7 @@ class PyClassDefNode(ClassDefNode):
child_attrs = ["body", "dict", "metaclass", "mkw", "bases", "classobj", "target"]
decorators = None
py3_style_class = False # Python3 style class (bases+kwargs)
-
+
def __init__(self, pos, name, bases, doc, body, decorators = None,
keyword_args = None, starstar_arg = None):
StatNode.__init__(self, pos)
@@ -3020,7 +3020,7 @@ class PyClassDefNode(ClassDefNode):
self.classobj = ExprNodes.ClassNode(pos, name = name,
bases = bases, dict = self.dict, doc = doc_node)
self.target = ExprNodes.NameNode(pos, name = name)
-
+
def as_cclass(self):
"""
Return this node as if it were declared as an extension class
@@ -3051,8 +3051,8 @@ class PyClassDefNode(ClassDefNode):
else:
error(self.classobj.bases.args.pos, "C class may only have one base class")
return None
-
- return CClassDefNode(self.pos,
+
+ return CClassDefNode(self.pos,
visibility = 'private',
module_name = None,
class_name = self.name,
@@ -3062,21 +3062,21 @@ class PyClassDefNode(ClassDefNode):
body = self.body,
in_pxd = False,
doc = self.doc)
-
+
def create_scope(self, env):
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
cenv = self.scope = PyClassScope(name = self.name, outer_scope = genv)
return cenv
-
+
def analyse_declarations(self, env):
self.target.analyse_target_declaration(env)
cenv = self.create_scope(env)
cenv.directives = env.directives
cenv.class_obj_cname = self.target.entry.cname
self.body.analyse_declarations(cenv)
-
+
def analyse_expressions(self, env):
if self.py3_style_class:
self.bases.analyse_expressions(env)
@@ -3088,11 +3088,11 @@ class PyClassDefNode(ClassDefNode):
cenv = self.scope
self.body.analyse_expressions(cenv)
self.target.analyse_target_expression(env, self.classobj)
-
+
def generate_function_definitions(self, env, code):
self.generate_lambda_definitions(self.scope, code)
self.body.generate_function_definitions(self.scope, code)
-
+
def generate_execution_code(self, code):
code.pyclass_stack.append(self)
cenv = self.scope
@@ -3165,8 +3165,8 @@ class CClassDefNode(ClassDefNode):
error(self.pos, "Object struct name specification required for "
"C class defined in 'extern from' block")
self.base_type = None
- # Now that module imports are cached, we need to
- # import the modules for extern classes.
+ # Now that module imports are cached, we need to
+ # import the modules for extern classes.
if self.module_name:
self.module = None
for module in env.cimported_modules:
@@ -3224,7 +3224,7 @@ class CClassDefNode(ClassDefNode):
warning(self.pos, "%s already a builtin Cython type" % self.class_name, 1)
self.entry = home_scope.declare_c_class(
- name = self.class_name,
+ name = self.class_name,
pos = self.pos,
defining = has_body and self.in_pxd,
implementing = has_body and not self.in_pxd,
@@ -3244,7 +3244,7 @@ class CClassDefNode(ClassDefNode):
if self.doc and Options.docstrings:
scope.doc = embed_position(self.pos, self.doc)
-
+
if has_body:
self.body.analyse_declarations(scope)
if self.in_pxd:
@@ -3252,23 +3252,23 @@ class CClassDefNode(ClassDefNode):
else:
scope.implemented = 1
env.allocate_vtable_names(self.entry)
-
+
def analyse_expressions(self, env):
if self.body:
scope = self.entry.type.scope
self.body.analyse_expressions(scope)
-
+
def generate_function_definitions(self, env, code):
if self.body:
self.generate_lambda_definitions(self.scope, code)
self.body.generate_function_definitions(self.scope, code)
-
+
def generate_execution_code(self, code):
# This is needed to generate evaluation code for
# default values of method arguments.
if self.body:
self.body.generate_execution_code(code)
-
+
def annotate(self, code):
if self.body:
self.body.annotate(code)
@@ -3280,7 +3280,7 @@ class PropertyNode(StatNode):
# name string
# doc EncodedString or None Doc string
# body StatListNode
-
+
child_attrs = ["body"]
def analyse_declarations(self, env):
@@ -3291,7 +3291,7 @@ class PropertyNode(StatNode):
def analyse_expressions(self, env):
self.body.analyse_expressions(env)
-
+
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
@@ -3306,7 +3306,7 @@ class GlobalNode(StatNode):
# Global variable declaration.
#
# names [string]
-
+
child_attrs = []
def analyse_declarations(self, env):
@@ -3315,7 +3315,7 @@ class GlobalNode(StatNode):
def analyse_expressions(self, env):
pass
-
+
def generate_execution_code(self, code):
pass
@@ -3326,7 +3326,7 @@ class ExprStatNode(StatNode):
# expr ExprNode
child_attrs = ["expr"]
-
+
def analyse_declarations(self, env):
import ExprNodes
if isinstance(self.expr, ExprNodes.GeneralCallNode):
@@ -3342,10 +3342,10 @@ class ExprStatNode(StatNode):
else:
env.declare_var(var.value, type, var.pos, is_cdef = True)
self.__class__ = PassStatNode
-
+
def analyse_expressions(self, env):
self.expr.analyse_expressions(env)
-
+
def generate_execution_code(self, code):
self.expr.generate_evaluation_code(code)
if not self.expr.is_temp and self.expr.result():
@@ -3379,7 +3379,7 @@ class AssignmentNode(StatNode):
def generate_execution_code(self, code):
self.generate_rhs_evaluation_code(code)
self.generate_assignment_code(code)
-
+
class SingleAssignmentNode(AssignmentNode):
# The simplest case:
@@ -3389,20 +3389,20 @@ class SingleAssignmentNode(AssignmentNode):
# lhs ExprNode Left hand side
# rhs ExprNode Right hand side
# first bool Is this guaranteed the first assignment to lhs?
-
+
child_attrs = ["lhs", "rhs"]
first = False
declaration_only = False
def analyse_declarations(self, env):
import ExprNodes
-
+
# handle declarations of the form x = cython.foo()
if isinstance(self.rhs, ExprNodes.CallNode):
func_name = self.rhs.function.as_cython_attribute()
if func_name:
args, kwds = self.rhs.explicit_args_kwds()
-
+
if func_name in ['declare', 'typedef']:
if len(args) > 2 or kwds is not None:
error(self.rhs.pos, "Can only declare one type at a time.")
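For reference, the pure-Python-mode spellings this branch special-cases look roughly like the following (illustrative only; requires the cython module):

    import cython

    x = cython.declare(cython.int, 0)        # x = cython.declare(...)  -> typed variable
    MyInt = cython.typedef(cython.int)       # x = cython.typedef(...)  -> type alias
    Point = cython.struct(x=cython.double,   # x = cython.struct(...)   -> struct type
                          y=cython.double)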
@@ -3432,7 +3432,7 @@ class SingleAssignmentNode(AssignmentNode):
if not isinstance(lhs, ExprNodes.NameNode):
error(lhs.pos, "Invalid declaration.")
env.declare_typedef(lhs.name, type, self.pos, visibility='private')
-
+
elif func_name in ['struct', 'union']:
self.declaration_only = True
if len(args) > 0 or kwds is None:
@@ -3454,12 +3454,12 @@ class SingleAssignmentNode(AssignmentNode):
env.declare_struct_or_union(name, func_name, scope, False, self.rhs.pos)
for member, type, pos in members:
scope.declare_var(member, type, pos)
-
+
if self.declaration_only:
return
else:
self.lhs.analyse_target_declaration(env)
-
+
def analyse_types(self, env, use_temp = 0):
self.rhs.analyse_types(env)
self.lhs.analyse_target_types(env)
@@ -3467,10 +3467,10 @@ class SingleAssignmentNode(AssignmentNode):
self.rhs = self.rhs.coerce_to(self.lhs.type, env)
if use_temp:
self.rhs = self.rhs.coerce_to_temp(env)
-
+
def generate_rhs_evaluation_code(self, code):
self.rhs.generate_evaluation_code(code)
-
+
def generate_assignment_code(self, code):
self.lhs.generate_assignment_code(self.rhs, code)
@@ -3493,14 +3493,14 @@ class CascadedAssignmentNode(AssignmentNode):
# Used internally:
#
# coerced_rhs_list [ExprNode] RHS coerced to type of each LHS
-
+
child_attrs = ["lhs_list", "rhs", "coerced_rhs_list"]
coerced_rhs_list = None
def analyse_declarations(self, env):
for lhs in self.lhs_list:
lhs.analyse_target_declaration(env)
-
+
def analyse_types(self, env, use_temp = 0):
self.rhs.analyse_types(env)
if not self.rhs.is_simple():
@@ -3519,7 +3519,7 @@ class CascadedAssignmentNode(AssignmentNode):
def generate_rhs_evaluation_code(self, code):
self.rhs.generate_evaluation_code(code)
-
+
def generate_assignment_code(self, code):
for i in range(len(self.lhs_list)):
lhs = self.lhs_list[i]
@@ -3538,7 +3538,7 @@ class CascadedAssignmentNode(AssignmentNode):
lhs = self.lhs_list[i].annotate(code)
rhs = self.coerced_rhs_list[i].annotate(code)
self.rhs.annotate(code)
-
+
class ParallelAssignmentNode(AssignmentNode):
# A combined packing/unpacking assignment:
@@ -3553,13 +3553,13 @@ class ParallelAssignmentNode(AssignmentNode):
# before assigning to any of the left hand sides.
#
# stats [AssignmentNode] The constituent assignments
-
+
child_attrs = ["stats"]
def analyse_declarations(self, env):
for stat in self.stats:
stat.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
for stat in self.stats:
stat.analyse_types(env, use_temp = 1)
@@ -3569,7 +3569,7 @@ class ParallelAssignmentNode(AssignmentNode):
# stat.analyse_expressions_1(env, use_temp = 1)
# for stat in self.stats:
# stat.analyse_expressions_2(env)
-
+
def generate_execution_code(self, code):
for stat in self.stats:
stat.generate_rhs_evaluation_code(code)
@@ -3597,18 +3597,18 @@ class InPlaceAssignmentNode(AssignmentNode):
# op char one of "+-*/%^&|"
# dup (ExprNode) copy of lhs used for operation (auto-generated)
#
- # This code is a bit tricky because in order to obey Python
- # semantics the sub-expressions (e.g. indices) of the lhs must
- # not be evaluated twice. So we must re-use the values calculated
- # in evaluation phase for the assignment phase as well.
- # Fortunately, the type of the lhs node is fairly constrained
- # (it must be a NameNode, AttributeNode, or IndexNode).
-
+ # This code is a bit tricky because in order to obey Python
+ # semantics the sub-expressions (e.g. indices) of the lhs must
+ # not be evaluated twice. So we must re-use the values calculated
+ # in the evaluation phase for the assignment phase as well.
+ # Fortunately, the type of the lhs node is fairly constrained
+ # (it must be a NameNode, AttributeNode, or IndexNode).
+
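The Python-level rule being preserved can be checked directly: the index expression of an augmented assignment is evaluated once, not twice.

    calls = []
    def idx():
        calls.append(1)
        return 0

    data = [10]
    data[idx()] += 5
    print(data, len(calls))   # -> [15] 1   (single evaluation of the index)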
child_attrs = ["lhs", "rhs"]
def analyse_declarations(self, env):
self.lhs.analyse_target_declaration(env)
-
+
def analyse_types(self, env):
self.rhs.analyse_types(env)
self.lhs.analyse_target_types(env)
@@ -3640,7 +3640,7 @@ class InPlaceAssignmentNode(AssignmentNode):
def annotate(self, code):
self.lhs.annotate(code)
self.rhs.annotate(code)
-
+
def create_binop_node(self):
import ExprNodes
return ExprNodes.binop_node(self.pos, self.operator, self.lhs, self.rhs)
@@ -3677,7 +3677,7 @@ class PrintStatNode(StatNode):
if len(self.arg_tuple.args) == 1 and self.append_newline:
arg = self.arg_tuple.args[0]
arg.generate_evaluation_code(code)
-
+
code.putln(
"if (__Pyx_PrintOne(%s, %s) < 0) %s" % (
stream_result,
@@ -3755,13 +3755,13 @@ class DelStatNode(StatNode):
# del statement
#
# args [ExprNode]
-
+
child_attrs = ["args"]
def analyse_declarations(self, env):
for arg in self.args:
arg.analyse_target_declaration(env)
-
+
def analyse_expressions(self, env):
for arg in self.args:
arg.analyse_target_expression(env, None)
@@ -3800,10 +3800,10 @@ class PassStatNode(StatNode):
# pass statement
child_attrs = []
-
+
def analyse_expressions(self, env):
pass
-
+
def generate_execution_code(self, code):
pass
@@ -3814,7 +3814,7 @@ class BreakStatNode(StatNode):
def analyse_expressions(self, env):
pass
-
+
def generate_execution_code(self, code):
if not code.break_label:
error(self.pos, "break statement not inside loop")
@@ -3828,7 +3828,7 @@ class ContinueStatNode(StatNode):
def analyse_expressions(self, env):
pass
-
+
def generate_execution_code(self, code):
if code.funcstate.in_try_finally:
error(self.pos, "continue statement inside try of try...finally")
@@ -3843,7 +3843,7 @@ class ReturnStatNode(StatNode):
#
# value ExprNode or None
# return_type PyrexType
-
+
child_attrs = ["value"]
def analyse_expressions(self, env):
@@ -3855,7 +3855,7 @@ class ReturnStatNode(StatNode):
if self.value:
self.value.analyse_types(env)
if return_type.is_void or return_type.is_returncode:
- error(self.value.pos,
+ error(self.value.pos,
"Return with value in void function")
else:
self.value = self.value.coerce_to(env.return_type, env)
@@ -3903,7 +3903,7 @@ class ReturnStatNode(StatNode):
def generate_function_definitions(self, env, code):
if self.value is not None:
self.value.generate_function_definitions(env, code)
-
+
def annotate(self, code):
if self.value:
self.value.annotate(code)
@@ -3915,7 +3915,7 @@ class RaiseStatNode(StatNode):
# exc_type ExprNode or None
# exc_value ExprNode or None
# exc_tb ExprNode or None
-
+
child_attrs = ["exc_type", "exc_value", "exc_tb"]
def analyse_expressions(self, env):
@@ -4000,14 +4000,14 @@ class ReraiseStatNode(StatNode):
code.putln(code.error_goto(self.pos))
else:
error(self.pos, "Reraise not inside except clause")
-
+
class AssertStatNode(StatNode):
# assert statement
#
# cond ExprNode
# value ExprNode or None
-
+
child_attrs = ["cond", "value"]
def analyse_expressions(self, env):
@@ -4018,7 +4018,7 @@ class AssertStatNode(StatNode):
nogil_check = Node.gil_error
gil_message = "Raising exception"
-
+
def generate_execution_code(self, code):
code.putln("#ifndef CYTHON_WITHOUT_ASSERTIONS")
self.cond.generate_evaluation_code(code)
@@ -4061,7 +4061,7 @@ class IfStatNode(StatNode):
# else_clause StatNode or None
child_attrs = ["if_clauses", "else_clause"]
-
+
def analyse_control_flow(self, env):
env.start_branching(self.pos)
for if_clause in self.if_clauses:
@@ -4076,7 +4076,7 @@ class IfStatNode(StatNode):
if_clause.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
for if_clause in self.if_clauses:
if_clause.analyse_expressions(env)
@@ -4112,15 +4112,15 @@ class IfClauseNode(Node):
#
# condition ExprNode
# body StatNode
-
+
child_attrs = ["condition", "body"]
def analyse_control_flow(self, env):
self.body.analyse_control_flow(env)
-
+
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
self.condition = \
self.condition.analyse_temp_boolean_expression(env)
@@ -4150,14 +4150,14 @@ class IfClauseNode(Node):
def annotate(self, code):
self.condition.annotate(code)
self.body.annotate(code)
-
+
class SwitchCaseNode(StatNode):
# Generated in the optimization of an if-elif-else node
#
# conditions [ExprNode]
# body StatNode
-
+
child_attrs = ['conditions', 'body']
def generate_execution_code(self, code):
@@ -4172,7 +4172,7 @@ class SwitchCaseNode(StatNode):
for cond in self.conditions:
cond.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
-
+
def annotate(self, code):
for cond in self.conditions:
cond.annotate(code)
@@ -4184,9 +4184,9 @@ class SwitchStatNode(StatNode):
# test ExprNode
# cases [SwitchCaseNode]
# else_clause StatNode or None
-
+
child_attrs = ['test', 'cases', 'else_clause']
-
+
def generate_execution_code(self, code):
self.test.generate_evaluation_code(code)
code.putln("switch (%s) {" % self.test.result())
@@ -4211,9 +4211,9 @@ class SwitchStatNode(StatNode):
case.annotate(code)
if self.else_clause is not None:
self.else_clause.annotate(code)
-
+
class LoopNode(object):
-
+
def analyse_control_flow(self, env):
env.start_branching(self.pos)
self.body.analyse_control_flow(env)
@@ -4222,7 +4222,7 @@ class LoopNode(object):
self.else_clause.analyse_control_flow(env)
env.finish_branching(self.end_pos())
-
+
class WhileStatNode(LoopNode, StatNode):
# while statement
#
@@ -4236,14 +4236,14 @@ class WhileStatNode(LoopNode, StatNode):
self.body.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
self.condition = \
self.condition.analyse_temp_boolean_expression(env)
self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause.analyse_expressions(env)
-
+
def generate_execution_code(self, code):
old_loop_labels = code.new_loop_labels()
code.putln(
@@ -4292,10 +4292,10 @@ class ForInStatNode(LoopNode, StatNode):
# body StatNode
# else_clause StatNode
# item NextNode used internally
-
+
child_attrs = ["target", "iterator", "body", "else_clause"]
item = None
-
+
def analyse_declarations(self, env):
self.target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
@@ -4415,7 +4415,7 @@ class ForFromStatNode(LoopNode, StatNode):
if isinstance(self.step, ExprNodes.UnaryMinusNode):
warning(self.step.pos, "Probable infinite loop in for-from-by statement. Consider switching the directions of the relations.", 2)
self.step.analyse_types(env)
-
+
target_type = self.target.type
if self.target.type.is_numeric:
loop_type = self.target.type
@@ -4432,7 +4432,7 @@ class ForFromStatNode(LoopNode, StatNode):
if not self.bound2.is_literal:
self.bound2 = self.bound2.coerce_to_temp(env)
if self.step is not None:
- self.step = self.step.coerce_to(loop_type, env)
+ self.step = self.step.coerce_to(loop_type, env)
if not self.step.is_literal:
self.step = self.step.coerce_to_temp(env)
@@ -4455,7 +4455,7 @@ class ForFromStatNode(LoopNode, StatNode):
self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause.analyse_expressions(env)
-
+
def generate_execution_code(self, code):
old_loop_labels = code.new_loop_labels()
from_range = self.from_range
@@ -4490,19 +4490,19 @@ class ForFromStatNode(LoopNode, StatNode):
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
if self.py_loopvar_node:
- # This mess is to make for..from loops with python targets behave
- # exactly like those with C targets with regards to re-assignment
- # of the loop variable.
+ # This mess is to make for..from loops with Python targets behave
+ # exactly like those with C targets with regard to re-assignment
+ # of the loop variable.
import ExprNodes
if self.target.entry.is_pyglobal:
- # We know target is a NameNode, this is the only ugly case.
+ # We know target is a NameNode, this is the only ugly case.
target_node = ExprNodes.PyTempNode(self.target.pos, None)
target_node.allocate(code)
interned_cname = code.intern_identifier(self.target.entry.name)
code.globalstate.use_utility_code(ExprNodes.get_name_interned_utility_code)
code.putln("%s = __Pyx_GetName(%s, %s); %s" % (
target_node.result(),
- Naming.module_cname,
+ Naming.module_cname,
interned_cname,
code.error_goto_if_null(target_node.result(), self.target.pos)))
code.put_gotref(target_node.result())
@@ -4516,8 +4516,8 @@ class ForFromStatNode(LoopNode, StatNode):
target_node.release(code)
code.putln("}")
if self.py_loopvar_node:
- # This is potentially wasteful, but we don't want the semantics to
- # depend on whether or not the loop is a python type.
+ # This is potentially wasteful, but we don't want the semantics to
+ # depend on whether or not the loop is a python type.
self.py_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(self.py_loopvar_node, code)
if from_range:
@@ -4540,7 +4540,7 @@ class ForFromStatNode(LoopNode, StatNode):
if self.step is not None:
self.step.generate_disposal_code(code)
self.step.free_temps(code)
-
+
relation_table = {
# {relop : (initial offset, increment op)}
'<=': ("", "++"),
@@ -4558,7 +4558,7 @@ class ForFromStatNode(LoopNode, StatNode):
self.body.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
-
+
def annotate(self, code):
self.target.annotate(code)
self.bound1.annotate(code)
@@ -4573,7 +4573,7 @@ class ForFromStatNode(LoopNode, StatNode):
class WithStatNode(StatNode):
"""
Represents a Python with statement.
-
+
This is only used at parse tree level, and is not present in
analysis or generation phases.
"""
@@ -4590,19 +4590,19 @@ class TryExceptStatNode(StatNode):
# else_clause StatNode or None
child_attrs = ["body", "except_clauses", "else_clause"]
-
+
def analyse_control_flow(self, env):
env.start_branching(self.pos)
self.body.analyse_control_flow(env)
successful_try = env.control_flow # grab this for later
env.next_branch(self.body.end_pos())
env.finish_branching(self.body.end_pos())
-
+
env.start_branching(self.except_clauses[0].pos)
for except_clause in self.except_clauses:
except_clause.analyse_control_flow(env)
env.next_branch(except_clause.end_pos())
-
+
# the else clause is executed only when the try clause finishes
env.control_flow.incoming = successful_try
if self.else_clause:
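The control-flow fact modelled here is ordinary Python semantics: the else block runs only when the try body completes without raising.

    def demo(should_raise):
        try:
            if should_raise:
                raise ValueError
        except ValueError:
            return "except branch"
        else:
            return "else branch"

    print(demo(False))   # -> else branch
    print(demo(True))    # -> except branch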
@@ -4749,7 +4749,7 @@ class ExceptClauseNode(Node):
# excinfo_target is never set by the parser, but can be set by a transform
# in order to extract more extensive information about the exception as a
# sys.exc_info()-style tuple into a target variable
-
+
child_attrs = ["pattern", "target", "body", "exc_value", "excinfo_target"]
exc_value = None
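The sys.exc_info()-style tuple referred to above is the familiar (type, value, traceback) triple:

    import sys

    try:
        1 / 0
    except ZeroDivisionError:
        exc_type, exc_value, exc_tb = sys.exc_info()
        print(exc_type.__name__, exc_value)   # -> ZeroDivisionError division by zero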
@@ -4761,7 +4761,7 @@ class ExceptClauseNode(Node):
if self.excinfo_target is not None:
self.excinfo_target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
import ExprNodes
genv = env.global_scope()
@@ -4813,13 +4813,13 @@ class ExceptClauseNode(Node):
code.put_goto(end_label)
code.putln("}")
return
-
+
exc_vars = [code.funcstate.allocate_temp(py_object_type,
manage_ref=True)
for i in xrange(3)]
code.putln('__Pyx_AddTraceback("%s");' % self.function_name)
# We always have to fetch the exception value even if
- # there is no target, because this also normalises the
+ # there is no target, because this also normalises the
# exception and stores it in the thread state.
code.globalstate.use_utility_code(get_exception_utility_code)
exc_args = "&%s, &%s, &%s" % tuple(exc_vars)
@@ -4848,7 +4848,7 @@ class ExceptClauseNode(Node):
for var in exc_vars:
code.putln("__Pyx_DECREF(%s); %s = 0;" % (var, var))
code.put_goto(end_label)
-
+
if code.label_used(code.break_label):
code.put_label(code.break_label)
for var in exc_vars:
@@ -4873,7 +4873,7 @@ class ExceptClauseNode(Node):
if self.target is not None:
self.target.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
-
+
def annotate(self, code):
if self.pattern:
for pattern in self.pattern:
@@ -4899,9 +4899,9 @@ class TryFinallyStatNode(StatNode):
# it on exit.
child_attrs = ["body", "finally_clause"]
-
+
preserve_exception = 1
-
+
disallow_continue_in_try_finally = 0
# There doesn't seem to be any point in disallowing
# continue in the try block, since we have no problem
@@ -4911,7 +4911,7 @@ class TryFinallyStatNode(StatNode):
node = TryFinallyStatNode(pos, body=body, finally_clause=finally_clause)
return node
create_analysed = staticmethod(create_analysed)
-
+
def analyse_control_flow(self, env):
env.start_branching(self.pos)
self.body.analyse_control_flow(env)
@@ -4922,7 +4922,7 @@ class TryFinallyStatNode(StatNode):
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
self.finally_clause.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
self.body.analyse_expressions(env)
self.finally_clause.analyse_expressions(env)
@@ -4978,7 +4978,7 @@ class TryFinallyStatNode(StatNode):
new_label = new_labels[i]
#if new_label and new_label != "<try>":
if new_label == new_error_label and self.preserve_exception:
- self.put_error_catcher(code,
+ self.put_error_catcher(code,
new_error_label, i+1, catch_label, temps_to_clean_up)
else:
code.put('%s: ' % new_label)
@@ -5045,7 +5045,7 @@ class TryFinallyStatNode(StatNode):
Naming.exc_lineno_name, Naming.lineno_cname))
code.put_goto(catch_label)
code.putln("}")
-
+
def put_error_uncatcher(self, code, i, error_label):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln(
@@ -5076,7 +5076,7 @@ class GILStatNode(TryFinallyStatNode):
# state string 'gil' or 'nogil'
# child_attrs = []
-
+
preserve_exception = 0
def __init__(self, pos, state, body):
@@ -5136,7 +5136,7 @@ class CImportStatNode(StatNode):
# as_name string or None Name specified in "as" clause, if any
child_attrs = []
-
+
def analyse_declarations(self, env):
if not env.is_module_scope:
error(self.pos, "cimport only allowed at module level")
@@ -5161,17 +5161,17 @@ class CImportStatNode(StatNode):
def analyse_expressions(self, env):
pass
-
+
def generate_execution_code(self, code):
pass
-
+
class FromCImportStatNode(StatNode):
# from ... cimport statement
#
# module_name string Qualified name of module
# imported_names [(pos, name, as_name, kind)] Names to be imported
-
+
child_attrs = []
def analyse_declarations(self, env):
@@ -5203,11 +5203,11 @@ class FromCImportStatNode(StatNode):
else:
error(pos, "Name '%s' not declared in module '%s'"
% (name, self.module_name))
-
+
if entry:
local_name = as_name or name
env.add_imported_entry(local_name, entry, pos)
-
+
def declaration_matches(self, entry, kind):
if not entry.is_type:
return 0
@@ -5224,7 +5224,7 @@ class FromCImportStatNode(StatNode):
def analyse_expressions(self, env):
pass
-
+
def generate_execution_code(self, code):
pass
@@ -5240,7 +5240,7 @@ class FromImportStatNode(StatNode):
child_attrs = ["module"]
import_star = 0
-
+
def analyse_declarations(self, env):
for name, target in self.items:
if name == "*":
@@ -5251,7 +5251,7 @@ class FromImportStatNode(StatNode):
self.import_star = 1
else:
target.analyse_target_declaration(env)
-
+
def analyse_expressions(self, env):
import ExprNodes
self.module.analyse_expressions(env)
@@ -5278,14 +5278,14 @@ class FromImportStatNode(StatNode):
if entry.type.module_name == module.qualified_name:
continue
except AttributeError:
- pass
+ pass
target.analyse_target_expression(env, None)
if target.type is py_object_type:
coerced_item = None
else:
coerced_item = self.item.coerce_to(target.type, env)
self.interned_items.append((name, target, coerced_item))
-
+
def generate_execution_code(self, code):
self.module.generate_evaluation_code(code)
if self.import_star:
@@ -5335,7 +5335,7 @@ utility_function_predeclarations = \
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
- #define CYTHON_INLINE
+ #define CYTHON_INLINE
#endif
#endif
@@ -5343,14 +5343,14 @@ utility_function_predeclarations = \
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
-# define CYTHON_UNUSED __attribute__ ((__unused__))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || defined(__INTEL_COMPILER)
-# define CYTHON_UNUSED __attribute__ ((__unused__))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
-# define CYTHON_UNUSED
+# define CYTHON_UNUSED
# endif
#endif
@@ -5363,7 +5363,7 @@ if Options.gcc_branch_hints:
"""
#ifdef __GNUC__
/* Test for GCC > 2.95 */
-#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
+#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* __GNUC__ > 2 ... */
@@ -6313,8 +6313,8 @@ proto="""
#------------------------------------------------------------------------------------
-# Note that cPython ignores PyTrace_EXCEPTION,
-# but maybe some other profilers don't.
+# Note that cPython ignores PyTrace_EXCEPTION,
+# but maybe some other profilers don't.
profile_utility_code = UtilityCode(proto="""
#ifndef CYTHON_PROFILE
@@ -6342,7 +6342,7 @@ profile_utility_code = UtilityCode(proto="""
#define __Pyx_TraceDeclarations \\
static PyCodeObject *%(FRAME_CODE)s = NULL; \\
CYTHON_FRAME_MODIFIER PyFrameObject *%(FRAME)s = NULL; \\
- int __Pyx_use_tracing = 0;
+ int __Pyx_use_tracing = 0;
#define __Pyx_TraceCall(funcname, srcfile, firstlineno) \\
if (unlikely(PyThreadState_GET()->use_tracing && PyThreadState_GET()->c_profilefunc)) { \\
@@ -6372,12 +6372,12 @@ profile_utility_code = UtilityCode(proto="""
#else
#define __Pyx_TraceDeclarations
- #define __Pyx_TraceCall(funcname, srcfile, firstlineno)
- #define __Pyx_TraceException()
- #define __Pyx_TraceReturn(result)
+ #define __Pyx_TraceCall(funcname, srcfile, firstlineno)
+ #define __Pyx_TraceException()
+ #define __Pyx_TraceReturn(result)
#endif /* CYTHON_PROFILE */
-"""
+"""
% {
"FRAME": Naming.frame_cname,
"FRAME_CODE": Naming.frame_code_cname,
@@ -6444,10 +6444,10 @@ static PyCodeObject *__Pyx_createFrameCodeObject(const char *funcname, const cha
%(EMPTY_BYTES)s /*PyObject *lnotab*/
);
-bad:
+bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
-
+
return py_code;
}
diff --git a/Cython/Compiler/Optimize.py b/Cython/Compiler/Optimize.py
index 9a3741e9c..3a687b407 100644
--- a/Cython/Compiler/Optimize.py
+++ b/Cython/Compiler/Optimize.py
@@ -90,17 +90,17 @@ class IterationTransform(Visitor.VisitorTransform):
self.visitchildren(node)
self.current_scope = oldscope
return node
-
+
def visit_PrimaryCmpNode(self, node):
if node.is_ptr_contains():
-
+
# for t in operand2:
# if operand1 == t:
# res = True
# break
# else:
# res = False
-
+
pos = node.pos
res_handle = UtilNodes.TempHandle(PyrexTypes.c_bint_type)
res = res_handle.ref(pos)
@@ -114,7 +114,7 @@ class IterationTransform(Visitor.VisitorTransform):
cmp_node = ExprNodes.PrimaryCmpNode(
pos, operator=u'==', operand1=node.operand1, operand2=target)
if_body = Nodes.StatListNode(
- pos,
+ pos,
stats = [Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=1)),
Nodes.BreakStatNode(pos)])
if_node = Nodes.IfStatNode(
@@ -133,7 +133,7 @@ class IterationTransform(Visitor.VisitorTransform):
for_loop.analyse_expressions(self.current_scope)
for_loop = self(for_loop)
new_node = UtilNodes.TempResultFromStatNode(result_ref, for_loop)
-
+
if node.operator == 'not_in':
new_node = ExprNodes.NotNode(pos, operand=new_node)
return new_node
@@ -145,7 +145,7 @@ class IterationTransform(Visitor.VisitorTransform):
def visit_ForInStatNode(self, node):
self.visitchildren(node)
return self._optimise_for_loop(node)
-
+
def _optimise_for_loop(self, node):
iterator = node.iterator.sequence
if iterator.type is Builtin.dict_type:
@@ -690,9 +690,9 @@ class IterationTransform(Visitor.VisitorTransform):
class SwitchTransform(Visitor.VisitorTransform):
"""
- This transformation tries to turn long if statements into C switch statements.
+ This transformation tries to turn long if statements into C switch statements.
The requirement is that every clause be an (or of) var == value, where the var
- is common among all clauses and both var and value are ints.
+ is common among all clauses and both var and value are ints.
"""
NO_MATCH = (None, None, None)
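
To make the requirement above concrete, here is an illustrative sketch of an if-chain the transform can target: every clause compares the same C integer variable against integer constants, so the chain can be emitted as one C switch. The names and values are made up for illustration.

    if op == 1 or op == 2:
        result = handle_add()
    elif op == 3:
        result = handle_sub()
    else:
        result = handle_default()
    # conceptually lowered to C as:
    #   switch (op) { case 1: case 2: ...; case 3: ...; default: ...; }
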
@@ -892,14 +892,14 @@ class SwitchTransform(Visitor.VisitorTransform):
return UtilNodes.TempResultFromStatNode(result_ref, switch_node)
visit_Node = Visitor.VisitorTransform.recurse_to_children
-
+
class FlattenInListTransform(Visitor.VisitorTransform, SkipDeclarations):
"""
This transformation flattens "x in [val1, ..., valn]" into a sequential list
- of comparisons.
+ of comparisons.
"""
-
+
def visit_PrimaryCmpNode(self, node):
self.visitchildren(node)
if node.cascade is not None:
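
As a rough illustration of the flattening described in the docstring (values are made up), an 'in' test against a literal list becomes a chain of '==' comparisons joined by 'or', each cast to a C bint as the code in the next hunk does:

    # before:
    if x in [1, 2, 4]:
        ...
    # conceptually after flattening:
    if (x == 1) or (x == 2) or (x == 4):
        ...
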
@@ -938,12 +938,12 @@ class FlattenInListTransform(Visitor.VisitorTransform, SkipDeclarations):
operand2 = arg,
cascade = None)
conds.append(ExprNodes.TypecastNode(
- pos = node.pos,
+ pos = node.pos,
operand = cond,
type = PyrexTypes.c_bint_type))
def concat(left, right):
return ExprNodes.BoolBinopNode(
- pos = node.pos,
+ pos = node.pos,
operator = conjunction,
operand1 = left,
operand2 = right)
@@ -1008,7 +1008,7 @@ class DropRefcountingTransform(Visitor.VisitorTransform):
if not index_id:
return node
rindices.append(index_id)
-
+
if set(lindices) != set(rindices):
return node
if len(set(lindices)) != len(right_indices):
@@ -2128,7 +2128,7 @@ class OptimizeBuiltinCalls(Visitor.EnvTransform):
is_temp = node.is_temp,
utility_code = pop_index_utility_code
)
-
+
return node
_handle_simple_method_list_pop = _handle_simple_method_object_pop
@@ -3130,10 +3130,10 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
class FinalOptimizePhase(Visitor.CythonTransform):
"""
This visitor handles several commuting optimizations, and is run
- just before the C code generation phase.
-
- The optimizations currently implemented in this class are:
- - eliminate None assignment and refcounting for first assignment.
+ just before the C code generation phase.
+
+ The optimizations currently implemented in this class are:
+ - eliminate None assignment and refcounting for first assignment.
- isinstance -> typecheck for cdef types
- eliminate checks for None and/or types that became redundant after tree changes
"""
diff --git a/Cython/Compiler/Options.py b/Cython/Compiler/Options.py
index f1de199e3..84a276536 100644
--- a/Cython/Compiler/Options.py
+++ b/Cython/Compiler/Options.py
@@ -10,10 +10,10 @@ gcc_branch_hints = 1
pre_import = None
docstrings = True
-# Decref global variables in this module on exit for garbage collection.
+# Decref global variables in this module on exit for garbage collection.
# 0: None, 1+: interned objects, 2+: cdef globals, 3+: types objects
# Mostly for reducing noise for Valgrind, only executes at process exit
-# (when all memory will be reclaimed anyways).
+# (when all memory will be reclaimed anyways).
generate_cleanup_code = 0
annotate = 0
@@ -22,33 +22,33 @@ annotate = 0
# to keep going and printing further error messages.
fast_fail = False
-# This will convert statements of the form "for i in range(...)"
+# This will convert statements of the form "for i in range(...)"
# to "for i from ..." when i is a cdef'd integer type, and the direction
-# (i.e. sign of step) can be determined.
-# WARNING: This may change the semantics if the range causes assignment to
+# (i.e. sign of step) can be determined.
+# WARNING: This may change the semantics if the range causes assignment to
# i to overflow. Specifically, if this option is set, an error will be
# raised before the loop is entered, whereas without this option the loop
-# will execute until an overflowing value is encountered.
+# will execute until an overflowing value is encountered.
convert_range = 1
-# Enable this to allow one to write your_module.foo = ... to overwrite the
-# definition if the cpdef function foo, at the cost of an extra dictionary
-# lookup on every call.
-# If this is 0 it simply creates a wrapper.
+# Enable this to allow one to write your_module.foo = ... to overwrite the
+# definition if the cpdef function foo, at the cost of an extra dictionary
+# lookup on every call.
+# If this is 0 it simply creates a wrapper.
lookup_module_cpdef = 0
-# This will set local variables to None rather than NULL which may cause
-# surpress what would be an UnboundLocalError in pure Python but eliminates
-# checking for NULL on every use, and can decref rather than xdecref at the end.
+# This will set local variables to None rather than NULL which may cause
+# surpress what would be an UnboundLocalError in pure Python but eliminates
+# checking for NULL on every use, and can decref rather than xdecref at the end.
# WARNING: This is a work in progress, may currently segfault.
init_local_none = 1
-# Append the c file and line number to the traceback for exceptions.
+# Append the c file and line number to the traceback for exceptions.
c_line_in_traceback = 1
-# Whether or not to embed the Python interpreter, for use in making a
-# standalone executable. This will provide a main() method which simply
-# executes the body of this module.
+# Whether or not to embed the Python interpreter, for use in making a
+# standalone executable. This will provide a main() method which simply
+# executes the body of this module.
embed = False
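
A minimal sketch of the convert_range rewrite described above, assuming i is a cdef'd C integer, the step sign is statically known, and n and do_work are placeholders:

    cdef int i
    for i in range(n):        # step +1, direction known
        do_work(i)
    # is compiled as the explicit integer loop:
    # for i from 0 <= i < n:
    #     do_work(i)
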
@@ -76,7 +76,7 @@ directive_defaults = {
'autotestdict.all': False,
'language_level': 2,
'fast_getattr': False, # Undocumented until we come up with a better way to handle this everywhere.
-
+
'warn': None,
'warn.undeclared': False,
@@ -123,7 +123,7 @@ def parse_directive_value(name, value, relaxed_bool=False):
Traceback (most recent call last):
...
ValueError: boundscheck directive must be set to True or False, got 'true'
-
+
"""
type = directive_types.get(name)
if not type: return None
diff --git a/Cython/Compiler/ParseTreeTransforms.py b/Cython/Compiler/ParseTreeTransforms.py
index ad3b390ba..76e7ea97e 100644
--- a/Cython/Compiler/ParseTreeTransforms.py
+++ b/Cython/Compiler/ParseTreeTransforms.py
@@ -40,24 +40,24 @@ class NameNodeCollector(TreeVisitor):
class SkipDeclarations(object):
"""
- Variable and function declarations can often have a deep tree structure,
- and yet most transformations don't need to descend to this depth.
-
- Declaration nodes are removed after AnalyseDeclarationsTransform, so there
- is no need to use this for transformations after that point.
+ Variable and function declarations can often have a deep tree structure,
+ and yet most transformations don't need to descend to this depth.
+
+ Declaration nodes are removed after AnalyseDeclarationsTransform, so there
+ is no need to use this for transformations after that point.
"""
def visit_CTypeDefNode(self, node):
return node
-
+
def visit_CVarDefNode(self, node):
return node
-
+
def visit_CDeclaratorNode(self, node):
return node
-
+
def visit_CBaseTypeNode(self, node):
return node
-
+
def visit_CEnumDefNode(self, node):
return node
@@ -116,7 +116,7 @@ class NormalizeTree(CythonTransform):
def visit_ParallelAssignmentNode(self, node):
return self.visit_StatNode(node, True)
-
+
def visit_CEnumDefNode(self, node):
return self.visit_StatNode(node, True)
@@ -131,7 +131,7 @@ class NormalizeTree(CythonTransform):
return []
def visit_CDeclaratorNode(self, node):
- return node
+ return node
class PostParseError(CompileError): pass
@@ -151,7 +151,7 @@ class PostParse(ScopeTrackingTransform):
- Default values to cdef assignments are turned into single
assignments following the declaration (everywhere but in class
bodies, where they raise a compile error)
-
+
- Interpret some node structures into Python runtime values.
Some nodes take compile-time arguments (currently:
TemplatedTypeNode[args] and __cythonbufferdefaults__ = {args}),
@@ -279,7 +279,7 @@ class PostParse(ScopeTrackingTransform):
lhs_list = expr_list[:-1]
rhs = expr_list[-1]
if len(lhs_list) == 1:
- node = Nodes.SingleAssignmentNode(rhs.pos,
+ node = Nodes.SingleAssignmentNode(rhs.pos,
lhs = lhs_list[0], rhs = rhs)
else:
node = Nodes.CascadedAssignmentNode(rhs.pos,
@@ -488,7 +488,7 @@ class PxdPostParse(CythonTransform, SkipDeclarations):
- "def" functions are let through only if they fill the
getbuffer/releasebuffer slots
-
+
- cdef functions are let through only if they are on the
top level and are declared "inline"
"""
@@ -514,7 +514,7 @@ class PxdPostParse(CythonTransform, SkipDeclarations):
if (isinstance(node, Nodes.DefNode) and self.scope_type == 'cclass'
and node.name in ('__getbuffer__', '__releasebuffer__')):
err = None # allow these slots
-
+
if isinstance(node, Nodes.CFuncDefNode):
if u'inline' in node.modifiers and self.scope_type == 'pxd':
node.inline_in_pxd = True
@@ -532,7 +532,7 @@ class PxdPostParse(CythonTransform, SkipDeclarations):
return None
else:
return node
-
+
class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
"""
After parsing, directives can be stored in a number of places:
@@ -561,14 +561,14 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
"""
unop_method_nodes = {
'typeof': ExprNodes.TypeofNode,
-
+
'operator.address': ExprNodes.AmpersandNode,
'operator.dereference': ExprNodes.DereferenceNode,
'operator.preincrement' : ExprNodes.inc_dec_constructor(True, '++'),
'operator.predecrement' : ExprNodes.inc_dec_constructor(True, '--'),
'operator.postincrement': ExprNodes.inc_dec_constructor(False, '++'),
'operator.postdecrement': ExprNodes.inc_dec_constructor(False, '--'),
-
+
        # For backwards compatibility.
'address': ExprNodes.AmpersandNode,
}
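
These constructors back the cython.operator module; a brief hedged usage sketch, where ptr and it are hypothetical C-level variables:

    from cython.operator cimport dereference as deref, preincrement as inc
    value = deref(ptr)    # mapped to ExprNodes.DereferenceNode
    inc(it)               # mapped to the pre-increment '++' constructor
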
@@ -576,7 +576,7 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
binop_method_nodes = {
'operator.comma' : ExprNodes.c_binop_constructor(','),
}
-
+
special_methods = cython.set(['declare', 'union', 'struct', 'typedef', 'sizeof',
'cast', 'pointer', 'compiled', 'NULL'])
special_methods.update(unop_method_nodes.keys())
@@ -597,7 +597,7 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
return False
else:
return True
-
+
# Set up processing and handle the cython: comments.
def visit_ModuleNode(self, node):
for key, value in node.directive_comments.items():
@@ -633,7 +633,7 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
# want to leave the cimport node sitting in the tree
return None
return node
-
+
def visit_FromCImportStatNode(self, node):
if (node.module_name == u"cython") or \
node.module_name.startswith(u"cython."):
@@ -654,7 +654,7 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
return None
node.imported_names = newimp
return node
-
+
def visit_FromImportStatNode(self, node):
if (node.module.module_name.value == u"cython") or \
node.module.module_name.value.startswith(u"cython."):
@@ -674,14 +674,14 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
def visit_SingleAssignmentNode(self, node):
if (isinstance(node.rhs, ExprNodes.ImportNode) and
node.rhs.module_name.value == u'cython'):
- node = Nodes.CImportStatNode(node.pos,
+ node = Nodes.CImportStatNode(node.pos,
module_name = u'cython',
as_name = node.lhs.name)
self.visit_CImportStatNode(node)
else:
self.visitchildren(node)
return node
-
+
def visit_NameNode(self, node):
if node.name in self.cython_module_names:
node.is_cython_module = True
@@ -771,7 +771,7 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
directives=newdirectives)
self.directives = olddirectives
return directive
-
+
# Handle decorators
def visit_FuncDefNode(self, node):
directives = self._extract_directives(node, 'function')
@@ -905,7 +905,7 @@ class WithTransform(CythonTransform, SkipDeclarations):
if EXC:
EXIT(None, None, None)
MGR = EXIT = VALUE = EXC = None
-
+
""", temps=[u'MGR', u'EXC', u"EXIT", u"VALUE"],
pipeline=[NormalizeTree(None)])
@@ -913,7 +913,7 @@ class WithTransform(CythonTransform, SkipDeclarations):
# TODO: Cleanup badly needed
TemplateTransform.temp_name_counter += 1
handle = "__tmpvar_%d" % TemplateTransform.temp_name_counter
-
+
self.visitchildren(node, ['body'])
excinfo_temp = ExprNodes.NameNode(node.pos, name=handle)#TempHandle(Builtin.tuple_type)
if node.target is not None:
@@ -939,11 +939,11 @@ class WithTransform(CythonTransform, SkipDeclarations):
# node.pos, temps=[excinfo_temp], body=try_except)
return result
-
+
def visit_ExprNode(self, node):
# With statements are never inside expressions.
return node
-
+
class DecoratorTransform(CythonTransform, SkipDeclarations):
@@ -1022,8 +1022,8 @@ property NAME:
self.env_stack = [root.scope]
# needed to determine if a cdef var is declared after it's used.
self.seen_vars_stack = []
- return super(AnalyseDeclarationsTransform, self).__call__(root)
-
+ return super(AnalyseDeclarationsTransform, self).__call__(root)
+
def visit_NameNode(self, node):
self.seen_vars_stack[-1].add(node.name)
return node
@@ -1045,7 +1045,7 @@ property NAME:
self.visitchildren(node)
self.env_stack.pop()
return node
-
+
def visit_CClassDefNode(self, node):
node = self.visit_ClassDefNode(node)
if node.scope and node.scope.implemented:
@@ -1059,7 +1059,7 @@ property NAME:
if stats:
node.body.stats += stats
return node
-
+
def visit_FuncDefNode(self, node):
self.seen_vars_stack.append(cython.set())
lenv = node.local_scope
@@ -1108,13 +1108,13 @@ property NAME:
# necessary to ensure that all CNameDeclaratorNodes are visited.
self.visitchildren(node)
return node
-
+
def visit_CTypeDefNode(self, node):
return node
def visit_CBaseTypeNode(self, node):
return None
-
+
def visit_CEnumDefNode(self, node):
if node.visibility == 'public':
return node
@@ -1136,7 +1136,7 @@ property NAME:
# to ensure all CNameDeclaratorNodes are visited.
self.visitchildren(node)
return None
-
+
def create_Property(self, entry):
if entry.visibility == 'public':
if entry.type.is_pyobject:
@@ -1147,14 +1147,14 @@ property NAME:
template = self.basic_property_ro
property = template.substitute({
u"ATTR": ExprNodes.AttributeNode(pos=entry.pos,
- obj=ExprNodes.NameNode(pos=entry.pos, name="self"),
+ obj=ExprNodes.NameNode(pos=entry.pos, name="self"),
attribute=entry.name),
}, pos=entry.pos).stats[0]
property.name = entry.name
# ---------------------------------------
# XXX This should go to AutoDocTransforms
# ---------------------------------------
- if (Options.docstrings and
+ if (Options.docstrings and
self.current_directives['embedsignature']):
attr_name = entry.name
type_name = entry.type.declaration_code("", for_display=1)
@@ -1179,7 +1179,7 @@ class AnalyseExpressionsTransform(CythonTransform):
node.body.analyse_expressions(node.scope)
self.visitchildren(node)
return node
-
+
def visit_FuncDefNode(self, node):
node.local_scope.infer_types()
node.body.analyse_expressions(node.local_scope)
@@ -1194,7 +1194,7 @@ class AnalyseExpressionsTransform(CythonTransform):
return node
class ExpandInplaceOperators(EnvTransform):
-
+
def visit_InPlaceAssignmentNode(self, node):
lhs = node.lhs
rhs = node.rhs
@@ -1229,7 +1229,7 @@ class ExpandInplaceOperators(EnvTransform):
except ValueError:
return node
dup = lhs.__class__(**lhs.__dict__)
- binop = ExprNodes.binop_node(node.pos,
+ binop = ExprNodes.binop_node(node.pos,
operator = node.operator,
operand1 = dup,
operand2 = rhs,
@@ -1239,7 +1239,7 @@ class ExpandInplaceOperators(EnvTransform):
dup.analyse_types(env)
binop.analyse_operation(env)
node = Nodes.SingleAssignmentNode(
- node.pos,
+ node.pos,
lhs = lhs,
rhs=binop.coerce_to(lhs.type, env))
# Use LetRefNode to avoid side effects.
@@ -1255,16 +1255,16 @@ class ExpandInplaceOperators(EnvTransform):
class AlignFunctionDefinitions(CythonTransform):
"""
- This class takes the signatures from a .pxd file and applies them to
- the def methods in a .py file.
+ This class takes the signatures from a .pxd file and applies them to
+ the def methods in a .py file.
"""
-
+
def visit_ModuleNode(self, node):
self.scope = node.scope
self.directives = node.directives
self.visitchildren(node)
return node
-
+
def visit_PyClassDefNode(self, node):
pxd_def = self.scope.lookup(node.name)
if pxd_def:
@@ -1276,7 +1276,7 @@ class AlignFunctionDefinitions(CythonTransform):
return None
else:
return node
-
+
def visit_CClassDefNode(self, node, pxd_def=None):
if pxd_def is None:
pxd_def = self.scope.lookup(node.class_name)
@@ -1287,7 +1287,7 @@ class AlignFunctionDefinitions(CythonTransform):
if pxd_def:
self.scope = outer_scope
return node
-
+
def visit_DefNode(self, node):
pxd_def = self.scope.lookup(node.name)
if pxd_def:
@@ -1298,10 +1298,10 @@ class AlignFunctionDefinitions(CythonTransform):
node = node.as_cfunction(pxd_def)
elif self.scope.is_module_scope and self.directives['auto_cpdef']:
node = node.as_cfunction(scope=self.scope)
- # Enable this when internal def functions are allowed.
+ # Enable this when internal def functions are allowed.
# self.visitchildren(node)
return node
-
+
class MarkClosureVisitor(CythonTransform):
@@ -1316,7 +1316,7 @@ class MarkClosureVisitor(CythonTransform):
node.needs_closure = self.needs_closure
self.needs_closure = True
return node
-
+
def visit_CFuncDefNode(self, node):
self.visit_FuncDefNode(node)
if node.needs_closure:
@@ -1482,14 +1482,14 @@ class TransformBuiltinMethods(EnvTransform):
else:
self.visitchildren(node)
return node
-
+
def visit_AttributeNode(self, node):
self.visitchildren(node)
return self.visit_cython_attribute(node)
def visit_NameNode(self, node):
return self.visit_cython_attribute(node)
-
+
def visit_cython_attribute(self, node):
attribute = node.as_cython_attribute()
if attribute:
@@ -1571,7 +1571,7 @@ class TransformBuiltinMethods(EnvTransform):
node.function = ExprNodes.NameNode(node.pos, name=EncodedString('set'))
else:
error(node.function.pos, u"'%s' not a valid cython language construct" % function)
-
+
self.visitchildren(node)
return node
@@ -1581,50 +1581,50 @@ class DebugTransform(CythonTransform):
    Create debug information and set all functions' visibility to extern in order
to enable debugging.
"""
-
+
def __init__(self, context, options, result):
super(DebugTransform, self).__init__(context)
self.visited = cython.set()
- # our treebuilder and debug output writer
+ # our treebuilder and debug output writer
# (see Cython.Debugger.debug_output.CythonDebugWriter)
self.tb = self.context.gdb_debug_outputwriter
- #self.c_output_file = options.output_file
+ #self.c_output_file = options.output_file
self.c_output_file = result.c_file
-
+
# tells visit_NameNode whether it should register step-into functions
self.register_stepinto = False
-
+
def visit_ModuleNode(self, node):
self.tb.module_name = node.full_module_name
attrs = dict(
module_name=node.full_module_name,
filename=node.pos[0].filename,
c_filename=self.c_output_file)
-
+
self.tb.start('Module', attrs)
-
+
# serialize functions
self.tb.start('Functions')
self.visitchildren(node)
self.tb.end('Functions')
-
+
# 2.3 compatibility. Serialize global variables
self.tb.start('Globals')
entries = {}
for k, v in node.scope.entries.iteritems():
if (v.qualified_name not in self.visited and not
- v.name.startswith('__pyx_') and not
+ v.name.startswith('__pyx_') and not
v.type.is_cfunction and not
v.type.is_extension_type):
entries[k]= v
-
+
self.serialize_local_variables(entries)
self.tb.end('Globals')
# self.tb.end('Module') # end Module after the line number mapping in
# Cython.Compiler.ModuleNode.ModuleNode._serialize_lineno_map
return node
-
+
def visit_FuncDefNode(self, node):
self.visited.add(node.local_scope.qualified_name)
# node.entry.visibility = 'extern'
@@ -1632,16 +1632,16 @@ class DebugTransform(CythonTransform):
pf_cname = ''
else:
pf_cname = node.py_func.entry.func_cname
-
+
attrs = dict(
name=node.entry.name,
cname=node.entry.func_cname,
pf_cname=pf_cname,
qualified_name=node.local_scope.qualified_name,
lineno=str(node.pos[1]))
-
+
self.tb.start('Function', attrs=attrs)
-
+
self.tb.start('Locals')
self.serialize_local_variables(node.local_scope.entries)
self.tb.end('Locals')
@@ -1662,33 +1662,33 @@ class DebugTransform(CythonTransform):
return node
def visit_NameNode(self, node):
- if (self.register_stepinto and
- node.type.is_cfunction and
+ if (self.register_stepinto and
+ node.type.is_cfunction and
getattr(node, 'is_called', False) and
node.entry.func_cname is not None):
- # don't check node.entry.in_cinclude, as 'cdef extern: ...'
- # declared functions are not 'in_cinclude'.
- # This means we will list called 'cdef' functions as
- # "step into functions", but this is not an issue as they will be
+ # don't check node.entry.in_cinclude, as 'cdef extern: ...'
+ # declared functions are not 'in_cinclude'.
+ # This means we will list called 'cdef' functions as
+ # "step into functions", but this is not an issue as they will be
# recognized as Cython functions anyway.
attrs = dict(name=node.entry.func_cname)
self.tb.start('StepIntoFunction', attrs=attrs)
self.tb.end('StepIntoFunction')
-
+
self.visitchildren(node)
return node
-
+
def serialize_local_variables(self, entries):
for entry in entries.values():
if entry.type.is_pyobject:
vartype = 'PythonObject'
else:
vartype = 'CObject'
-
+
cname = entry.cname
# if entry.type.is_extension_type:
# cname = entry.type.typeptr_cname
-
+
if not entry.pos:
# this happens for variables that are not in the user's code,
# e.g. for the global __builtins__, __doc__, etc. We can just
@@ -1696,14 +1696,14 @@ class DebugTransform(CythonTransform):
lineno = '0'
else:
lineno = str(entry.pos[1])
-
+
attrs = dict(
name=entry.name,
cname=cname,
qualified_name=entry.qualified_name,
type=vartype,
lineno=lineno)
-
+
self.tb.start('LocalVar', attrs)
self.tb.end('LocalVar')
-
+
diff --git a/Cython/Compiler/Parsing.py b/Cython/Compiler/Parsing.py
index f2994d030..3c3146236 100644
--- a/Cython/Compiler/Parsing.py
+++ b/Cython/Compiler/Parsing.py
@@ -178,7 +178,7 @@ def p_comparison(s):
pos = s.position()
op = p_cmp_op(s)
n2 = p_starred_expr(s)
- n1 = ExprNodes.PrimaryCmpNode(pos,
+ n1 = ExprNodes.PrimaryCmpNode(pos,
operator = op, operand1 = n1, operand2 = n2)
if s.sy in comparison_ops:
n1.cascade = p_cascaded_cmp(s)
@@ -206,7 +206,7 @@ def p_cascaded_cmp(s):
pos = s.position()
op = p_cmp_op(s)
n2 = p_starred_expr(s)
- result = ExprNodes.CascadedCmpNode(pos,
+ result = ExprNodes.CascadedCmpNode(pos,
operator = op, operand2 = n2)
if s.sy in comparison_ops:
result.cascade = p_cascaded_cmp(s)
@@ -230,9 +230,9 @@ def p_cmp_op(s):
if op == '<>':
op = '!='
return op
-
+
comparison_ops = (
- '<', '>', '==', '>=', '<=', '<>', '!=',
+ '<', '>', '==', '>=', '<=', '<>', '!=',
'in', 'is', 'not'
)
@@ -306,8 +306,8 @@ def p_typecast(s):
typecheck = 0
s.expect(">")
operand = p_factor(s)
- return ExprNodes.TypecastNode(pos,
- base_type = base_type,
+ return ExprNodes.TypecastNode(pos,
+ base_type = base_type,
declarator = declarator,
operand = operand,
typecheck = typecheck)
@@ -318,15 +318,15 @@ def p_sizeof(s):
s.next()
s.expect('(')
# Here we decide if we are looking at an expression or type
- # If it is actually a type, but parsable as an expression,
- # we treat it as an expression here.
+ # If it is actually a type, but parsable as an expression,
+ # we treat it as an expression here.
if looking_at_expr(s):
operand = p_test(s)
node = ExprNodes.SizeofVarNode(pos, operand = operand)
else:
base_type = p_c_base_type(s)
declarator = p_c_declarator(s, empty = 1)
- node = ExprNodes.SizeofTypeNode(pos,
+ node = ExprNodes.SizeofTypeNode(pos,
base_type = base_type, declarator = declarator)
s.expect(')')
return node
@@ -379,7 +379,7 @@ def p_trailer(s, node1):
else: # s.sy == '.'
s.next()
name = EncodedString( p_ident(s) )
- return ExprNodes.AttributeNode(pos,
+ return ExprNodes.AttributeNode(pos,
obj = node1, attribute = name)
# arglist: argument (',' argument)* [',']
@@ -469,7 +469,7 @@ def p_call(s, function):
else:
arg_tuple, keyword_dict = p_call_build_packed_args(
pos, positional_args, keyword_args, star_arg)
- return ExprNodes.GeneralCallNode(pos,
+ return ExprNodes.GeneralCallNode(pos,
function = function,
positional_args = arg_tuple,
keyword_args = keyword_dict,
@@ -486,7 +486,7 @@ def p_index(s, base):
subscripts = p_subscript_list(s)
if len(subscripts) == 1 and len(subscripts[0]) == 2:
start, stop = subscripts[0]
- result = ExprNodes.SliceIndexNode(pos,
+ result = ExprNodes.SliceIndexNode(pos,
base = base, start = start, stop = stop)
else:
indexes = make_slice_nodes(pos, subscripts)
@@ -835,7 +835,7 @@ def p_string_literal(s, kind_override=None):
# comp_iter ::= comp_for | comp_if
# comp_for ::= "for" expression_list "in" testlist [comp_iter]
# comp_if ::= "if" test [comp_iter]
-
+
def p_list_maker(s):
# s.sy == '['
pos = s.position()
@@ -862,7 +862,7 @@ def p_list_maker(s):
exprs = [expr]
s.expect(']')
return ExprNodes.ListNode(pos, args = exprs)
-
+
def p_comp_iter(s, body):
if s.sy == 'for':
return p_comp_for(s, body)
@@ -879,13 +879,13 @@ def p_comp_for(s, body):
kw = p_for_bounds(s, allow_testlist=False)
kw.update(dict(else_clause = None, body = p_comp_iter(s, body)))
return Nodes.ForStatNode(pos, **kw)
-
+
def p_comp_if(s, body):
# s.sy == 'if'
pos = s.position()
s.next()
test = p_test_nocond(s)
- return Nodes.IfStatNode(pos,
+ return Nodes.IfStatNode(pos,
if_clauses = [Nodes.IfClauseNode(pos, condition = test,
body = p_comp_iter(s, body))],
else_clause = None )
@@ -986,7 +986,7 @@ def p_test_or_starred_expr_list(s, expr=None):
break
s.next()
return exprs
-
+
#testlist: test (',' test)* [',']
@@ -1076,7 +1076,7 @@ def p_expression_or_assignment(s):
rhs = expr_list[-1]
if len(expr_list) == 2:
- return Nodes.SingleAssignmentNode(rhs.pos,
+ return Nodes.SingleAssignmentNode(rhs.pos,
lhs = expr_list[0], rhs = rhs)
else:
return Nodes.CascadedAssignmentNode(rhs.pos,
@@ -1177,7 +1177,7 @@ def p_raise_statement(s):
s.next()
exc_tb = p_test(s)
if exc_type or exc_value or exc_tb:
- return Nodes.RaiseStatNode(pos,
+ return Nodes.RaiseStatNode(pos,
exc_type = exc_type,
exc_value = exc_value,
exc_tb = exc_tb)
@@ -1197,7 +1197,7 @@ def p_import_statement(s):
for pos, target_name, dotted_name, as_name in items:
dotted_name = EncodedString(dotted_name)
if kind == 'cimport':
- stat = Nodes.CImportStatNode(pos,
+ stat = Nodes.CImportStatNode(pos,
module_name = dotted_name,
as_name = as_name)
else:
@@ -1207,9 +1207,9 @@ def p_import_statement(s):
else:
name_list = None
stat = Nodes.SingleAssignmentNode(pos,
- lhs = ExprNodes.NameNode(pos,
+ lhs = ExprNodes.NameNode(pos,
name = as_name or target_name),
- rhs = ExprNodes.ImportNode(pos,
+ rhs = ExprNodes.ImportNode(pos,
module_name = ExprNodes.IdentifierStringNode(
pos, value = dotted_name),
name_list = name_list))
@@ -1273,7 +1273,7 @@ def p_from_import_statement(s, first_statement = 0):
ExprNodes.IdentifierStringNode(name_pos, value = encoded_name))
items.append(
(name,
- ExprNodes.NameNode(name_pos,
+ ExprNodes.NameNode(name_pos,
name = as_name or name)))
import_list = ExprNodes.ListNode(
imported_names[0][0], args = imported_name_strings)
@@ -1362,8 +1362,8 @@ def p_while_statement(s):
test = p_test(s)
body = p_suite(s)
else_clause = p_else_clause(s)
- return Nodes.WhileStatNode(pos,
- condition = test, body = body,
+ return Nodes.WhileStatNode(pos,
+ condition = test, body = body,
else_clause = else_clause)
def p_for_statement(s):
@@ -1375,7 +1375,7 @@ def p_for_statement(s):
else_clause = p_else_clause(s)
kw.update(dict(body = body, else_clause = else_clause))
return Nodes.ForStatNode(pos, **kw)
-
+
def p_for_bounds(s, allow_testlist=True):
target = p_for_target(s)
if s.sy == 'in':
@@ -1400,7 +1400,7 @@ def p_for_bounds(s, allow_testlist=True):
target = ExprNodes.NameNode(name2_pos, name = name2)
else:
if not target.is_name:
- error(target.pos,
+ error(target.pos,
"Target of for-from statement must be a variable name")
elif name2 != target.name:
error(name2_pos,
@@ -1408,9 +1408,9 @@ def p_for_bounds(s, allow_testlist=True):
if rel1[0] != rel2[0]:
error(rel2_pos,
"Relation directions in for-from do not match")
- return dict(target = target,
- bound1 = bound1,
- relation1 = rel1,
+ return dict(target = target,
+ bound1 = bound1,
+ relation1 = rel1,
relation2 = rel2,
bound2 = bound2,
step = step,
@@ -1569,7 +1569,7 @@ def p_with_items(s):
body = p_with_items(s)
else:
body = p_suite(s)
- return Nodes.WithStatNode(pos, manager = manager,
+ return Nodes.WithStatNode(pos, manager = manager,
target = target, body = body)
def p_with_template(s):
@@ -1835,21 +1835,21 @@ def p_positional_and_keyword_args(s, end_sy_set, templates = None):
else:
base_type = p_c_base_type(s, templates = templates)
declarator = p_c_declarator(s, empty = 1)
- arg = Nodes.CComplexBaseTypeNode(base_type.pos,
+ arg = Nodes.CComplexBaseTypeNode(base_type.pos,
base_type = base_type, declarator = declarator)
parsed_type = True
keyword_node = ExprNodes.IdentifierStringNode(
arg.pos, value = EncodedString(ident))
keyword_args.append((keyword_node, arg))
was_keyword = True
-
+
else:
if looking_at_expr(s):
arg = p_test(s)
else:
base_type = p_c_base_type(s, templates = templates)
declarator = p_c_declarator(s, empty = 1)
- arg = Nodes.CComplexBaseTypeNode(base_type.pos,
+ arg = Nodes.CComplexBaseTypeNode(base_type.pos,
base_type = base_type, declarator = declarator)
parsed_type = True
positional_args.append(arg)
@@ -1891,7 +1891,7 @@ def p_c_complex_base_type(s):
base_type = p_c_base_type(s)
declarator = p_c_declarator(s, empty = 1)
s.expect(')')
- return Nodes.CComplexBaseTypeNode(pos,
+ return Nodes.CComplexBaseTypeNode(pos,
base_type = base_type, declarator = declarator)
def p_c_simple_base_type(s, self_flag, nonempty, templates = None):
@@ -1933,7 +1933,7 @@ def p_c_simple_base_type(s, self_flag, nonempty, templates = None):
name = s.systring
s.next()
if nonempty and s.sy != 'IDENT':
- # Make sure this is not a declaration of a variable or function.
+ # Make sure this is not a declaration of a variable or function.
if s.sy == '(':
s.next()
if s.sy == '*' or s.sy == '**' or s.sy == '&':
@@ -1946,28 +1946,28 @@ def p_c_simple_base_type(s, self_flag, nonempty, templates = None):
s.put_back('IDENT', name)
name = None
- type_node = Nodes.CSimpleBaseTypeNode(pos,
+ type_node = Nodes.CSimpleBaseTypeNode(pos,
name = name, module_path = module_path,
is_basic_c_type = is_basic, signed = signed,
- complex = complex, longness = longness,
+ complex = complex, longness = longness,
is_self_arg = self_flag, templates = templates)
if s.sy == '[':
type_node = p_buffer_or_template(s, type_node, templates)
-
+
if s.sy == '.':
s.next()
name = p_ident(s)
type_node = Nodes.CNestedBaseTypeNode(pos, base_type = type_node, name = name)
-
+
return type_node
def p_buffer_or_template(s, base_type_node, templates):
# s.sy == '['
pos = s.position()
s.next()
- # Note that buffer_positional_options_count=1, so the only positional argument is dtype.
- # For templated types, all parameters are types.
+ # Note that buffer_positional_options_count=1, so the only positional argument is dtype.
+ # For templated types, all parameters are types.
positional_args, keyword_args = (
p_positional_and_keyword_args(s, (']',), templates)
)
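
For context, a hedged sketch of the two bracket forms this branch parses; the numpy and libcpp names are illustrative:

    # buffer access type: dtype is the single positional option,
    # the rest (ndim, mode, ...) are keyword options
    cdef np.ndarray[np.float64_t, ndim=2] data
    # templated type: every bracket parameter is itself a type
    cdef vector[int] values
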
@@ -1983,7 +1983,7 @@ def p_buffer_or_template(s, base_type_node, templates):
keyword_args = keyword_dict,
base_type_node = base_type_node)
return result
-
+
def looking_at_name(s):
return s.sy == 'IDENT' and not s.systring in calling_convention_words
@@ -2131,13 +2131,13 @@ def p_c_func_declarator(s, pos, ctx, base, cmethod_flag):
nogil = p_nogil(s)
exc_val, exc_check = p_exception_value_clause(s)
with_gil = p_with_gil(s)
- return Nodes.CFuncDeclaratorNode(pos,
+ return Nodes.CFuncDeclaratorNode(pos,
base = base, args = args, has_varargs = ellipsis,
exception_value = exc_val, exception_check = exc_check,
nogil = nogil or ctx.nogil or with_gil, with_gil = with_gil)
supported_overloaded_operators = cython.set([
- '+', '-', '*', '/', '%',
+ '+', '-', '*', '/', '%',
'++', '--', '~', '|', '&', '^', '<<', '>>', ',',
'==', '!=', '>=', '>', '<=', '<',
'[]', '()',
@@ -2152,7 +2152,7 @@ def p_c_simple_declarator(s, ctx, empty, is_type, cmethod_flag,
base = p_c_declarator(s, ctx, empty = empty, is_type = is_type,
cmethod_flag = cmethod_flag,
assignable = assignable, nonempty = nonempty)
- result = Nodes.CPtrDeclaratorNode(pos,
+ result = Nodes.CPtrDeclaratorNode(pos,
base = base)
elif s.sy == '**': # scanner returns this as a single token
s.next()
@@ -2445,7 +2445,7 @@ def p_c_enum_item(s, ctx, items):
if s.sy == '=':
s.next()
value = p_test(s)
- items.append(Nodes.CEnumDefItemNode(pos,
+ items.append(Nodes.CEnumDefItemNode(pos,
name = name, cname = cname, value = value))
def p_c_struct_or_union_definition(s, pos, ctx):
@@ -2479,7 +2479,7 @@ def p_c_struct_or_union_definition(s, pos, ctx):
s.expect_dedent()
else:
s.expect_newline("Syntax error in struct or union definition")
- return Nodes.CStructOrUnionDefNode(pos,
+ return Nodes.CStructOrUnionDefNode(pos,
name = name, cname = cname, kind = kind, attributes = attributes,
typedef_flag = ctx.typedef_flag, visibility = ctx.visibility,
in_pxd = ctx.level == 'module_pxd', packed = packed)
@@ -2494,7 +2494,7 @@ def p_visibility(s, prev_visibility):
% (prev_visibility, visibility))
s.next()
return visibility
-
+
def p_c_modifiers(s):
if s.sy == 'IDENT' and s.systring in ('inline',):
modifier = s.systring
@@ -2516,7 +2516,7 @@ def p_c_func_or_var_declaration(s, pos, ctx):
result = Nodes.CFuncDefNode(pos,
visibility = ctx.visibility,
base_type = base_type,
- declarator = declarator,
+ declarator = declarator,
body = suite,
doc = doc,
modifiers = modifiers,
@@ -2534,7 +2534,7 @@ def p_c_func_or_var_declaration(s, pos, ctx):
assignable = 1, nonempty = 1)
declarators.append(declarator)
s.expect_newline("Syntax error in C variable declaration")
- result = Nodes.CVarDefNode(pos,
+ result = Nodes.CVarDefNode(pos,
visibility = ctx.visibility,
base_type = base_type,
declarators = declarators,
@@ -2603,7 +2603,7 @@ def p_def_statement(s, decorators=None):
s.next()
return_type_annotation = p_test(s)
doc, body = p_suite(s, Ctx(level = 'function'), with_doc = 1)
- return Nodes.DefNode(pos, name = name, args = args,
+ return Nodes.DefNode(pos, name = name, args = args,
star_arg = star_arg, starstar_arg = starstar_arg,
doc = doc, body = body, decorators = decorators,
return_type_annotation = return_type_annotation)
diff --git a/Cython/Compiler/PyrexTypes.py b/Cython/Compiler/PyrexTypes.py
index 43873df16..10c2f632f 100755
--- a/Cython/Compiler/PyrexTypes.py
+++ b/Cython/Compiler/PyrexTypes.py
@@ -17,10 +17,10 @@ class BaseType(object):
def cast_code(self, expr_code):
return "((%s)%s)" % (self.declaration_code(""), expr_code)
-
+
def specialization_name(self):
return self.declaration_code("").replace(" ", "__")
-
+
def base_declaration_code(self, base_code, entity_code):
if entity_code:
return "%s %s" % (base_code, entity_code)
@@ -55,7 +55,7 @@ class PyrexType(BaseType):
# has_attributes boolean Has C dot-selectable attributes
# default_value string Initial value
#
- # declaration_code(entity_code,
+ # declaration_code(entity_code,
# for_display = 0, dll_linkage = None, pyrex = 0)
# Returns a code fragment for the declaration of an entity
# of this type, given a code fragment for the entity.
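
As a hedged illustration of how the pointer and array types further down compose these fragments (assuming an int base type):

    #   CPtrType(c_int_type).declaration_code("x")
    #       -> c_int_type.declaration_code("*x")    -> "int *x"
    #   CArrayType(c_int_type, 10).declaration_code("x")
    #       -> c_int_type.declaration_code("x[10]") -> "int x[10]"
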
@@ -79,7 +79,7 @@ class PyrexType(BaseType):
# Coerces array type into pointer type for use as
# a formal argument type.
#
-
+
is_pyobject = 0
is_unspecified = 0
is_extension_type = 0
@@ -106,44 +106,44 @@ class PyrexType(BaseType):
is_buffer = 0
has_attributes = 0
default_value = ""
-
+
def resolve(self):
# If a typedef, returns the base type.
return self
-
+
def specialize(self, values):
# TODO(danilo): Override wherever it makes sense.
return self
-
+
def literal_code(self, value):
# Returns a C code fragment representing a literal
# value of this type.
return str(value)
-
+
def __str__(self):
return self.declaration_code("", for_display = 1).strip()
-
+
def same_as(self, other_type, **kwds):
return self.same_as_resolved_type(other_type.resolve(), **kwds)
-
+
def same_as_resolved_type(self, other_type):
return self == other_type or other_type is error_type
-
+
def subtype_of(self, other_type):
return self.subtype_of_resolved_type(other_type.resolve())
-
+
def subtype_of_resolved_type(self, other_type):
return self.same_as(other_type)
-
+
def assignable_from(self, src_type):
return self.assignable_from_resolved_type(src_type.resolve())
-
+
def assignable_from_resolved_type(self, src_type):
return self.same_as(src_type)
-
+
def as_argument_type(self):
return self
-
+
def is_complete(self):
# A type is incomplete if it is an unsized array,
# a struct whose attributes are not defined, etc.
@@ -165,7 +165,7 @@ def public_decl(base_code, dll_linkage):
return "%s(%s)" % (dll_linkage, base_code)
else:
return base_code
-
+
def create_typedef_type(name, base_type, cname, is_external=0):
if base_type.is_complex:
if is_external:
@@ -187,38 +187,38 @@ class CTypedefType(BaseType):
# typedef_cname string
# typedef_base_type PyrexType
# typedef_is_external bool
-
+
is_typedef = 1
typedef_is_external = 0
to_py_utility_code = None
from_py_utility_code = None
-
-
+
+
def __init__(self, name, base_type, cname, is_external=0):
assert not base_type.is_complex
self.typedef_name = name
self.typedef_cname = cname
self.typedef_base_type = base_type
self.typedef_is_external = is_external
-
+
def resolve(self):
return self.typedef_base_type.resolve()
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.typedef_name
else:
base_code = public_decl(self.typedef_cname, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
-
+
def as_argument_type(self):
return self
def cast_code(self, expr_code):
# If self is really an array (rather than pointer), we can't cast.
- # For example, the gmp mpz_t.
+ # For example, the gmp mpz_t.
if self.typedef_base_type.is_array:
base_type = self.typedef_base_type.base_type
return CPtrType(base_type).cast_code(expr_code)
@@ -227,7 +227,7 @@ class CTypedefType(BaseType):
def __repr__(self):
return "<CTypedefType %s>" % self.typedef_cname
-
+
def __str__(self):
return self.typedef_name
@@ -301,7 +301,7 @@ class BufferType(BaseType):
# Delegates most attribute
# lookups to the base type. ANYTHING NOT DEFINED
# HERE IS DELEGATED!
-
+
# dtype PyrexType
# ndim int
# mode str
@@ -320,7 +320,7 @@ class BufferType(BaseType):
self.mode = mode
self.negative_indices = negative_indices
self.cast = cast
-
+
def as_argument_type(self):
return self
@@ -343,10 +343,10 @@ class PyObjectType(PyrexType):
buffer_defaults = None
is_extern = False
is_subclassed = False
-
+
def __str__(self):
return "Python object"
-
+
def __repr__(self):
return "<PyObjectType>"
@@ -356,8 +356,8 @@ class PyObjectType(PyrexType):
def assignable_from(self, src_type):
# except for pointers, conversion will be attempted
return not src_type.is_ptr or src_type.is_string
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = "object"
@@ -392,18 +392,18 @@ class BuiltinObjectType(PyObjectType):
self.cname = cname
self.typeptr_cname = "(&%s)" % cname
self.objstruct_cname = objstruct_cname
-
+
def set_scope(self, scope):
self.scope = scope
if scope:
scope.parent_type = self
-
+
def __str__(self):
return "%s object" % self.name
-
+
def __repr__(self):
return "<%s>"% self.cname
-
+
def assignable_from(self, src_type):
if isinstance(src_type, BuiltinObjectType):
return src_type.name == self.name
@@ -412,13 +412,13 @@ class BuiltinObjectType(PyObjectType):
src_type.name == self.name)
else:
return True
-
+
def typeobj_is_available(self):
return True
-
+
def attributes_known(self):
return True
-
+
def subtype_of(self, type):
return type.is_pyobject and self.assignable_from(type)
@@ -445,7 +445,7 @@ class BuiltinObjectType(PyObjectType):
error = '(PyErr_Format(PyExc_TypeError, "Expected %s, got %%.200s", Py_TYPE(%s)->tp_name), 0)' % (self.name, arg)
return check + '||' + error
- def declaration_code(self, entity_code,
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.name
@@ -478,12 +478,12 @@ class PyExtensionType(PyObjectType):
# vtabstruct_cname string Name of C method table struct
# vtabptr_cname string Name of pointer to C method table
# vtable_cname string Name of C method table definition
-
+
is_extension_type = 1
has_attributes = 1
-
+
objtypedef_cname = None
-
+
def __init__(self, name, typedef_flag, base_type, is_external=0):
self.name = name
self.scope = None
@@ -500,28 +500,28 @@ class PyExtensionType(PyObjectType):
self.vtabptr_cname = None
self.vtable_cname = None
self.is_external = is_external
-
+
def set_scope(self, scope):
self.scope = scope
if scope:
scope.parent_type = self
-
+
def subtype_of_resolved_type(self, other_type):
if other_type.is_extension_type:
return self is other_type or (
self.base_type and self.base_type.subtype_of(other_type))
else:
return other_type is py_object_type
-
+
def typeobj_is_available(self):
# Do we have a pointer to the type object?
return self.typeptr_cname
-
+
def typeobj_is_imported(self):
# If we don't know the C name of the type object but we do
# know which module it's defined in, it will be imported.
return self.typeobj_cname is None and self.module_name is not None
-
+
def assignable_from(self, src_type):
if self == src_type:
return True
@@ -530,7 +530,7 @@ class PyExtensionType(PyObjectType):
return self.assignable_from(src_type.base_type)
return False
- def declaration_code(self, entity_code,
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0, deref = 0):
if pyrex or for_display:
base_code = self.name
@@ -558,14 +558,14 @@ class PyExtensionType(PyObjectType):
def attributes_known(self):
return self.scope is not None
-
+
def __str__(self):
return self.name
-
+
def __repr__(self):
return "<PyExtensionType %s%s>" % (self.scope.class_name,
("", " typedef")[self.typedef_flag])
-
+
class CType(PyrexType):
#
@@ -574,7 +574,7 @@ class CType(PyrexType):
# to_py_function string C function for converting to Python object
# from_py_function string C function for constructing from Python object
#
-
+
to_py_function = None
from_py_function = None
exception_value = None
@@ -582,7 +582,7 @@ class CType(PyrexType):
def create_to_py_utility_code(self, env):
return self.to_py_function is not None
-
+
def create_from_py_utility_code(self, env):
return self.from_py_function is not None
@@ -609,18 +609,18 @@ class CVoidType(CType):
#
is_void = 1
-
+
def __repr__(self):
return "<CVoidType>"
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = "void"
else:
base_code = public_decl("void", dll_linkage)
return self.base_declaration_code(base_code, entity_code)
-
+
def is_complete(self):
return 0
@@ -632,25 +632,25 @@ class CNumericType(CType):
# rank integer Relative size
# signed integer 0 = unsigned, 1 = unspecified, 2 = explicitly signed
#
-
+
is_numeric = 1
default_value = "0"
-
+
sign_words = ("unsigned ", "", "signed ")
-
+
def __init__(self, rank, signed = 1):
self.rank = rank
self.signed = signed
-
+
def sign_and_name(self):
s = self.sign_words[self.signed]
n = rank_to_type_name[self.rank]
return s + n
-
+
def __repr__(self):
return "<CNumericType %s>" % self.sign_and_name()
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
type_name = self.sign_and_name()
if pyrex or for_display:
@@ -812,7 +812,7 @@ static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_%(TypeName)s(%(type)s val) {
} else {
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
- return _PyLong_FromByteArray(bytes, sizeof(%(type)s),
+ return _PyLong_FromByteArray(bytes, sizeof(%(type)s),
little, !is_unsigned);
}
}
@@ -982,22 +982,22 @@ class CFloatType(CNumericType):
from_py_function = "__pyx_PyFloat_AsDouble"
exception_value = -1
-
+
def __init__(self, rank, math_h_modifier = ''):
CNumericType.__init__(self, rank, 1)
self.math_h_modifier = math_h_modifier
-
+
def assignable_from_resolved_type(self, src_type):
return (src_type.is_numeric and not src_type.is_complex) or src_type is error_type
class CComplexType(CNumericType):
-
+
is_complex = 1
to_py_function = "__pyx_PyComplex_FromComplex"
has_attributes = 1
scope = None
-
+
def __init__(self, real_type):
while real_type.is_typedef and not real_type.typedef_is_external:
real_type = real_type.typedef_base_type
@@ -1009,7 +1009,7 @@ class CComplexType(CNumericType):
self.funcsuffix = real_type.math_h_modifier
else:
self.funcsuffix = "_%s" % real_type.specialization_name()
-
+
self.real_type = real_type
CNumericType.__init__(self, real_type.rank + 0.5, real_type.signed)
self.binops = {}
@@ -1021,7 +1021,7 @@ class CComplexType(CNumericType):
return self.real_type == other.real_type
else:
return False
-
+
def __ne__(self, other):
if isinstance(self, CComplexType) and isinstance(other, CComplexType):
return self.real_type != other.real_type
@@ -1039,7 +1039,7 @@ class CComplexType(CNumericType):
def __hash__(self):
return ~hash(self.real_type)
- def declaration_code(self, entity_code,
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
real_code = self.real_type.declaration_code("", for_display, dll_linkage, pyrex)
@@ -1053,7 +1053,7 @@ class CComplexType(CNumericType):
real_type_name = real_type_name.replace('long__double','long_double')
real_type_name = real_type_name.replace('PY_LONG_LONG','long_long')
return Naming.type_prefix + real_type_name + "_complex"
-
+
def assignable_from(self, src_type):
# Temporary hack/feature disabling, see #441
if (not src_type.is_complex and src_type.is_numeric and src_type.is_typedef
@@ -1061,12 +1061,12 @@ class CComplexType(CNumericType):
return False
else:
return super(CComplexType, self).assignable_from(src_type)
-
+
def assignable_from_resolved_type(self, src_type):
return (src_type.is_complex and self.real_type.assignable_from_resolved_type(src_type.real_type)
- or src_type.is_numeric and self.real_type.assignable_from_resolved_type(src_type)
+ or src_type.is_numeric and self.real_type.assignable_from_resolved_type(src_type)
or src_type is error_type)
-
+
def attributes_known(self):
if self.scope is None:
import Symtab
@@ -1097,7 +1097,7 @@ class CComplexType(CNumericType):
complex_arithmetic_utility_code):
env.use_utility_code(
utility_code.specialize(
- self,
+ self,
real_type = self.real_type.declaration_code(''),
m = self.funcsuffix,
is_float = self.real_type.is_float))
@@ -1115,13 +1115,13 @@ class CComplexType(CNumericType):
complex_from_py_utility_code):
env.use_utility_code(
utility_code.specialize(
- self,
+ self,
real_type = self.real_type.declaration_code(''),
m = self.funcsuffix,
is_float = self.real_type.is_float))
self.from_py_function = "__Pyx_PyComplex_As_" + self.specialization_name()
return True
-
+
def lookup_op(self, nargs, op):
try:
return self.binops[nargs, op]
@@ -1136,10 +1136,10 @@ class CComplexType(CNumericType):
def unary_op(self, op):
return self.lookup_op(1, op)
-
+
def binary_op(self, op):
return self.lookup_op(2, op)
-
+
complex_ops = {
(1, '-'): 'neg',
(1, 'zero'): 'is_zero',
@@ -1410,31 +1410,31 @@ impl="""
class CArrayType(CType):
# base_type CType Element type
# size integer or None Number of elements
-
+
is_array = 1
-
+
def __init__(self, base_type, size):
self.base_type = base_type
self.size = size
if base_type is c_char_type:
self.is_string = 1
-
+
def __repr__(self):
return "<CArrayType %s %s>" % (self.size, repr(self.base_type))
-
+
def same_as_resolved_type(self, other_type):
return ((other_type.is_array and
self.base_type.same_as(other_type.base_type))
or other_type is error_type)
-
+
def assignable_from_resolved_type(self, src_type):
# Can't assign to a variable of an array type
return 0
-
+
def element_ptr_type(self):
return c_ptr_type(self.base_type)
- def declaration_code(self, entity_code,
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if self.size is not None:
dimension_code = self.size
@@ -1445,38 +1445,38 @@ class CArrayType(CType):
return self.base_type.declaration_code(
"%s[%s]" % (entity_code, dimension_code),
for_display, dll_linkage, pyrex)
-
+
def as_argument_type(self):
return c_ptr_type(self.base_type)
-
+
def is_complete(self):
return self.size is not None
class CPtrType(CType):
# base_type CType Referenced type
-
+
is_ptr = 1
default_value = "0"
-
+
def __init__(self, base_type):
self.base_type = base_type
-
+
def __repr__(self):
return "<CPtrType %s>" % repr(self.base_type)
-
+
def same_as_resolved_type(self, other_type):
return ((other_type.is_ptr and
self.base_type.same_as(other_type.base_type))
or other_type is error_type)
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
#print "CPtrType.declaration_code: pointer to", self.base_type ###
return self.base_type.declaration_code(
"*%s" % entity_code,
for_display, dll_linkage, pyrex)
-
+
def assignable_from_resolved_type(self, other_type):
if other_type is error_type:
return 1
@@ -1489,13 +1489,13 @@ class CPtrType(CType):
return self.base_type.pointer_assignable_from_resolved_type(other_type)
else:
return 0
- if (self.base_type.is_cpp_class and other_type.is_ptr
+ if (self.base_type.is_cpp_class and other_type.is_ptr
and other_type.base_type.is_cpp_class and other_type.base_type.is_subclass(self.base_type)):
return 1
if other_type.is_array or other_type.is_ptr:
return self.base_type.is_void or self.base_type.same_as(other_type.base_type)
return 0
-
+
def specialize(self, values):
base_type = self.base_type.specialize(values)
if base_type == self.base_type:
@@ -1507,7 +1507,7 @@ class CPtrType(CType):
class CNullPtrType(CPtrType):
is_null_ptr = 1
-
+
class CReferenceType(BaseType):
@@ -1518,20 +1518,20 @@ class CReferenceType(BaseType):
def __repr__(self):
return "<CReferenceType %s>" % repr(self.ref_base_type)
-
+
def __str__(self):
return "%s &" % self.ref_base_type
def as_argument_type(self):
return self
- def declaration_code(self, entity_code,
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
#print "CReferenceType.declaration_code: pointer to", self.base_type ###
return self.ref_base_type.declaration_code(
"&%s" % entity_code,
for_display, dll_linkage, pyrex)
-
+
def specialize(self, values):
base_type = self.ref_base_type.specialize(values)
if base_type == self.ref_base_type:
@@ -1553,10 +1553,10 @@ class CFuncType(CType):
# nogil boolean Can be called without gil
# with_gil boolean Acquire gil around function body
# templates [string] or None
-
+
is_cfunction = 1
original_sig = None
-
+
def __init__(self, return_type, args, has_varargs = 0,
exception_value = None, exception_check = 0, calling_convention = "",
nogil = 0, with_gil = 0, is_overridable = 0, optional_arg_count = 0,
@@ -1572,7 +1572,7 @@ class CFuncType(CType):
self.with_gil = with_gil
self.is_overridable = is_overridable
self.templates = templates
-
+
def __repr__(self):
arg_reprs = map(repr, self.args)
if self.has_varargs:
@@ -1588,14 +1588,14 @@ class CFuncType(CType):
self.calling_convention_prefix(),
",".join(arg_reprs),
except_clause)
-
+
def calling_convention_prefix(self):
cc = self.calling_convention
if cc:
return cc + " "
else:
return ""
-
+
def same_c_signature_as(self, other_type, as_cmethod = 0):
return self.same_c_signature_as_resolved_type(
other_type.resolve(), as_cmethod)
@@ -1631,7 +1631,7 @@ class CFuncType(CType):
def compatible_signature_with(self, other_type, as_cmethod = 0):
return self.compatible_signature_with_resolved_type(other_type.resolve(), as_cmethod)
-
+
def compatible_signature_with_resolved_type(self, other_type, as_cmethod):
#print "CFuncType.same_c_signature_as_resolved_type:", \
# self, other_type, "as_cmethod =", as_cmethod ###
@@ -1665,11 +1665,11 @@ class CFuncType(CType):
if as_cmethod:
self.args[0] = other_type.args[0]
return 1
-
-
+
+
def narrower_c_signature_than(self, other_type, as_cmethod = 0):
return self.narrower_c_signature_than_resolved_type(other_type.resolve(), as_cmethod)
-
+
def narrower_c_signature_than_resolved_type(self, other_type, as_cmethod):
if other_type is error_type:
return 1
@@ -1705,7 +1705,7 @@ class CFuncType(CType):
sc1 = self.calling_convention == '__stdcall'
sc2 = other.calling_convention == '__stdcall'
return sc1 == sc2
-
+
def same_exception_signature_as(self, other_type):
return self.same_exception_signature_as_resolved_type(
other_type.resolve())
@@ -1713,18 +1713,18 @@ class CFuncType(CType):
def same_exception_signature_as_resolved_type(self, other_type):
return self.exception_value == other_type.exception_value \
and self.exception_check == other_type.exception_check
-
+
def same_as_resolved_type(self, other_type, as_cmethod = 0):
return self.same_c_signature_as_resolved_type(other_type, as_cmethod) \
and self.same_exception_signature_as_resolved_type(other_type) \
and self.nogil == other_type.nogil
-
+
def pointer_assignable_from_resolved_type(self, other_type):
return self.same_c_signature_as_resolved_type(other_type) \
and self.same_exception_signature_as_resolved_type(other_type) \
and not (self.nogil and not other_type.nogil)
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0,
with_calling_convention = 1):
arg_decl_list = []
@@ -1762,7 +1762,7 @@ class CFuncType(CType):
return self.return_type.declaration_code(
"%s%s(%s)%s" % (cc, entity_code, arg_decl_code, trailer),
for_display, dll_linkage, pyrex)
-
+
def function_header_code(self, func_name, arg_code):
return "%s%s(%s)" % (self.calling_convention_prefix(),
func_name, arg_code)
@@ -1774,7 +1774,7 @@ class CFuncType(CType):
def signature_cast_string(self):
s = self.declaration_code("(*)", with_calling_convention=False)
return '(%s)' % s
-
+
def specialize(self, values):
if self.templates is None:
new_templates = None
@@ -1791,7 +1791,7 @@ class CFuncType(CType):
is_overridable = self.is_overridable,
optional_arg_count = self.optional_arg_count,
templates = new_templates)
-
+
def opt_arg_cname(self, arg_name):
return self.op_arg_struct.base_type.scope.lookup(arg_name).cname
@@ -1816,13 +1816,13 @@ class CFuncTypeArg(object):
self.type = type
self.pos = pos
self.needs_type_test = False # TODO: should these defaults be set in analyse_types()?
-
+
def __repr__(self):
return "%s:%s" % (self.name, repr(self.type))
-
+
def declaration_code(self, for_display = 0):
return self.type.declaration_code(self.cname, for_display)
-
+
def specialize(self, values):
return CFuncTypeArg(self.name, self.type.specialize(values), self.pos, self.cname)
@@ -1836,11 +1836,11 @@ class StructUtilityCode(object):
return isinstance(other, StructUtilityCode) and self.header == other.header
def __hash__(self):
return hash(self.header)
-
+
def put_code(self, output):
code = output['utility_code_def']
proto = output['utility_code_proto']
-
+
code.putln("%s {" % self.header)
code.putln("PyObject* res;")
code.putln("PyObject* member;")
@@ -1863,7 +1863,7 @@ class StructUtilityCode(object):
if self.forward_decl:
proto.putln(self.type.declaration_code('') + ';')
proto.putln(self.header + ";")
-
+
class CStructOrUnionType(CType):
# name string
@@ -1872,12 +1872,12 @@ class CStructOrUnionType(CType):
# scope StructOrUnionScope, or None if incomplete
# typedef_flag boolean
# packed boolean
-
+
# entry Entry
-
+
is_struct_or_union = 1
has_attributes = 1
-
+
def __init__(self, name, kind, scope, typedef_flag, cname, packed=False):
self.name = name
self.cname = cname
@@ -1890,7 +1890,7 @@ class CStructOrUnionType(CType):
self.exception_check = True
self._convert_code = None
self.packed = packed
-
+
def create_to_py_utility_code(self, env):
if env.outer_scope is None:
return False
@@ -1905,15 +1905,15 @@ class CStructOrUnionType(CType):
return False
forward_decl = (self.entry.visibility != 'extern')
self._convert_code = StructUtilityCode(self, forward_decl)
-
+
env.use_utility_code(self._convert_code)
return True
-
+
def __repr__(self):
return "<CStructOrUnionType %s %s%s>" % (self.name, self.cname,
("", " typedef")[self.typedef_flag])
- def declaration_code(self, entity_code,
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.name
@@ -1945,7 +1945,7 @@ class CStructOrUnionType(CType):
def is_complete(self):
return self.scope is not None
-
+
def attributes_known(self):
return self.is_complete()
@@ -1968,12 +1968,12 @@ class CppClassType(CType):
# cname string
# scope CppClassScope
# templates [string] or None
-
+
is_cpp_class = 1
has_attributes = 1
exception_check = True
namespace = None
-
+
def __init__(self, name, scope, cname, base_classes, templates = None, template_type = None):
self.name = name
self.cname = cname
@@ -1989,11 +1989,11 @@ class CppClassType(CType):
error(pos, "'%s' type is not a template" % self);
return PyrexTypes.error_type
if len(self.templates) != len(template_values):
- error(pos, "%s templated type receives %d arguments, got %d" %
+ error(pos, "%s templated type receives %d arguments, got %d" %
(self.name, len(self.templates), len(template_values)))
return error_type
return self.specialize(dict(zip(self.templates, template_values)))
-
+
def specialize(self, values):
if not self.templates and not self.namespace:
return self
@@ -2040,7 +2040,7 @@ class CppClassType(CType):
if base_class.is_subclass(other_type):
return 1
return 0
-
+
def same_as_resolved_type(self, other_type):
if other_type.is_cpp_class:
if self == other_type:
@@ -2059,23 +2059,23 @@ class CppClassType(CType):
if other_type is error_type:
return True
return other_type.is_cpp_class and other_type.is_subclass(self)
-
+
def attributes_known(self):
return self.scope is not None
class TemplatePlaceholderType(CType):
-
+
def __init__(self, name):
self.name = name
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if entity_code:
return self.name + " " + entity_code
else:
return self.name
-
+
def specialize(self, values):
if self in values:
return values[self]
@@ -2087,10 +2087,10 @@ class TemplatePlaceholderType(CType):
return self.name == other_type.name
else:
return 0
-
+
def __hash__(self):
return hash(self.name)
-
+
def __cmp__(self, other):
if isinstance(other, TemplatePlaceholderType):
return cmp(self.name, other.name)
@@ -2113,15 +2113,15 @@ class CEnumType(CType):
self.cname = cname
self.values = []
self.typedef_flag = typedef_flag
-
+
def __str__(self):
return self.name
-
+
def __repr__(self):
return "<CEnumType %s %s%s>" % (self.name, self.cname,
("", " typedef")[self.typedef_flag])
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.name
@@ -2139,7 +2139,7 @@ class CStringType(object):
is_string = 1
is_unicode = 0
-
+
to_py_function = "PyBytes_FromString"
from_py_function = "PyBytes_AsString"
exception_value = "NULL"
@@ -2151,32 +2151,32 @@ class CStringType(object):
class CUTF8CharArrayType(CStringType, CArrayType):
# C 'char []' type.
-
+
is_unicode = 1
-
+
to_py_function = "PyUnicode_DecodeUTF8"
exception_value = "NULL"
-
+
def __init__(self, size):
CArrayType.__init__(self, c_char_type, size)
class CCharArrayType(CStringType, CArrayType):
# C 'char []' type.
-
+
def __init__(self, size):
CArrayType.__init__(self, c_char_type, size)
-
+
class CCharPtrType(CStringType, CPtrType):
# C 'char *' type.
-
+
def __init__(self):
CPtrType.__init__(self, c_char_type)
class CUCharPtrType(CStringType, CPtrType):
# C 'unsigned char *' type.
-
+
to_py_function = "__Pyx_PyBytes_FromUString"
from_py_function = "__Pyx_PyBytes_AsUString"
@@ -2186,39 +2186,39 @@ class CUCharPtrType(CStringType, CPtrType):
class UnspecifiedType(PyrexType):
# Used as a placeholder until the type can be determined.
-
+
is_unspecified = 1
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
return "<unspecified>"
-
+
def same_as_resolved_type(self, other_type):
return False
-
+
class ErrorType(PyrexType):
# Used to prevent propagation of error messages.
-
+
is_error = 1
exception_value = "0"
exception_check = 0
to_py_function = "dummy"
from_py_function = "dummy"
-
+
def create_to_py_utility_code(self, env):
return True
-
+
def create_from_py_utility_code(self, env):
return True
-
- def declaration_code(self, entity_code,
+
+ def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
return "<error>"
-
+
def same_as_resolved_type(self, other_type):
return 1
-
+
def error_condition(self, result_code):
return "dummy"
@@ -2344,7 +2344,7 @@ modifiers_and_name_to_type = {
def is_promotion(src_type, dst_type):
# It's hard to find a hard definition of promotion, but empirical
- # evidence suggests that the below is all that's allowed.
+ # evidence suggests that the below is all that's allowed.
if src_type.is_numeric:
if dst_type.same_as(c_int_type):
unsigned = (not src_type.signed)
@@ -2367,7 +2367,7 @@ def best_match(args, functions, pos=None):
functions based on how much work must be done to convert the
arguments, with the following priorities:
* identical types or pointers to identical types
- * promotions
+ * promotions
* non-Python types
That is, we prefer functions where no arguments need converted,
and failing that, functions where only promotions are required, and
@@ -2377,7 +2377,7 @@ def best_match(args, functions, pos=None):
the same weight, we return None (as there is no best match). If pos
is not None, we also generate an error.
"""
- # TODO: args should be a list of types, not a list of Nodes.
+ # TODO: args should be a list of types, not a list of Nodes.
actual_nargs = len(args)
candidates = []
@@ -2409,7 +2409,7 @@ def best_match(args, functions, pos=None):
errors.append((func, error_mesg))
continue
candidates.append((func, func_type))
-
+
# Optimize the most common case of no overloading...
if len(candidates) == 1:
return candidates[0][0]
@@ -2420,7 +2420,7 @@ def best_match(args, functions, pos=None):
else:
error(pos, "no suitable method found")
return None
-
+
possibilities = []
bad_types = []
for func, func_type in candidates:
@@ -2470,7 +2470,7 @@ def widest_numeric_type(type1, type2):
return ntype
widest_type = CComplexType(
widest_numeric_type(
- real_type(type1),
+ real_type(type1),
real_type(type2)))
elif type1.is_enum and type2.is_enum:
widest_type = c_int_type
@@ -2562,7 +2562,7 @@ def simple_c_type(signed, longness, name):
# Find type descriptor for simple type given name and modifiers.
# Returns None if arguments don't make sense.
return modifiers_and_name_to_type.get((signed, longness, name))
-
+
def parse_basic_type(name):
base = None
if name.startswith('p_'):
@@ -2592,7 +2592,7 @@ def parse_basic_type(name):
if name.startswith('u'):
name = name[1:]
signed = 0
- elif (name.startswith('s') and
+ elif (name.startswith('s') and
not name.startswith('short')):
name = name[1:]
signed = 2
@@ -2636,7 +2636,7 @@ def c_ref_type(base_type):
def same_type(type1, type2):
return type1.same_as(type2)
-
+
def assignable_from(type1, type2):
return type1.assignable_from(type2)
diff --git a/Cython/Compiler/Scanning.py b/Cython/Compiler/Scanning.py
index 232e82eae..21fa526c1 100644
--- a/Cython/Compiler/Scanning.py
+++ b/Cython/Compiler/Scanning.py
@@ -32,7 +32,7 @@ def get_lexicon():
if not lexicon:
lexicon = make_lexicon()
return lexicon
-
+
#------------------------------------------------------------------
py_reserved_words = [
@@ -53,7 +53,7 @@ class Method(object):
def __init__(self, name):
self.name = name
self.__name__ = name # for Plex tracing
-
+
def __call__(self, stream, text):
return getattr(stream, self.name)(text)
@@ -64,16 +64,16 @@ class CompileTimeScope(object):
def __init__(self, outer = None):
self.entries = {}
self.outer = outer
-
+
def declare(self, name, value):
self.entries[name] = value
-
+
def lookup_here(self, name):
return self.entries[name]
-
+
def __contains__(self, name):
return name in self.entries
-
+
def lookup(self, name):
try:
return self.lookup_here(name)
@@ -171,20 +171,20 @@ class FileSourceDescriptor(SourceDescriptor):
self.filename = filename
self.set_file_type_from_name(filename)
self._cmp_name = filename
-
+
def get_lines(self, encoding=None, error_handling=None):
return Utils.open_source_file(
self.filename, encoding=encoding,
error_handling=error_handling,
# newline normalisation is costly before Py2.6
require_normalised_newlines=False)
-
+
def get_description(self):
return self.filename
-
+
def get_filenametable_entry(self):
return self.filename
-
+
def __eq__(self, other):
return isinstance(other, FileSourceDescriptor) and self.filename == other.filename
@@ -204,7 +204,7 @@ class StringSourceDescriptor(SourceDescriptor):
#self.set_file_type_from_name(name)
self.codelines = [x + "\n" for x in code.split("\n")]
self._cmp_name = name
-
+
def get_lines(self, encoding=None, error_handling=None):
if not encoding:
return self.codelines
@@ -236,7 +236,7 @@ class PyrexScanner(Scanner):
# compile_time_eval boolean In a true conditional compilation context
# compile_time_expr boolean In a compile-time expression context
- def __init__(self, file, filename, parent_scanner = None,
+ def __init__(self, file, filename, parent_scanner = None,
scope = None, context = None, source_encoding=None, parse_comments=True, initial_pos=None):
Scanner.__init__(self, get_lexicon(), file, filename, initial_pos)
if parent_scanner:
@@ -269,8 +269,8 @@ class PyrexScanner(Scanner):
def commentline(self, text):
if self.parse_comments:
- self.produce('commentline', text)
-
+ self.produce('commentline', text)
+
def current_level(self):
return self.indentation_stack[-1]
@@ -286,14 +286,14 @@ class PyrexScanner(Scanner):
if self.bracket_nesting_level == 0:
self.begin('INDENT')
self.produce('NEWLINE', '')
-
+
string_states = {
"'": 'SQ_STRING',
'"': 'DQ_STRING',
"'''": 'TSQ_STRING',
'"""': 'TDQ_STRING'
}
-
+
def begin_string_action(self, text):
if text[:1] in string_prefixes:
text = text[1:]
@@ -301,11 +301,11 @@ class PyrexScanner(Scanner):
text = text[1:]
self.begin(self.string_states[text])
self.produce('BEGIN_STRING')
-
+
def end_string_action(self, text):
self.begin('')
self.produce('END_STRING')
-
+
def unclosed_string_action(self, text):
self.end_string_action(text)
self.error("Unclosed string literal")
@@ -378,7 +378,7 @@ class PyrexScanner(Scanner):
else:
t = "%s %s" % (self.sy, self.systring)
print("--- %3d %2d %s" % (line, col, t))
-
+
def peek(self):
saved = self.sy, self.systring
self.next()
@@ -386,16 +386,16 @@ class PyrexScanner(Scanner):
self.unread(*next)
self.sy, self.systring = saved
return next
-
+
def put_back(self, sy, systring):
self.unread(self.sy, self.systring)
self.sy = sy
self.systring = systring
-
+
def unread(self, token, value):
# This method should be added to Plex
self.queue.insert(0, (token, value))
-
+
def error(self, message, pos = None, fatal = True):
if pos is None:
pos = self.position()
@@ -403,19 +403,19 @@ class PyrexScanner(Scanner):
err = error(pos, "Possible inconsistent indentation")
err = error(pos, message)
if fatal: raise err
-
+
def expect(self, what, message = None):
if self.sy == what:
self.next()
else:
self.expected(what, message)
-
+
def expect_keyword(self, what, message = None):
if self.sy == IDENT and self.systring == what:
self.next()
else:
self.expected(what, message)
-
+
def expected(self, what, message = None):
if message:
self.error(message)
@@ -425,7 +425,7 @@ class PyrexScanner(Scanner):
else:
found = self.sy
self.error("Expected '%s', found '%s'" % (what, found))
-
+
def expect_indent(self):
self.expect('INDENT',
"Expected an increase in indentation level")
diff --git a/Cython/Compiler/Symtab.py b/Cython/Compiler/Symtab.py
index 068e99e10..c1ac87eec 100644
--- a/Cython/Compiler/Symtab.py
+++ b/Cython/Compiler/Symtab.py
@@ -26,10 +26,10 @@ possible_identifier = re.compile(ur"(?![0-9])\w+$", re.U).match
nice_identifier = re.compile('^[a-zA-Z0-9_]+$').match
iso_c99_keywords = set(
-['auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do',
- 'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if',
- 'int', 'long', 'register', 'return', 'short', 'signed', 'sizeof',
- 'static', 'struct', 'switch', 'typedef', 'union', 'unsigned', 'void',
+['auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do',
+ 'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if',
+ 'int', 'long', 'register', 'return', 'short', 'signed', 'sizeof',
+ 'static', 'struct', 'switch', 'typedef', 'union', 'unsigned', 'void',
'volatile', 'while',
 '_Bool', '_Complex', '_Imaginary', 'inline', 'restrict'])
@@ -44,14 +44,14 @@ def c_safe_identifier(cname):
class BufferAux(object):
writable_needed = False
-
+
def __init__(self, buffer_info_var, stridevars, shapevars,
suboffsetvars):
self.buffer_info_var = buffer_info_var
self.stridevars = stridevars
self.shapevars = shapevars
self.suboffsetvars = suboffsetvars
-
+
def __repr__(self):
return "<BufferAux %r>" % self.__dict__
@@ -185,14 +185,14 @@ class Entry(object):
self.init = init
self.overloaded_alternatives = []
self.assignments = []
-
+
def __repr__(self):
return "Entry(name=%s, type=%s)" % (self.name, self.type)
-
+
def redeclared(self, pos):
error(pos, "'%s' does not match previous declaration" % self.name)
error(self.pos, "Previous declaration is here")
-
+
def all_alternatives(self):
return [self] + self.overloaded_alternatives
@@ -239,7 +239,7 @@ class Scope(object):
scope_prefix = ""
in_cinclude = 0
nogil = 0
-
+
def __init__(self, name, outer_scope, parent_scope):
# The outer_scope is the next scope in the lookup chain.
# The parent_scope is used to derive the qualified name of this scope.
@@ -279,25 +279,25 @@ class Scope(object):
def start_branching(self, pos):
self.control_flow = self.control_flow.start_branch(pos)
-
+
def next_branch(self, pos):
self.control_flow = self.control_flow.next_branch(pos)
-
+
def finish_branching(self, pos):
self.control_flow = self.control_flow.finish_branch(pos)
-
+
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.qualified_name)
def qualifying_scope(self):
return self.parent_scope
-
+
def mangle(self, prefix, name = None):
if name:
return "%s%s%s" % (prefix, self.scope_prefix, name)
else:
return self.parent_scope.mangle(prefix, self.name)
-
+
def mangle_internal(self, name):
# Mangle an internal name so as not to clash with any
# user-defined name in this scope.
@@ -320,19 +320,19 @@ class Scope(object):
def global_scope(self):
# Return the module-level scope containing this scope.
return self.outer_scope.global_scope()
-
+
def builtin_scope(self):
# Return the module-level scope containing this scope.
return self.outer_scope.builtin_scope()
def declare(self, name, cname, type, pos, visibility):
# Create new entry, and add to dictionary if
- # name is not None. Reports a warning if already
+ # name is not None. Reports a warning if already
# declared.
if type.is_buffer and not isinstance(self, LocalScope):
error(pos, ERR_BUF_LOCALONLY)
if not self.in_cinclude and cname and re.match("^_[_A-Z]+$", cname):
- # See http://www.gnu.org/software/libc/manual/html_node/Reserved-Names.html#Reserved-Names
+ # See http://www.gnu.org/software/libc/manual/html_node/Reserved-Names.html#Reserved-Names
warning(pos, "'%s' is a reserved name in C." % cname, -1)
entries = self.entries
if name and name in entries:
@@ -352,7 +352,7 @@ class Scope(object):
entry.scope = self
entry.visibility = visibility
return entry
-
+
def qualify_name(self, name):
return EncodedString("%s.%s" % (self.qualified_name, name))
@@ -367,8 +367,8 @@ class Scope(object):
entry.is_const = 1
entry.value_node = value
return entry
-
- def declare_type(self, name, type, pos,
+
+ def declare_type(self, name, type, pos,
cname = None, visibility = 'private', defining = 1):
# Add an entry for a type definition.
if not cname:
@@ -379,7 +379,7 @@ class Scope(object):
self.type_entries.append(entry)
# here we would set as_variable to an object representing this type
return entry
-
+
def declare_typedef(self, name, base_type, pos, cname = None,
visibility = 'private'):
if not cname:
@@ -388,7 +388,7 @@ class Scope(object):
else:
cname = self.mangle(Naming.type_prefix, name)
try:
- type = PyrexTypes.create_typedef_type(name, base_type, cname,
+ type = PyrexTypes.create_typedef_type(name, base_type, cname,
(visibility == 'extern'))
except ValueError, e:
error(pos, e.args[0])
@@ -396,8 +396,8 @@ class Scope(object):
entry = self.declare_type(name, type, pos, cname, visibility)
type.qualified_name = entry.qualified_name
return entry
-
- def declare_struct_or_union(self, name, kind, scope,
+
+ def declare_struct_or_union(self, name, kind, scope,
typedef_flag, pos, cname = None, visibility = 'private',
packed = False):
# Add an entry for a struct or union definition.
@@ -429,7 +429,7 @@ class Scope(object):
if not scope and not entry.type.scope:
self.check_for_illegal_incomplete_ctypedef(typedef_flag, pos)
return entry
-
+
def declare_cpp_class(self, name, scope,
pos, cname = None, base_classes = [],
visibility = 'extern', templates = None):
@@ -456,11 +456,11 @@ class Scope(object):
for T in templates:
template_entry = entry.type.scope.declare(T.name, T.name, T, None, 'extern')
template_entry.is_type = 1
-
+
def declare_inherited_attributes(entry, base_classes):
for base_class in base_classes:
declare_inherited_attributes(entry, base_class.base_classes)
- entry.type.scope.declare_inherited_cpp_attributes(base_class.scope)
+ entry.type.scope.declare_inherited_cpp_attributes(base_class.scope)
if entry.type.scope:
declare_inherited_attributes(entry, base_classes)
if self.is_cpp_class_scope:
@@ -471,12 +471,12 @@ class Scope(object):
if typedef_flag != entry.type.typedef_flag:
error(pos, "'%s' previously declared using '%s'" % (
entry.name, ("cdef", "ctypedef")[entry.type.typedef_flag]))
-
+
def check_previous_visibility(self, entry, visibility, pos):
if entry.visibility != visibility:
error(pos, "'%s' previously declared as '%s'" % (
entry.name, entry.visibility))
-
+
def declare_enum(self, name, pos, cname, typedef_flag,
visibility = 'private'):
if name:
@@ -492,9 +492,9 @@ class Scope(object):
visibility = visibility)
entry.enum_values = []
self.sue_entries.append(entry)
- return entry
-
- def declare_var(self, name, type, pos,
+ return entry
+
+ def declare_var(self, name, type, pos,
cname = None, visibility = 'private', is_cdef = 0):
# Add an entry for a variable.
if not cname:
@@ -510,7 +510,7 @@ class Scope(object):
entry.is_variable = 1
self.control_flow.set_state((), (name, 'initialized'), False)
return entry
-
+
def declare_builtin(self, name, pos):
return self.outer_scope.declare_builtin(name, pos)
@@ -558,8 +558,8 @@ class Scope(object):
def register_pyfunction(self, entry):
self.pyfunc_entries.append(entry)
-
- def declare_cfunction(self, name, type, pos,
+
+ def declare_cfunction(self, name, type, pos,
cname = None, visibility = 'private', defining = 0,
api = 0, in_pxd = 0, modifiers = (), utility_code = None):
# Add an entry for a C function.
@@ -609,7 +609,7 @@ class Scope(object):
entry.func_modifiers = modifiers
entry.utility_code = utility_code
return entry
-
+
def add_cfunction(self, name, type, pos, cname, visibility, modifiers):
# Add a C function entry without giving it a func_cname.
entry = self.declare(name, cname, type, pos, visibility)
@@ -618,7 +618,7 @@ class Scope(object):
entry.func_modifiers = modifiers
self.cfunc_entries.append(entry)
return entry
-
+
def find(self, name, pos):
# Look up name, report error if not found.
entry = self.lookup(name)
@@ -626,7 +626,7 @@ class Scope(object):
return entry
else:
error(pos, "'%s' is not declared" % name)
-
+
def find_imported_module(self, path, pos):
# Look up qualified name, must be a module, report error if not found.
# Path is a list of names.
@@ -641,7 +641,7 @@ class Scope(object):
error(pos, "'%s' is not a cimported module" % '.'.join(path))
return None
return scope
-
+
def lookup(self, name):
# Look up name in this scope or an enclosing one.
# Return None if not found.
@@ -652,7 +652,7 @@ class Scope(object):
def lookup_here(self, name):
# Look up in this scope only, return None if not found.
return self.entries.get(name, None)
-
+
def lookup_target(self, name):
# Look up name in this scope only. Declare as Python
# variable if not found.
@@ -660,12 +660,12 @@ class Scope(object):
if not entry:
entry = self.declare_var(name, py_object_type, None)
return entry
-
+
def lookup_type(self, name):
entry = self.lookup(name)
if entry and entry.is_type:
return entry.type
-
+
def lookup_operator(self, operator, operands):
if operands[0].type.is_cpp_class:
obj_type = operands[0].type
@@ -685,19 +685,19 @@ class Scope(object):
def generate_library_function_declarations(self, code):
# Generate extern decls for C library funcs used.
pass
-
+
def defines_any(self, names):
# Test whether any of the given names are
# defined in this scope.
for name in names:
- if name in self.entries:
+ if name in self.entries:
return 1
return 0
-
+
def infer_types(self):
from TypeInference import get_type_inferer
get_type_inferer().infer_types(self)
-
+
def is_cpp(self):
outer = self.outer_scope
if outer is None:
@@ -711,7 +711,7 @@ class PreImportScope(Scope):
def __init__(self):
Scope.__init__(self, Options.pre_import, None, None)
-
+
def declare_builtin(self, name, pos):
entry = self.declare(name, name, py_object_type, pos, 'private')
entry.is_variable = True
@@ -721,25 +721,25 @@ class PreImportScope(Scope):
class BuiltinScope(Scope):
# The builtin namespace.
-
+
def __init__(self):
if Options.pre_import is None:
Scope.__init__(self, "__builtin__", None, None)
else:
Scope.__init__(self, "__builtin__", PreImportScope(), None)
self.type_names = {}
-
+
for name, definition in self.builtin_entries.iteritems():
cname, type = definition
self.declare_var(name, type, None, cname)
-
+
def declare_builtin(self, name, pos):
if not hasattr(builtins, name):
if self.outer_scope is not None:
return self.outer_scope.declare_builtin(name, pos)
else:
error(pos, "undeclared name not builtin: %s"%name)
-
+
def declare_builtin_cfunction(self, name, type, cname, python_equiv = None,
utility_code = None):
# If python_equiv == "*", the Python equivalent has the same name
@@ -758,7 +758,7 @@ class BuiltinScope(Scope):
var_entry.utility_code = utility_code
entry.as_variable = var_entry
return entry
-
+
def declare_builtin_type(self, name, cname, utility_code = None, objstruct_cname = None):
name = EncodedString(name)
type = PyrexTypes.BuiltinObjectType(name, cname, objstruct_cname)
@@ -839,7 +839,7 @@ class ModuleScope(Scope):
# types_imported {PyrexType : 1} Set of types for which import code generated
# has_import_star boolean Module contains import *
# cpp boolean Compiling a C++ file
-
+
is_module_scope = 1
has_import_star = 0
@@ -875,13 +875,13 @@ class ModuleScope(Scope):
self.namespace_cname = self.module_cname
for name in ['__builtins__', '__name__', '__file__', '__doc__']:
self.declare_var(EncodedString(name), py_object_type, None)
-
+
def qualifying_scope(self):
return self.parent_module
-
+
def global_scope(self):
return self
-
+
def declare_builtin(self, name, pos):
if not hasattr(builtins, name) and name != 'xrange':
# 'xrange' is special cased in Code.py
@@ -915,26 +915,26 @@ class ModuleScope(Scope):
# has not been referenced before.
return self.global_scope().context.find_module(
module_name, relative_to = self.parent_module, pos = pos)
-
+
def find_submodule(self, name):
# Find and return scope for a submodule of this module,
# creating a new empty one if necessary. Doesn't parse .pxd.
scope = self.lookup_submodule(name)
if not scope:
- scope = ModuleScope(name,
+ scope = ModuleScope(name,
parent_module = self, context = self.context)
self.module_entries[name] = scope
return scope
-
+
def lookup_submodule(self, name):
# Return scope for submodule of this module, or None.
return self.module_entries.get(name, None)
-
+
def add_include_file(self, filename):
if filename not in self.python_include_files \
and filename not in self.include_files:
self.include_files.append(filename)
-
+
def add_imported_module(self, scope):
if scope not in self.cimported_modules:
for filename in scope.include_files:
@@ -942,13 +942,13 @@ class ModuleScope(Scope):
self.cimported_modules.append(scope)
for m in scope.cimported_modules:
self.add_imported_module(m)
-
+
def add_imported_entry(self, name, entry, pos):
if entry not in self.entries:
self.entries[name] = entry
else:
warning(pos, "'%s' redeclared " % name, 0)
-
+
def declare_module(self, name, scope, pos):
# Declare a cimported module. This is represented as a
# Python module-level variable entry with a module
@@ -965,7 +965,7 @@ class ModuleScope(Scope):
# every module appearing in an import list.
# It shouldn't be an error for a module
# name to appear again, and indeed the generated
- # code compiles fine.
+ # code compiles fine.
return entry
warning(pos, "'%s' redeclared " % name, 0)
return None
@@ -974,14 +974,14 @@ class ModuleScope(Scope):
entry.as_module = scope
self.add_imported_module(scope)
return entry
-
- def declare_var(self, name, type, pos,
+
+ def declare_var(self, name, type, pos,
cname = None, visibility = 'private', is_cdef = 0):
# Add an entry for a global variable. If it is a Python
- # object type, and not declared with cdef, it will live
- # in the module dictionary, otherwise it will be a C
+ # object type, and not declared with cdef, it will live
+ # in the module dictionary, otherwise it will be a C
# global variable.
- entry = Scope.declare_var(self, name, type, pos,
+ entry = Scope.declare_var(self, name, type, pos,
cname, visibility, is_cdef)
if not visibility in ('private', 'public', 'extern'):
error(pos, "Module-level variable cannot be declared %s" % visibility)
@@ -998,12 +998,12 @@ class ModuleScope(Scope):
entry.init = 0
self.var_entries.append(entry)
return entry
-
+
def declare_global(self, name, pos):
entry = self.lookup_here(name)
if not entry:
self.declare_var(name, py_object_type, pos)
-
+
def use_utility_code(self, new_code):
if new_code is not None:
self.utility_code_list.append(new_code)
@@ -1014,7 +1014,7 @@ class ModuleScope(Scope):
buffer_defaults = None):
# If this is a non-extern typedef class, expose the typedef, but use
# the non-typedef struct internally to avoid needing forward
- # declarations for anonymous structs.
+ # declarations for anonymous structs.
if typedef_flag and visibility != 'extern':
if visibility != 'public':
warning(pos, "ctypedef only valid for public and extern classes", 2)
@@ -1061,7 +1061,7 @@ class ModuleScope(Scope):
elif not entry.in_cinclude:
type.objstruct_cname = self.mangle(Naming.objstruct_prefix, name)
else:
- error(entry.pos,
+ error(entry.pos,
"Object name required for 'public' or 'extern' C class")
self.attach_var_entry_to_c_class(entry)
self.c_class_entries.append(entry)
@@ -1098,20 +1098,20 @@ class ModuleScope(Scope):
if objstruct_cname:
if type.objstruct_cname and type.objstruct_cname != objstruct_cname:
error(pos, "Object struct name differs from previous declaration")
- type.objstruct_cname = objstruct_cname
+ type.objstruct_cname = objstruct_cname
if typeobj_cname:
if type.typeobj_cname and type.typeobj_cname != typeobj_cname:
error(pos, "Type object name differs from previous declaration")
type.typeobj_cname = typeobj_cname
#
- # Return new or existing entry
+ # Return new or existing entry
#
return entry
-
+
def check_for_illegal_incomplete_ctypedef(self, typedef_flag, pos):
if typedef_flag and not self.in_cinclude:
error(pos, "Forward-referenced type must use 'cdef', not 'ctypedef'")
-
+
def allocate_vtable_names(self, entry):
# If extension type has a vtable, allocate vtable struct and
# slot names for it.
@@ -1192,17 +1192,17 @@ class ModuleScope(Scope):
self.check_c_class(entry)
def check_c_functions(self):
- # Performs post-analysis checking making sure all
+ # Performs post-analysis checking making sure all
# defined c functions are actually implemented.
for name, entry in self.entries.items():
if entry.is_cfunction:
- if (entry.defined_in_pxd
+ if (entry.defined_in_pxd
and entry.scope is self
and entry.visibility != 'extern'
- and not entry.in_cinclude
+ and not entry.in_cinclude
and not entry.is_implemented):
error(entry.pos, "Non-extern C function '%s' declared but not defined" % name)
-
+
def attach_var_entry_to_c_class(self, entry):
# The name of an extension class has to serve as both a type
# name and a variable name holding the type object. It is
@@ -1219,21 +1219,21 @@ class ModuleScope(Scope):
var_entry.is_cglobal = 1
var_entry.is_readonly = 1
entry.as_variable = var_entry
-
+
def is_cpp(self):
return self.cpp
-
+
def infer_types(self):
from TypeInference import PyObjectTypeInferer
PyObjectTypeInferer().infer_types(self)
-
+
class LocalScope(Scope):
def __init__(self, name, outer_scope, parent_scope = None):
if parent_scope is None:
parent_scope = outer_scope
Scope.__init__(self, name, outer_scope, parent_scope)
-
+
def mangle(self, prefix, name):
return prefix + name
@@ -1249,13 +1249,13 @@ class LocalScope(Scope):
self.arg_entries.append(entry)
self.control_flow.set_state((), (name, 'source'), 'arg')
return entry
-
- def declare_var(self, name, type, pos,
+
+ def declare_var(self, name, type, pos,
cname = None, visibility = 'private', is_cdef = 0):
# Add an entry for a local variable.
if visibility in ('public', 'readonly'):
error(pos, "Local variable cannot be declared %s" % visibility)
- entry = Scope.declare_var(self, name, type, pos,
+ entry = Scope.declare_var(self, name, type, pos,
cname, visibility, is_cdef)
if type.is_pyobject and not Options.init_local_none:
entry.init = "0"
@@ -1263,7 +1263,7 @@ class LocalScope(Scope):
entry.is_local = 1
self.var_entries.append(entry)
return entry
-
+
def declare_global(self, name, pos):
# Pull entry from global scope into local scope.
if self.lookup_here(name):
@@ -1271,7 +1271,7 @@ class LocalScope(Scope):
else:
entry = self.global_scope().lookup_target(name)
self.entries[name] = entry
-
+
def lookup(self, name):
# Look up name in this scope or an enclosing one.
# Return None if not found.
@@ -1280,7 +1280,7 @@ class LocalScope(Scope):
if entry.scope is not self and entry.scope.is_closure_scope:
if hasattr(entry.scope, "scope_class"):
raise InternalError, "lookup() after scope class created."
- # The actual c fragment for the different scopes differs
+ # The actual c fragment for the different scopes differs
# on the outside and inside, so we make a new entry
entry.in_closure = True
# Would it be better to declare_var here?
@@ -1292,7 +1292,7 @@ class LocalScope(Scope):
self.entries[name] = inner_entry
return inner_entry
return entry
-
+
def mangle_closure_cnames(self, outer_scope_cname):
for entry in self.entries.values():
if entry.from_closure:
@@ -1350,7 +1350,7 @@ class ClosureScope(LocalScope):
# for entry in self.entries.values() + self.temp_entries:
# entry.in_closure = 1
# LocalScope.mangle_closure_cnames(self, scope_var)
-
+
# def mangle(self, prefix, name):
# return "%s->%s" % (self.cur_scope_cname, name)
# return "%s->%s" % (self.closure_cname, name)
@@ -1360,11 +1360,11 @@ class ClosureScope(LocalScope):
class StructOrUnionScope(Scope):
# Namespace of a C struct or union.
-
+
def __init__(self, name="?"):
Scope.__init__(self, name, None, None)
- def declare_var(self, name, type, pos,
+ def declare_var(self, name, type, pos,
cname = None, visibility = 'private', is_cdef = 0, allow_pyobject = 0):
# Add an entry for an attribute.
if not cname:
@@ -1384,7 +1384,7 @@ class StructOrUnionScope(Scope):
"C struct/union member cannot be declared %s" % visibility)
return entry
- def declare_cfunction(self, name, type, pos,
+ def declare_cfunction(self, name, type, pos,
cname = None, visibility = 'private', defining = 0,
api = 0, in_pxd = 0, modifiers = ()): # currently no utility code ...
return self.declare_var(name, type, pos, cname, visibility)
@@ -1408,20 +1408,20 @@ class ClassScope(Scope):
if entry:
return entry
if name == "classmethod":
- # We don't want to use the builtin classmethod here 'cause it won't do the
- # right thing in this scope (as the class memebers aren't still functions).
- # Don't want to add a cfunction to this scope 'cause that would mess with
- # the type definition, so we just return the right entry.
+ # We don't want to use the builtin classmethod here 'cause it won't do the
+            # right thing in this scope (as the class members aren't still functions).
+ # Don't want to add a cfunction to this scope 'cause that would mess with
+ # the type definition, so we just return the right entry.
self.use_utility_code(classmethod_utility_code)
entry = Entry(
- "classmethod",
- "__Pyx_Method_ClassMethod",
+ "classmethod",
+ "__Pyx_Method_ClassMethod",
PyrexTypes.CFuncType(
py_object_type,
[PyrexTypes.CFuncTypeArg("", py_object_type, None)], 0, 0))
entry.is_cfunction = 1
return entry
-
+
class PyClassScope(ClassScope):
# Namespace of a Python class.
@@ -1429,13 +1429,13 @@ class PyClassScope(ClassScope):
# class_obj_cname string C variable holding class object
is_py_class_scope = 1
-
- def declare_var(self, name, type, pos,
+
+ def declare_var(self, name, type, pos,
cname = None, visibility = 'private', is_cdef = 0):
if type is unspecified_type:
type = py_object_type
# Add an entry for a class attribute.
- entry = Scope.declare_var(self, name, type, pos,
+ entry = Scope.declare_var(self, name, type, pos,
cname, visibility, is_cdef)
entry.is_pyglobal = 1
entry.is_pyclass_attr = 1
@@ -1458,9 +1458,9 @@ class CClassScope(ClassScope):
# defined boolean Defined in .pxd file
# implemented boolean Defined in .pyx file
# inherited_var_entries [Entry] Adapted var entries from base class
-
+
is_c_class_scope = 1
-
+
def __init__(self, name, outer_scope, visibility):
ClassScope.__init__(self, name, outer_scope)
if visibility != 'extern':
@@ -1471,7 +1471,7 @@ class CClassScope(ClassScope):
self.inherited_var_entries = []
self.defined = 0
self.implemented = 0
-
+
def needs_gc(self):
# If the type or any of its base types have Python-valued
# C attributes, then it needs to participate in GC.
@@ -1480,7 +1480,7 @@ class CClassScope(ClassScope):
self.parent_type.base_type.scope is not None and
self.parent_type.base_type.scope.needs_gc())
- def declare_var(self, name, type, pos,
+ def declare_var(self, name, type, pos,
cname = None, visibility = 'private', is_cdef = 0):
if is_cdef:
# Add an entry for an attribute.
@@ -1489,7 +1489,7 @@ class CClassScope(ClassScope):
"C attributes cannot be added in implementation part of"
" extension type defined in a pxd")
if get_special_method_signature(name):
- error(pos,
+ error(pos,
"The name '%s' is reserved for a special method."
% name)
if not cname:
@@ -1520,7 +1520,7 @@ class CClassScope(ClassScope):
if type is unspecified_type:
type = py_object_type
# Add an entry for a class attribute.
- entry = Scope.declare_var(self, name, type, pos,
+ entry = Scope.declare_var(self, name, type, pos,
cname, visibility, is_cdef)
entry.is_member = 1
entry.is_pyglobal = 1 # xxx: is_pyglobal changes behaviour in so many places that
@@ -1551,12 +1551,12 @@ class CClassScope(ClassScope):
self.pyfunc_entries.append(entry)
return entry
-
+
def lookup_here(self, name):
if name == "__new__":
name = EncodedString("__cinit__")
return ClassScope.lookup_here(self, name)
-
+
def declare_cfunction(self, name, type, pos,
cname = None, visibility = 'private',
defining = 0, api = 0, in_pxd = 0, modifiers = (),
@@ -1596,7 +1596,7 @@ class CClassScope(ClassScope):
entry.func_cname = self.mangle(Naming.func_prefix, name)
entry.utility_code = utility_code
return entry
-
+
def add_cfunction(self, name, type, pos, cname, visibility, modifiers):
# Add a cfunction entry without giving it a func_cname.
prev_entry = self.lookup_here(name)
@@ -1625,12 +1625,12 @@ class CClassScope(ClassScope):
entry = self.declare(name, name, py_object_type, pos, 'private')
entry.is_property = 1
entry.doc = doc
- entry.scope = PropertyScope(name,
+ entry.scope = PropertyScope(name,
outer_scope = self.global_scope(), parent_scope = self)
entry.scope.parent_type = self.parent_type
self.property_entries.append(entry)
return entry
-
+
def declare_inherited_c_attributes(self, base_scope):
# Declare entries for all the C attributes of an
# inherited type, with cnames modified appropriately
@@ -1639,7 +1639,7 @@ class CClassScope(ClassScope):
return "%s.%s" % (Naming.obj_base_cname, base_entry.cname)
for base_entry in \
base_scope.inherited_var_entries + base_scope.var_entries:
- entry = self.declare(base_entry.name, adapt(base_entry.cname),
+ entry = self.declare(base_entry.name, adapt(base_entry.cname),
base_entry.type, None, 'private')
entry.is_variable = 1
self.inherited_var_entries.append(entry)
@@ -1648,21 +1648,21 @@ class CClassScope(ClassScope):
base_entry.pos, adapt(base_entry.cname),
base_entry.visibility, base_entry.func_modifiers)
entry.is_inherited = 1
-
-
+
+
class CppClassScope(Scope):
# Namespace of a C++ class.
-
+
is_cpp_class_scope = 1
-
+
default_constructor = None
-
+
def __init__(self, name, outer_scope):
Scope.__init__(self, name, outer_scope, None)
self.directives = outer_scope.directives
self.inherited_var_entries = []
- def declare_var(self, name, type, pos,
+ def declare_var(self, name, type, pos,
cname = None, visibility = 'extern', is_cdef = 0, allow_pyobject = 0):
# Add an entry for an attribute.
if not cname:
@@ -1676,7 +1676,7 @@ class CppClassScope(Scope):
error(pos,
"C++ class member cannot be a Python object")
return entry
-
+
def check_base_default_constructor(self, pos):
# Look for default constructors in all base classes.
if self.default_constructor is None:
@@ -1730,7 +1730,7 @@ class CppClassScope(Scope):
#print base_entry.name, self.entries
if base_entry.name in self.entries:
base_entry.name
- entry = self.declare(base_entry.name, base_entry.cname,
+ entry = self.declare(base_entry.name, base_entry.cname,
base_entry.type, None, 'extern')
entry.is_variable = 1
self.inherited_var_entries.append(entry)
@@ -1740,7 +1740,7 @@ class CppClassScope(Scope):
base_entry.visibility, base_entry.func_modifiers,
utility_code = base_entry.utility_code)
entry.is_inherited = 1
-
+
def specialize(self, values):
scope = CppClassScope(self.name, self.outer_scope)
for entry in self.entries.values():
@@ -1764,8 +1764,8 @@ class CppClassScope(Scope):
return scope
def add_include_file(self, filename):
- self.outer_scope.add_include_file(filename)
-
+ self.outer_scope.add_include_file(filename)
+
class PropertyScope(Scope):
# Scope holding the __get__, __set__ and __del__ methods for
# a property of an extension type.
@@ -1773,7 +1773,7 @@ class PropertyScope(Scope):
# parent_type PyExtensionType The type to which the property belongs
is_property_scope = 1
-
+
def declare_pyfunction(self, name, pos, allow_redefine=False):
# Add an entry for a method.
signature = get_property_accessor_signature(name)
diff --git a/Cython/Compiler/Tests/TestBuffer.py b/Cython/Compiler/Tests/TestBuffer.py
index 94792360c..e27184f3c 100644
--- a/Cython/Compiler/Tests/TestBuffer.py
+++ b/Cython/Compiler/Tests/TestBuffer.py
@@ -17,7 +17,7 @@ class TestBufferParsing(CythonTest):
def not_parseable(self, expected_error, s):
e = self.should_fail(lambda: self.fragment(s), Errors.CompileError)
self.assertEqual(expected_error, e.message_only)
-
+
def test_basic(self):
t = self.parse(u"cdef object[float, 4, ndim=2, foo=foo] x")
bufnode = t.stats[0].base_type
@@ -25,7 +25,7 @@ class TestBufferParsing(CythonTest):
self.assertEqual(2, len(bufnode.positional_args))
# print bufnode.dump()
# should put more here...
-
+
def test_type_pos(self):
self.parse(u"cdef object[short unsigned int, 3] x")
@@ -68,7 +68,7 @@ class TestBufferOptions(CythonTest):
self.parse_opts(opts, expect_error=True)
# e = self.should_fail(lambda: self.parse_opts(opts))
self.assertEqual(expected_err, self.error.message_only)
-
+
def __test_basic(self):
buf = self.parse_opts(u"unsigned short int, 3")
self.assert_(isinstance(buf.dtype_node, CSimpleBaseTypeNode))
@@ -80,7 +80,7 @@ class TestBufferOptions(CythonTest):
self.assert_(isinstance(buf.dtype_node, CSimpleBaseTypeNode))
self.assert_(buf.dtype_node.signed == 0 and buf.dtype_node.longness == -1)
self.assertEqual(3, buf.ndim)
-
+
def __test_ndim(self):
self.parse_opts(u"int, 2")
self.non_parse(ERR_BUF_NDIM, u"int, 'a'")
diff --git a/Cython/Compiler/Tests/TestDecorators.py b/Cython/Compiler/Tests/TestDecorators.py
index 7f494351b..5b9286f86 100644
--- a/Cython/Compiler/Tests/TestDecorators.py
+++ b/Cython/Compiler/Tests/TestDecorators.py
@@ -12,7 +12,7 @@ class TestDecorator(TransformTest):
def decorated():
pass
""")
-
+
self.assertCode(u"""
def decorator(fun):
return fun
diff --git a/Cython/Compiler/Tests/TestParseTreeTransforms.py b/Cython/Compiler/Tests/TestParseTreeTransforms.py
index adf8105fc..320bcdd21 100644
--- a/Cython/Compiler/Tests/TestParseTreeTransforms.py
+++ b/Cython/Compiler/Tests/TestParseTreeTransforms.py
@@ -17,7 +17,7 @@ class TestNormalizeTree(TransformTest):
body: ExprStatNode
expr: NameNode
""", self.treetypes(t))
-
+
def test_wrap_singlestat(self):
t = self.run_pipeline([NormalizeTree(None)], u"if x: y")
self.assertLines(u"""
@@ -83,7 +83,7 @@ class TestNormalizeTree(TransformTest):
stats[0]: ExprStatNode
expr: NameNode
""", self.treetypes(t))
-
+
def test_pass_eliminated(self):
t = self.run_pipeline([NormalizeTree(None)], u"pass")
@@ -142,7 +142,7 @@ class TestWithTransform(object): # (TransformTest): # Disabled!
$0_2(None, None, None)
""", t)
-
+
# TODO: Re-enable once they're more robust.
if sys.version_info[:2] >= (2, 5) and False:
@@ -153,15 +153,15 @@ else:
DebuggerTestCase = object
class TestDebugTransform(DebuggerTestCase):
-
+
def elem_hasattrs(self, elem, attrs):
# we shall supporteth python 2.3 !
return all([attr in elem.attrib for attr in attrs])
-
+
def test_debug_info(self):
try:
assert os.path.exists(self.debug_dest)
-
+
t = DebugWriter.etree.parse(self.debug_dest)
# the xpath of the standard ElementTree is primitive, don't use
# anything fancy
@@ -171,22 +171,22 @@ class TestDebugTransform(DebuggerTestCase):
xml_globals = dict(
[(e.attrib['name'], e.attrib['type']) for e in L])
self.assertEqual(len(L), len(xml_globals))
-
+
L = list(t.find('/Module/Functions'))
assert L
xml_funcs = dict([(e.attrib['qualified_name'], e) for e in L])
self.assertEqual(len(L), len(xml_funcs))
-
+
# test globals
self.assertEqual('CObject', xml_globals.get('c_var'))
self.assertEqual('PythonObject', xml_globals.get('python_var'))
-
+
# test functions
funcnames = 'codefile.spam', 'codefile.ham', 'codefile.eggs'
required_xml_attrs = 'name', 'cname', 'qualified_name'
assert all([f in xml_funcs for f in funcnames])
spam, ham, eggs = [xml_funcs[funcname] for funcname in funcnames]
-
+
self.assertEqual(spam.attrib['name'], 'spam')
self.assertNotEqual('spam', spam.attrib['cname'])
assert self.elem_hasattrs(spam, required_xml_attrs)
@@ -198,12 +198,12 @@ class TestDebugTransform(DebuggerTestCase):
names = [e.attrib['name'] for e in spam_locals]
self.assertEqual(list('abcd'), names)
assert self.elem_hasattrs(spam_locals[0], required_xml_attrs)
-
+
# test arguments of functions
spam_arguments = list(spam.find('Arguments'))
assert spam_arguments
self.assertEqual(1, len(list(spam_arguments)))
-
+
# test step-into functions
step_into = spam.find('StepIntoFunctions')
spam_stepinto = [x.attrib['name'] for x in step_into]
@@ -214,10 +214,10 @@ class TestDebugTransform(DebuggerTestCase):
except:
print open(self.debug_dest).read()
raise
-
-
+
+
if __name__ == "__main__":
import unittest
diff --git a/Cython/Compiler/Tests/TestTreeFragment.py b/Cython/Compiler/Tests/TestTreeFragment.py
index 9ec694abc..76a0af40f 100644
--- a/Cython/Compiler/Tests/TestTreeFragment.py
+++ b/Cython/Compiler/Tests/TestTreeFragment.py
@@ -5,12 +5,12 @@ from Cython.Compiler.UtilNodes import *
import Cython.Compiler.Naming as Naming
class TestTreeFragments(CythonTest):
-
+
def test_basic(self):
F = self.fragment(u"x = 4")
T = F.copy()
self.assertCode(u"x = 4", T)
-
+
def test_copy_is_taken(self):
F = self.fragment(u"if True: x = 4")
T1 = F.root
@@ -46,7 +46,7 @@ class TestTreeFragments(CythonTest):
v = F.root.stats[1].rhs.operand2.operand1
a = T.stats[1].rhs.operand2.operand1
self.assertEquals(v.pos, a.pos)
-
+
def test_temps(self):
TemplateTransform.temp_name_counter = 0
F = self.fragment(u"""
diff --git a/Cython/Compiler/TreeFragment.py b/Cython/Compiler/TreeFragment.py
index 13e0dc111..11721d144 100644
--- a/Cython/Compiler/TreeFragment.py
+++ b/Cython/Compiler/TreeFragment.py
@@ -23,18 +23,18 @@ class StringParseContext(Main.Context):
def __init__(self, include_directories, name):
Main.Context.__init__(self, include_directories, {})
self.module_name = name
-
+
def find_module(self, module_name, relative_to = None, pos = None, need_pxd = 1):
if module_name != self.module_name:
raise AssertionError("Not yet supporting any cimports/includes from string code snippets")
return ModuleScope(module_name, parent_module = None, context = self)
-
+
def parse_from_strings(name, code, pxds={}, level=None, initial_pos=None):
"""
Utility method to parse a (unicode) string of code. This is mostly
used for internal Cython compiler purposes (creating code snippets
that transforms should emit, as well as unit testing).
-
+
code - a unicode string containing Cython (module-level) code
name - a descriptive name for the code source (to use in error messages etc.)
"""
@@ -78,7 +78,7 @@ class ApplyPositionAndCopy(TreeCopier):
def __init__(self, pos):
super(ApplyPositionAndCopy, self).__init__()
self.pos = pos
-
+
def visit_Node(self, node):
copy = super(ApplyPositionAndCopy, self).visit_Node(node)
copy.pos = self.pos
@@ -87,7 +87,7 @@ class ApplyPositionAndCopy(TreeCopier):
class TemplateTransform(VisitorTransform):
"""
Makes a copy of a template tree while doing substitutions.
-
+
A dictionary "substitutions" should be passed in when calling
the transform; mapping names to replacement nodes. Then replacement
happens like this:
@@ -103,11 +103,11 @@ class TemplateTransform(VisitorTransform):
Also a list "temps" should be passed. Any names listed will
be transformed into anonymous, temporary names.
-
+
Currently supported for tempnames is:
NameNode
(various function and class definition nodes etc. should be added to this)
-
+
Each replacement node gets the position of the substituted node
recursively applied to every member node.
"""
@@ -148,7 +148,7 @@ class TemplateTransform(VisitorTransform):
c.pos = self.pos
self.visitchildren(c)
return c
-
+
def try_substitution(self, node, key):
sub = self.substitutions.get(key)
if sub is not None:
@@ -157,7 +157,7 @@ class TemplateTransform(VisitorTransform):
return ApplyPositionAndCopy(pos)(sub)
else:
return self.visit_Node(node) # make copy as usual
-
+
def visit_NameNode(self, node):
temphandle = self.tempmap.get(node.name)
if temphandle:
@@ -174,7 +174,7 @@ class TemplateTransform(VisitorTransform):
return self.try_substitution(node, node.expr.name)
else:
return self.visit_Node(node)
-
+
def copy_code_tree(node):
return TreeCopier()(node)
@@ -186,12 +186,12 @@ def strip_common_indent(lines):
minindent = min([len(INDENT_RE.match(x).group(0)) for x in lines])
lines = [x[minindent:] for x in lines]
return lines
-
+
class TreeFragment(object):
def __init__(self, code, name="(tree fragment)", pxds={}, temps=[], pipeline=[], level=None, initial_pos=None):
if isinstance(code, unicode):
- def fmt(x): return u"\n".join(strip_common_indent(x.split(u"\n")))
-
+ def fmt(x): return u"\n".join(strip_common_indent(x.split(u"\n")))
+
fmt_code = fmt(code)
fmt_pxds = {}
for key, value in pxds.iteritems():
diff --git a/Cython/Compiler/TreePath.py b/Cython/Compiler/TreePath.py
index 7d6055e3d..ee996e821 100644
--- a/Cython/Compiler/TreePath.py
+++ b/Cython/Compiler/TreePath.py
@@ -129,7 +129,7 @@ def handle_descendants(next, token):
for node in result:
for child in iter_recursive(node):
yield child
-
+
return select
def handle_attribute(next, token):
@@ -231,7 +231,7 @@ def logical_and(lhs_selects, rhs_select):
for result_node in rhs_select(subresult):
yield node
return select
-
+
operations = {
"@": handle_attribute,
diff --git a/Cython/Compiler/TypeInference.py b/Cython/Compiler/TypeInference.py
index 0cf100ad4..857a28b0c 100644
--- a/Cython/Compiler/TypeInference.py
+++ b/Cython/Compiler/TypeInference.py
@@ -22,7 +22,7 @@ class TypedExprNode(ExprNodes.ExprNode):
object_expr = TypedExprNode(py_object_type)
class MarkAssignments(CythonTransform):
-
+
def mark_assignment(self, lhs, rhs):
if isinstance(lhs, (ExprNodes.NameNode, Nodes.PyArgDeclNode)):
if lhs.entry is None:
@@ -35,7 +35,7 @@ class MarkAssignments(CythonTransform):
else:
# Could use this info to infer cdef class attributes...
pass
-
+
def visit_SingleAssignmentNode(self, node):
self.mark_assignment(node.lhs, node.rhs)
self.visitchildren(node)
@@ -46,7 +46,7 @@ class MarkAssignments(CythonTransform):
self.mark_assignment(lhs, node.rhs)
self.visitchildren(node)
return node
-
+
def visit_InPlaceAssignmentNode(self, node):
self.mark_assignment(node.lhs, node.create_binop_node())
self.visitchildren(node)
@@ -65,7 +65,7 @@ class MarkAssignments(CythonTransform):
self.mark_assignment(node.target, arg)
if len(sequence.args) > 2:
self.mark_assignment(
- node.target,
+ node.target,
ExprNodes.binop_node(node.pos,
'+',
sequence.args[0],
@@ -87,9 +87,9 @@ class MarkAssignments(CythonTransform):
self.mark_assignment(node.target, node.bound1)
if node.step is not None:
self.mark_assignment(node.target,
- ExprNodes.binop_node(node.pos,
- '+',
- node.bound1,
+ ExprNodes.binop_node(node.pos,
+ '+',
+ node.bound1,
node.step))
self.visitchildren(node)
return node
@@ -99,7 +99,7 @@ class MarkAssignments(CythonTransform):
self.mark_assignment(node.target, object_expr)
self.visitchildren(node)
return node
-
+
def visit_FromCImportStatNode(self, node):
pass # Can't be assigned to...
@@ -131,7 +131,7 @@ class MarkOverflowingArithmetic(CythonTransform):
def __call__(self, root):
self.env_stack = []
self.env = root.scope
- return super(MarkOverflowingArithmetic, self).__call__(root)
+ return super(MarkOverflowingArithmetic, self).__call__(root)
def visit_safe_node(self, node):
self.might_overflow, saved = False, self.might_overflow
@@ -148,7 +148,7 @@ class MarkOverflowingArithmetic(CythonTransform):
self.visitchildren(node)
self.might_overflow = saved
return node
-
+
def visit_FuncDefNode(self, node):
self.env_stack.append(self.env)
self.env = node.local_scope
@@ -162,34 +162,34 @@ class MarkOverflowingArithmetic(CythonTransform):
if entry:
entry.might_overflow = True
return node
-
+
def visit_BinopNode(self, node):
if node.operator in '&|^':
return self.visit_neutral_node(node)
else:
return self.visit_dangerous_node(node)
-
+
visit_UnopNode = visit_neutral_node
-
+
visit_UnaryMinusNode = visit_dangerous_node
-
+
visit_InPlaceAssignmentNode = visit_dangerous_node
-
+
visit_Node = visit_safe_node
-
+
def visit_assignment(self, lhs, rhs):
- if (isinstance(rhs, ExprNodes.IntNode)
+ if (isinstance(rhs, ExprNodes.IntNode)
and isinstance(lhs, ExprNodes.NameNode)
and Utils.long_literal(rhs.value)):
entry = lhs.entry or self.env.lookup(lhs.name)
if entry:
entry.might_overflow = True
-
+
def visit_SingleAssignmentNode(self, node):
self.visit_assignment(node.lhs, node.rhs)
self.visitchildren(node)
return node
-
+
def visit_CascadedAssignmentNode(self, node):
for lhs in node.lhs_list:
self.visit_assignment(lhs, node.rhs)
@@ -280,7 +280,7 @@ class SimpleAssignmentTypeInferer(object):
break
if not ready_to_infer:
break
-
+
# We can't figure out the rest with this algorithm, let them be objects.
for entry in dependancies_by_entry:
entry.type = py_object_type
@@ -331,7 +331,7 @@ def safe_spanning_type(types, might_overflow):
# operations without other int types, so this is safe, too
return result_type
elif result_type.is_ptr and not (result_type.is_int and result_type.rank == 0):
- # Any pointer except (signed|unsigned|) char* can't implicitly
+ # Any pointer except (signed|unsigned|) char* can't implicitly
# become a PyObject.
return result_type
elif result_type.is_cpp_class:
@@ -342,7 +342,7 @@ def safe_spanning_type(types, might_overflow):
# used, won't arise in pure Python, and there shouldn't be side
# effects, so I'm declaring this safe.
return result_type
- # TODO: double complex should be OK as well, but we need
+ # TODO: double complex should be OK as well, but we need
# to make sure everything is supported.
elif result_type.is_int and not might_overflow:
return result_type
diff --git a/Cython/Compiler/TypeSlots.py b/Cython/Compiler/TypeSlots.py
index 955f66f65..9bb26c429 100644
--- a/Cython/Compiler/TypeSlots.py
+++ b/Cython/Compiler/TypeSlots.py
@@ -8,7 +8,7 @@ import PyrexTypes
import StringEncoding
import sys
-invisible = ['__cinit__', '__dealloc__', '__richcmp__',
+invisible = ['__cinit__', '__dealloc__', '__richcmp__',
'__nonzero__', '__bool__']
class Signature(object):
@@ -42,7 +42,7 @@ class Signature(object):
# '*' rest of args passed as generic Python
# arg tuple and kw dict (must be last
# char in format string)
-
+
format_map = {
'O': PyrexTypes.py_object_type,
'v': PyrexTypes.c_void_type,
@@ -61,7 +61,7 @@ class Signature(object):
# 'T', '-' and '*' are handled otherwise
# and are not looked up in here
}
-
+
error_value_map = {
'O': "NULL",
'T': "NULL",
@@ -71,7 +71,7 @@ class Signature(object):
'r': "-1",
'z': "-1",
}
-
+
def __init__(self, arg_format, ret_format):
self.has_dummy_arg = 0
self.has_generic_args = 0
@@ -84,27 +84,27 @@ class Signature(object):
self.fixed_arg_format = arg_format
self.ret_format = ret_format
self.error_value = self.error_value_map.get(ret_format, None)
-
+
def num_fixed_args(self):
return len(self.fixed_arg_format)
-
+
def is_self_arg(self, i):
# argument is 'self' for methods or 'class' for classmethods
return self.fixed_arg_format[i] == 'T'
-
+
def returns_self_type(self):
# return type is same as 'self' argument type
return self.ret_format == 'T'
-
+
def fixed_arg_type(self, i):
return self.format_map[self.fixed_arg_format[i]]
-
+
def return_type(self):
return self.format_map[self.ret_format]
def exception_value(self):
return self.error_value_map.get(self.ret_format)
-
+
def function_type(self, self_arg_override=None):
# Construct a C function type descriptor for this signature
args = []
@@ -182,8 +182,8 @@ class SlotDescriptor(object):
if preprocessor_guard:
code.putln("#endif")
- # Some C implementations have trouble statically
- # initialising a global with a pointer to an extern
+ # Some C implementations have trouble statically
+ # initialising a global with a pointer to an extern
# function, so we initialise some of the type slots
# in the module init function instead.
@@ -192,8 +192,8 @@ class SlotDescriptor(object):
value = self.slot_code(scope)
if value != "0":
code.putln("%s.%s = %s;" % (
- scope.parent_type.typeobj_cname,
- self.slot_name,
+ scope.parent_type.typeobj_cname,
+ self.slot_name,
value
)
)
@@ -203,18 +203,18 @@ class FixedSlot(SlotDescriptor):
# Descriptor for a type slot with a fixed value.
#
# value string
-
+
def __init__(self, slot_name, value, py3=True, py2=True, ifdef=None):
SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2, ifdef=ifdef)
self.value = value
-
+
def slot_code(self, scope):
return self.value
class EmptySlot(FixedSlot):
# Descriptor for a type slot whose value is always 0.
-
+
def __init__(self, slot_name, py3=True, py2=True, ifdef=None):
FixedSlot.__init__(self, slot_name, "0", py3=py3, py2=py2, ifdef=ifdef)
@@ -225,8 +225,8 @@ class MethodSlot(SlotDescriptor):
# signature Signature
# method_name string The __xxx__ name of the method
# alternatives [string] Alternative list of __xxx__ names for the method
-
- def __init__(self, signature, slot_name, method_name, fallback=None,
+
+ def __init__(self, signature, slot_name, method_name, fallback=None,
py3=True, py2=True, ifdef=None):
SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2, ifdef=ifdef)
self.signature = signature
@@ -270,10 +270,10 @@ class InternalMethodSlot(SlotDescriptor):
class GCDependentSlot(InternalMethodSlot):
# Descriptor for a slot whose value depends on whether
# the type participates in GC.
-
+
def __init__(self, slot_name, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
-
+
def slot_code(self, scope):
if not scope.needs_gc():
return "0"
@@ -287,15 +287,15 @@ class GCDependentSlot(InternalMethodSlot):
if entry.visibility != 'extern':
return self.slot_code(parent_type_scope)
return InternalMethodSlot.slot_code(self, scope)
-
-
+
+
class ConstructorSlot(InternalMethodSlot):
# Descriptor for tp_new and tp_dealloc.
-
+
def __init__(self, slot_name, method, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
self.method = method
-
+
def slot_code(self, scope):
if scope.parent_type.base_type \
and not scope.has_pyobject_attrs \
@@ -318,12 +318,12 @@ class SyntheticSlot(InternalMethodSlot):
# defined, the method will not be synthesized and an
# alternative default value will be placed in the type
# slot.
-
+
def __init__(self, slot_name, user_methods, default_value, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
self.user_methods = user_methods
self.default_value = default_value
-
+
def slot_code(self, scope):
if scope.defines_any(self.user_methods):
return InternalMethodSlot.slot_code(self, scope)
@@ -333,7 +333,7 @@ class SyntheticSlot(InternalMethodSlot):
class TypeFlagsSlot(SlotDescriptor):
# Descriptor for the type flags slot.
-
+
def slot_code(self, scope):
value = "Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER"
if not scope.directives['final']:
@@ -345,7 +345,7 @@ class TypeFlagsSlot(SlotDescriptor):
class DocStringSlot(SlotDescriptor):
# Descriptor for the docstring slot.
-
+
def slot_code(self, scope):
if scope.doc is not None:
if scope.doc.is_unicode:
@@ -361,19 +361,19 @@ class SuiteSlot(SlotDescriptor):
# Descriptor for a substructure of the type object.
#
# sub_slots [SlotDescriptor]
-
+
def __init__(self, sub_slots, slot_type, slot_name):
SlotDescriptor.__init__(self, slot_name)
self.sub_slots = sub_slots
self.slot_type = slot_type
substructures.append(self)
-
+
def substructure_cname(self, scope):
return "%s%s_%s" % (Naming.pyrex_prefix, self.slot_name, scope.class_name)
-
+
def slot_code(self, scope):
return "&%s" % self.substructure_cname(scope)
-
+
def generate_substructure(self, scope, code):
code.putln("")
code.putln(
@@ -388,21 +388,21 @@ substructures = [] # List of all SuiteSlot instances
class MethodTableSlot(SlotDescriptor):
# Slot descriptor for the method table.
-
+
def slot_code(self, scope):
return scope.method_table_cname
class MemberTableSlot(SlotDescriptor):
# Slot descriptor for the table of Python-accessible attributes.
-
+
def slot_code(self, scope):
return "0"
class GetSetSlot(SlotDescriptor):
# Slot descriptor for the table of attribute get & set methods.
-
+
def slot_code(self, scope):
if scope.property_entries:
return scope.getset_table_cname
@@ -415,16 +415,16 @@ class BaseClassSlot(SlotDescriptor):
def __init__(self, name):
SlotDescriptor.__init__(self, name, dynamic = 1)
-
+
def generate_dynamic_init_code(self, scope, code):
base_type = scope.parent_type.base_type
if base_type:
code.putln("%s.%s = %s;" % (
- scope.parent_type.typeobj_cname,
+ scope.parent_type.typeobj_cname,
self.slot_name,
base_type.typeptr_cname))
-
+
# The following dictionary maps __xxx__ method names to slot descriptors.
method_name_to_slot = {}
@@ -455,11 +455,11 @@ def get_property_accessor_signature(name):
# Return signature of accessor for an extension type
# property, else None.
return property_accessor_signatures.get(name)
-
+
def get_base_slot_function(scope, slot):
- # Returns the function implementing this slot in the baseclass.
+ # Returns the function implementing this slot in the baseclass.
# This is useful for enabling the compiler to optimize calls
- # that recursively climb the class hierarchy.
+ # that recursively climb the class hierarchy.
base_type = scope.parent_type.base_type
if scope.parent_scope is base_type.scope.parent_scope:
parent_slot = slot.slot_code(base_type.scope)
@@ -593,7 +593,7 @@ PyNumberMethods = (
MethodSlot(unaryfunc, "nb_float", "__float__"),
MethodSlot(unaryfunc, "nb_oct", "__oct__", py3 = False),
MethodSlot(unaryfunc, "nb_hex", "__hex__", py3 = False),
-
+
# Added in release 2.0
MethodSlot(ibinaryfunc, "nb_inplace_add", "__iadd__"),
MethodSlot(ibinaryfunc, "nb_inplace_subtract", "__isub__"),
@@ -606,7 +606,7 @@ PyNumberMethods = (
MethodSlot(ibinaryfunc, "nb_inplace_and", "__iand__"),
MethodSlot(ibinaryfunc, "nb_inplace_xor", "__ixor__"),
MethodSlot(ibinaryfunc, "nb_inplace_or", "__ior__"),
-
+
# Added in release 2.2
# The following require the Py_TPFLAGS_HAVE_CLASS flag
MethodSlot(binaryfunc, "nb_floor_divide", "__floordiv__"),
@@ -662,7 +662,7 @@ slot_table = (
EmptySlot("tp_setattr"),
MethodSlot(cmpfunc, "tp_compare", "__cmp__", py3 = '<RESERVED>'),
MethodSlot(reprfunc, "tp_repr", "__repr__"),
-
+
SuiteSlot(PyNumberMethods, "PyNumberMethods", "tp_as_number"),
SuiteSlot(PySequenceMethods, "PySequenceMethods", "tp_as_sequence"),
SuiteSlot(PyMappingMethods, "PyMappingMethods", "tp_as_mapping"),
@@ -670,12 +670,12 @@ slot_table = (
MethodSlot(hashfunc, "tp_hash", "__hash__"),
MethodSlot(callfunc, "tp_call", "__call__"),
MethodSlot(reprfunc, "tp_str", "__str__"),
-
+
SyntheticSlot("tp_getattro", ["__getattr__","__getattribute__"], "0"), #"PyObject_GenericGetAttr"),
SyntheticSlot("tp_setattro", ["__setattr__", "__delattr__"], "0"), #"PyObject_GenericSetAttr"),
SuiteSlot(PyBufferProcs, "PyBufferProcs", "tp_as_buffer"),
-
+
TypeFlagsSlot("tp_flags"),
DocStringSlot("tp_doc"),
@@ -693,20 +693,20 @@ slot_table = (
MethodTableSlot("tp_methods"),
MemberTableSlot("tp_members"),
GetSetSlot("tp_getset"),
-
+
BaseClassSlot("tp_base"), #EmptySlot("tp_base"),
EmptySlot("tp_dict"),
-
+
SyntheticSlot("tp_descr_get", ["__get__"], "0"),
SyntheticSlot("tp_descr_set", ["__set__", "__delete__"], "0"),
-
+
EmptySlot("tp_dictoffset"),
-
+
MethodSlot(initproc, "tp_init", "__init__"),
EmptySlot("tp_alloc"), #FixedSlot("tp_alloc", "PyType_GenericAlloc"),
InternalMethodSlot("tp_new"),
EmptySlot("tp_free"),
-
+
EmptySlot("tp_is_gc"),
EmptySlot("tp_bases"),
EmptySlot("tp_mro"),
@@ -739,7 +739,7 @@ MethodSlot(descrsetfunc, "", "__set__")
MethodSlot(descrdelfunc, "", "__delete__")
-# Method flags for python-exposed methods.
+# Method flags for python-exposed methods.
method_noargs = "METH_NOARGS"
method_onearg = "METH_O"
diff --git a/Cython/Compiler/UtilNodes.py b/Cython/Compiler/UtilNodes.py
index 5222b39ef..3aff81f83 100644
--- a/Cython/Compiler/UtilNodes.py
+++ b/Cython/Compiler/UtilNodes.py
@@ -30,7 +30,7 @@ class TempRefNode(AtomicExprNode):
def analyse_types(self, env):
assert self.type == self.handle.type
-
+
def analyse_target_types(self, env):
assert self.type == self.handle.type
@@ -68,20 +68,20 @@ class CleanupTempRefNode(TempRefNode):
class TempsBlockNode(Node):
# THIS IS DEPRECATED, USE LetNode instead
-
+
"""
Creates a block which allocates temporary variables.
This is used by transforms to output constructs that need
to make use of a temporary variable. Simply pass the types
of the needed temporaries to the constructor.
-
+
The variables can be referred to using a TempRefNode
(which can be constructed by calling get_ref_node).
"""
# temps [TempHandle]
# body StatNode
-
+
child_attrs = ["body"]
def generate_execution_code(self, code):
@@ -102,13 +102,13 @@ class TempsBlockNode(Node):
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
self.body.analyse_expressions(env)
-
+
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
-
+
def annotate(self, code):
self.body.annotate(code)
@@ -175,10 +175,10 @@ class ResultRefNode(AtomicExprNode):
def generate_result_code(self, code):
pass
-
+
def generate_disposal_code(self, code):
pass
-
+
def generate_assignment_code(self, rhs, code):
if self.type.is_pyobject:
rhs.make_owned_reference(code)
@@ -190,7 +190,7 @@ class ResultRefNode(AtomicExprNode):
def allocate_temps(self, env):
pass
-
+
def release_temp(self, env):
pass
@@ -263,7 +263,7 @@ class EvalWithTempExprNode(ExprNodes.ExprNode, LetNodeMixin):
self.setup_temp_expr(code)
self.subexpression.generate_evaluation_code(code)
self.teardown_temp_expr(code)
-
+
LetRefNode = ResultRefNode
class LetNode(Nodes.StatNode, LetNodeMixin):
@@ -289,7 +289,7 @@ class LetNode(Nodes.StatNode, LetNodeMixin):
def analyse_declarations(self, env):
self.temp_expression.analyse_declarations(env)
self.body.analyse_declarations(env)
-
+
def analyse_expressions(self, env):
self.temp_expression.analyse_expressions(env)
self.body.analyse_expressions(env)
@@ -317,7 +317,7 @@ class TempResultFromStatNode(ExprNodes.ExprNode):
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
-
+
def analyse_types(self, env):
self.body.analyse_expressions(env)
diff --git a/Cython/Compiler/Visitor.py b/Cython/Compiler/Visitor.py
index 65220b7e0..0bee20026 100644
--- a/Cython/Compiler/Visitor.py
+++ b/Cython/Compiler/Visitor.py
@@ -19,16 +19,16 @@ class TreeVisitor(object):
containing child nodes or lists of child nodes. Lists are not considered
part of the tree structure (i.e. contained nodes are considered direct
children of the parent node).
-
+
visit_children visits each of the children of a given node (see the visit_children
documentation). When recursing the tree using visit_children, an attribute
access_path is maintained which gives information about the current location
in the tree as a stack of tuples: (parent_node, attrname, index), representing
the node, attribute and optional list index that was taken in each step in the path to
the current node.
-
+
Example:
-
+
>>> class SampleNode(object):
... child_attrs = ["head", "body"]
... def __init__(self, value, head=None, body=None):
@@ -61,7 +61,7 @@ class TreeVisitor(object):
def dump_node(self, node, indent=0):
ignored = list(node.child_attrs) + [u'child_attrs', u'pos',
- u'gil_message', u'cpp_message',
+ u'gil_message', u'cpp_message',
u'subexprs']
values = []
pos = node.pos
@@ -189,7 +189,7 @@ class TreeVisitor(object):
"""
Visits the children of the given parent. If parent is None, returns
immediately (returning None).
-
+
The return value is a dictionary giving the results for each
child (mapping the attribute name to either the return value
or a list of return values (in the case of multiple children
@@ -214,14 +214,14 @@ class VisitorTransform(TreeVisitor):
"""
    A tree transform is a base class for visitors that want to do stream
processing of the structure (rather than attributes etc.) of a tree.
-
+
It implements __call__ to simply visit the argument node.
-
+
It requires the visitor methods to return the nodes which should take
the place of the visited node in the result tree (which can be the same
or one or more replacement). Specifically, if the return value from
a visitor method is:
-
+
- [] or None; the visited node will be removed (set to None if an attribute and
removed if in a list)
- A single node; the visited node will be replaced by the returned node.
@@ -245,12 +245,12 @@ class VisitorTransform(TreeVisitor):
else:
newlist.append(x)
setattr(parent, attr, newlist)
- return result
+ return result
def recurse_to_children(self, node):
self.visitchildren(node)
return node
-
+
def __call__(self, root):
return self._visit(root)
@@ -286,7 +286,7 @@ class ScopeTrackingTransform(CythonTransform):
# Keeps track of type of scopes
#scope_type: can be either of 'module', 'function', 'cclass', 'pyclass', 'struct'
#scope_node: the node that owns the current scope
-
+
def visit_ModuleNode(self, node):
self.scope_type = 'module'
self.scope_node = node
@@ -300,7 +300,7 @@ class ScopeTrackingTransform(CythonTransform):
self.visitchildren(node)
self.scope_type, self.scope_node = prev
return node
-
+
def visit_CClassDefNode(self, node):
return self.visit_scope(node, 'cclass')
@@ -316,11 +316,11 @@ class ScopeTrackingTransform(CythonTransform):
class EnvTransform(CythonTransform):
"""
- This transformation keeps a stack of the environments.
+ This transformation keeps a stack of the environments.
"""
def __call__(self, root):
self.env_stack = [root.scope]
- return super(EnvTransform, self).__call__(root)
+ return super(EnvTransform, self).__call__(root)
def current_env(self):
return self.env_stack[-1]
diff --git a/Cython/Debugger/Cygdb.py b/Cython/Debugger/Cygdb.py
index d4ebe3102..faa98d73f 100644
--- a/Cython/Debugger/Cygdb.py
+++ b/Cython/Debugger/Cygdb.py
@@ -7,7 +7,7 @@ The current directory should contain a directory named 'cython_debug', or a
path to the cython project directory should be given (the parent directory of
cython_debug).
-Additional gdb args can be provided only if a path to the project directory is
+Additional gdb args can be provided only if a path to the project directory is
given.
"""
@@ -22,7 +22,7 @@ usage = "Usage: cygdb [PATH [GDB_ARGUMENTS]]"
def make_command_file(path_to_debug_info, prefix_code='', no_import=False):
if not no_import:
- pattern = os.path.join(path_to_debug_info,
+ pattern = os.path.join(path_to_debug_info,
'cython_debug',
'cython_debug_info_*')
debug_files = glob.glob(pattern)
@@ -30,14 +30,14 @@ def make_command_file(path_to_debug_info, prefix_code='', no_import=False):
if not debug_files:
sys.exit('%s.\nNo debug files were found in %s. Aborting.' % (
usage, os.path.abspath(path_to_debug_info)))
-
+
fd, tempfilename = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
f.write(prefix_code)
f.write('set breakpoint pending on\n')
f.write("set print pretty on\n")
f.write('python from Cython.Debugger import libcython, libpython\n')
-
+
if no_import:
# don't do this, this overrides file command in .gdbinit
# f.write("file %s\n" % sys.executable)
@@ -58,17 +58,17 @@ def make_command_file(path_to_debug_info, prefix_code='', no_import=False):
'stripped). Some functionality may not work (properly).\\n')
end
'''))
-
+
f.close()
-
+
return tempfilename
def main(path_to_debug_info=None, gdb_argv=None, no_import=False):
"""
Start the Cython debugger. This tells gdb to import the Cython and Python
- extensions (libcython.py and libpython.py) and it enables gdb's pending
+ extensions (libcython.py and libpython.py) and it enables gdb's pending
breakpoints.
-
+
path_to_debug_info is the path to the Cython build directory
gdb_argv is the list of options to gdb
no_import tells cygdb whether it should import debug information
@@ -78,13 +78,13 @@ def main(path_to_debug_info=None, gdb_argv=None, no_import=False):
path_to_debug_info = sys.argv[1]
else:
path_to_debug_info = os.curdir
-
+
if gdb_argv is None:
gdb_argv = sys.argv[2:]
-
+
if path_to_debug_info == '--':
no_import = True
-
+
tempfilename = make_command_file(path_to_debug_info, no_import=no_import)
p = subprocess.Popen(['gdb', '-command', tempfilename] + gdb_argv)
while True:
diff --git a/Cython/Debugger/DebugWriter.py b/Cython/Debugger/DebugWriter.py
index f4cc563ec..7242a582e 100644
--- a/Cython/Debugger/DebugWriter.py
+++ b/Cython/Debugger/DebugWriter.py
@@ -33,24 +33,24 @@ from Cython.Compiler import Errors
class CythonDebugWriter(object):
"""
Class to output debugging information for cygdb
-
+
It writes debug information to cython_debug/cython_debug_info_<modulename>
in the build directory.
"""
-
+
def __init__(self, output_dir):
if etree is None:
raise Errors.NoElementTreeInstalledException()
-
+
self.output_dir = os.path.join(output_dir, 'cython_debug')
self.tb = etree.TreeBuilder()
# set by Cython.Compiler.ParseTreeTransforms.DebugTransform
self.module_name = None
self.start('cython_debug', attrs=dict(version='1.0'))
-
+
def start(self, name, attrs=None):
self.tb.start(name, attrs or {})
-
+
def end(self, name):
self.tb.end(name)
@@ -69,10 +69,10 @@ class CythonDebugWriter(object):
kw = {}
if have_lxml:
kw['pretty_print'] = True
-
+
fn = "cython_debug_info_" + self.module_name
et.write(os.path.join(self.output_dir, fn), encoding="UTF-8", **kw)
-
+
interpreter_path = os.path.join(self.output_dir, 'interpreter')
with open(interpreter_path, 'w') as f:
f.write(sys.executable)
diff --git a/Cython/Debugger/Tests/TestLibCython.py b/Cython/Debugger/Tests/TestLibCython.py
index 69145d52b..ba0b772e9 100644
--- a/Cython/Debugger/Tests/TestLibCython.py
+++ b/Cython/Debugger/Tests/TestLibCython.py
@@ -25,7 +25,7 @@ with open(codefile) as f:
source_to_lineno = dict((line.strip(), i + 1) for i, line in enumerate(f))
class DebuggerTestCase(unittest.TestCase):
-
+
def setUp(self):
"""
Run gdb and have cygdb import the debug information from the code
@@ -33,32 +33,32 @@ class DebuggerTestCase(unittest.TestCase):
"""
self.tempdir = tempfile.mkdtemp()
self.destfile = os.path.join(self.tempdir, 'codefile.pyx')
- self.debug_dest = os.path.join(self.tempdir,
- 'cython_debug',
+ self.debug_dest = os.path.join(self.tempdir,
+ 'cython_debug',
'cython_debug_info_codefile')
self.cfuncs_destfile = os.path.join(self.tempdir, 'cfuncs')
-
+
self.cwd = os.getcwd()
os.chdir(self.tempdir)
-
+
shutil.copy(codefile, self.destfile)
shutil.copy(cfuncs_file, self.cfuncs_destfile + '.c')
-
+
compiler = ccompiler.new_compiler()
compiler.compile(['cfuncs.c'], debug=True, extra_postargs=['-fPIC'])
-
+
opts = dict(
test_directory=self.tempdir,
module='codefile',
)
-
+
cython_compile_testcase = runtests.CythonCompileTestCase(
workdir=self.tempdir,
# we clean up everything (not only compiled files)
cleanup_workdir=False,
**opts
)
-
+
cython_compile_testcase.run_cython(
targetdir=self.tempdir,
incdir=None,
@@ -69,26 +69,26 @@ class DebuggerTestCase(unittest.TestCase):
},
**opts
)
-
+
cython_compile_testcase.run_distutils(
incdir=None,
workdir=self.tempdir,
extra_extension_args={'extra_objects':['cfuncs.o']},
**opts
)
-
+
# ext = Cython.Distutils.extension.Extension(
# 'codefile',
- # ['codefile.pyx'],
+ # ['codefile.pyx'],
# pyrex_gdb=True,
# extra_objects=['cfuncs.o'])
- #
+ #
# distutils.core.setup(
# script_args=['build_ext', '--inplace'],
# ext_modules=[ext],
# cmdclass=dict(build_ext=Cython.Distutils.build_ext)
# )
-
+
def tearDown(self):
os.chdir(self.cwd)
shutil.rmtree(self.tempdir)
@@ -97,45 +97,45 @@ class DebuggerTestCase(unittest.TestCase):
class GdbDebuggerTestCase(DebuggerTestCase):
def setUp(self):
super(GdbDebuggerTestCase, self).setUp()
-
+
prefix_code = textwrap.dedent('''\
python
-
+
import os
import sys
import traceback
-
+
def excepthook(type, value, tb):
traceback.print_exception(type, value, tb)
os._exit(1)
-
+
sys.excepthook = excepthook
-
+
# Have tracebacks end up on sys.stderr (gdb replaces sys.stderr
# with an object that calls gdb.write())
sys.stderr = sys.__stderr__
-
+
end
''')
-
+
code = textwrap.dedent('''\
python
-
+
from Cython.Debugger.Tests import test_libcython_in_gdb
test_libcython_in_gdb.main(version=%r)
-
+
end
''' % (sys.version_info[:2],))
-
- self.gdb_command_file = cygdb.make_command_file(self.tempdir,
+
+ self.gdb_command_file = cygdb.make_command_file(self.tempdir,
prefix_code)
-
+
with open(self.gdb_command_file, 'a') as f:
f.write(code)
-
+
args = ['gdb', '-batch', '-x', self.gdb_command_file, '-n', '--args',
sys.executable, '-c', 'import codefile']
-
+
paths = []
path = os.environ.get('PYTHONPATH')
if path:
@@ -143,7 +143,7 @@ class GdbDebuggerTestCase(DebuggerTestCase):
paths.append(os.path.dirname(os.path.dirname(
os.path.abspath(Cython.__file__))))
env = dict(os.environ, PYTHONPATH=os.pathsep.join(paths))
-
+
try:
p = subprocess.Popen(['gdb', '-v'], stdout=subprocess.PIPE)
have_gdb = True
@@ -154,7 +154,7 @@ class GdbDebuggerTestCase(DebuggerTestCase):
gdb_version = p.stdout.read().decode('ascii')
p.wait()
p.stdout.close()
-
+
if have_gdb:
# Based on Lib/test/test_gdb.py
regex = "^GNU gdb [^\d]*(\d+)\.(\d+)"
@@ -170,21 +170,21 @@ class GdbDebuggerTestCase(DebuggerTestCase):
stdout=open(os.devnull, 'w'),
stderr=subprocess.PIPE,
env=env)
-
+
def tearDown(self):
super(GdbDebuggerTestCase, self).tearDown()
if self.p:
self.p.stderr.close()
self.p.wait()
os.remove(self.gdb_command_file)
-
-
+
+
class TestAll(GdbDebuggerTestCase):
-
+
def test_all(self):
if self.p is None:
return
-
+
out, err = self.p.communicate()
border = '*' * 30
start = '%s v INSIDE GDB v %s' % (border, border)
diff --git a/Cython/Debugger/Tests/cfuncs.c b/Cython/Debugger/Tests/cfuncs.c
index 081c36533..ccb42050b 100644
--- a/Cython/Debugger/Tests/cfuncs.c
+++ b/Cython/Debugger/Tests/cfuncs.c
@@ -2,7 +2,7 @@ void
some_c_function(void)
{
int a, b, c;
-
+
a = 1;
b = 2;
}
diff --git a/Cython/Debugger/Tests/test_libcython_in_gdb.py b/Cython/Debugger/Tests/test_libcython_in_gdb.py
index e8827b66d..6fd056fe5 100644
--- a/Cython/Debugger/Tests/test_libcython_in_gdb.py
+++ b/Cython/Debugger/Tests/test_libcython_in_gdb.py
@@ -30,10 +30,10 @@ sys.argv = ['gdb']
class DebugTestCase(unittest.TestCase):
"""
- Base class for test cases. On teardown it kills the inferior and unsets
+ Base class for test cases. On teardown it kills the inferior and unsets
all breakpoints.
"""
-
+
def __init__(self, name):
super(DebugTestCase, self).__init__(name)
self.cy = libcython.cy
@@ -43,17 +43,17 @@ class DebugTestCase(unittest.TestCase):
'codefile.ham']
self.eggs_func = libcython.cy.functions_by_qualified_name[
'codefile.eggs']
-
+
def read_var(self, varname, cast_to=None):
result = gdb.parse_and_eval('$cy_cvalue("%s")' % varname)
if cast_to:
result = cast_to(result)
-
+
return result
-
+
def local_info(self):
return gdb.execute('info locals', to_string=True)
-
+
def lineno_equals(self, source_line=None, lineno=None):
if source_line is not None:
lineno = test_libcython.source_to_lineno[source_line]
@@ -71,22 +71,22 @@ class DebugTestCase(unittest.TestCase):
gdb.execute('kill inferior 1', to_string=True)
except RuntimeError:
pass
-
+
gdb.execute('set args -c "import codefile"')
-
+
libcython.cy.step.static_breakpoints.clear()
libcython.cy.step.runtime_breakpoints.clear()
libcython.cy.step.init_breakpoints()
class TestDebugInformationClasses(DebugTestCase):
-
+
def test_CythonModule(self):
"test that debug information was parsed properly into data structures"
self.assertEqual(self.module.name, 'codefile')
- global_vars = ('c_var', 'python_var', '__name__',
+ global_vars = ('c_var', 'python_var', '__name__',
'__builtins__', '__doc__', '__file__')
assert set(global_vars).issubset(self.module.globals)
-
+
def test_CythonVariable(self):
module_globals = self.module.globals
c_var = module_globals['c_var']
@@ -94,32 +94,32 @@ class TestDebugInformationClasses(DebugTestCase):
self.assertEqual(c_var.type, libcython.CObject)
self.assertEqual(python_var.type, libcython.PythonObject)
self.assertEqual(c_var.qualified_name, 'codefile.c_var')
-
+
def test_CythonFunction(self):
self.assertEqual(self.spam_func.qualified_name, 'codefile.spam')
- self.assertEqual(self.spam_meth.qualified_name,
+ self.assertEqual(self.spam_meth.qualified_name,
'codefile.SomeClass.spam')
self.assertEqual(self.spam_func.module, self.module)
-
+
assert self.eggs_func.pf_cname
assert not self.ham_func.pf_cname
assert not self.spam_func.pf_cname
assert not self.spam_meth.pf_cname
-
+
self.assertEqual(self.spam_func.type, libcython.CObject)
self.assertEqual(self.ham_func.type, libcython.CObject)
-
+
self.assertEqual(self.spam_func.arguments, ['a'])
- self.assertEqual(self.spam_func.step_into_functions,
+ self.assertEqual(self.spam_func.step_into_functions,
set(['puts', 'some_c_function']))
-
+
expected_lineno = test_libcython.source_to_lineno['def spam(a=0):']
self.assertEqual(self.spam_func.lineno, expected_lineno)
self.assertEqual(sorted(self.spam_func.locals), list('abcd'))
class TestParameters(unittest.TestCase):
-
+
def test_parameters(self):
gdb.execute('set cy_colorize_code on')
assert libcython.parameters.colorize_code
@@ -132,80 +132,80 @@ class TestBreak(DebugTestCase):
def test_break(self):
breakpoint_amount = len(gdb.breakpoints())
gdb.execute('cy break codefile.spam')
-
+
self.assertEqual(len(gdb.breakpoints()), breakpoint_amount + 1)
bp = gdb.breakpoints()[-1]
self.assertEqual(bp.type, gdb.BP_BREAKPOINT)
assert self.spam_func.cname in bp.location
assert bp.enabled
-
+
def test_python_break(self):
gdb.execute('cy break -p join')
assert 'def join(' in gdb.execute('cy run', to_string=True)
class TestKilled(DebugTestCase):
-
+
def test_abort(self):
gdb.execute("set args -c 'import os; os.abort()'")
output = gdb.execute('cy run', to_string=True)
assert 'abort' in output.lower()
class DebugStepperTestCase(DebugTestCase):
-
+
def step(self, varnames_and_values, source_line=None, lineno=None):
gdb.execute(self.command)
for varname, value in varnames_and_values:
self.assertEqual(self.read_var(varname), value, self.local_info())
-
+
self.lineno_equals(source_line, lineno)
class TestStep(DebugStepperTestCase):
"""
- Test stepping. Stepping happens in the code found in
+ Test stepping. Stepping happens in the code found in
Cython/Debugger/Tests/codefile.
"""
-
+
def test_cython_step(self):
gdb.execute('cy break codefile.spam')
-
+
gdb.execute('run', to_string=True)
self.lineno_equals('def spam(a=0):')
-
+
gdb.execute('cy step', to_string=True)
self.lineno_equals('b = c = d = 0')
-
+
self.command = 'cy step'
self.step([('b', 0)], source_line='b = 1')
self.step([('b', 1), ('c', 0)], source_line='c = 2')
self.step([('c', 2)], source_line='int(10)')
self.step([], source_line='puts("spam")')
-
+
gdb.execute('cont', to_string=True)
self.assertEqual(len(gdb.inferiors()), 1)
self.assertEqual(gdb.inferiors()[0].pid, 0)
-
+
def test_c_step(self):
self.break_and_run('some_c_function()')
gdb.execute('cy step', to_string=True)
self.assertEqual(gdb.selected_frame().name(), 'some_c_function')
-
+
def test_python_step(self):
self.break_and_run('os.path.join("foo", "bar")')
-
+
result = gdb.execute('cy step', to_string=True)
-
+
curframe = gdb.selected_frame()
self.assertEqual(curframe.name(), 'PyEval_EvalFrameEx')
-
+
pyframe = libpython.Frame(curframe).get_pyop()
self.assertEqual(str(pyframe.co_name), 'join')
assert re.match(r'\d+ def join\(', result), result
class TestNext(DebugStepperTestCase):
-
+
def test_cython_next(self):
self.break_and_run('c = 2')
@@ -222,18 +222,18 @@ class TestNext(DebugStepperTestCase):
class TestLocalsGlobals(DebugTestCase):
-
+
def test_locals(self):
self.break_and_run('int(10)')
-
+
result = gdb.execute('cy locals', to_string=True)
assert 'a = 0', repr(result)
assert 'b = (int) 1', result
assert 'c = (int) 2' in result, repr(result)
-
+
def test_globals(self):
self.break_and_run('int(10)')
-
+
result = gdb.execute('cy globals', to_string=True)
assert '__name__ ' in result, repr(result)
assert '__doc__ ' in result, repr(result)
@@ -243,45 +243,45 @@ class TestLocalsGlobals(DebugTestCase):
class TestBacktrace(DebugTestCase):
-
+
def test_backtrace(self):
libcython.parameters.colorize_code.value = False
-
+
self.break_and_run('os.path.join("foo", "bar")')
result = gdb.execute('cy bt', to_string=True)
-
+
_debug(libpython.execute, libpython._execute, gdb.execute)
_debug(gdb.execute('cy list', to_string=True))
_debug(repr(result))
-
- assert re.search(r'\#\d+ *0x.* in spam\(\) at .*codefile\.pyx:22',
+
+ assert re.search(r'\#\d+ *0x.* in spam\(\) at .*codefile\.pyx:22',
result), result
assert 'os.path.join("foo", "bar")' in result, result
-
+
gdb.execute("cy step")
-
+
gdb.execute('cy bt')
result = gdb.execute('cy bt -a', to_string=True)
assert re.search(r'\#0 *0x.* in main\(\) at', result), result
class TestFunctions(DebugTestCase):
-
+
def test_functions(self):
self.break_and_run('c = 2')
result = gdb.execute('print $cy_cname("b")', to_string=True)
assert re.search('__pyx_.*b', result), result
-
+
result = gdb.execute('print $cy_lineno()', to_string=True)
supposed_lineno = test_libcython.source_to_lineno['c = 2']
assert str(supposed_lineno) in result, (supposed_lineno, result)
-
+
result = gdb.execute('print $cy_cvalue("b")', to_string=True)
assert '= 1' in result
-
+
class TestPrint(DebugTestCase):
-
+
def test_print(self):
self.break_and_run('c = 2')
result = gdb.execute('cy print b', to_string=True)
@@ -294,53 +294,53 @@ class TestUpDown(DebugTestCase):
self.break_and_run('os.path.join("foo", "bar")')
gdb.execute('cy step')
self.assertRaises(RuntimeError, gdb.execute, 'cy down')
-
+
result = gdb.execute('cy up', to_string=True)
assert 'spam()' in result
assert 'os.path.join("foo", "bar")' in result
class TestExec(DebugTestCase):
-
+
def setUp(self):
super(TestExec, self).setUp()
self.fd, self.tmpfilename = tempfile.mkstemp()
self.tmpfile = os.fdopen(self.fd, 'r+')
-
+
def tearDown(self):
super(TestExec, self).tearDown()
-
+
try:
self.tmpfile.close()
finally:
os.remove(self.tmpfilename)
-
+
def eval_command(self, command):
- gdb.execute('cy exec open(%r, "w").write(str(%s))' %
+ gdb.execute('cy exec open(%r, "w").write(str(%s))' %
(self.tmpfilename, command))
return self.tmpfile.read().strip()
-
+
def test_cython_exec(self):
self.break_and_run('os.path.join("foo", "bar")')
-
+
# test normal behaviour
self.assertEqual("[0]", self.eval_command('[a]'))
-
+
# test multiline code
result = gdb.execute(textwrap.dedent('''\
cy exec
pass
-
+
"nothing"
end
'''))
result = self.tmpfile.read().rstrip()
self.assertEqual('', result)
-
+
def test_python_exec(self):
self.break_and_run('os.path.join("foo", "bar")')
gdb.execute('cy step')
-
+
gdb.execute('cy exec some_random_var = 14')
self.assertEqual('14', self.eval_command('some_random_var'))
@@ -351,7 +351,7 @@ if _do_debug:
def _debug(*messages):
if _do_debug:
- messages = itertools.chain([sys._getframe(1).f_code.co_name, ':'],
+ messages = itertools.chain([sys._getframe(1).f_code.co_name, ':'],
messages)
_debug_file.write(' '.join(str(msg) for msg in messages) + '\n')
@@ -368,13 +368,13 @@ def run_unittest_in_module(modulename):
else:
m = __import__(modulename, fromlist=[''])
tests = inspect.getmembers(m, inspect.isclass)
-
+
# test_support.run_unittest(tests)
-
+
test_loader = unittest.TestLoader()
suite = unittest.TestSuite(
[test_loader.loadTestsFromTestCase(cls) for name, cls in tests])
-
+
result = unittest.TextTestRunner(verbosity=1).run(suite)
return result.wasSuccessful()
@@ -384,18 +384,18 @@ def runtests():
returned to the parent test process.
"""
from Cython.Debugger.Tests import test_libpython_in_gdb
-
+
success_libcython = run_unittest_in_module(__name__)
success_libpython = run_unittest_in_module(test_libpython_in_gdb.__name__)
-
+
if not success_libcython or not success_libpython:
sys.exit(1)
-
+
def main(version, trace_code=False):
- global inferior_python_version
-
+ global inferior_python_version
+
inferior_python_version = version
-
+
if trace_code:
tracer = trace.Trace(count=False, trace=True, outfile=sys.stderr,
ignoredirs=[sys.prefix, sys.exec_prefix])
diff --git a/Cython/Debugger/Tests/test_libpython_in_gdb.py b/Cython/Debugger/Tests/test_libpython_in_gdb.py
index c5c9cc001..5e0407d9e 100644
--- a/Cython/Debugger/Tests/test_libpython_in_gdb.py
+++ b/Cython/Debugger/Tests/test_libpython_in_gdb.py
@@ -1,7 +1,7 @@
# -*- coding: UTF-8 -*-
"""
-Test libpython.py. This is already partly tested by test_libcython_in_gdb and
+Test libpython.py. This is already partly tested by test_libcython_in_gdb and
Lib/test/test_gdb.py in the Python source. These tests are run in gdb and
called from test_libcython_in_gdb.main()
"""
@@ -22,96 +22,96 @@ class TestPrettyPrinters(test_libcython_in_gdb.DebugTestCase):
"""
Test whether types of Python objects are correctly inferred and that
the right libpython.PySomeTypeObjectPtr classes are instantiated.
-
- Also test whether values are appropriately formatted (don't be too
+
+ Also test whether values are appropriately formatted (don't be too
laborious as Lib/test/test_gdb.py already covers this extensively).
-
- Don't take care of decreffing newly allocated objects as a new
+
+ Don't take care of decreffing newly allocated objects as a new
interpreter is started for every test anyway.
"""
-
+
def setUp(self):
super(TestPrettyPrinters, self).setUp()
self.break_and_run('b = c = d = 0')
-
+
def get_pyobject(self, code):
value = gdb.parse_and_eval(code)
assert libpython.pointervalue(value) != 0
return value
-
+
def pyobject_fromcode(self, code, gdbvar=None):
if gdbvar is not None:
d = {'varname':gdbvar, 'code':code}
gdb.execute('set $%(varname)s = %(code)s' % d)
code = '$' + gdbvar
-
+
return libpython.PyObjectPtr.from_pyobject_ptr(self.get_pyobject(code))
-
+
def get_repr(self, pyobject):
return pyobject.get_truncated_repr(libpython.MAX_OUTPUT_LEN)
-
+
def alloc_bytestring(self, string, gdbvar=None):
if inferior_python_version < (3, 0):
funcname = 'PyString_FromString'
else:
funcname = 'PyBytes_FromString'
-
+
assert '"' not in string
-
+
# ensure double quotes
code = '(PyObject *) %s("%s")' % (funcname, string)
return self.pyobject_fromcode(code, gdbvar=gdbvar)
-
+
def alloc_unicodestring(self, string, gdbvar=None):
self.alloc_bytestring(string.encode('UTF-8'), gdbvar='_temp')
-
+
postfix = libpython.get_inferior_unicode_postfix()
funcname = 'PyUnicode%s_FromEncodedObject' % (postfix,)
-
+
return self.pyobject_fromcode(
- '(PyObject *) %s($_temp, "UTF-8", "strict")' % funcname,
+ '(PyObject *) %s($_temp, "UTF-8", "strict")' % funcname,
gdbvar=gdbvar)
-
+
def test_bytestring(self):
bytestring = self.alloc_bytestring("spam")
-
+
if inferior_python_version < (3, 0):
bytestring_class = libpython.PyStringObjectPtr
expected = repr("spam")
else:
bytestring_class = libpython.PyBytesObjectPtr
expected = "b'spam'"
-
+
self.assertEqual(type(bytestring), bytestring_class)
self.assertEqual(self.get_repr(bytestring), expected)
-
+
def test_unicode(self):
unicode_string = self.alloc_unicodestring(u"spam ἄλφα")
-
+
expected = "'spam ἄλφα'"
if inferior_python_version < (3, 0):
expected = 'u' + expected
-
+
self.assertEqual(type(unicode_string), libpython.PyUnicodeObjectPtr)
self.assertEqual(self.get_repr(unicode_string), expected)
-
+
def test_int(self):
if inferior_python_version < (3, 0):
intval = self.pyobject_fromcode('PyInt_FromLong(100)')
self.assertEqual(type(intval), libpython.PyIntObjectPtr)
self.assertEqual(self.get_repr(intval), '100')
-
+
def test_long(self):
- longval = self.pyobject_fromcode('PyLong_FromLong(200)',
+ longval = self.pyobject_fromcode('PyLong_FromLong(200)',
gdbvar='longval')
assert gdb.parse_and_eval('$longval->ob_type == &PyLong_Type')
-
+
self.assertEqual(type(longval), libpython.PyLongObjectPtr)
self.assertEqual(self.get_repr(longval), '200')
-
+
def test_frame_type(self):
frame = self.pyobject_fromcode('PyEval_GetFrame()')
-
+
self.assertEqual(type(frame), libpython.PyFrameObjectPtr)
-
+
diff --git a/Cython/Debugger/libcython.py b/Cython/Debugger/libcython.py
index d766f5655..e70b1aadb 100644
--- a/Cython/Debugger/libcython.py
+++ b/Cython/Debugger/libcython.py
@@ -67,7 +67,7 @@ def dont_suppress_errors(function):
except Exception:
traceback.print_exc()
raise
-
+
return wrapper
def default_selected_gdb_frame(err=True):
@@ -78,10 +78,10 @@ def default_selected_gdb_frame(err=True):
frame = frame or gdb.selected_frame()
except RuntimeError:
raise gdb.GdbError("No frame is currently selected.")
-
+
if err and frame.name() is None:
raise NoFunctionNameInFrameError()
-
+
return function(self, frame, *args, **kwargs)
return wrapper
return decorator
@@ -95,7 +95,7 @@ def require_cython_frame(function):
raise gdb.GdbError('Selected frame does not correspond with a '
'Cython function we know about.')
return function(self, *args, **kwargs)
- return wrapper
+ return wrapper
def dispatch_on_frame(c_command, python_command=None):
def decorator(function):
@@ -103,7 +103,7 @@ def dispatch_on_frame(c_command, python_command=None):
def wrapper(self, *args, **kwargs):
is_cy = self.is_cython_function()
is_py = self.is_python_function()
-
+
if is_cy or (is_py and not python_command):
function(self, *args, **kwargs)
elif is_py:
@@ -113,7 +113,7 @@ def dispatch_on_frame(c_command, python_command=None):
else:
raise gdb.GdbError("Not a function cygdb knows about. "
"Use the normal GDB commands instead.")
-
+
return wrapper
return decorator
@@ -124,10 +124,10 @@ def require_running_program(function):
gdb.selected_frame()
except RuntimeError:
raise gdb.GdbError("No frame is currently selected.")
-
+
return function(*args, **kwargs)
return wrapper
-
+
def gdb_function_value_to_unicode(function):
@functools.wraps(function)
@@ -153,7 +153,7 @@ class CythonModule(object):
# {c_lineno: cython_lineno}
self.lineno_c2cy = {}
self.functions = {}
-
+
def qualified_name(self, varname):
return '.'.join(self.name, varname)
@@ -167,17 +167,17 @@ class CythonVariable(object):
self.lineno = int(lineno)
class CythonFunction(CythonVariable):
- def __init__(self,
- module,
- name,
- cname,
+ def __init__(self,
+ module,
+ name,
+ cname,
pf_cname,
- qualified_name,
- lineno,
+ qualified_name,
+ lineno,
type=CObject):
- super(CythonFunction, self).__init__(name,
- cname,
- qualified_name,
+ super(CythonFunction, self).__init__(name,
+ cname,
+ qualified_name,
type,
lineno)
self.module = module
@@ -190,7 +190,7 @@ class CythonFunction(CythonVariable):
# General purpose classes
class CythonBase(object):
-
+
@default_selected_gdb_frame(err=False)
def is_cython_function(self, frame):
return frame.name() in self.cy.functions_by_cname
@@ -205,7 +205,7 @@ class CythonBase(object):
pyframe = libpython.Frame(frame).get_pyop()
return pyframe and not pyframe.is_optimized_out()
return False
-
+
@default_selected_gdb_frame()
def get_c_function_name(self, frame):
return frame.name()
@@ -213,24 +213,24 @@ class CythonBase(object):
@default_selected_gdb_frame()
def get_c_lineno(self, frame):
return frame.find_sal().line
-
+
@default_selected_gdb_frame()
def get_cython_function(self, frame):
result = self.cy.functions_by_cname.get(frame.name())
if result is None:
raise NoCythonFunctionInFrameError()
-
+
return result
-
+
@default_selected_gdb_frame()
def get_cython_lineno(self, frame):
"""
- Get the current Cython line number. Returns 0 if there is no
+ Get the current Cython line number. Returns 0 if there is no
correspondence between the C and Cython code.
"""
cyfunc = self.get_cython_function(frame)
return cyfunc.module.lineno_c2cy.get(self.get_c_lineno(frame), 0)
-
+
@default_selected_gdb_frame()
def get_source_desc(self, frame):
filename = lineno = lexer = None
@@ -247,7 +247,7 @@ class CythonBase(object):
filename = pyframeobject.filename()
lineno = pyframeobject.current_line_num()
-
+
if pygments:
lexer = pygments.lexers.PythonLexer(stripall=False)
else:
@@ -260,14 +260,14 @@ class CythonBase(object):
lineno = symbol_and_line_obj.line
if pygments:
lexer = pygments.lexers.CLexer(stripall=False)
-
+
return SourceFileDescriptor(filename, lexer), lineno
@default_selected_gdb_frame()
def get_source_line(self, frame):
source_desc, lineno = self.get_source_desc()
return source_desc.get_source(lineno)
-
+
@default_selected_gdb_frame()
def is_relevant_function(self, frame):
"""
@@ -284,7 +284,7 @@ class CythonBase(object):
return name in cython_func.step_into_functions
return False
-
+
@default_selected_gdb_frame(err=False)
def print_stackframe(self, frame, index, is_c=False):
"""
@@ -295,7 +295,7 @@ class CythonBase(object):
# raising GdbError when calling self.cy.cy_cvalue.invoke()
selected_frame = gdb.selected_frame()
frame.select()
-
+
try:
source_desc, lineno = self.get_source_desc(frame)
except NoFunctionNameInFrameError:
@@ -307,14 +307,14 @@ class CythonBase(object):
if pyframe is None or pyframe.is_optimized_out():
# print this python function as a C function
return self.print_stackframe(frame, index, is_c=True)
-
+
func_name = pyframe.co_name
func_cname = 'PyEval_EvalFrameEx'
func_args = []
elif self.is_cython_function(frame):
cyfunc = self.get_cython_function(frame)
f = lambda arg: self.cy.cy_cvalue.invoke(arg, frame=frame)
-
+
func_name = cyfunc.name
func_cname = cyfunc.cname
func_args = [] # [(arg, f(arg)) for arg in cyfunc.arguments]
@@ -323,7 +323,7 @@ class CythonBase(object):
func_name = frame.name()
func_cname = func_name
func_args = []
-
+
try:
gdb_value = gdb.parse_and_eval(func_cname)
except RuntimeError:
@@ -331,36 +331,36 @@ class CythonBase(object):
else:
# Seriously? Why is the address not an int?
func_address = int(str(gdb_value.address).split()[0], 0)
-
+
a = ', '.join('%s=%s' % (name, val) for name, val in func_args)
print '#%-2d 0x%016x in %s(%s)' % (index, func_address, func_name, a),
-
+
if source_desc.filename is not None:
print 'at %s:%s' % (source_desc.filename, lineno),
-
+
print
-
+
try:
print ' ' + source_desc.get_source(lineno)
except gdb.GdbError:
pass
-
+
selected_frame.select()
-
+
def get_remote_cython_globals_dict(self):
m = gdb.parse_and_eval('__pyx_m')
-
+
try:
PyModuleObject = gdb.lookup_type('PyModuleObject')
except RuntimeError:
raise gdb.GdbError(textwrap.dedent("""\
- Unable to lookup type PyModuleObject, did you compile python
+ Unable to lookup type PyModuleObject, did you compile python
with debugging support (-g)?"""))
-
+
m = m.cast(PyModuleObject.pointer())
return m['md_dict']
-
-
+
+
def get_cython_globals_dict(self):
"""
Get the Cython globals dict where the remote names are turned into
@@ -368,12 +368,12 @@ class CythonBase(object):
"""
remote_dict = self.get_remote_cython_globals_dict()
pyobject_dict = libpython.PyObjectPtr.from_pyobject_ptr(remote_dict)
-
+
result = {}
seen = set()
for k, v in pyobject_dict.iteritems():
result[k.proxyval(seen)] = v
-
+
return result
def print_gdb_value(self, name, value, max_name_length=None, prefix=''):
@@ -381,11 +381,11 @@ class CythonBase(object):
typename = ''
else:
typename = '(%s) ' % (value.type,)
-
+
if max_name_length is None:
print '%s%s = %s%s' % (prefix, name, typename, value)
else:
- print '%s%-*s = %s%s' % (prefix, max_name_length, name, typename,
+ print '%s%-*s = %s%s' % (prefix, max_name_length, name, typename,
value)
def is_initialized(self, cython_func, local_name):
@@ -420,30 +420,30 @@ class SourceFileDescriptor(object):
# to provide "correct" colouring, the entire code needs to be
# lexed. However, this makes a lot of things terribly slow, so
# we decide not to. Besides, it's unlikely to matter.
-
+
if lex_source and lex_entire:
f = self.lex(f.read()).splitlines()
-
+
slice = itertools.islice(f, start - 1, stop - 1)
-
+
for idx, line in enumerate(slice):
if start + idx == mark_line:
prefix = '>'
else:
prefix = ' '
-
+
if lex_source and not lex_entire:
line = self.lex(line)
yield '%s %4d %s' % (prefix, start + idx, line.rstrip())
- def get_source(self, start, stop=None, lex_source=True, mark_line=0,
+ def get_source(self, start, stop=None, lex_source=True, mark_line=0,
lex_entire=False):
exc = gdb.GdbError('Unable to retrieve source code')
-
+
if not self.filename:
raise exc
-
+
start = max(start, 1)
if stop is None:
stop = start + 1
@@ -461,21 +461,21 @@ class CyGDBError(gdb.GdbError):
"""
    Base class for Cython-command related errors
"""
-
+
def __init__(self, *args):
args = args or (self.msg,)
super(CyGDBError, self).__init__(*args)
-
+
class NoCythonFunctionInFrameError(CyGDBError):
"""
- raised when the user requests the current cython function, which is
+ raised when the user requests the current cython function, which is
unavailable
"""
msg = "Current function is a function cygdb doesn't know about"
class NoFunctionNameInFrameError(NoCythonFunctionInFrameError):
"""
- raised when the name of the C function could not be determined
+ raised when the name of the C function could not be determined
in the current C stack frame
"""
msg = ('C function name could not be determined in the current C stack '
@@ -488,23 +488,23 @@ class CythonParameter(gdb.Parameter):
"""
Base class for cython parameters
"""
-
+
def __init__(self, name, command_class, parameter_class, default=None):
self.show_doc = self.set_doc = self.__class__.__doc__
- super(CythonParameter, self).__init__(name, command_class,
+ super(CythonParameter, self).__init__(name, command_class,
parameter_class)
if default is not None:
self.value = default
-
+
def __nonzero__(self):
return bool(self.value)
-
+
__bool__ = __nonzero__ # python 3
class CompleteUnqualifiedFunctionNames(CythonParameter):
"""
Have 'cy break' complete unqualified function or method names.
- """
+ """
class ColorizeSourceCode(CythonParameter):
"""
@@ -521,7 +521,7 @@ class CythonParameters(object):
Simple container class that might get more functionality in the distant
future (mostly to remind us that we're dealing with parameters).
"""
-
+
def __init__(self):
self.complete_unqualified = CompleteUnqualifiedFunctionNames(
'cy_complete_unqualified',
@@ -538,7 +538,7 @@ class CythonParameters(object):
gdb.COMMAND_FILES,
gdb.PARAM_STRING,
"dark")
-
+
parameters = CythonParameters()
@@ -548,30 +548,30 @@ class CythonCommand(gdb.Command, CythonBase):
"""
Base class for Cython commands
"""
-
+
command_class = gdb.COMMAND_NONE
-
+
@classmethod
def _register(cls, clsname, args, kwargs):
if not hasattr(cls, 'completer_class'):
return cls(clsname, cls.command_class, *args, **kwargs)
else:
- return cls(clsname, cls.command_class, cls.completer_class,
+ return cls(clsname, cls.command_class, cls.completer_class,
*args, **kwargs)
-
+
@classmethod
def register(cls, *args, **kwargs):
alias = getattr(cls, 'alias', None)
if alias:
cls._register(cls.alias, args, kwargs)
-
+
return cls._register(cls.name, args, kwargs)
class CyCy(CythonCommand):
"""
Invoke a Cython command. Available commands are:
-
+
cy import
cy break
cy step
@@ -589,16 +589,16 @@ class CyCy(CythonCommand):
cy globals
cy exec
"""
-
+
name = 'cy'
command_class = gdb.COMMAND_NONE
completer_class = gdb.COMPLETE_COMMAND
-
+
def __init__(self, name, command_class, completer_class):
# keep the signature 2.5 compatible (i.e. do not use f(*a, k=v)
- super(CythonCommand, self).__init__(name, command_class,
+ super(CythonCommand, self).__init__(name, command_class,
completer_class, prefix=True)
-
+
commands = dict(
import_ = CyImport.register(),
break_ = CyBreak.register(),
@@ -621,24 +621,24 @@ class CyCy(CythonCommand):
cy_cvalue = CyCValue('cy_cvalue'),
cy_lineno = CyLine('cy_lineno'),
)
-
+
for command_name, command in commands.iteritems():
command.cy = self
setattr(self, command_name, command)
-
+
self.cy = self
-
+
# Cython module namespace
self.cython_namespace = {}
-
- # maps (unique) qualified function names (e.g.
+
+ # maps (unique) qualified function names (e.g.
# cythonmodule.ClassName.method_name) to the CythonFunction object
self.functions_by_qualified_name = {}
-
+
# unique cnames of Cython functions
self.functions_by_cname = {}
-
- # map function names like method_name to a list of all such
+
+ # map function names like method_name to a list of all such
# CythonFunction objects
self.functions_by_name = collections.defaultdict(list)
@@ -648,46 +648,46 @@ class CyImport(CythonCommand):
Import debug information outputted by the Cython compiler
Example: cy import FILE...
"""
-
+
name = 'cy import'
command_class = gdb.COMMAND_STATUS
completer_class = gdb.COMPLETE_FILENAME
-
+
def invoke(self, args, from_tty):
args = args.encode(_filesystemencoding)
for arg in string_to_argv(args):
try:
f = open(arg)
except OSError, e:
- raise gdb.GdbError('Unable to open file %r: %s' %
+ raise gdb.GdbError('Unable to open file %r: %s' %
(args, e.args[1]))
-
+
t = etree.parse(f)
-
+
for module in t.getroot():
cython_module = CythonModule(**module.attrib)
self.cy.cython_namespace[cython_module.name] = cython_module
-
+
for variable in module.find('Globals'):
d = variable.attrib
cython_module.globals[d['name']] = CythonVariable(**d)
-
+
for function in module.find('Functions'):
- cython_function = CythonFunction(module=cython_module,
+ cython_function = CythonFunction(module=cython_module,
**function.attrib)
# update the global function mappings
name = cython_function.name
qname = cython_function.qualified_name
-
+
self.cy.functions_by_name[name].append(cython_function)
self.cy.functions_by_qualified_name[
cython_function.qualified_name] = cython_function
self.cy.functions_by_cname[
cython_function.cname] = cython_function
-
+
d = cython_module.functions[qname] = cython_function
-
+
for local in function.find('Locals'):
d = local.attrib
cython_function.locals[d['name']] = CythonVariable(**d)
@@ -695,7 +695,7 @@ class CyImport(CythonCommand):
for step_into_func in function.find('StepIntoFunctions'):
d = step_into_func.attrib
cython_function.step_into_functions.add(d['name'])
-
+
cython_function.arguments.extend(
funcarg.tag for funcarg in function.find('Arguments'))
@@ -712,30 +712,30 @@ class CyImport(CythonCommand):
class CyBreak(CythonCommand):
"""
Set a breakpoint for Cython code using Cython qualified name notation, e.g.:
-
+
cy break cython_modulename.ClassName.method_name...
-
+
or normal notation:
-
+
cy break function_or_method_name...
-
+
or for a line number:
-
+
cy break cython_module:lineno...
-
+
Set a Python breakpoint:
Break on any function or method named 'func' in module 'modname'
-
+
cy break -p modname.func...
-
+
Break on any function or method named 'func'
-
+
cy break -p func...
"""
-
+
name = 'cy break'
command_class = gdb.COMMAND_BREAKPOINTS
-
+
def _break_pyx(self, name):
modulename, _, lineno = name.partition(':')
lineno = int(lineno)
@@ -751,23 +751,23 @@ class CyBreak(CythonCommand):
else:
raise GdbError("Not a valid line number. "
"Does it contain actual code?")
-
+
def _break_funcname(self, funcname):
func = self.cy.functions_by_qualified_name.get(funcname)
break_funcs = [func]
-
+
if not func:
funcs = self.cy.functions_by_name.get(funcname)
if not funcs:
gdb.execute('break ' + funcname)
return
-
+
if len(funcs) > 1:
# multiple functions, let the user pick one
print 'There are multiple such functions:'
for idx, func in enumerate(funcs):
print '%3d) %s' % (idx, func.qualified_name)
-
+
while True:
try:
result = raw_input(
@@ -781,7 +781,7 @@ class CyBreak(CythonCommand):
elif result.lower() == 'a':
break_funcs = funcs
break
- elif (result.isdigit() and
+ elif (result.isdigit() and
0 <= int(result) < len(funcs)):
break_funcs = [funcs[int(result)]]
break
@@ -789,12 +789,12 @@ class CyBreak(CythonCommand):
print 'Not understood...'
else:
break_funcs = [funcs[0]]
-
+
for func in break_funcs:
gdb.execute('break %s' % func.cname)
if func.pf_cname:
gdb.execute('break %s' % func.pf_cname)
-
+
def invoke(self, function_names, from_tty):
argv = string_to_argv(function_names.encode('UTF-8'))
if function_names.startswith('-p'):
@@ -802,7 +802,7 @@ class CyBreak(CythonCommand):
python_breakpoints = True
else:
python_breakpoints = False
-
+
for funcname in argv:
if python_breakpoints:
gdb.execute('py-break %s' % funcname)
@@ -810,7 +810,7 @@ class CyBreak(CythonCommand):
self._break_pyx(funcname)
else:
self._break_funcname(funcname)
-
+
@dont_suppress_errors
def complete(self, text, word):
names = self.cy.functions_by_qualified_name
@@ -820,18 +820,18 @@ class CyBreak(CythonCommand):
words = text.strip().split()
if words and '.' in words[-1]:
lastword = words[-1]
- compl = [n for n in self.cy.functions_by_qualified_name
+ compl = [n for n in self.cy.functions_by_qualified_name
if n.startswith(lastword)]
else:
seen = set(text[:-len(word)].split())
return [n for n in names if n.startswith(word) and n not in seen]
-
+
if len(lastword) > len(word):
# readline sees something (e.g. a '.') as a word boundary, so don't
# "recomplete" this prefix
strip_prefix_length = len(lastword) - len(word)
compl = [n[strip_prefix_length:] for n in compl]
-
+
return compl
@@ -840,7 +840,7 @@ class CythonCodeStepper(CythonCommand, libpython.GenericCodeStepper):
Base class for CyStep and CyNext. It implements the interface dictated by
libpython.GenericCodeStepper.
"""
-
+
def lineno(self, frame):
# Take care of the Python and Cython levels. We need to care for both
        # as we can't simply dispatch to 'py-step', since that would work for
@@ -850,7 +850,7 @@ class CythonCodeStepper(CythonCommand, libpython.GenericCodeStepper):
return self.get_cython_lineno(frame)
else:
return libpython.py_step.lineno(frame)
-
+
def get_source_line(self, frame):
try:
line = super(CythonCodeStepper, self).get_source_line(frame)
@@ -866,7 +866,7 @@ class CythonCodeStepper(CythonCommand, libpython.GenericCodeStepper):
def runtime_break_functions(self):
if self.is_cython_function():
return self.get_cython_function().step_into_functions
-
+
def static_break_functions(self):
result = ['PyEval_EvalFrameEx']
result.extend(self.cy.functions_by_cname)
@@ -878,7 +878,7 @@ class CythonCodeStepper(CythonCommand, libpython.GenericCodeStepper):
command = 'step'
else:
command = 'next'
-
+
self.finish_executing(gdb.execute(command, to_string=True))
else:
self.step()
@@ -886,7 +886,7 @@ class CythonCodeStepper(CythonCommand, libpython.GenericCodeStepper):
class CyStep(CythonCodeStepper):
"Step through Cython, Python or C code."
-
+
name = 'cy step'
stepinto = True
@@ -900,21 +900,21 @@ class CyNext(CythonCodeStepper):
class CyRun(CythonCodeStepper):
"""
- Run a Cython program. This is like the 'run' command, except that it
+ Run a Cython program. This is like the 'run' command, except that it
displays Cython or Python source lines as well
"""
-
+
name = 'cy run'
-
+
invoke = CythonCodeStepper.run
class CyCont(CyRun):
"""
-    Continue a Cython program. This is like the 'cont' command, except that it
+    Continue a Cython program. This is like the 'cont' command, except that it
displays Cython or Python source lines as well.
"""
-
+
name = 'cy cont'
invoke = CythonCodeStepper.cont
@@ -934,7 +934,7 @@ class CyUp(CythonCommand):
"""
name = 'cy up'
_command = 'up'
-
+
def invoke(self, *args):
try:
gdb.execute(self._command, to_string=True)
@@ -942,13 +942,13 @@ class CyUp(CythonCommand):
gdb.execute(self._command, to_string=True)
except RuntimeError, e:
raise gdb.GdbError(*e.args)
-
+
frame = gdb.selected_frame()
index = 0
while frame:
frame = frame.older()
index += 1
-
+
self.print_stackframe(index=index - 1)
@@ -956,7 +956,7 @@ class CyDown(CyUp):
"""
Go down a Cython, Python or relevant C frame.
"""
-
+
name = 'cy down'
_command = 'down'
@@ -966,21 +966,21 @@ class CySelect(CythonCodeStepper):
Select a frame. Use frame numbers as listed in `cy backtrace`.
This command is useful because `cy backtrace` prints a reversed backtrace.
"""
-
+
name = 'cy select'
-
+
def invoke(self, stackno, from_tty):
try:
stackno = int(stackno)
except ValueError:
raise gdb.GdbError("Not a valid number: %r" % (stackno,))
-
+
frame = gdb.selected_frame()
while frame.newer():
frame = frame.newer()
-
+
stackdepth = self._stackdepth(frame)
-
+
try:
gdb.execute('select %d' % (stackdepth - stackno - 1,))
except RuntimeError, e:
@@ -989,37 +989,37 @@ class CySelect(CythonCodeStepper):
class CyBacktrace(CythonCommand):
'Print the Cython stack'
-
+
name = 'cy bt'
alias = 'cy backtrace'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
-
+
@require_running_program
def invoke(self, args, from_tty):
# get the first frame
selected_frame = frame = gdb.selected_frame()
while frame.older():
frame = frame.older()
-
+
print_all = args == '-a'
-
+
index = 0
while frame:
is_c = False
-
+
is_relevant = False
try:
is_relevant = self.is_relevant_function(frame)
except CyGDBError:
pass
-
+
if print_all or is_relevant:
self.print_stackframe(frame, index)
-
+
index += 1
frame = frame.newer()
-
+
selected_frame.select()
@@ -1028,15 +1028,15 @@ class CyList(CythonCommand):
    List Cython source code. To disable or customize colouring, see the cy_*
parameters.
"""
-
+
name = 'cy list'
command_class = gdb.COMMAND_FILES
completer_class = gdb.COMPLETE_NONE
-
+
@dispatch_on_frame(c_command='list')
def invoke(self, _, from_tty):
sd, lineno = self.get_source_desc()
- source = sd.get_source(lineno - 5, lineno + 5, mark_line=lineno,
+ source = sd.get_source(lineno - 5, lineno + 5, mark_line=lineno,
lex_entire=True)
print source
@@ -1045,10 +1045,10 @@ class CyPrint(CythonCommand):
"""
Print a Cython variable using 'cy-print x' or 'cy-print module.function.x'
"""
-
+
name = 'cy print'
command_class = gdb.COMMAND_DATA
-
+
def invoke(self, name, from_tty, max_name_length=None):
if self.is_python_function():
return gdb.execute('py-print ' + name)
@@ -1059,11 +1059,11 @@ class CyPrint(CythonCommand):
value = value.dereference()
else:
break
-
+
self.print_gdb_value(name, value, max_name_length)
else:
gdb.execute('print ' + name)
-
+
def complete(self):
if self.is_cython_function():
f = self.get_cython_function()
@@ -1078,11 +1078,11 @@ class CyLocals(CythonCommand):
"""
List the locals from the current Cython frame.
"""
-
+
name = 'cy locals'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
-
+
@dispatch_on_frame(c_command='info locals', python_command='py-locals')
def invoke(self, args, from_tty):
local_cython_vars = self.get_cython_function().locals
@@ -1091,7 +1091,7 @@ class CyLocals(CythonCommand):
if self.is_initialized(self.get_cython_function(), cyvar.name):
value = gdb.parse_and_eval(cyvar.cname)
if not value.is_optimized_out:
- self.print_gdb_value(cyvar.name, value,
+ self.print_gdb_value(cyvar.name, value,
max_name_length, '')
@@ -1099,32 +1099,32 @@ class CyGlobals(CyLocals):
"""
List the globals from the current Cython module.
"""
-
+
name = 'cy globals'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
-
+
@dispatch_on_frame(c_command='info variables', python_command='py-globals')
def invoke(self, args, from_tty):
global_python_dict = self.get_cython_globals_dict()
module_globals = self.get_cython_function().module.globals
-
+
max_globals_len = 0
max_globals_dict_len = 0
if module_globals:
max_globals_len = len(max(module_globals, key=len))
if global_python_dict:
            max_globals_dict_len = len(max(global_python_dict, key=len))
-
+
max_name_length = max(max_globals_len, max_globals_dict_len)
-
+
seen = set()
print 'Python globals:'
for k, v in sorted(global_python_dict.iteritems(), key=sortkey):
v = v.get_truncated_repr(libpython.MAX_OUTPUT_LEN)
seen.add(k)
print ' %-*s = %s' % (max_name_length, k, v)
-
+
print 'C globals:'
for name, cyvar in sorted(module_globals.iteritems(), key=sortkey):
if name not in seen:
@@ -1142,20 +1142,20 @@ class CyExec(CythonCommand, libpython.PyExec):
"""
Execute Python code in the nearest Python or Cython frame.
"""
-
+
name = '-cy-exec'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
-
+
def _fill_locals_dict(self, executor, local_dict_pointer):
"Fill a remotely allocated dict with values from the Cython C stack"
cython_func = self.get_cython_function()
current_lineno = self.get_cython_lineno()
-
+
for name, cyvar in cython_func.locals.iteritems():
- if (cyvar.type == PythonObject and
+ if (cyvar.type == PythonObject and
self.is_initialized(cython_func, name)):
-
+
try:
val = gdb.parse_and_eval(cyvar.cname)
except RuntimeError:
@@ -1163,7 +1163,7 @@ class CyExec(CythonCommand, libpython.PyExec):
else:
if val.is_optimized_out:
continue
-
+
pystringp = executor.alloc_pystring(name)
code = '''
(PyObject *) PyDict_SetItem(
@@ -1179,38 +1179,38 @@ class CyExec(CythonCommand, libpython.PyExec):
finally:
# PyDict_SetItem doesn't steal our reference
executor.decref(pystringp)
-
+
def _find_first_cython_or_python_frame(self):
frame = gdb.selected_frame()
while frame:
- if (self.is_cython_function(frame) or
+ if (self.is_cython_function(frame) or
self.is_python_function(frame)):
return frame
-
+
frame = frame.older()
-
+
raise gdb.GdbError("There is no Cython or Python frame on the stack.")
-
+
def invoke(self, expr, from_tty):
frame = self._find_first_cython_or_python_frame()
if self.is_python_function(frame):
libpython.py_exec.invoke(expr, from_tty)
return
-
+
expr, input_type = self.readcode(expr)
executor = libpython.PythonCodeExecutor()
-
+
with libpython.FetchAndRestoreError():
- # get the dict of Cython globals and construct a dict in the
+ # get the dict of Cython globals and construct a dict in the
# inferior with Cython locals
global_dict = gdb.parse_and_eval(
'(PyObject *) PyModule_GetDict(__pyx_m)')
local_dict = gdb.parse_and_eval('(PyObject *) PyDict_New()')
-
+
cython_function = self.get_cython_function()
-
+
try:
- self._fill_locals_dict(executor,
+ self._fill_locals_dict(executor,
libpython.pointervalue(local_dict))
executor.evalcode(expr, input_type, global_dict, local_dict)
finally:
@@ -1223,18 +1223,18 @@ class CyCName(gdb.Function, CythonBase):
"""
Get the C name of a Cython variable in the current context.
Examples:
-
+
print $cy_cname("function")
print $cy_cname("Class.method")
print $cy_cname("module.function")
"""
-
+
@require_cython_frame
@gdb_function_value_to_unicode
def invoke(self, cyname, frame=None):
frame = frame or gdb.selected_frame()
cname = None
-
+
if self.is_cython_function(frame):
cython_function = self.get_cython_function(frame)
if cyname in cython_function.locals:
@@ -1245,13 +1245,13 @@ class CyCName(gdb.Function, CythonBase):
qname = '%s.%s' % (cython_function.module.name, cyname)
if qname in cython_function.module.functions:
cname = cython_function.module.functions[qname].cname
-
+
if not cname:
cname = self.cy.functions_by_qualified_name.get(cyname)
-
+
if not cname:
raise gdb.GdbError('No such Cython variable: %s' % cyname)
-
+
return cname
@@ -1259,7 +1259,7 @@ class CyCValue(CyCName):
"""
Get the value of a Cython variable.
"""
-
+
@require_cython_frame
@gdb_function_value_to_unicode
def invoke(self, cyname, frame=None):
@@ -1280,7 +1280,7 @@ class CyLine(gdb.Function, CythonBase):
"""
Get the current Cython line.
"""
-
+
@require_cython_frame
def invoke(self):
return self.get_cython_lineno()
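
(Illustrative, not part of the patch: CyCName, CyCValue and CyLine above all follow the same gdb pattern -- subclass gdb.Function, implement invoke(), and instantiate to register. A minimal sketch using only the documented gdb Python API; the name `frame_name` is made up.)

    import gdb

    class FrameName(gdb.Function):
        """Return the name of the selected frame, usable as $frame_name()."""

        def __init__(self):
            # The argument is the name used after '$' in gdb expressions.
            super(FrameName, self).__init__('frame_name')

        def invoke(self):
            # Called each time the convenience function is evaluated.
            return gdb.selected_frame().name() or ''

    # Registration is just instantiation, as libcython.py does with CyLine.
    FrameName()
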
diff --git a/Cython/Debugger/libpython.py b/Cython/Debugger/libpython.py
index b289d04bb..253c4c192 100644
--- a/Cython/Debugger/libpython.py
+++ b/Cython/Debugger/libpython.py
@@ -162,7 +162,7 @@ class TruncatedStringIO(object):
all_pretty_typenames = set()
class PrettyPrinterTrackerMeta(type):
-
+
def __init__(self, name, bases, dict):
super(PrettyPrinterTrackerMeta, self).__init__(name, bases, dict)
all_pretty_typenames.add(self._typename)
@@ -179,11 +179,11 @@ class PyObjectPtr(object):
Note that at every stage the underlying pointer could be NULL, point
to corrupt data, etc; this is the debugger, after all.
"""
-
+
__metaclass__ = PrettyPrinterTrackerMeta
-
+
_typename = 'PyObject'
-
+
def __init__(self, gdbval, cast_to=None):
if cast_to:
self._gdbval = gdbval.cast(cast_to)
@@ -356,7 +356,7 @@ class PyObjectPtr(object):
#print 'tp_flags = 0x%08x' % tp_flags
#print 'tp_name = %r' % tp_name
-
+
name_map = {'bool': PyBoolObjectPtr,
'classobj': PyClassObjectPtr,
'instance': PyInstanceObjectPtr,
@@ -368,7 +368,7 @@ class PyObjectPtr(object):
}
if tp_name in name_map:
return name_map[tp_name]
-
+
if tp_flags & Py_TPFLAGS_HEAPTYPE:
return HeapTypeObjectPtr
@@ -486,7 +486,7 @@ def _PyObject_VAR_SIZE(typeobj, nitems):
class HeapTypeObjectPtr(PyObjectPtr):
_typename = 'PyObject'
-
+
def get_attr_dict(self):
'''
Get the PyDictObject ptr representing the attribute dictionary
@@ -564,7 +564,7 @@ class PyBaseExceptionObjectPtr(PyObjectPtr):
within the process being debugged.
"""
_typename = 'PyBaseExceptionObject'
-
+
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
@@ -677,7 +677,7 @@ class PyDictObjectPtr(PyObjectPtr):
if not pyop_value.is_null():
pyop_key = PyObjectPtr.from_pyobject_ptr(ep['me_key'])
yield (pyop_key, pyop_value)
-
+
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
@@ -711,7 +711,7 @@ class PyDictObjectPtr(PyObjectPtr):
class PyInstanceObjectPtr(PyObjectPtr):
_typename = 'PyInstanceObject'
-
+
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
@@ -756,7 +756,7 @@ class PyIntObjectPtr(PyObjectPtr):
class PyListObjectPtr(PyObjectPtr):
_typename = 'PyListObject'
-
+
def __getitem__(self, i):
# Get the gdb.Value for the (PyObject*) with the given index:
field_ob_item = self.field('ob_item')
@@ -789,7 +789,7 @@ class PyListObjectPtr(PyObjectPtr):
class PyLongObjectPtr(PyObjectPtr):
_typename = 'PyLongObject'
-
+
def proxyval(self, visited):
'''
        Python's Include/longintrepr.h has this declaration:
@@ -807,7 +807,7 @@ class PyLongObjectPtr(PyObjectPtr):
where SHIFT can be either:
#define PyLong_SHIFT 30
#define PyLong_SHIFT 15
- '''
+ '''
ob_size = long(self.field('ob_size'))
if ob_size == 0:
return 0L
@@ -838,7 +838,7 @@ class PyBoolObjectPtr(PyLongObjectPtr):
<bool> instances (Py_True/Py_False) within the process being debugged.
"""
_typename = 'PyBoolObject'
-
+
def proxyval(self, visited):
castto = gdb.lookup_type('PyLongObject').pointer()
self._gdbval = self._gdbval.cast(castto)
@@ -1050,11 +1050,11 @@ class PySetObjectPtr(PyObjectPtr):
class PyBytesObjectPtr(PyObjectPtr):
_typename = 'PyBytesObject'
-
+
def __str__(self):
field_ob_size = self.field('ob_size')
field_ob_sval = self.field('ob_sval')
- return ''.join(struct.pack('b', field_ob_sval[i])
+ return ''.join(struct.pack('b', field_ob_sval[i])
for i in safe_range(field_ob_size))
def proxyval(self, visited):
@@ -1071,7 +1071,7 @@ class PyBytesObjectPtr(PyObjectPtr):
quote = "'"
if "'" in proxy and not '"' in proxy:
quote = '"'
-
+
if py3:
out.write('b')
@@ -1219,7 +1219,7 @@ class PyUnicodeObjectPtr(PyObjectPtr):
else:
# Python 2, write the 'u'
out.write('u')
-
+
if "'" in proxy and '"' not in proxy:
quote = '"'
else:
@@ -1684,11 +1684,11 @@ class PyLocals(gdb.Command):
namespace = self.get_namespace(pyop_frame)
namespace = [(name.proxyval(set()), val) for name, val in namespace]
-
+
if namespace:
name, val = max(namespace, key=lambda (name, val): len(name))
max_name_length = len(name)
-
+
for name, pyop_value in namespace:
value = pyop_value.get_truncated_repr(MAX_OUTPUT_LEN)
print ('%-*s = %s' % (max_name_length, name, value))
@@ -1699,7 +1699,7 @@ class PyLocals(gdb.Command):
class PyGlobals(PyLocals):
    'List all the globals in the currently selected Python frame'
-
+
def get_namespace(self, pyop_frame):
return pyop_frame.iter_globals()
@@ -1709,18 +1709,18 @@ PyGlobals("py-globals", gdb.COMMAND_DATA, gdb.COMPLETE_NONE)
class PyNameEquals(gdb.Function):
-
+
def _get_pycurframe_attr(self, attr):
frame = Frame(gdb.selected_frame())
if frame.is_evalframeex():
pyframe = frame.get_pyop()
if pyframe is None:
return None
-
+
return getattr(pyframe, attr).proxyval(set())
-
+
return None
-
+
def invoke(self, funcname):
attr = self._get_pycurframe_attr('co_name')
return attr is not None and attr == funcname.string()
@@ -1729,7 +1729,7 @@ PyNameEquals("pyname_equals")
class PyModEquals(PyNameEquals):
-
+
def invoke(self, modname):
attr = self._get_pycurframe_attr('co_filename')
if attr is not None:
@@ -1743,20 +1743,20 @@ PyModEquals("pymod_equals")
class PyBreak(gdb.Command):
"""
Set a Python breakpoint. Examples:
-
+
Break on any function or method named 'func' in module 'modname'
-
- py-break modname.func
-
+
+ py-break modname.func
+
Break on any function or method named 'func'
-
+
py-break func
"""
-
+
def invoke(self, funcname, from_tty):
if '.' in funcname:
modname, dot, funcname = funcname.rpartition('.')
- cond = '$pyname_equals("%s") && $pymod_equals("%s")' % (funcname,
+ cond = '$pyname_equals("%s") && $pymod_equals("%s")' % (funcname,
modname)
else:
cond = '$pyname_equals("%s")' % funcname
@@ -1770,31 +1770,31 @@ class _LoggingState(object):
"""
State that helps to provide a reentrant gdb.execute() function.
"""
-
+
def __init__(self):
self.fd, self.filename = tempfile.mkstemp()
self.file = os.fdopen(self.fd, 'r+')
_execute("set logging file %s" % self.filename)
self.file_position_stack = []
-
+
atexit.register(os.close, self.fd)
atexit.register(os.remove, self.filename)
-
+
def __enter__(self):
if not self.file_position_stack:
_execute("set logging redirect on")
_execute("set logging on")
_execute("set pagination off")
-
+
self.file_position_stack.append(os.fstat(self.fd).st_size)
return self
-
+
def getoutput(self):
gdb.flush()
self.file.seek(self.file_position_stack[-1])
result = self.file.read()
return result
-
+
def __exit__(self, exc_type, exc_val, tb):
startpos = self.file_position_stack.pop()
self.file.seek(startpos)
@@ -1808,7 +1808,7 @@ class _LoggingState(object):
def execute(command, from_tty=False, to_string=False):
"""
Replace gdb.execute() with this function and have it accept a 'to_string'
- argument (new in 7.2). Have it properly capture stderr also. Ensure
+ argument (new in 7.2). Have it properly capture stderr also. Ensure
reentrancy.
"""
if to_string:
@@ -1831,9 +1831,9 @@ def get_selected_inferior():
# Woooh, another bug in gdb! Is there an end in sight?
# http://sourceware.org/bugzilla/show_bug.cgi?id=12212
return gdb.inferiors()[0]
-
+
selected_thread = gdb.selected_thread()
-
+
for inferior in gdb.inferiors():
for thread in inferior.threads():
if thread == selected_thread:
@@ -1842,39 +1842,39 @@ def get_selected_inferior():
class GenericCodeStepper(gdb.Command):
"""
- Superclass for code stepping. Subclasses must implement the following
+ Superclass for code stepping. Subclasses must implement the following
methods:
-
+
lineno(frame)
tells the current line number (only called for a relevant frame)
-
+
is_relevant_function(frame)
tells whether we care about frame 'frame'
-
+
get_source_line(frame)
get the line of source code for the current line (only called for a
- relevant frame). If the source code cannot be retrieved this
+ relevant frame). If the source code cannot be retrieved this
function should return None
-
- static_break_functions()
- returns an iterable of function names that are considered relevant
- and should halt step-into execution. This is needed to provide a
+
+ static_break_functions()
+ returns an iterable of function names that are considered relevant
+ and should halt step-into execution. This is needed to provide a
        performant step-into
-
- runtime_break_functions
- list of functions that we should break into depending on the
+
+ runtime_break_functions
+ list of functions that we should break into depending on the
context
This class provides an 'invoke' method that invokes a 'step' or 'step-over'
depending on the 'stepinto' argument.
"""
-
+
stepper = False
static_breakpoints = {}
runtime_breakpoints = {}
-
+
def __init__(self, name, stepinto=False):
- super(GenericCodeStepper, self).__init__(name,
+ super(GenericCodeStepper, self).__init__(name,
gdb.COMMAND_RUNNING,
gdb.COMPLETE_NONE)
self.stepinto = stepinto
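
(Illustrative, not part of the patch: a sketch of the interface that the GenericCodeStepper docstring above dictates, written as a hypothetical stepper for plain C frames. It assumes this module is importable as `libpython` inside gdb, which is how the Cython debugger loads it.)

    import gdb
    import libpython

    class CCodeStepper(libpython.GenericCodeStepper):

        def lineno(self, frame):
            # gdb already knows C line numbers through the symbol table.
            return frame.find_sal().line

        def is_relevant_function(self, frame):
            # Only frames with line/symbol information are interesting.
            return frame.find_sal().symtab is not None

        def get_source_line(self, frame):
            # Returning None makes the caller fall back to gdb's own output.
            return None

        def static_break_functions(self):
            # Functions a step-into should always be able to stop in.
            return ['main']

        # Dispatch the command to the generic step machinery, like PyStep does.
        invoke = libpython.GenericCodeStepper.step

    # 'cc-step' is a made-up command name; stepinto=True requests 'step'
    # rather than 'next' behaviour.
    CCodeStepper('cc-step', stepinto=True)
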
@@ -1886,16 +1886,16 @@ class GenericCodeStepper(gdb.Command):
def init_breakpoints(self):
"""
Keep all breakpoints around and simply disable/enable them each time
- we are stepping. We need this because if you set and delete a
+ we are stepping. We need this because if you set and delete a
breakpoint, gdb will not repeat your command (this is due to 'delete').
- We also can't use the breakpoint API because there's no option to make
+ We also can't use the breakpoint API because there's no option to make
breakpoint setting silent.
-
+
This method must be called whenever the list of functions we should
step into changes. It can be called on any GenericCodeStepper instance.
"""
break_funcs = set(self.static_break_functions())
-
+
for funcname in break_funcs:
if funcname not in self.static_breakpoints:
try:
@@ -1910,45 +1910,45 @@ class GenericCodeStepper(gdb.Command):
result = gdb.execute(textwrap.dedent("""\
python bp = gdb.Breakpoint(%r, gdb.BP_BREAKPOINT, \
internal=True); \
- print bp.number""",
+ print bp.number""",
to_string=True))
-
+
breakpoint = int(result)
self.static_breakpoints[funcname] = breakpoint
-
+
for bp in set(self.static_breakpoints) - break_funcs:
gdb.execute("delete " + self.static_breakpoints[bp])
-
+
self.disable_breakpoints()
def enable_breakpoints(self):
for bp in self.static_breakpoints.itervalues():
gdb.execute('enable ' + bp)
-
+
runtime_break_functions = self.runtime_break_functions()
if runtime_break_functions is None:
return
-
+
for funcname in runtime_break_functions:
- if (funcname not in self.static_breakpoints and
+ if (funcname not in self.static_breakpoints and
funcname not in self.runtime_breakpoints):
self.runtime_breakpoints[funcname] = self._break_func(funcname)
elif funcname in self.runtime_breakpoints:
gdb.execute('enable ' + self.runtime_breakpoints[funcname])
-
+
def disable_breakpoints(self):
chain = itertools.chain(self.static_breakpoints.itervalues(),
self.runtime_breakpoints.itervalues())
for bp in chain:
gdb.execute('disable ' + bp)
-
+
def runtime_break_functions(self):
"""
- Implement this if the list of step-into functions depends on the
+ Implement this if the list of step-into functions depends on the
context.
"""
-
+
def stopped(self, result):
match = re.search('^Program received signal .*', result, re.MULTILINE)
if match:
@@ -1957,15 +1957,15 @@ class GenericCodeStepper(gdb.Command):
return result
else:
return None
-
+
def _stackdepth(self, frame):
depth = 0
while frame:
frame = frame.older()
depth += 1
-
+
return depth
-
+
def finish_executing(self, result):
"""
After doing some kind of code running in the inferior, print the line
@@ -1986,10 +1986,10 @@ class GenericCodeStepper(gdb.Command):
else:
frame = gdb.selected_frame()
output = None
-
+
if self.is_relevant_function(frame):
output = self.get_source_line(frame)
-
+
if output is None:
pframe = getattr(self, 'print_stackframe', None)
if pframe:
@@ -1998,10 +1998,10 @@ class GenericCodeStepper(gdb.Command):
print result.strip()
else:
print output
-
+
def _finish(self):
"""
- Execute until the function returns (or until something else makes it
+ Execute until the function returns (or until something else makes it
stop)
"""
if gdb.selected_frame().older() is not None:
@@ -2009,27 +2009,27 @@ class GenericCodeStepper(gdb.Command):
else:
# outermost frame, continue
return gdb.execute('cont', to_string=True)
-
+
def finish(self, *args):
"""
Execute until the function returns to a relevant caller.
"""
-
+
while True:
result = self._finish()
-
+
try:
frame = gdb.selected_frame()
except RuntimeError:
break
-
+
hitbp = re.search(r'Breakpoint (\d+)', result)
is_relavant = self.is_relevant_function(frame)
if hitbp or is_relavant or self.stopped(result):
break
-
+
self.finish_executing(result)
-
+
def _step(self):
"""
Do a single step or step-over. Returns the result of the last gdb
@@ -2037,12 +2037,12 @@ class GenericCodeStepper(gdb.Command):
"""
if self.stepinto:
self.enable_breakpoints()
-
+
beginframe = gdb.selected_frame()
beginline = self.lineno(beginframe)
if not self.stepinto:
depth = self._stackdepth(beginframe)
-
+
newframe = beginframe
result = ''
@@ -2051,17 +2051,17 @@ class GenericCodeStepper(gdb.Command):
result = gdb.execute('next', to_string=True)
else:
result = self._finish()
-
+
if self.stopped(result):
break
-
+
newframe = gdb.selected_frame()
is_relevant_function = self.is_relevant_function(newframe)
try:
framename = newframe.name()
except RuntimeError:
framename = None
-
+
m = re.search(r'Breakpoint (\d+)', result)
if m:
bp = self.runtime_breakpoints.get(framename)
@@ -2070,39 +2070,39 @@ class GenericCodeStepper(gdb.Command):
# that the function, in case hit by a runtime breakpoint,
# is in the right context
break
-
+
if newframe != beginframe:
# new function
-
+
if not self.stepinto:
# see if we returned to the caller
newdepth = self._stackdepth(newframe)
- is_relevant_function = (newdepth < depth and
+ is_relevant_function = (newdepth < depth and
is_relevant_function)
-
+
if is_relevant_function:
break
else:
if self.lineno(newframe) > beginline:
break
-
+
if self.stepinto:
self.disable_breakpoints()
return result
-
+
def step(self, *args):
return self.finish_executing(self._step())
def run(self, *args):
self.finish_executing(gdb.execute('run', to_string=True))
-
+
def cont(self, *args):
self.finish_executing(gdb.execute('cont', to_string=True))
class PythonCodeStepper(GenericCodeStepper):
-
+
def pyframe(self, frame):
pyframe = Frame(frame).get_pyop()
if pyframe:
@@ -2111,47 +2111,47 @@ class PythonCodeStepper(GenericCodeStepper):
raise gdb.GdbError(
"Unable to find the Python frame, run your code with a debug "
"build (configure with --with-pydebug or compile with -g).")
-
+
def lineno(self, frame):
return self.pyframe(frame).current_line_num()
-
+
def is_relevant_function(self, frame):
return Frame(frame).is_evalframeex()
def get_source_line(self, frame):
try:
pyframe = self.pyframe(frame)
- return '%4d %s' % (pyframe.current_line_num(),
+ return '%4d %s' % (pyframe.current_line_num(),
pyframe.current_line().rstrip())
except IOError, e:
return None
-
+
def static_break_functions(self):
yield 'PyEval_EvalFrameEx'
class PyStep(PythonCodeStepper):
"Step through Python code."
-
+
invoke = PythonCodeStepper.step
-
+
class PyNext(PythonCodeStepper):
"Step-over Python code."
-
+
invoke = PythonCodeStepper.step
-
+
class PyFinish(PythonCodeStepper):
"Execute until function returns to a caller."
-
+
invoke = PythonCodeStepper.finish
class PyRun(PythonCodeStepper):
"Run the program."
-
+
invoke = PythonCodeStepper.run
class PyCont(PythonCodeStepper):
-
+
invoke = PythonCodeStepper.cont
@@ -2170,8 +2170,8 @@ Py_eval_input = 258
def _pointervalue(gdbval):
"""
-    Return the value of the pointer as a Python int.
-
+    Return the value of the pointer as a Python int.
+
gdbval.type must be a pointer type
"""
# don't convert with int() as it will raise a RuntimeError
@@ -2192,14 +2192,14 @@ def pointervalue(gdbval):
# work around yet another bug in gdb where you get random behaviour
# and tracebacks
pass
-
+
return pointer
def get_inferior_unicode_postfix():
try:
gdb.parse_and_eval('PyUnicode_FromEncodedObject')
except RuntimeError:
- try:
+ try:
gdb.parse_and_eval('PyUnicodeUCS2_FromEncodedObject')
except RuntimeError:
return 'UCS4'
@@ -2207,33 +2207,33 @@ def get_inferior_unicode_postfix():
return 'UCS2'
else:
return ''
-
+
class PythonCodeExecutor(object):
-
+
def malloc(self, size):
chunk = (gdb.parse_and_eval("(void *) malloc((size_t) %d)" % size))
-
+
pointer = pointervalue(chunk)
if pointer == 0:
raise gdb.GdbError("No memory could be allocated in the inferior.")
-
+
return pointer
-
+
def alloc_string(self, string):
pointer = self.malloc(len(string))
get_selected_inferior().write_memory(pointer, string)
-
+
return pointer
-
+
def alloc_pystring(self, string):
stringp = self.alloc_string(string)
PyString_FromStringAndSize = 'PyString_FromStringAndSize'
-
+
try:
gdb.parse_and_eval(PyString_FromStringAndSize)
except RuntimeError:
# Python 3
- PyString_FromStringAndSize = ('PyUnicode%s_FromStringAndSize' %
+ PyString_FromStringAndSize = ('PyUnicode%s_FromStringAndSize' %
                                           (get_inferior_unicode_postfix(),))
try:
@@ -2242,59 +2242,59 @@ class PythonCodeExecutor(object):
PyString_FromStringAndSize, stringp, len(string)))
finally:
self.free(stringp)
-
+
pointer = pointervalue(result)
if pointer == 0:
raise gdb.GdbError("Unable to allocate Python string in "
"the inferior.")
-
+
return pointer
-
+
def free(self, pointer):
gdb.parse_and_eval("free((void *) %d)" % pointer)
-
+
def incref(self, pointer):
"Increment the reference count of a Python object in the inferior."
gdb.parse_and_eval('Py_IncRef((PyObject *) %d)' % pointer)
-
+
def decref(self, pointer):
"Decrement the reference count of a Python object in the inferior."
# Py_DecRef is like Py_XDECREF, but a function. So we don't have
# to check for NULL. This should also decref all our allocated
# Python strings.
gdb.parse_and_eval('Py_DecRef((PyObject *) %d)' % pointer)
-
+
def evalcode(self, code, input_type, global_dict=None, local_dict=None):
"""
Evaluate python code `code` given as a string in the inferior and
return the result as a gdb.Value. Returns a new reference in the
inferior.
-
+
Of course, executing any code in the inferior may be dangerous and may
        leave the debuggee in an unsafe state or terminate it altogether.
"""
if '\0' in code:
raise gdb.GdbError("String contains NUL byte.")
-
+
code += '\0'
-
+
pointer = self.alloc_string(code)
-
+
globalsp = pointervalue(global_dict)
localsp = pointervalue(local_dict)
-
+
if globalsp == 0 or localsp == 0:
raise gdb.GdbError("Unable to obtain or create locals or globals.")
-
+
code = """
PyRun_String(
(char *) %(code)d,
(int) %(start)d,
(PyObject *) %(globals)s,
(PyObject *) %(locals)d)
- """ % dict(code=pointer, start=input_type,
+ """ % dict(code=pointer, start=input_type,
globals=globalsp, locals=localsp)
-
+
with FetchAndRestoreError():
try:
self.decref(gdb.parse_and_eval(code))
@@ -2311,25 +2311,25 @@ class FetchAndRestoreError(PythonCodeExecutor):
def __init__(self):
self.sizeof_PyObjectPtr = gdb.lookup_type('PyObject').pointer().sizeof
self.pointer = self.malloc(self.sizeof_PyObjectPtr * 3)
-
+
type = self.pointer
value = self.pointer + self.sizeof_PyObjectPtr
traceback = self.pointer + self.sizeof_PyObjectPtr * 2
-
+
self.errstate = type, value, traceback
-
+
def __enter__(self):
gdb.parse_and_eval("PyErr_Fetch(%d, %d, %d)" % self.errstate)
-
+
def __exit__(self, *args):
if gdb.parse_and_eval("(int) PyErr_Occurred()"):
gdb.parse_and_eval("PyErr_Print()")
-
+
pyerr_restore = ("PyErr_Restore("
"(PyObject *) *%d,"
"(PyObject *) *%d,"
"(PyObject *) *%d)")
-
+
try:
gdb.parse_and_eval(pyerr_restore % self.errstate)
finally:
@@ -2342,15 +2342,15 @@ class FixGdbCommand(gdb.Command):
super(FixGdbCommand, self).__init__(command, gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
self.actual_command = actual_command
-
+
def fix_gdb(self):
"""
- So, you must be wondering what the story is this time! Yeeees, indeed,
+ So, you must be wondering what the story is this time! Yeeees, indeed,
I have quite the story for you! It seems that invoking either 'cy exec'
-        or 'py-exec' works perfectly fine, but after this gdb's python API is
- entirely broken. Some unset exception value is still set?
+        or 'py-exec' works perfectly fine, but after this gdb's python API is
+ entirely broken. Some unset exception value is still set?
sys.exc_clear() didn't help. A demonstration:
-
+
(gdb) cy exec 'hello'
'hello'
(gdb) python gdb.execute('cont')
@@ -2358,17 +2358,17 @@ class FixGdbCommand(gdb.Command):
Error while executing Python code.
(gdb) python gdb.execute('cont')
[15148 refs]
-
+
Program exited normally.
"""
- warnings.filterwarnings('ignore', r'.*', RuntimeWarning,
+ warnings.filterwarnings('ignore', r'.*', RuntimeWarning,
re.escape(__name__))
try:
long(gdb.parse_and_eval("(void *) 0")) == 0
except RuntimeError:
pass
# warnings.resetwarnings()
-
+
def invoke(self, args, from_tty):
self.fix_gdb()
try:
@@ -2379,7 +2379,7 @@ class FixGdbCommand(gdb.Command):
class PyExec(gdb.Command):
-
+
def readcode(self, expr):
if expr:
return expr, Py_single_input
@@ -2393,23 +2393,23 @@ class PyExec(gdb.Command):
else:
if line.rstrip() == 'end':
break
-
+
lines.append(line)
-
+
return '\n'.join(lines), Py_file_input
-
+
def invoke(self, expr, from_tty):
expr, input_type = self.readcode(expr)
-
+
executor = PythonCodeExecutor()
global_dict = gdb.parse_and_eval('PyEval_GetGlobals()')
local_dict = gdb.parse_and_eval('PyEval_GetLocals()')
-
+
if pointervalue(global_dict) == 0 or pointervalue(local_dict) == 0:
raise gdb.GdbError("Unable to find the locals or globals of the "
"most recent Python function (relative to the "
"selected frame).")
-
+
executor.evalcode(expr, input_type, global_dict, local_dict)
py_exec = FixGdbCommand('py-exec', '-py-exec')
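
(Illustrative, not part of the patch: the reentrant execute() wrapper defined above returns command output as a string, so it can be post-processed from Python. Assumes libpython has been sourced into gdb, as the Cython debugger does.)

    import libpython

    # Capture the output instead of printing it, then filter it.
    output = libpython.execute('info breakpoints', to_string=True)
    for line in output.splitlines():
        if 'keep' in line:
            print line
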
diff --git a/Cython/Distutils/__init__.py b/Cython/Distutils/__init__.py
index 54bc0bdee..41bf9be5e 100644
--- a/Cython/Distutils/__init__.py
+++ b/Cython/Distutils/__init__.py
@@ -8,5 +8,5 @@
# so that *our* build_ext can make use of it.
from Cython.Distutils.build_ext import build_ext
-
+
# from extension import Extension
diff --git a/Cython/Distutils/build_ext.py b/Cython/Distutils/build_ext.py
index 7139669a0..4faba5879 100644
--- a/Cython/Distutils/build_ext.py
+++ b/Cython/Distutils/build_ext.py
@@ -26,26 +26,26 @@ show_compilers = _build_ext.show_compilers
class Optimization(object):
def __init__(self):
self.flags = (
- 'OPT',
+ 'OPT',
'CFLAGS',
'CPPFLAGS',
- 'EXTRA_CFLAGS',
+ 'EXTRA_CFLAGS',
'BASECFLAGS',
'PY_CFLAGS',
)
self.state = sysconfig.get_config_vars(*self.flags)
self.config_vars = sysconfig.get_config_vars()
-
-
+
+
def disable_optimization(self):
"disable optimization for the C or C++ compiler"
badoptions = ('-O1', '-O2', '-O3')
-
+
for flag, option in zip(self.flags, self.state):
if option is not None:
L = [opt for opt in option.split() if opt not in badoptions]
self.config_vars[flag] = ' '.join(L)
-
+
def restore_state(self):
"restore the original state"
for flag, option in zip(self.flags, self.state):
@@ -62,9 +62,9 @@ except NameError:
for x in it:
if x:
return True
-
+
return False
-
+
class build_ext(_build_ext.build_ext):
@@ -96,7 +96,7 @@ class build_ext(_build_ext.build_ext):
])
boolean_options.extend([
- 'pyrex-cplus', 'pyrex-create-listing', 'pyrex-line-directives',
+ 'pyrex-cplus', 'pyrex-create-listing', 'pyrex-line-directives',
'pyrex-c-in-temp', 'pyrex-gdb',
])
@@ -121,22 +121,22 @@ class build_ext(_build_ext.build_ext):
if self.pyrex_directives is None:
self.pyrex_directives = {}
# finalize_options ()
-
+
def run(self):
# We have one shot at this before build_ext initializes the compiler.
# If --pyrex-gdb is in effect as a command line option or as option
# of any Extension module, disable optimization for the C or C++
# compiler.
- if (self.pyrex_gdb or any([getattr(ext, 'pyrex_gdb', False)
+ if (self.pyrex_gdb or any([getattr(ext, 'pyrex_gdb', False)
for ext in self.extensions])):
optimization.disable_optimization()
-
+
_build_ext.build_ext.run(self)
-
+
def build_extensions(self):
# First, sanity-check the 'extensions' list
self.check_extensions_list(self.extensions)
-
+
for ext in self.extensions:
ext.sources = self.cython_sources(ext.sources, ext)
self.build_extension(ext)
@@ -180,7 +180,7 @@ class build_ext(_build_ext.build_ext):
# cplus = self.pyrex_cplus or \
# (extension.language != None and \
# extension.language.lower() == 'c++')
-
+
create_listing = self.pyrex_create_listing or \
getattr(extension, 'pyrex_create_listing', 0)
line_directives = self.pyrex_line_directives or \
@@ -265,7 +265,7 @@ class build_ext(_build_ext.build_ext):
output_dir = os.curdir
else:
output_dir = self.build_lib
- options = CompilationOptions(pyrex_default_options,
+ options = CompilationOptions(pyrex_default_options,
use_listing_file = create_listing,
include_path = includes,
compiler_directives = directives,
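
(Illustrative, not part of the patch: a minimal setup.py for the build_ext class above; 'mod' and mod.pyx are placeholder names. Passing --pyrex-gdb on the command line is what makes run() call optimization.disable_optimization().)

    from distutils.core import setup
    from distutils.extension import Extension
    from Cython.Distutils import build_ext

    setup(
        name='mod',
        cmdclass={'build_ext': build_ext},           # Cython's build_ext compiles .pyx sources
        ext_modules=[Extension('mod', ['mod.pyx'])],
    )

    # Typical invocation with debugging support:
    #   python setup.py build_ext --inplace --pyrex-gdb
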
diff --git a/Cython/Includes/cpython/__init__.pxd b/Cython/Includes/cpython/__init__.pxd
index 4046e77b0..356547ac9 100644
--- a/Cython/Includes/cpython/__init__.pxd
+++ b/Cython/Includes/cpython/__init__.pxd
@@ -25,7 +25,7 @@
# PyObject* PyNumber_Add(PyObject *o1, PyObject *o2)
#
# in your file after any .pxi includes. Cython will use the latest
-# declaration.
+# declaration.
#
# Cython takes care of this automatically for anything of type object.
## More precisely, I think the correct convention for
@@ -36,7 +36,7 @@
## any funny reference counting.
## (2) Declare output as object if a new reference is returned.
## (3) Declare output as PyObject* if a borrowed reference is returned.
-##
+##
## This way when you call objects, no cast is needed, and if the api
## calls returns a new reference (which is about 95% of them), then
## you can just assign to a variable of type object. With borrowed
@@ -46,8 +46,8 @@
## to your object, so you're OK, as long as you realize this
## and use the result of an explicit cast to <object> as a borrowed
## reference (and you can call Py_INCREF if you want to turn it
-## into another reference for some reason).
-#
+## into another reference for some reason).
+#
# "The reference count is important because today's computers have
# a finite (and often severely limited) memory size; it counts how
# many different places there are that have a reference to an
@@ -77,7 +77,7 @@
# count as there are distinct memory locations in virtual memory
# (assuming sizeof(long) >= sizeof(char*)). Thus, the reference
# count increment is a simple operation.
-#
+#
# It is not necessary to increment an object's reference count for
# every local variable that contains a pointer to an object. In
# theory, the object's reference count goes up by one when the
diff --git a/Cython/Includes/cpython/bool.pxd b/Cython/Includes/cpython/bool.pxd
index a709f2604..c775088ce 100644
--- a/Cython/Includes/cpython/bool.pxd
+++ b/Cython/Includes/cpython/bool.pxd
@@ -27,12 +27,12 @@ cdef extern from "Python.h":
# counts.
# Py_RETURN_FALSE
- # Return Py_False from a function, properly incrementing its reference count.
+ # Return Py_False from a function, properly incrementing its reference count.
# Py_RETURN_TRUE
- # Return Py_True from a function, properly incrementing its reference count.
+ # Return Py_True from a function, properly incrementing its reference count.
object PyBool_FromLong(long v)
# Return value: New reference.
- # Return a new reference to Py_True or Py_False depending on the truth value of v.
+ # Return a new reference to Py_True or Py_False depending on the truth value of v.
diff --git a/Cython/Includes/cpython/buffer.pxd b/Cython/Includes/cpython/buffer.pxd
index 654eb9694..f25c56e92 100644
--- a/Cython/Includes/cpython/buffer.pxd
+++ b/Cython/Includes/cpython/buffer.pxd
@@ -87,8 +87,8 @@ cdef extern from "Python.h":
# is 'C') or Fortran-style (fortran is 'F') contiguous or either
# one (fortran is 'A'). Return 0 otherwise.
- void PyBuffer_FillContiguousStrides(int ndims,
- Py_ssize_t *shape,
+ void PyBuffer_FillContiguousStrides(int ndims,
+ Py_ssize_t *shape,
Py_ssize_t *strides,
int itemsize,
char fort)
diff --git a/Cython/Includes/cpython/bytes.pxd b/Cython/Includes/cpython/bytes.pxd
index bed53fbf5..2fb350201 100644
--- a/Cython/Includes/cpython/bytes.pxd
+++ b/Cython/Includes/cpython/bytes.pxd
@@ -10,18 +10,18 @@ cdef extern from "Python.h":
# These functions raise TypeError when expecting a string
# parameter and are called with a non-string parameter.
# PyStringObject
- # This subtype of PyObject represents a Python bytes object.
+ # This subtype of PyObject represents a Python bytes object.
# PyTypeObject PyBytes_Type
# This instance of PyTypeObject represents the Python bytes type;
# it is the same object as bytes and types.BytesType in the Python
- # layer.
+ # layer.
bint PyBytes_Check(object o)
# Return true if the object o is a string object or an instance of
- # a subtype of the string type.
+ # a subtype of the string type.
bint PyBytes_CheckExact(object o)
- # Return true if the object o is a string object, but not an instance of a subtype of the string type.
+ # Return true if the object o is a string object, but not an instance of a subtype of the string type.
bytes PyBytes_FromString(char *v)
# Return value: New reference.
@@ -66,13 +66,13 @@ cdef extern from "Python.h":
bytes PyBytes_FromFormatV(char *format, va_list vargs)
# Return value: New reference.
- # Identical to PyBytes_FromFormat() except that it takes exactly two arguments.
+ # Identical to PyBytes_FromFormat() except that it takes exactly two arguments.
Py_ssize_t PyBytes_Size(object string) except -1
- # Return the length of the string in string object string.
+ # Return the length of the string in string object string.
Py_ssize_t PyBytes_GET_SIZE(object string)
- # Macro form of PyBytes_Size() but without error checking.
+ # Macro form of PyBytes_Size() but without error checking.
char* PyBytes_AsString(object string) except NULL
# Return a NUL-terminated representation of the contents of
@@ -98,7 +98,7 @@ cdef extern from "Python.h":
# version of the object. If length is NULL, the resulting buffer
# may not contain NUL characters; if it does, the function returns
# -1 and a TypeError is raised.
-
+
# The buffer refers to an internal string buffer of obj, not a
# copy. The data must not be modified in any way, unless the
# string was just created using PyBytes_FromStringAndSize(NULL,
diff --git a/Cython/Includes/cpython/complex.pxd b/Cython/Includes/cpython/complex.pxd
index 48091a4f9..f5ba33957 100644
--- a/Cython/Includes/cpython/complex.pxd
+++ b/Cython/Includes/cpython/complex.pxd
@@ -10,7 +10,7 @@ cdef extern from "Python.h":
############################################################################
# PyComplexObject
- # This subtype of PyObject represents a Python complex number object.
+ # This subtype of PyObject represents a Python complex number object.
ctypedef class __builtin__.complex [object PyComplexObject]:
cdef Py_complex cval
@@ -32,17 +32,17 @@ cdef extern from "Python.h":
object PyComplex_FromCComplex(Py_complex v)
# Return value: New reference.
- # Create a new Python complex number object from a C Py_complex value.
+ # Create a new Python complex number object from a C Py_complex value.
object PyComplex_FromDoubles(double real, double imag)
# Return value: New reference.
- # Return a new PyComplexObject object from real and imag.
+ # Return a new PyComplexObject object from real and imag.
double PyComplex_RealAsDouble(object op) except? -1
- # Return the real part of op as a C double.
+ # Return the real part of op as a C double.
double PyComplex_ImagAsDouble(object op) except? -1
- # Return the imaginary part of op as a C double.
+ # Return the imaginary part of op as a C double.
Py_complex PyComplex_AsCComplex(object op)
# Return the Py_complex value of the complex number op.
diff --git a/Cython/Includes/cpython/dict.pxd b/Cython/Includes/cpython/dict.pxd
index 4f39cdfe5..616fb8711 100644
--- a/Cython/Includes/cpython/dict.pxd
+++ b/Cython/Includes/cpython/dict.pxd
@@ -10,7 +10,7 @@ cdef extern from "Python.h":
#
# This subtype of PyObject represents a Python dictionary object
# (i.e. the 'dict' type).
-
+
# PyTypeObject PyDict_Type
#
# This instance of PyTypeObject represents the Python dictionary
@@ -19,7 +19,7 @@ cdef extern from "Python.h":
bint PyDict_Check(object p)
# Return true if p is a dict object or an instance of a subtype of
- # the dict type.
+ # the dict type.
bint PyDict_CheckExact(object p)
# Return true if p is a dict object, but not an instance of a
@@ -27,21 +27,21 @@ cdef extern from "Python.h":
object PyDict_New()
# Return value: New reference.
- # Return a new empty dictionary, or NULL on failure.
+ # Return a new empty dictionary, or NULL on failure.
object PyDictProxy_New(object dict)
# Return value: New reference.
# Return a proxy object for a mapping which enforces read-only
# behavior. This is normally used to create a proxy to prevent
- # modification of the dictionary for non-dynamic class types.
+ # modification of the dictionary for non-dynamic class types.
void PyDict_Clear(object p)
- # Empty an existing dictionary of all key-value pairs.
+ # Empty an existing dictionary of all key-value pairs.
int PyDict_Contains(object p, object key) except -1
# Determine if dictionary p contains key. If an item in p is
# matches key, return 1, otherwise return 0. On error, return
- # -1. This is equivalent to the Python expression "key in p".
+ # -1. This is equivalent to the Python expression "key in p".
object PyDict_Copy(object p)
# Return value: New reference.
diff --git a/Cython/Includes/cpython/exc.pxd b/Cython/Includes/cpython/exc.pxd
index 71d277db5..cf114f3b8 100644
--- a/Cython/Includes/cpython/exc.pxd
+++ b/Cython/Includes/cpython/exc.pxd
@@ -1,7 +1,7 @@
from cpython.ref cimport PyObject
cdef extern from "Python.h":
-
+
#####################################################################
# 3. Exception Handling
#####################################################################
@@ -76,7 +76,7 @@ cdef extern from "Python.h":
# performance.
void PyErr_Clear()
- # Clear the error indicator. If the error indicator is not set, there is no effect.
+ # Clear the error indicator. If the error indicator is not set, there is no effect.
void PyErr_Fetch(PyObject** ptype, PyObject** pvalue, PyObject** ptraceback)
# Retrieve the error indicator into three variables whose
@@ -124,7 +124,7 @@ cdef extern from "Python.h":
# parsed, but the width part is ignored.
void PyErr_SetNone(object type)
- # This is a shorthand for "PyErr_SetObject(type, Py_None)".
+ # This is a shorthand for "PyErr_SetObject(type, Py_None)".
int PyErr_BadArgument() except 0
diff --git a/Cython/Includes/cpython/float.pxd b/Cython/Includes/cpython/float.pxd
index 602cc816d..45e544a65 100644
--- a/Cython/Includes/cpython/float.pxd
+++ b/Cython/Includes/cpython/float.pxd
@@ -1,12 +1,12 @@
cdef extern from "Python.h":
############################################################################
- # 7.2.3
+ # 7.2.3
############################################################################
# PyFloatObject
#
# This subtype of PyObject represents a Python floating point object.
-
+
# PyTypeObject PyFloat_Type
#
# This instance of PyTypeObject represents the Python floating
@@ -19,7 +19,7 @@ cdef extern from "Python.h":
bint PyFloat_CheckExact(object p)
# Return true if its argument is a PyFloatObject, but not a
- # subtype of PyFloatObject.
+ # subtype of PyFloatObject.
object PyFloat_FromString(object str, char **pend)
# Return value: New reference.
@@ -29,7 +29,7 @@ cdef extern from "Python.h":
object PyFloat_FromDouble(double v)
# Return value: New reference.
- # Create a PyFloatObject object from v, or NULL on failure.
+ # Create a PyFloatObject object from v, or NULL on failure.
double PyFloat_AsDouble(object pyfloat) except? -1
# Return a C double representation of the contents of pyfloat.
diff --git a/Cython/Includes/cpython/function.pxd b/Cython/Includes/cpython/function.pxd
index 375c084c4..e8e4f068c 100644
--- a/Cython/Includes/cpython/function.pxd
+++ b/Cython/Includes/cpython/function.pxd
@@ -32,11 +32,11 @@ cdef extern from "Python.h":
PyObject* PyFunction_GetCode(object op) except? NULL
# Return value: Borrowed reference.
- # Return the code object associated with the function object op.
+ # Return the code object associated with the function object op.
PyObject* PyFunction_GetGlobals(object op) except? NULL
# Return value: Borrowed reference.
- # Return the globals dictionary associated with the function object op.
+ # Return the globals dictionary associated with the function object op.
PyObject* PyFunction_GetModule(object op) except? NULL
# Return value: Borrowed reference.
@@ -52,7 +52,7 @@ cdef extern from "Python.h":
int PyFunction_SetDefaults(object op, object defaults) except -1
# Set the argument default values for the function object
# op. defaults must be Py_None or a tuple.
- # Raises SystemError and returns -1 on failure.
+ # Raises SystemError and returns -1 on failure.
PyObject* PyFunction_GetClosure(object op) except? NULL
# Return value: Borrowed reference.
@@ -62,4 +62,4 @@ cdef extern from "Python.h":
int PyFunction_SetClosure(object op, object closure) except -1
# Set the closure associated with the function object op. closure
# must be Py_None or a tuple of cell objects.
- # Raises SystemError and returns -1 on failure.
+ # Raises SystemError and returns -1 on failure.
diff --git a/Cython/Includes/cpython/instance.pxd b/Cython/Includes/cpython/instance.pxd
index 4160cbe6a..aecdc0cfd 100644
--- a/Cython/Includes/cpython/instance.pxd
+++ b/Cython/Includes/cpython/instance.pxd
@@ -1,5 +1,5 @@
cdef extern from "Python.h":
-
+
############################################################################
# 7.5.2 Instance Objects
############################################################################
@@ -7,9 +7,9 @@ cdef extern from "Python.h":
# PyTypeObject PyInstance_Type
#
# Type object for class instances.
-
+
int PyInstance_Check(object obj)
- # Return true if obj is an instance.
+ # Return true if obj is an instance.
object PyInstance_New(object cls, object arg, object kw)
# Return value: New reference.
diff --git a/Cython/Includes/cpython/int.pxd b/Cython/Includes/cpython/int.pxd
index bc87c6032..6846be33b 100644
--- a/Cython/Includes/cpython/int.pxd
+++ b/Cython/Includes/cpython/int.pxd
@@ -56,18 +56,18 @@ cdef extern from "Python.h":
# whether the value just happened to be -1.
long PyInt_AS_LONG(object io)
- # Return the value of the object io. No error checking is performed.
+ # Return the value of the object io. No error checking is performed.
unsigned long PyInt_AsUnsignedLongMask(object io) except? -1
# Will first attempt to cast the object to a PyIntObject or
# PyLongObject, if it is not already one, and then return its
# value as unsigned long. This function does not check for
- # overflow.
+ # overflow.
PY_LONG_LONG PyInt_AsUnsignedLongLongMask(object io) except? -1
# Will first attempt to cast the object to a PyIntObject or
# PyLongObject, if it is not already one, and then return its
- # value as unsigned long long, without checking for overflow.
+ # value as unsigned long long, without checking for overflow.
Py_ssize_t PyInt_AsSsize_t(object io) except? -1
# Will first attempt to cast the object to a PyIntObject or
diff --git a/Cython/Includes/cpython/iterator.pxd b/Cython/Includes/cpython/iterator.pxd
index 94ae1f623..0e10907f7 100644
--- a/Cython/Includes/cpython/iterator.pxd
+++ b/Cython/Includes/cpython/iterator.pxd
@@ -4,7 +4,7 @@ cdef extern from "Python.h":
# 6.5 Iterator Protocol
############################################################################
bint PyIter_Check(object o)
- # Return true if the object o supports the iterator protocol.
+ # Return true if the object o supports the iterator protocol.
object PyIter_Next(object o)
# Return value: New reference.
diff --git a/Cython/Includes/cpython/list.pxd b/Cython/Includes/cpython/list.pxd
index 16c8076be..b2a26bd6b 100644
--- a/Cython/Includes/cpython/list.pxd
+++ b/Cython/Includes/cpython/list.pxd
@@ -13,11 +13,11 @@ cdef extern from "Python.h":
# functions such as PySequence_SetItem() or expose the object to
# Python code before setting all items to a real object with
# PyList_SetItem().
-
+
bint PyList_Check(object p)
# Return true if p is a list object or an instance of a subtype of
# the list type.
-
+
bint PyList_CheckExact(object p)
# Return true if p is a list object, but not an instance of a
# subtype of the list type.
@@ -27,10 +27,10 @@ cdef extern from "Python.h":
# to "len(list)" on a list object.
Py_ssize_t PyList_GET_SIZE(object list)
- # Macro form of PyList_Size() without error checking.
+ # Macro form of PyList_Size() without error checking.
PyObject* PyList_GetItem(object list, Py_ssize_t index) except NULL
- # Return value: Borrowed reference.
+ # Return value: Borrowed reference.
# Return the object at position pos in the list pointed to by
# p. The position must be positive, indexing from the end of the
# list is not supported. If pos is out of bounds, return NULL and
@@ -38,7 +38,7 @@ cdef extern from "Python.h":
PyObject* PyList_GET_ITEM(object list, Py_ssize_t i)
# Return value: Borrowed reference.
- # Macro form of PyList_GetItem() without error checking.
+ # Macro form of PyList_GetItem() without error checking.
int PyList_SetItem(object list, Py_ssize_t index, object item) except -1
# Set the item at index index in list to item. Return 0 on success
diff --git a/Cython/Includes/cpython/long.pxd b/Cython/Includes/cpython/long.pxd
index 4c5700cf4..5a7f1ee2e 100644
--- a/Cython/Includes/cpython/long.pxd
+++ b/Cython/Includes/cpython/long.pxd
@@ -17,30 +17,30 @@ cdef extern from "Python.h":
# type. This is the same object as long and types.LongType.
bint PyLong_Check(object p)
- # Return true if its argument is a PyLongObject or a subtype of PyLongObject.
+ # Return true if its argument is a PyLongObject or a subtype of PyLongObject.
bint PyLong_CheckExact(object p)
# Return true if its argument is a PyLongObject, but not a subtype of PyLongObject.
object PyLong_FromLong(long v)
# Return value: New reference.
- # Return a new PyLongObject object from v, or NULL on failure.
+ # Return a new PyLongObject object from v, or NULL on failure.
object PyLong_FromUnsignedLong(unsigned long v)
# Return value: New reference.
- # Return a new PyLongObject object from a C unsigned long, or NULL on failure.
+ # Return a new PyLongObject object from a C unsigned long, or NULL on failure.
object PyLong_FromLongLong(PY_LONG_LONG v)
# Return value: New reference.
- # Return a new PyLongObject object from a C long long, or NULL on failure.
+ # Return a new PyLongObject object from a C long long, or NULL on failure.
object PyLong_FromUnsignedLongLong(uPY_LONG_LONG v)
# Return value: New reference.
- # Return a new PyLongObject object from a C unsigned long long, or NULL on failure.
+ # Return a new PyLongObject object from a C unsigned long long, or NULL on failure.
object PyLong_FromDouble(double v)
# Return value: New reference.
- # Return a new PyLongObject object from the integer part of v, or NULL on failure.
+ # Return a new PyLongObject object from the integer part of v, or NULL on failure.
object PyLong_FromString(char *str, char **pend, int base)
# Return value: New reference.
@@ -62,7 +62,7 @@ cdef extern from "Python.h":
# the Unicode string, length gives the number of characters, and
# base is the radix for the conversion. The radix must be in the
# range [2, 36]; if it is out of range, ValueError will be
- # raised.
+ # raised.
object PyLong_FromVoidPtr(void *p)
# Return value: New reference.
@@ -90,11 +90,11 @@ cdef extern from "Python.h":
# Return a C unsigned long long from a Python long integer. If
# pylong cannot be represented as an unsigned long long, an
# OverflowError will be raised if the value is positive, or a
- # TypeError will be raised if the value is negative.
+ # TypeError will be raised if the value is negative.
unsigned long PyLong_AsUnsignedLongMask(object io) except? -1
# Return a C unsigned long from a Python long integer, without
- # checking for overflow.
+ # checking for overflow.
uPY_LONG_LONG PyLong_AsUnsignedLongLongMask(object io) except? -1
#unsigned PY_LONG_LONG PyLong_AsUnsignedLongLongMask(object io)
diff --git a/Cython/Includes/cpython/mem.pxd b/Cython/Includes/cpython/mem.pxd
index 6d7b8dac1..3b3ab5be1 100644
--- a/Cython/Includes/cpython/mem.pxd
+++ b/Cython/Includes/cpython/mem.pxd
@@ -63,7 +63,7 @@ cdef extern from "Python.h":
# sizeof(TYPE)) bytes. Returns a pointer cast to TYPE*.
void PyMem_Del(void *p)
- # Same as PyMem_Free().
+ # Same as PyMem_Free().
# In addition, the following macro sets are provided for calling
# the Python memory allocator directly, without involving the C
diff --git a/Cython/Includes/cpython/method.pxd b/Cython/Includes/cpython/method.pxd
index 36e7ef450..bc0941642 100644
--- a/Cython/Includes/cpython/method.pxd
+++ b/Cython/Includes/cpython/method.pxd
@@ -6,7 +6,7 @@ cdef extern from "Python.h":
# There are some useful functions that are useful for working with method objects.
# PyTypeObject PyMethod_Type
- # This instance of PyTypeObject represents the Python method type. This is exposed to Python programs as types.MethodType.
+ # This instance of PyTypeObject represents the Python method type. This is exposed to Python programs as types.MethodType.
bint PyMethod_Check(object o)
# Return true if o is a method object (has type
@@ -29,20 +29,20 @@ cdef extern from "Python.h":
PyObject* PyMethod_GET_CLASS(object meth)
# Return value: Borrowed reference.
- # Macro version of PyMethod_Class() which avoids error checking.
+ # Macro version of PyMethod_Class() which avoids error checking.
PyObject* PyMethod_Function(object meth) except NULL
# Return value: Borrowed reference.
- # Return the function object associated with the method meth.
+ # Return the function object associated with the method meth.
PyObject* PyMethod_GET_FUNCTION(object meth)
# Return value: Borrowed reference.
- # Macro version of PyMethod_Function() which avoids error checking.
+ # Macro version of PyMethod_Function() which avoids error checking.
PyObject* PyMethod_Self(object meth) except? NULL
# Return value: Borrowed reference.
- # Return the instance associated with the method meth if it is bound, otherwise return NULL.
+ # Return the instance associated with the method meth if it is bound, otherwise return NULL.
PyObject* PyMethod_GET_SELF(object meth)
# Return value: Borrowed reference.
- # Macro version of PyMethod_Self() which avoids error checking.
+ # Macro version of PyMethod_Self() which avoids error checking.
diff --git a/Cython/Includes/cpython/module.pxd b/Cython/Includes/cpython/module.pxd
index 8ff05108c..84e23ca71 100644
--- a/Cython/Includes/cpython/module.pxd
+++ b/Cython/Includes/cpython/module.pxd
@@ -130,7 +130,7 @@ cdef extern from "Python.h":
# object.
bint PyModule_CheckExact(object p)
- # Return true if p is a module object, but not a subtype of PyModule_Type.
+ # Return true if p is a module object, but not a subtype of PyModule_Type.
object PyModule_New(char *name)
# Return value: New reference.
@@ -161,15 +161,15 @@ cdef extern from "Python.h":
# Add an object to module as name. This is a convenience function
# which can be used from the module's initialization
# function. This steals a reference to value. Return -1 on error,
- # 0 on success.
+ # 0 on success.
int PyModule_AddIntConstant(object module, char *name, long value) except -1
# Add an integer constant to module as name. This convenience
# function can be used from the module's initialization
- # function. Return -1 on error, 0 on success.
+ # function. Return -1 on error, 0 on success.
int PyModule_AddStringConstant(object module, char *name, char *value) except -1
# Add a string constant to module as name. This convenience
# function can be used from the module's initialization
# function. The string value must be null-terminated. Return -1 on
- # error, 0 on success.
+ # error, 0 on success.
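(A minimal sketch, not part of this diff; the module object and the constant names are illustrative.)

    from cpython.module cimport PyModule_AddIntConstant, PyModule_AddStringConstant

    def add_constants(mod):
        # Both helpers return -1 on error (mapped to an exception by the
        # "except -1" declarations above) and 0 on success.
        PyModule_AddIntConstant(mod, "MAX_RETRIES", 5)
        PyModule_AddStringConstant(mod, "VERSION", "0.14")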
diff --git a/Cython/Includes/cpython/number.pxd b/Cython/Includes/cpython/number.pxd
index 99a94c8ed..6304102ec 100644
--- a/Cython/Includes/cpython/number.pxd
+++ b/Cython/Includes/cpython/number.pxd
@@ -36,7 +36,7 @@ cdef extern from "Python.h":
object PyNumber_FloorDivide(object o1, object o2)
# Return value: New reference.
# Return the floor of o1 divided by o2, or NULL on failure. This
- # is equivalent to the ``classic'' division of integers.
+ # is equivalent to the ``classic'' division of integers.
object PyNumber_TrueDivide(object o1, object o2)
# Return value: New reference.
@@ -45,7 +45,7 @@ cdef extern from "Python.h":
# ``approximate'' because binary floating point numbers are
# approximate; it is not possible to represent all real numbers in
# base two. This function can return a floating point value when
- # passed two integers.
+ # passed two integers.
object PyNumber_Remainder(object o1, object o2)
# Return value: New reference.
@@ -113,7 +113,7 @@ cdef extern from "Python.h":
object PyNumber_Or(object o1, object o2)
# Return value: New reference.
- # Returns the ``bitwise or'' of o1 and o2 on success, or NULL on failure. This is the equivalent of the Python expression "o1 | o2".
+ # Returns the ``bitwise or'' of o1 and o2 on success, or NULL on failure. This is the equivalent of the Python expression "o1 | o2".
object PyNumber_InPlaceAdd(object o1, object o2)
# Return value: New reference.
@@ -144,7 +144,7 @@ cdef extern from "Python.h":
# Returns the mathematical floor of dividing o1 by o2, or NULL on
# failure. The operation is done in-place when o1 supports
# it. This is the equivalent of the Python statement "o1 //=
- # o2".
+ # o2".
object PyNumber_InPlaceTrueDivide(object o1, object o2)
# Return value: New reference.
@@ -154,7 +154,7 @@ cdef extern from "Python.h":
# approximate; it is not possible to represent all real numbers in
# base two. This function can return a floating point value when
# passed two integers. The operation is done in-place when o1
- # supports it.
+ # supports it.
object PyNumber_InPlaceRemainder(object o1, object o2)
# Return value: New reference.
@@ -244,7 +244,7 @@ cdef extern from "Python.h":
# exception that will be raised (usually IndexError or
# OverflowError). If exc is NULL, then the exception is cleared
# and the value is clipped to PY_SSIZE_T_MIN for a negative
- # integer or PY_SSIZE_T_MAX for a positive integer.
+ # integer or PY_SSIZE_T_MAX for a positive integer.
bint PyIndex_Check(object o)
# Returns True if o is an index integer (has the nb_index slot of
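(A minimal sketch, not part of this diff, contrasting the two division APIs described above.)

    from cpython.number cimport PyNumber_FloorDivide, PyNumber_TrueDivide

    def divide_both(a, b):
        # Floor ("classic") division and true division of the same operands,
        # i.e. a // b and a / b.
        return PyNumber_FloorDivide(a, b), PyNumber_TrueDivide(a, b)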
diff --git a/Cython/Includes/cpython/object.pxd b/Cython/Includes/cpython/object.pxd
index e26dc47a5..68c2cc11c 100644
--- a/Cython/Includes/cpython/object.pxd
+++ b/Cython/Includes/cpython/object.pxd
@@ -2,7 +2,7 @@ from cpython.ref cimport PyObject, PyTypeObject
from libc.stdio cimport FILE
cdef extern from "Python.h":
-
+
#####################################################################
# 6.1 Object Protocol
#####################################################################
@@ -53,7 +53,7 @@ cdef extern from "Python.h":
# Delete attribute named attr_name, for object o. Returns -1 on
# failure. This is the equivalent of the Python statement "del
# o.attr_name".
-
+
int Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, Py_GE
object PyObject_RichCompare(object o1, object o2, int opid)
@@ -121,7 +121,7 @@ cdef extern from "Python.h":
# instance and cls is neither a type object, nor a class object,
# nor a tuple, inst must have a __class__ attribute -- the class
# relationship of the value of that attribute with cls will be
- # used to determine the result of this function.
+ # used to determine the result of this function.
# Subclass determination is done in a fairly straightforward way,
# but includes a wrinkle that implementors of extensions to the
@@ -197,7 +197,7 @@ cdef extern from "Python.h":
# Call a callable Python object callable, with a variable number
# of PyObject* arguments. The arguments are provided as a variable
# number of parameters followed by NULL. Returns the result of the
- # call on success, or NULL on failure.
+ # call on success, or NULL on failure.
#PyObject* PyObject_CallMethodObjArgs(object o, object name, ..., NULL)
object PyObject_CallMethodObjArgs(object o, object name, ...)
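(A minimal sketch, not part of this diff, of the rich-comparison API above; less_than is an illustrative name.)

    from cpython.object cimport PyObject_RichCompare, Py_LT

    def less_than(a, b):
        # Equivalent to the Python expression "a < b", using the opid
        # constant declared above.
        return PyObject_RichCompare(a, b, Py_LT)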
diff --git a/Cython/Includes/cpython/pycapsule.pxd b/Cython/Includes/cpython/pycapsule.pxd
index aceed7cc5..f0b326bc2 100644
--- a/Cython/Includes/cpython/pycapsule.pxd
+++ b/Cython/Includes/cpython/pycapsule.pxd
@@ -96,7 +96,7 @@ cdef extern from "Python.h":
# In other words, if PyCapsule_IsValid() returns a true value,
# calls to any of the accessors (any function starting with
# PyCapsule_Get()) are guaranteed to succeed.
- #
+ #
# Return a nonzero value if the object is valid and matches the
# name passed in. Return 0 otherwise. This function will not fail.
diff --git a/Cython/Includes/cpython/ref.pxd b/Cython/Includes/cpython/ref.pxd
index 0bb49a3b7..8ffd98923 100644
--- a/Cython/Includes/cpython/ref.pxd
+++ b/Cython/Includes/cpython/ref.pxd
@@ -15,7 +15,7 @@ cdef extern from "Python.h":
# Py_XINCREF().
void Py_XINCREF(PyObject* o)
- # Increment the reference count for object o. The object may be NULL, in which case the macro has no effect.
+ # Increment the reference count for object o. The object may be NULL, in which case the macro has no effect.
void Py_DECREF(object o)
# Decrement the reference count for object o. The object must not
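(A minimal sketch, not part of this diff, of manual reference counting with the macros above; Py_XDECREF is assumed to live in the same cpython.ref file, and the class is illustrative.)

    from cpython.ref cimport PyObject, Py_XINCREF, Py_XDECREF

    cdef class Holder:
        cdef PyObject *slot                  # manually managed reference, starts NULL

        cdef void set_obj(self, obj):
            cdef PyObject *old = self.slot
            self.slot = <PyObject*> obj
            Py_XINCREF(self.slot)            # own the new reference (NULL-safe, per the note above)
            Py_XDECREF(old)                  # then release the old one

        def __dealloc__(self):
            Py_XDECREF(self.slot)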
diff --git a/Cython/Includes/cpython/sequence.pxd b/Cython/Includes/cpython/sequence.pxd
index f32982133..63a25ac1e 100644
--- a/Cython/Includes/cpython/sequence.pxd
+++ b/Cython/Includes/cpython/sequence.pxd
@@ -16,7 +16,7 @@ cdef extern from "Python.h":
# this is equivalent to the Python expression "len(o)".
Py_ssize_t PySequence_Length(object o) except -1
- # Alternate name for PySequence_Size().
+ # Alternate name for PySequence_Size().
object PySequence_Concat(object o1, object o2)
# Return value: New reference.
@@ -117,7 +117,7 @@ cdef extern from "Python.h":
PyObject** PySequence_Fast_ITEMS(object o)
# Return the underlying array of PyObject pointers. Assumes that o
- # was returned by PySequence_Fast() and o is not NULL.
+ # was returned by PySequence_Fast() and o is not NULL.
object PySequence_ITEM(object o, Py_ssize_t i)
# Return value: New reference.
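(A minimal sketch, not part of this diff; PySequence_Length and PySequence_Concat are both declared in this file, and the helper name is illustrative.)

    from cpython.sequence cimport PySequence_Length, PySequence_Concat

    def join_if_short(a, b, Py_ssize_t limit=16):
        # Concatenate two sequences only while their combined length stays
        # within limit; the length calls behave like len(a) and len(b).
        if PySequence_Length(a) + PySequence_Length(b) <= limit:
            return PySequence_Concat(a, b)
        raise ValueError("sequences too long")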
diff --git a/Cython/Includes/cpython/set.pxd b/Cython/Includes/cpython/set.pxd
index 3cedcb5bd..58ef2ca3c 100644
--- a/Cython/Includes/cpython/set.pxd
+++ b/Cython/Includes/cpython/set.pxd
@@ -27,10 +27,10 @@ cdef extern from "Python.h":
# structure.
# PyTypeObject PySet_Type
- # This is an instance of PyTypeObject representing the Python set type.
+ # This is an instance of PyTypeObject representing the Python set type.
# PyTypeObject PyFrozenSet_Type
- # This is an instance of PyTypeObject representing the Python frozenset type.
+ # This is an instance of PyTypeObject representing the Python frozenset type.
# The following type check macros work on pointers to any Python
# object. Likewise, the constructor functions work with any
@@ -45,7 +45,7 @@ cdef extern from "Python.h":
# an instance of a subtype.
bint PyFrozenSet_CheckExact(object p)
- # Return true if p is a frozenset object but not an instance of a subtype.
+ # Return true if p is a frozenset object but not an instance of a subtype.
object PySet_New(object iterable)
# Return value: New reference.
@@ -72,7 +72,7 @@ cdef extern from "Python.h":
# set, frozenset, or an instance of a subtype.
int PySet_GET_SIZE(object anyset)
- # Macro form of PySet_Size() without error checking.
+ # Macro form of PySet_Size() without error checking.
bint PySet_Contains(object anyset, object key) except -1
# Return 1 if found, 0 if not found, and -1 if an error is
@@ -110,4 +110,4 @@ cdef extern from "Python.h":
# not an instance of set or its subtype.
int PySet_Clear(object set)
- # Empty an existing set of all elements.
+ # Empty an existing set of all elements.
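(A minimal sketch, not part of this diff, using the constructor and membership test declared above.)

    from cpython.set cimport PySet_New, PySet_Contains

    def as_set(items, probe):
        # PySet_New accepts any iterable; PySet_Contains mirrors the Python
        # "in" operator and raises for unhashable keys.
        s = PySet_New(items)
        return s, PySet_Contains(s, probe)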
diff --git a/Cython/Includes/cpython/string.pxd b/Cython/Includes/cpython/string.pxd
index fc2fd33bb..65c6d371c 100644
--- a/Cython/Includes/cpython/string.pxd
+++ b/Cython/Includes/cpython/string.pxd
@@ -10,18 +10,18 @@ cdef extern from "Python.h":
# These functions raise TypeError when expecting a string
# parameter and are called with a non-string parameter.
# PyStringObject
- # This subtype of PyObject represents a Python string object.
+ # This subtype of PyObject represents a Python string object.
# PyTypeObject PyString_Type
# This instance of PyTypeObject represents the Python string type;
# it is the same object as str and types.StringType in the Python
- # layer.
+ # layer.
bint PyString_Check(object o)
# Return true if the object o is a string object or an instance of
- # a subtype of the string type.
+ # a subtype of the string type.
bint PyString_CheckExact(object o)
- # Return true if the object o is a string object, but not an instance of a subtype of the string type.
+ # Return true if the object o is a string object, but not an instance of a subtype of the string type.
object PyString_FromString(char *v)
# Return value: New reference.
@@ -66,13 +66,13 @@ cdef extern from "Python.h":
object PyString_FromFormatV(char *format, va_list vargs)
# Return value: New reference.
- # Identical to PyString_FromFormat() except that it takes exactly two arguments.
+ # Identical to PyString_FromFormat() except that it takes exactly two arguments.
Py_ssize_t PyString_Size(object string) except -1
- # Return the length of the string in string object string.
+ # Return the length of the string in string object string.
Py_ssize_t PyString_GET_SIZE(object string)
- # Macro form of PyString_Size() but without error checking.
+ # Macro form of PyString_Size() but without error checking.
char* PyString_AsString(object string) except NULL
# Return a NUL-terminated representation of the contents of
@@ -98,7 +98,7 @@ cdef extern from "Python.h":
# version of the object. If length is NULL, the resulting buffer
# may not contain NUL characters; if it does, the function returns
# -1 and a TypeError is raised.
-
+
# The buffer refers to an internal string buffer of obj, not a
# copy. The data must not be modified in any way, unless the
# string was just created using PyString_FromStringAndSize(NULL,
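(A minimal sketch, not part of this diff, of the Python 2 string API above; the function name is illustrative.)

    from cpython.string cimport PyString_FromString, PyString_AsString, PyString_Size

    def roundtrip_length(s):
        # Extract the NUL-terminated char* buffer of a str, wrap it in a new
        # str object, and return that object's length.  Embedded NUL bytes in
        # s would truncate the copy, since the buffer is treated as a C string.
        obj = PyString_FromString(PyString_AsString(s))
        return PyString_Size(obj)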
diff --git a/Cython/Includes/cpython/tuple.pxd b/Cython/Includes/cpython/tuple.pxd
index ca4b0f02f..0898a8b6b 100644
--- a/Cython/Includes/cpython/tuple.pxd
+++ b/Cython/Includes/cpython/tuple.pxd
@@ -15,7 +15,7 @@ cdef extern from "Python.h":
tuple PyTuple_New(Py_ssize_t len)
# Return value: New reference.
- # Return a new tuple object of size len, or NULL on failure.
+ # Return a new tuple object of size len, or NULL on failure.
tuple PyTuple_Pack(Py_ssize_t n, ...)
# Return value: New reference.
@@ -25,7 +25,7 @@ cdef extern from "Python.h":
# equivalent to "Py_BuildValue("(OO)", a, b)".
int PyTuple_Size(object p) except -1
- # Take a pointer to a tuple object, and return the size of that tuple.
+ # Take a pointer to a tuple object, and return the size of that tuple.
int PyTuple_GET_SIZE(object p)
# Return the size of the tuple p, which must be non-NULL and point
@@ -39,11 +39,11 @@ cdef extern from "Python.h":
PyObject* PyTuple_GET_ITEM(object p, Py_ssize_t pos)
# Return value: Borrowed reference.
- # Like PyTuple_GetItem(), but does no checking of its arguments.
+ # Like PyTuple_GetItem(), but does no checking of its arguments.
tuple PyTuple_GetSlice(object p, Py_ssize_t low, Py_ssize_t high)
# Return value: New reference.
- # Take a slice of the tuple pointed to by p from low to high and return it as a new tuple.
+ # Take a slice of the tuple pointed to by p from low to high and return it as a new tuple.
int PyTuple_SetItem(object p, Py_ssize_t pos, object o)
# Insert a reference to object o at position pos of the tuple
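(A minimal sketch, not part of this diff.  PyTuple_SetItem steals a reference, so each item is INCREF'ed first; Py_INCREF comes from cpython.ref.)

    from cpython.tuple cimport PyTuple_New, PyTuple_SetItem
    from cpython.ref cimport Py_INCREF

    def pair(a, b):
        t = PyTuple_New(2)
        Py_INCREF(a)
        PyTuple_SetItem(t, 0, a)    # consumes the reference added above
        Py_INCREF(b)
        PyTuple_SetItem(t, 1, b)
        return t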
diff --git a/Cython/Includes/cpython/type.pxd b/Cython/Includes/cpython/type.pxd
index 362a0964f..719954c3f 100644
--- a/Cython/Includes/cpython/type.pxd
+++ b/Cython/Includes/cpython/type.pxd
@@ -1,6 +1,6 @@
cdef extern from "Python.h":
- # The C structure of the objects used to describe built-in types.
+ # The C structure of the objects used to describe built-in types.
############################################################################
# 7.1.1 Type Objects
@@ -26,10 +26,10 @@ cdef extern from "Python.h":
bint PyType_IS_GC(object o)
# Return true if the type object includes support for the cycle
- # detector; this tests the type flag Py_TPFLAGS_HAVE_GC.
+ # detector; this tests the type flag Py_TPFLAGS_HAVE_GC.
bint PyType_IsSubtype(object a, object b)
- # Return true if a is a subtype of b.
+ # Return true if a is a subtype of b.
object PyType_GenericAlloc(object type, Py_ssize_t nitems)
# Return value: New reference.
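(A minimal sketch, not part of this diff; PyType_Check is assumed to be declared in the same cpython.type file.)

    from cpython.type cimport PyType_Check, PyType_IsSubtype

    def is_subtype(a, b):
        # True when both arguments are type objects and a is a subtype of b;
        # note that PyType_IsSubtype(t, t) is true for any type t.
        return PyType_Check(a) and PyType_Check(b) and PyType_IsSubtype(a, b)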
diff --git a/Cython/Includes/cpython/unicode.pxd b/Cython/Includes/cpython/unicode.pxd
index d24367902..97452c62d 100644
--- a/Cython/Includes/cpython/unicode.pxd
+++ b/Cython/Includes/cpython/unicode.pxd
@@ -32,35 +32,35 @@ cdef extern from *:
# Return 1 or 0 depending on whether ch is an uppercase character.
bint Py_UNICODE_ISUPPER(Py_UNICODE ch)
-
- # Return 1 or 0 depending on whether ch is a titlecase character.
+
+ # Return 1 or 0 depending on whether ch is a titlecase character.
bint Py_UNICODE_ISTITLE(Py_UNICODE ch)
- # Return 1 or 0 depending on whether ch is a linebreak character.
+ # Return 1 or 0 depending on whether ch is a linebreak character.
bint Py_UNICODE_ISLINEBREAK(Py_UNICODE ch)
- # Return 1 or 0 depending on whether ch is a decimal character.
+ # Return 1 or 0 depending on whether ch is a decimal character.
bint Py_UNICODE_ISDECIMAL(Py_UNICODE ch)
- # Return 1 or 0 depending on whether ch is a digit character.
+ # Return 1 or 0 depending on whether ch is a digit character.
bint Py_UNICODE_ISDIGIT(Py_UNICODE ch)
- # Return 1 or 0 depending on whether ch is a numeric character.
+ # Return 1 or 0 depending on whether ch is a numeric character.
bint Py_UNICODE_ISNUMERIC(Py_UNICODE ch)
- # Return 1 or 0 depending on whether ch is an alphabetic character.
+ # Return 1 or 0 depending on whether ch is an alphabetic character.
bint Py_UNICODE_ISALPHA(Py_UNICODE ch)
- # Return 1 or 0 depending on whether ch is an alphanumeric character.
+ # Return 1 or 0 depending on whether ch is an alphanumeric character.
bint Py_UNICODE_ISALNUM(Py_UNICODE ch)
- # Return the character ch converted to lower case.
+ # Return the character ch converted to lower case.
Py_UNICODE Py_UNICODE_TOLOWER(Py_UNICODE ch)
- # Return the character ch converted to upper case.
+ # Return the character ch converted to upper case.
Py_UNICODE Py_UNICODE_TOUPPER(Py_UNICODE ch)
- # Return the character ch converted to title case.
+ # Return the character ch converted to title case.
Py_UNICODE Py_UNICODE_TOTITLE(Py_UNICODE ch)
# Return the character ch converted to a decimal positive
@@ -100,7 +100,7 @@ cdef extern from *:
# Py_UNICODE buffer, NULL if unicode is not a Unicode object.
Py_UNICODE* PyUnicode_AsUnicode(object o) except NULL
- # Return the length of the Unicode object.
+ # Return the length of the Unicode object.
Py_ssize_t PyUnicode_GetSize(object o) except -1
# Coerce an encoded object obj to an Unicode object and return a
@@ -175,7 +175,7 @@ cdef extern from *:
# raised by the codec.
object PyUnicode_EncodeUTF8(Py_UNICODE *s, Py_ssize_t size, char *errors)
- # Encode a Unicode objects using UTF-8 and return the result as Python string object. Error handling is ``strict''. Return NULL if an exception was raised by the codec.
+ # Encode a Unicode objects using UTF-8 and return the result as Python string object. Error handling is ``strict''. Return NULL if an exception was raised by the codec.
object PyUnicode_AsUTF8String(object unicode)
# These are the UTF-16 codec APIs:
@@ -183,7 +183,7 @@ cdef extern from *:
# Decode length bytes from a UTF-16 encoded buffer string and
# return the corresponding Unicode object. errors (if non-NULL)
# defines the error handling. It defaults to ``strict''.
- #
+ #
# If byteorder is non-NULL, the decoder starts decoding using the
# given byte order:
#
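(A minimal sketch, not part of this diff, of the UTF-8 codec entry point above; PyUnicode_Check is assumed to be declared in the same file.)

    from cpython.unicode cimport PyUnicode_Check, PyUnicode_AsUTF8String

    def to_utf8(u):
        # Equivalent to u.encode('utf-8') with "strict" error handling.
        if not PyUnicode_Check(u):
            raise TypeError("unicode object expected")
        return PyUnicode_AsUTF8String(u)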
diff --git a/Cython/Includes/libc/signal.pxd b/Cython/Includes/libc/signal.pxd
index 710558f6c..4d3c4fadb 100644
--- a/Cython/Includes/libc/signal.pxd
+++ b/Cython/Includes/libc/signal.pxd
@@ -65,4 +65,4 @@ cdef extern from "signal.h" nogil:
enum: SIGUSR2
enum: SIGWINCH
enum: SIGINFO
-
+
diff --git a/Cython/Includes/libc/stdio.pxd b/Cython/Includes/libc/stdio.pxd
index eba564cd2..025378a0e 100644
--- a/Cython/Includes/libc/stdio.pxd
+++ b/Cython/Includes/libc/stdio.pxd
@@ -10,7 +10,7 @@ cdef extern from "stdio.h" nogil:
cdef FILE *stdin
cdef FILE *stdout
cdef FILE *stderr
-
+
enum: FOPEN_MAX
enum: FILENAME_MAX
FILE *fopen (const_char *FILENAME, const_char *OPENTYPE)
diff --git a/Cython/Includes/libc/stdlib.pxd b/Cython/Includes/libc/stdlib.pxd
index 1da45c755..a5b8d9772 100644
--- a/Cython/Includes/libc/stdlib.pxd
+++ b/Cython/Includes/libc/stdlib.pxd
@@ -18,7 +18,7 @@ cdef extern from "stdlib.h" nogil:
float strtof (const_char *STRING, char **TAILPTR)
double strtod (const_char *STRING, char **TAILPTR)
long double strtold (const_char *STRING, char **TAILPTR)
-
+
# 7.20.2 Pseudo-random sequence generation functions
enum: RAND_MAX
int rand ()
@@ -42,7 +42,7 @@ cdef extern from "stdlib.h" nogil:
#7.20.5 Searching and sorting utilities
void *bsearch (const_void *KEY, const_void *ARRAY,
- size_t COUNT, size_t SIZE,
+ size_t COUNT, size_t SIZE,
int (*COMPARE)(const_void *, const_void *))
void qsort (void *ARRAY, size_t COUNT, size_t SIZE,
int (*COMPARE)(const_void *, const_void *))
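(A minimal sketch, not part of this diff, of the pseudo-random declarations above; srand is assumed to be declared alongside rand and RAND_MAX.)

    from libc.stdlib cimport srand, rand, RAND_MAX

    def uniform(unsigned int seed):
        # Seed the C library generator and return one draw scaled into
        # the range [0.0, 1.0].
        srand(seed)
        return rand() / <double> RAND_MAX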
diff --git a/Cython/Includes/libc/string.pxd b/Cython/Includes/libc/string.pxd
index 87469e811..101129e21 100644
--- a/Cython/Includes/libc/string.pxd
+++ b/Cython/Includes/libc/string.pxd
@@ -22,7 +22,7 @@ cdef extern from "string.h" nogil:
char *strndup (const_char *S, size_t SIZE)
char *strcat (char *TO, const_char *FROM)
char *strncat (char *TO, const_char *FROM, size_t SIZE)
-
+
int strcmp (const_char *S1, const_char *S2)
int strcasecmp (const_char *S1, const_char *S2)
int strncmp (const_char *S1, const_char *S2, size_t SIZE)
diff --git a/Cython/Includes/numpy.pxd b/Cython/Includes/numpy.pxd
index 20b116d0d..e0cf332e7 100644
--- a/Cython/Includes/numpy.pxd
+++ b/Cython/Includes/numpy.pxd
@@ -34,15 +34,15 @@ cdef extern from "numpy/arrayobject.h":
NPY_BYTE
NPY_UBYTE
NPY_SHORT
- NPY_USHORT
+ NPY_USHORT
NPY_INT
- NPY_UINT
+ NPY_UINT
NPY_LONG
NPY_ULONG
NPY_LONGLONG
NPY_ULONGLONG
NPY_FLOAT
- NPY_DOUBLE
+ NPY_DOUBLE
NPY_LONGDOUBLE
NPY_CFLOAT
NPY_CDOUBLE
@@ -138,14 +138,14 @@ cdef extern from "numpy/arrayobject.h":
NPY_INOUT_FARRAY
NPY_UPDATE_ALL
-
+
cdef enum:
NPY_MAXDIMS
npy_intp NPY_MAX_ELSIZE
ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *)
-
+
ctypedef class numpy.dtype [object PyArray_Descr]:
# Use PyDataType_* macros when possible, however there are no macros
# for accessing some of the fields, so some are defined. Please
@@ -168,16 +168,16 @@ cdef extern from "numpy/arrayobject.h":
# For use in situations where ndarray can't replace PyArrayObject*,
# like PyArrayObject**.
pass
-
+
ctypedef class numpy.ndarray [object PyArrayObject]:
cdef __cythonbufferdefaults__ = {"mode": "strided"}
-
+
cdef:
# Only taking a few of the most commonly used and stable fields.
# One should use PyArray_* macros instead to access the C fields.
char *data
int ndim "nd"
- npy_intp *shape "dimensions"
+ npy_intp *shape "dimensions"
npy_intp *strides
dtype descr
PyObject* base
@@ -193,7 +193,7 @@ cdef extern from "numpy/arrayobject.h":
cdef int copy_shape, i, ndim
cdef int endian_detector = 1
cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
-
+
ndim = PyArray_NDIM(self)
if sizeof(npy_intp) != sizeof(Py_ssize_t):
@@ -204,7 +204,7 @@ cdef extern from "numpy/arrayobject.h":
if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
raise ValueError(u"ndarray is not C contiguous")
-
+
if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
raise ValueError(u"ndarray is not Fortran contiguous")
@@ -307,7 +307,7 @@ cdef extern from "numpy/arrayobject.h":
ctypedef signed int npy_int32
ctypedef signed long long npy_int64
ctypedef signed long long npy_int96
- ctypedef signed long long npy_int128
+ ctypedef signed long long npy_int128
ctypedef unsigned char npy_uint8
ctypedef unsigned short npy_uint16
@@ -349,7 +349,7 @@ cdef extern from "numpy/arrayobject.h":
ctypedef struct npy_complex192:
double real
double imag
-
+
ctypedef struct npy_complex256:
double real
double imag
@@ -388,7 +388,7 @@ cdef extern from "numpy/arrayobject.h":
object PyArray_GETITEM(ndarray arr, void *itemptr)
int PyArray_SETITEM(ndarray arr, void *itemptr, object obj)
-
+
bint PyTypeNum_ISBOOL(int)
bint PyTypeNum_ISUNSIGNED(int)
bint PyTypeNum_ISSIGNED(int)
@@ -476,7 +476,7 @@ cdef extern from "numpy/arrayobject.h":
bint PyArray_SAMESHAPE(ndarray, ndarray)
npy_intp PyArray_SIZE(ndarray)
npy_intp PyArray_NBYTES(ndarray)
-
+
object PyArray_FROM_O(object)
object PyArray_FROM_OF(object m, int flags)
bint PyArray_FROM_OT(object m, int type)
@@ -703,7 +703,7 @@ cdef extern from "numpy/arrayobject.h":
object PyArray_CheckAxis (ndarray, int *, int)
npy_intp PyArray_OverflowMultiplyList (npy_intp *, int)
int PyArray_CompareString (char *, char *, size_t)
-
+
# Typedefs that matches the runtime dtype objects in
# the numpy module.
@@ -778,7 +778,7 @@ cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset
cdef int endian_detector = 1
cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
cdef tuple fields
-
+
for childname in descr.names:
fields = descr.fields[childname]
child, new_offset = fields
@@ -796,7 +796,7 @@ cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset
#
# A proper PEP 3118 exporter for other clients than Cython
# must deal properly with this!
-
+
# Output padding bytes
while offset[0] < new_offset:
f[0] = 120 # "x"; pad byte
@@ -804,7 +804,7 @@ cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset
offset[0] += 1
offset[0] += child.itemsize
-
+
if not PyDataType_HASFIELDS(child):
t = child.type_num
if end - f < 5:
@@ -845,7 +845,7 @@ cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset
cdef extern from "numpy/ufuncobject.h":
ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *)
-
+
ctypedef extern class numpy.ufunc [object PyUFuncObject]:
cdef:
int nin, nout, nargs
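(A minimal sketch, not part of this diff, of the buffer interface that the __getbuffer__ code above implements; float64_t is assumed to be one of the dtype typedefs defined later in this file.)

    import numpy
    cimport numpy as np

    def total(np.ndarray[np.float64_t, ndim=1] a):
        # The typed buffer argument is acquired through ndarray.__getbuffer__
        # as shown above, so shape and strides are available as C data.
        cdef Py_ssize_t i
        cdef double s = 0.0
        for i in range(a.shape[0]):
            s += a[i]
        return s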
diff --git a/Cython/Includes/posix/fcntl.pxd b/Cython/Includes/posix/fcntl.pxd
index 093eab004..b71d31176 100644
--- a/Cython/Includes/posix/fcntl.pxd
+++ b/Cython/Includes/posix/fcntl.pxd
@@ -64,4 +64,4 @@ cdef extern from "fcntl.h" nogil:
int fcntl(int, int, ...)
int open(char *, int, ...)
#int open (char *, int, mode_t)
-
+
diff --git a/Cython/Includes/posix/unistd.pxd b/Cython/Includes/posix/unistd.pxd
index 8fdfdfbce..2e115f009 100644
--- a/Cython/Includes/posix/unistd.pxd
+++ b/Cython/Includes/posix/unistd.pxd
@@ -21,7 +21,7 @@ cdef extern from "unistd.h" nogil:
enum: F_TEST
enum: F_TLOCK
enum: F_ULOCK
-
+
# pathconf()
# _PC_*
diff --git a/Cython/Plex/Actions.py b/Cython/Plex/Actions.py
index 50e1c9ccb..ee6e0f987 100644
--- a/Cython/Plex/Actions.py
+++ b/Cython/Plex/Actions.py
@@ -55,7 +55,7 @@ class Call(Action):
class Begin(Action):
"""
Begin(state_name) is a Plex action which causes the Scanner to
- enter the state |state_name|. See the docstring of Plex.Lexicon
+ enter the state |state_name|. See the docstring of Plex.Lexicon
for more information.
"""
@@ -75,7 +75,7 @@ class Begin(Action):
class Ignore(Action):
"""
IGNORE is a Plex action which causes its associated token
- to be ignored. See the docstring of Plex.Lexicon for more
+ to be ignored. See the docstring of Plex.Lexicon for more
information.
"""
def perform(self, token_stream, text):
@@ -90,7 +90,7 @@ IGNORE = Ignore()
class Text(Action):
"""
TEXT is a Plex action which causes the text of a token to
- be returned as the value of the token. See the docstring of
+ be returned as the value of the token. See the docstring of
Plex.Lexicon for more information.
"""
diff --git a/Cython/Plex/DFA.py b/Cython/Plex/DFA.py
index 6d0085a89..510d39318 100644
--- a/Cython/Plex/DFA.py
+++ b/Cython/Plex/DFA.py
@@ -115,7 +115,7 @@ class StateMap(object):
#for old_state in old_state_set.keys():
#new_state.merge_actions(old_state)
return new_state
-
+
def highest_priority_action(self, state_set):
best_action = None
best_priority = LOWEST_PRIORITY
@@ -125,7 +125,7 @@ class StateMap(object):
best_action = state.action
best_priority = priority
return best_action
-
+
# def old_to_new_set(self, old_state_set):
# """
# Return the new state corresponding to a set of old states as
@@ -152,5 +152,5 @@ class StateMap(object):
old_state_set = self.new_to_old_dict[id(new_state)]
file.write(" State %s <-- %s\n" % (
new_state['number'], state_set_str(old_state_set)))
-
+
diff --git a/Cython/Plex/Errors.py b/Cython/Plex/Errors.py
index 3c134735f..965e8d351 100644
--- a/Cython/Plex/Errors.py
+++ b/Cython/Plex/Errors.py
@@ -43,7 +43,7 @@ class UnrecognizedInput(PlexError):
self.state_name = state_name
def __str__(self):
- return ("'%s', line %d, char %d: Token not recognised in state %s"
+ return ("'%s', line %d, char %d: Token not recognised in state %s"
% (self.position + (repr(self.state_name),)))
diff --git a/Cython/Plex/Lexicons.py b/Cython/Plex/Lexicons.py
index e6cea0528..88074666b 100644
--- a/Cython/Plex/Lexicons.py
+++ b/Cython/Plex/Lexicons.py
@@ -172,7 +172,7 @@ class Lexicon(object):
else:
action = Actions.Call(action_spec)
final_state = machine.new_state()
- re.build_machine(machine, initial_state, final_state,
+ re.build_machine(machine, initial_state, final_state,
match_bol = 1, nocase = 0)
final_state.set_action(action, priority = -token_number)
except Errors.PlexError, e:
diff --git a/Cython/Plex/Machines.py b/Cython/Plex/Machines.py
index 6d92d9c65..531d68e95 100644
--- a/Cython/Plex/Machines.py
+++ b/Cython/Plex/Machines.py
@@ -46,7 +46,7 @@ class Machine(object):
def get_initial_state(self, name):
return self.initial_states[name]
-
+
def dump(self, file):
file.write("Plex.Machine:\n")
if self.initial_states is not None:
@@ -79,13 +79,13 @@ class Node(object):
def add_transition(self, event, new_state):
self.transitions.add(event, new_state)
-
+
def link_to(self, state):
"""Add an epsilon-move from this state to another state."""
self.add_transition('', state)
def set_action(self, action, priority):
- """Make this an accepting state with the given action. If
+ """Make this an accepting state with the given action. If
there is already an action, choose the action with highest
priority."""
if priority > self.action_priority:
@@ -128,11 +128,11 @@ class FastMachine(object):
states = None # [state]
# where state = {event:state, 'else':state, 'action':Action}
next_number = 1 # for debugging
-
+
new_state_template = {
'':None, 'bol':None, 'eol':None, 'eof':None, 'else':None
}
-
+
def __init__(self, old_machine = None):
self.initial_states = initial_states = {}
self.states = []
@@ -151,11 +151,11 @@ class FastMachine(object):
else:
new_state[event] = None
new_state['action'] = old_state.action
-
+
def __del__(self):
for state in self.states:
state.clear()
-
+
def new_state(self, action = None):
number = self.next_number
self.next_number = number + 1
@@ -164,10 +164,10 @@ class FastMachine(object):
result['action'] = action
self.states.append(result)
return result
-
+
def make_initial_state(self, name, state):
self.initial_states[name] = state
-
+
def add_transitions(self, state, event, new_state, maxint=sys.maxint):
if type(event) is tuple:
code0, code1 = event
@@ -179,10 +179,10 @@ class FastMachine(object):
code0 = code0 + 1
else:
state[event] = new_state
-
+
def get_initial_state(self, name):
return self.initial_states[name]
-
+
def dump(self, file):
file.write("Plex.FastMachine:\n")
file.write(" Initial states:\n")
@@ -200,7 +200,7 @@ class FastMachine(object):
action = state['action']
if action is not None:
file.write(" %s\n" % action)
-
+
def dump_transitions(self, state, file):
chars_leading_to_state = {}
special_to_state = {}
@@ -244,10 +244,10 @@ class FastMachine(object):
c2 = c2 + 1
result.append((chr(c1), chr(c2)))
return tuple(result)
-
+
def ranges_to_string(self, range_list):
return ','.join(map(self.range_to_string, range_list))
-
+
def range_to_string(self, range_tuple):
(c1, c2) = range_tuple
if c1 == c2:
diff --git a/Cython/Plex/Regexps.py b/Cython/Plex/Regexps.py
index dea66b6ef..63dd6908f 100644
--- a/Cython/Plex/Regexps.py
+++ b/Cython/Plex/Regexps.py
@@ -90,8 +90,8 @@ def CodeRange(code1, code2):
with a code |c| in the range |code1| <= |c| < |code2|.
"""
if code1 <= nl_code < code2:
- return Alt(RawCodeRange(code1, nl_code),
- RawNewline,
+ return Alt(RawCodeRange(code1, nl_code),
+ RawNewline,
RawCodeRange(nl_code + 1, code2))
else:
return RawCodeRange(code1, code2)
@@ -111,8 +111,8 @@ class RE(object):
nullable = 1 # True if this RE can match 0 input symbols
match_nl = 1 # True if this RE can match a string ending with '\n'
str = None # Set to a string to override the class's __str__ result
-
- def build_machine(self, machine, initial_state, final_state,
+
+ def build_machine(self, machine, initial_state, final_state,
match_bol, nocase):
"""
This method should add states to |machine| to implement this
@@ -121,9 +121,9 @@ class RE(object):
beginning of a line. If nocase is true, upper and lower case
letters should be treated as equivalent.
"""
- raise NotImplementedError("%s.build_machine not implemented" %
+ raise NotImplementedError("%s.build_machine not implemented" %
self.__class__.__name__)
-
+
def build_opt(self, m, initial_state, c):
"""
Given a state |s| of machine |m|, return a new state
@@ -153,7 +153,7 @@ class RE(object):
def check_string(self, num, value):
if type(value) != type(''):
self.wrong_type(num, value, "string")
-
+
def check_char(self, num, value):
self.check_string(num, value)
if len(value) != 1:
@@ -170,7 +170,7 @@ class RE(object):
raise Errors.PlexTypeError("Invalid type for argument %d of Plex.%s "
"(expected %s, got %s" % (
num, self.__class__.__name__, expected, got))
-
+
#
# Primitive RE constructors
# -------------------------
@@ -182,13 +182,13 @@ class RE(object):
## """
## Char(c) is an RE which matches the character |c|.
## """
-
+
## nullable = 0
-
+
## def __init__(self, char):
## self.char = char
## self.match_nl = char == '\n'
-
+
## def build_machine(self, m, initial_state, final_state, match_bol, nocase):
## c = self.char
## if match_bol and c != BOL:
@@ -231,12 +231,12 @@ class RawCodeRange(RE):
range = None # (code, code)
uppercase_range = None # (code, code) or None
lowercase_range = None # (code, code) or None
-
+
def __init__(self, code1, code2):
self.range = (code1, code2)
self.uppercase_range = uppercase_range(code1, code2)
self.lowercase_range = lowercase_range(code1, code2)
-
+
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
if match_bol:
initial_state = self.build_opt(m, initial_state, BOL)
@@ -246,7 +246,7 @@ class RawCodeRange(RE):
initial_state.add_transition(self.uppercase_range, final_state)
if self.lowercase_range:
initial_state.add_transition(self.lowercase_range, final_state)
-
+
def calc_str(self):
return "CodeRange(%d,%d)" % (self.code1, self.code2)
@@ -310,7 +310,7 @@ class Seq(RE):
if not re.nullable:
break
self.match_nl = match_nl
-
+
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
re_list = self.re_list
if len(re_list) == 0:
@@ -394,7 +394,7 @@ class Rep1(RE):
class SwitchCase(RE):
"""
- SwitchCase(re, nocase) is an RE which matches the same strings as RE,
+ SwitchCase(re, nocase) is an RE which matches the same strings as RE,
but treating upper and lower case letters according to |nocase|. If
|nocase| is true, case is ignored, otherwise it is not.
"""
@@ -408,7 +408,7 @@ class SwitchCase(RE):
self.match_nl = re.match_nl
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
- self.re.build_machine(m, initial_state, final_state, match_bol,
+ self.re.build_machine(m, initial_state, final_state, match_bol,
self.nocase)
def calc_str(self):
@@ -524,7 +524,7 @@ def NoCase(re):
def Case(re):
"""
Case(re) is an RE which matches the same strings as RE, but treating
- upper and lower case letters as distinct, i.e. it cancels the effect
+ upper and lower case letters as distinct, i.e. it cancels the effect
of any enclosing NoCase().
"""
return SwitchCase(re, nocase = 0)
@@ -532,7 +532,7 @@ def Case(re):
#
# RE Constants
#
-
+
Bol = Char(BOL)
Bol.__doc__ = \
"""
diff --git a/Cython/Plex/Scanners.pxd b/Cython/Plex/Scanners.pxd
index 8e93e4be9..1415220f0 100644
--- a/Cython/Plex/Scanners.pxd
+++ b/Cython/Plex/Scanners.pxd
@@ -38,6 +38,6 @@ cdef class Scanner:
buf_start_pos=long, buf_len=long, buf_index=long,
trace=bint, discard=long, data=unicode, buffer=unicode)
cdef run_machine_inlined(self)
-
+
cdef begin(self, state)
cdef produce(self, value, text = *)
diff --git a/Cython/Plex/Scanners.py b/Cython/Plex/Scanners.py
index d2fa468c6..36ed9d19c 100644
--- a/Cython/Plex/Scanners.py
+++ b/Cython/Plex/Scanners.py
@@ -37,10 +37,10 @@ class Scanner(object):
position() --> (name, line, col)
Returns the position of the last token read using the
read() method.
-
+
begin(state_name)
Causes scanner to change state.
-
+
produce(value [, text])
Causes return of a token value to the caller of the
Scanner.
@@ -90,7 +90,7 @@ class Scanner(object):
self.start_col = 0
self.text = None
self.state_name = None
-
+
self.lexicon = lexicon
self.stream = stream
self.name = name
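(A minimal sketch, not part of this diff, exercising the Lexicon/Scanner machinery touched above; it assumes the top-level Cython.Plex package re-exports these names, and the state and pattern choices are illustrative.)

    from Cython.Plex import (Lexicon, State, Str, Any, AnyBut, Rep1,
                             Begin, TEXT, IGNORE, Scanner)
    from StringIO import StringIO      # Python 2, matching this codebase

    lexicon = Lexicon([
        (Rep1(Any("0123456789")), TEXT),              # return the digits themselves
        (Str("#"),                Begin('comment')),  # switch state, as Begin() above
        (Any(" \t\n"),            IGNORE),
        State('comment', [
            (Str("\n"),    Begin('')),                # back to the default state
            (AnyBut("\n"), IGNORE),                   # swallow the comment body
        ]),
    ])

    scanner = Scanner(lexicon, StringIO("12 # skipped\n34\n"), 'demo')
    while True:
        value, text = scanner.read()   # (None, '') signals end of input
        if value is None:
            break
        print value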
diff --git a/Cython/Plex/Traditional.py b/Cython/Plex/Traditional.py
index fe13d3a1b..6d3e48fa4 100644
--- a/Cython/Plex/Traditional.py
+++ b/Cython/Plex/Traditional.py
@@ -11,7 +11,7 @@ from Errors import PlexError
class RegexpSyntaxError(PlexError):
pass
-
+
def re(s):
"""
Convert traditional string representation of regular expression |s|
@@ -26,13 +26,13 @@ class REParser(object):
self.i = -1
self.end = 0
self.next()
-
+
def parse_re(self):
re = self.parse_alt()
if not self.end:
self.error("Unexpected %s" % repr(self.c))
return re
-
+
def parse_alt(self):
"""Parse a set of alternative regexps."""
re = self.parse_seq()
@@ -43,14 +43,14 @@ class REParser(object):
re_list.append(self.parse_seq())
re = Alt(*re_list)
return re
-
+
def parse_seq(self):
"""Parse a sequence of regexps."""
re_list = []
while not self.end and not self.c in "|)":
re_list.append(self.parse_mod())
return Seq(*re_list)
-
+
def parse_mod(self):
"""Parse a primitive regexp followed by *, +, ? modifiers."""
re = self.parse_prim()
@@ -84,7 +84,7 @@ class REParser(object):
c = self.get()
re = Char(c)
return re
-
+
def parse_charset(self):
"""Parse a charset. Does not include the surrounding []."""
char_list = []
@@ -109,7 +109,7 @@ class REParser(object):
return AnyBut(chars)
else:
return Any(chars)
-
+
def next(self):
"""Advance to the next char."""
s = self.s
@@ -119,14 +119,14 @@ class REParser(object):
else:
self.c = ''
self.end = 1
-
+
def get(self):
if self.end:
self.error("Premature end of string")
c = self.c
self.next()
return c
-
+
def lookahead(self, n):
"""Look ahead n chars."""
j = self.i + n
@@ -144,11 +144,11 @@ class REParser(object):
self.next()
else:
self.error("Missing %s" % repr(c))
-
+
def error(self, mess):
"""Raise exception to signal syntax error in regexp."""
raise RegexpSyntaxError("Syntax error in regexp %s at position %d: %s" % (
repr(self.s), self.i, mess))
-
-
+
+
diff --git a/Cython/Plex/Transitions.py b/Cython/Plex/Transitions.py
index 98d5a286d..8ed55148d 100644
--- a/Cython/Plex/Transitions.py
+++ b/Cython/Plex/Transitions.py
@@ -11,34 +11,34 @@ from sys import maxint as maxint
class TransitionMap(object):
"""
A TransitionMap maps an input event to a set of states.
- An input event is one of: a range of character codes,
- the empty string (representing an epsilon move), or one
+ An input event is one of: a range of character codes,
+ the empty string (representing an epsilon move), or one
of the special symbols BOL, EOL, EOF.
-
- For characters, this implementation compactly represents
+
+ For characters, this implementation compactly represents
the map by means of a list:
-
+
[code_0, states_0, code_1, states_1, code_2, states_2,
..., code_n-1, states_n-1, code_n]
-
- where |code_i| is a character code, and |states_i| is a
+
+ where |code_i| is a character code, and |states_i| is a
set of states corresponding to characters with codes |c|
in the range |code_i| <= |c| <= |code_i+1|.
-
+
The following invariants hold:
n >= 1
code_0 == -maxint
code_n == maxint
code_i < code_i+1 for i in 0..n-1
states_0 == states_n-1
-
+
Mappings for the special events '', BOL, EOL, EOF are
kept separately in a dictionary.
"""
-
+
map = None # The list of codes and states
special = None # Mapping for special events
-
+
def __init__(self, map = None, special = None):
if not map:
map = [-maxint, {}, maxint]
@@ -47,7 +47,7 @@ class TransitionMap(object):
self.map = map
self.special = special
#self.check() ###
-
+
def add(self, event, new_state,
TupleType = tuple):
"""
@@ -79,14 +79,14 @@ class TransitionMap(object):
i = i + 2
else:
self.get_special(event).update(new_set)
-
+
def get_epsilon(self,
none = None):
"""
Return the mapping for epsilon, or None.
"""
return self.special.get('', none)
-
+
def iteritems(self,
len = len):
"""
@@ -111,14 +111,14 @@ class TransitionMap(object):
result.append((event, set))
return iter(result)
items = iteritems
-
+
# ------------------- Private methods --------------------
def split(self, code,
len = len, maxint = maxint):
"""
- Search the list for the position of the split point for |code|,
- inserting a new split point if necessary. Returns index |i| such
+ Search the list for the position of the split point for |code|,
+ inserting a new split point if necessary. Returns index |i| such
that |code| == |map[i]|.
"""
# We use a funky variation on binary search.
@@ -144,7 +144,7 @@ class TransitionMap(object):
map[hi:hi] = [code, map[hi - 1].copy()]
#self.check() ###
return hi
-
+
def get_special(self, event):
"""
Get state set for special event, adding a new entry if necessary.
@@ -157,7 +157,7 @@ class TransitionMap(object):
return set
# --------------------- Conversion methods -----------------------
-
+
def __str__(self):
map_strs = []
map = self.map
@@ -183,15 +183,15 @@ class TransitionMap(object):
','.join(map_strs),
special_strs
)
-
+
# --------------------- Debugging methods -----------------------
-
+
def check(self):
"""Check data structure integrity."""
if not self.map[-3] < self.map[-1]:
print(self)
assert 0
-
+
def dump(self, file):
map = self.map
i = 0
@@ -204,7 +204,7 @@ class TransitionMap(object):
if not event:
event = 'empty'
self.dump_trans(event, set, file)
-
+
def dump_range(self, code0, code1, set, file):
if set:
if code0 == -maxint:
@@ -217,19 +217,19 @@ class TransitionMap(object):
elif code0 == code1 - 1:
k = self.dump_char(code0)
else:
- k = "%s..%s" % (self.dump_char(code0),
+ k = "%s..%s" % (self.dump_char(code0),
self.dump_char(code1 - 1))
self.dump_trans(k, set, file)
-
+
def dump_char(self, code):
if 0 <= code <= 255:
return repr(chr(code))
else:
return "chr(%d)" % code
-
+
def dump_trans(self, key, set, file):
file.write(" %s --> %s\n" % (key, self.dump_set(set)))
-
+
def dump_set(self, set):
return state_set_str(set)
@@ -243,6 +243,6 @@ class TransitionMap(object):
def state_set_str(set):
return "[%s]" % ','.join(["S%d" % state.number for state in set])
-
-
+
+
diff --git a/Cython/Shadow.py b/Cython/Shadow.py
index ddfb3fc55..d0e401ea8 100644
--- a/Cython/Shadow.py
+++ b/Cython/Shadow.py
@@ -49,10 +49,10 @@ def sizeof(arg):
def typeof(arg):
return type(arg)
-
+
def address(arg):
return pointer(type(arg))([arg])
-
+
def declare(type=None, value=None, **kwds):
if type is not None and hasattr(type, '__call__'):
if value:
@@ -97,35 +97,35 @@ class PointerType(CythonType):
self._items = []
else:
raise ValueError
-
+
def __getitem__(self, ix):
if ix < 0:
raise IndexError("negative indexing not allowed in C")
return self._items[ix]
-
+
def __setitem__(self, ix, value):
if ix < 0:
raise IndexError("negative indexing not allowed in C")
self._items[ix] = cast(self._basetype, value)
-
+
class ArrayType(PointerType):
-
+
def __init__(self):
self._items = [None] * self._n
class StructType(CythonType):
-
+
def __init__(self, **data):
for key, value in data.iteritems():
setattr(self, key, value)
-
+
def __setattr__(self, key, value):
if key in self._members:
self.__dict__[key] = cast(self._members[key], value)
else:
raise AttributeError("Struct has no member '%s'" % key)
-
+
class UnionType(CythonType):
@@ -134,7 +134,7 @@ class UnionType(CythonType):
raise AttributeError("Union can only store one field at a time.")
for key, value in data.iteritems():
setattr(self, key, value)
-
+
def __setattr__(self, key, value):
if key in '__dict__':
CythonType.__setattr__(self, key, value)
@@ -172,12 +172,12 @@ class typedef(CythonType):
def __init__(self, type):
self._basetype = type
-
+
def __call__(self, value=None):
if value is not None:
value = cast(self._basetype, value)
return value
-
+
py_int = int
@@ -214,7 +214,7 @@ for name in int_types:
if name != 'Py_UNICODE' and not name.endswith('size_t'):
gs['u'+name] = typedef(py_int)
gs['s'+name] = typedef(py_int)
-
+
for name in float_types:
gs[name] = typedef(py_float)
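(A minimal sketch, not part of this diff, of the pure-Python fallback above, run without compiling anything.)

    from Cython import Shadow as cython   # the same names the "cython" module exposes

    def demo():
        # Under plain CPython these calls hit the Shadow implementations:
        # declare() simply hands back the (coerced) value and typeof() is type().
        n = cython.declare(cython.int, 5)
        return cython.typeof(n), cython.sizeof(n)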
diff --git a/Cython/StringIOTree.py b/Cython/StringIOTree.py
index 0d4012bf7..c41c3d6e8 100644
--- a/Cython/StringIOTree.py
+++ b/Cython/StringIOTree.py
@@ -65,7 +65,7 @@ class StringIOTree(object):
def allmarkers(self):
children = self.prepended_children
return [m for c in children for m in c.allmarkers()] + self.markers
-
+
__doc__ = r"""
Implements a buffer with insertion points. When you know you need to
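(A minimal sketch, not part of this diff, of the insertion-point mechanism described above.)

    from Cython.StringIOTree import StringIOTree

    tree = StringIOTree()
    tree.write('first\n')
    middle = tree.insertion_point()    # remember a spot to fill in later
    tree.write('last\n')
    middle.write('second\n')           # lands between the two earlier writes
    assert tree.getvalue() == 'first\nsecond\nlast\n'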
diff --git a/Cython/TestUtils.py b/Cython/TestUtils.py
index e01e57584..f5150b65a 100644
--- a/Cython/TestUtils.py
+++ b/Cython/TestUtils.py
@@ -24,7 +24,7 @@ class NodeTypeWriter(TreeVisitor):
name = u"%s[%d]" % tip[1:3]
else:
name = tip[1]
-
+
self.result.append(u" " * self._indents +
u"%s: %s" % (name, node.__class__.__name__))
self._indents += 1
@@ -46,11 +46,11 @@ class CythonTest(unittest.TestCase):
self.listing_file = Errors.listing_file
self.echo_file = Errors.echo_file
Errors.listing_file = Errors.echo_file = None
-
+
def tearDown(self):
Errors.listing_file = self.listing_file
Errors.echo_file = self.echo_file
-
+
def assertLines(self, expected, result):
"Checks that the given strings or lists of strings are equal line by line"
if not isinstance(expected, list): expected = expected.split(u"\n")
@@ -70,9 +70,9 @@ class CythonTest(unittest.TestCase):
def assertCode(self, expected, result_tree):
result_lines = self.codeToLines(result_tree)
-
+
expected_lines = strip_common_indent(expected.split("\n"))
-
+
for idx, (line, expected_line) in enumerate(zip(result_lines, expected_lines)):
self.assertEqual(expected_line, line, "Line %d:\nGot: %s\nExp: %s" % (idx, line, expected_line))
self.assertEqual(len(result_lines), len(expected_lines),
@@ -125,24 +125,24 @@ class TransformTest(CythonTest):
are testing; pyx should be either a string (passed to the parser to
create a post-parse tree) or a node representing input to pipeline.
The result will be a transformed result.
-
+
- Check that the tree is correct. If wanted, assertCode can be used, which
takes a code string as expected, and a ModuleNode in result_tree
(it serializes the ModuleNode to a string and compares line-by-line).
-
+
All code strings are first stripped for whitespace lines and then common
indentation.
-
+
Plans: One could have a pxd dictionary parameter to run_pipeline.
"""
-
+
def run_pipeline(self, pipeline, pyx, pxds={}):
tree = self.fragment(pyx, pxds).root
# Run pipeline
for T in pipeline:
tree = T(tree)
- return tree
+ return tree
class TreeAssertVisitor(VisitorTransform):
diff --git a/Cython/Tests/TestCodeWriter.py b/Cython/Tests/TestCodeWriter.py
index 030730728..6f9b5478a 100644
--- a/Cython/Tests/TestCodeWriter.py
+++ b/Cython/Tests/TestCodeWriter.py
@@ -3,7 +3,7 @@ from Cython.TestUtils import CythonTest
class TestCodeWriter(CythonTest):
# CythonTest uses the CodeWriter heavily, so do some checking by
# roundtripping Cython code through the test framework.
-
+
# Note that this test is dependant upon the normal Cython parser
# to generate the input trees to the CodeWriter. This save *a lot*
# of time; better to spend that time writing other tests than perfecting
@@ -26,7 +26,7 @@ class TestCodeWriter(CythonTest):
def test_if(self):
self.t(u"if x:\n pass")
-
+
def test_ifelifelse(self):
self.t(u"""
if x:
@@ -38,7 +38,7 @@ class TestCodeWriter(CythonTest):
else:
pass
""")
-
+
def test_def(self):
self.t(u"""
def f(x, y, z):
@@ -61,7 +61,7 @@ class TestCodeWriter(CythonTest):
cdef int hello
cdef int hello = 4, x = 3, y, z
""")
-
+
def test_for_loop(self):
self.t(u"""
for x, y, z in f(g(h(34) * 2) + 23):
@@ -75,7 +75,7 @@ class TestCodeWriter(CythonTest):
def test_attribute(self):
self.t(u"a.x")
-
+
if __name__ == "__main__":
import unittest
unittest.main()
diff --git a/Cython/Tests/TestStringIOTree.py b/Cython/Tests/TestStringIOTree.py
index 4c08cafaa..09c04a514 100644
--- a/Cython/Tests/TestStringIOTree.py
+++ b/Cython/Tests/TestStringIOTree.py
@@ -24,42 +24,42 @@ cpdef bacon():
linemap = dict(enumerate(code.splitlines()))
class TestStringIOTree(unittest.TestCase):
-
+
def setUp(self):
self.tree = stringtree.StringIOTree()
-
+
def test_markers(self):
assert not self.tree.allmarkers()
-
+
def test_insertion(self):
self.write_lines((1, 2, 3))
line_4_to_6_insertion_point = self.tree.insertion_point()
self.write_lines((7, 8))
line_9_to_13_insertion_point = self.tree.insertion_point()
self.write_lines((14, 15, 16))
-
+
line_4_insertion_point = line_4_to_6_insertion_point.insertion_point()
self.write_lines((5, 6), tree=line_4_to_6_insertion_point)
-
+
line_9_to_12_insertion_point = (
line_9_to_13_insertion_point.insertion_point())
self.write_line(13, tree=line_9_to_13_insertion_point)
-
+
self.write_line(4, tree=line_4_insertion_point)
self.write_line(9, tree=line_9_to_12_insertion_point)
line_10_insertion_point = line_9_to_12_insertion_point.insertion_point()
self.write_line(11, tree=line_9_to_12_insertion_point)
self.write_line(10, tree=line_10_insertion_point)
self.write_line(12, tree=line_9_to_12_insertion_point)
-
+
self.assertEqual(self.tree.allmarkers(), range(1, 17))
self.assertEqual(code.strip(), self.tree.getvalue().strip())
-
-
+
+
def write_lines(self, linenos, tree=None):
for lineno in linenos:
self.write_line(lineno, tree=tree)
-
+
def write_line(self, lineno, tree=None):
if tree is None:
tree = self.tree
diff --git a/Cython/Tests/xmlrunner.py b/Cython/Tests/xmlrunner.py
index b7cdaf557..88001159e 100644
--- a/Cython/Tests/xmlrunner.py
+++ b/Cython/Tests/xmlrunner.py
@@ -49,27 +49,27 @@ class _TestInfo(object):
"""This class is used to keep useful information about the execution of a
test method.
"""
-
+
# Possible test outcomes
(SUCCESS, FAILURE, ERROR) = range(3)
-
+
def __init__(self, test_result, test_method, outcome=SUCCESS, err=None):
"Create a new instance of _TestInfo."
self.test_result = test_result
self.test_method = test_method
self.outcome = outcome
self.err = err
-
+
def get_elapsed_time(self):
"""Return the time that shows how long the test method took to
execute.
"""
return self.test_result.stop_time - self.test_result.start_time
-
+
def get_description(self):
"Return a text representation of the test method."
return self.test_result.getDescription(self.test_method)
-
+
def get_error_info(self):
"""Return a text representation of an exception thrown by a test
method.
@@ -95,7 +95,7 @@ class _XMLTestResult(_TextTestResult):
self.successes = []
self.callback = None
self.elapsed_times = elapsed_times
-
+
def _prepare_callback(self, test_info, target_list, verbose_str,
short_str):
"""Append a _TestInfo to the given target list and sets a callback
@@ -106,51 +106,51 @@ class _XMLTestResult(_TextTestResult):
"""This callback prints the test method outcome to the stream,
as well as the elapsed time.
"""
-
+
# Ignore the elapsed times for a more reliable unit testing
if not self.elapsed_times:
self.start_time = self.stop_time = 0
-
+
if self.showAll:
self.stream.writeln('(%.3fs) %s' % \
(test_info.get_elapsed_time(), verbose_str))
elif self.dots:
self.stream.write(short_str)
self.callback = callback
-
+
def startTest(self, test):
"Called before execute each test method."
self.start_time = time.time()
TestResult.startTest(self, test)
-
+
if self.showAll:
self.stream.write(' ' + self.getDescription(test))
self.stream.write(" ... ")
-
+
def stopTest(self, test):
"Called after execute each test method."
_TextTestResult.stopTest(self, test)
self.stop_time = time.time()
-
+
if self.callback and callable(self.callback):
self.callback()
self.callback = None
-
+
def addSuccess(self, test):
"Called when a test executes successfully."
self._prepare_callback(_TestInfo(self, test), \
self.successes, 'OK', '.')
-
+
def addFailure(self, test, err):
"Called when a test method fails."
self._prepare_callback(_TestInfo(self, test, _TestInfo.FAILURE, err), \
self.failures, 'FAIL', 'F')
-
+
def addError(self, test, err):
"Called when a test method raises an error."
self._prepare_callback(_TestInfo(self, test, _TestInfo.ERROR, err), \
self.errors, 'ERROR', 'E')
-
+
def printErrorList(self, flavour, errors):
"Write some information about the FAIL or ERROR to the stream."
for test_info in errors:
@@ -167,106 +167,106 @@ class _XMLTestResult(_TextTestResult):
will be generated for each TestCase.
"""
tests_by_testcase = {}
-
+
for tests in (self.successes, self.failures, self.errors):
for test_info in tests:
testcase = type(test_info.test_method)
-
+
# Ignore module name if it is '__main__'
module = testcase.__module__ + '.'
if module == '__main__.':
module = ''
testcase_name = module + testcase.__name__
-
+
if not tests_by_testcase.has_key(testcase_name):
tests_by_testcase[testcase_name] = []
tests_by_testcase[testcase_name].append(test_info)
-
+
return tests_by_testcase
-
+
def _report_testsuite(suite_name, tests, xml_document):
"Appends the testsuite section to the XML document."
testsuite = xml_document.createElement('testsuite')
xml_document.appendChild(testsuite)
-
+
testsuite.setAttribute('name', str(suite_name))
testsuite.setAttribute('tests', str(len(tests)))
-
+
testsuite.setAttribute('time', '%.3f' % \
sum(map(lambda e: e.get_elapsed_time(), tests)))
-
+
failures = filter(lambda e: e.outcome==_TestInfo.FAILURE, tests)
testsuite.setAttribute('failures', str(len(failures)))
-
+
errors = filter(lambda e: e.outcome==_TestInfo.ERROR, tests)
testsuite.setAttribute('errors', str(len(errors)))
-
+
return testsuite
-
+
_report_testsuite = staticmethod(_report_testsuite)
-
+
def _report_testcase(suite_name, test_result, xml_testsuite, xml_document):
"Appends a testcase section to the XML document."
testcase = xml_document.createElement('testcase')
xml_testsuite.appendChild(testcase)
-
+
testcase.setAttribute('classname', str(suite_name))
testcase.setAttribute('name', test_result.test_method.shortDescription()
or getattr(test_result.test_method, '_testMethodName',
str(test_result.test_method)))
testcase.setAttribute('time', '%.3f' % test_result.get_elapsed_time())
-
+
if (test_result.outcome != _TestInfo.SUCCESS):
elem_name = ('failure', 'error')[test_result.outcome-1]
failure = xml_document.createElement(elem_name)
testcase.appendChild(failure)
-
+
failure.setAttribute('type', str(test_result.err[0].__name__))
failure.setAttribute('message', str(test_result.err[1]))
-
+
error_info = test_result.get_error_info()
failureText = xml_document.createCDATASection(error_info)
failure.appendChild(failureText)
-
+
_report_testcase = staticmethod(_report_testcase)
-
+
def _report_output(test_runner, xml_testsuite, xml_document):
"Appends the system-out and system-err sections to the XML document."
systemout = xml_document.createElement('system-out')
xml_testsuite.appendChild(systemout)
-
+
stdout = test_runner.stdout.getvalue()
systemout_text = xml_document.createCDATASection(stdout)
systemout.appendChild(systemout_text)
-
+
systemerr = xml_document.createElement('system-err')
xml_testsuite.appendChild(systemerr)
-
+
stderr = test_runner.stderr.getvalue()
systemerr_text = xml_document.createCDATASection(stderr)
systemerr.appendChild(systemerr_text)
-
+
_report_output = staticmethod(_report_output)
-
+
def generate_reports(self, test_runner):
"Generates the XML reports to a given XMLTestRunner object."
from xml.dom.minidom import Document
all_results = self._get_info_by_testcase()
-
+
if type(test_runner.output) == str and not \
os.path.exists(test_runner.output):
os.makedirs(test_runner.output)
-
+
for suite, tests in all_results.items():
doc = Document()
-
+
# Build the XML file
testsuite = _XMLTestResult._report_testsuite(suite, tests, doc)
for test in tests:
_XMLTestResult._report_testcase(suite, test, testsuite, doc)
_XMLTestResult._report_output(test_runner, testsuite, doc)
xml_content = doc.toprettyxml(indent='\t')
-
+
if type(test_runner.output) is str:
report_file = open('%s%sTEST-%s.xml' % \
(test_runner.output, os.sep, suite), 'w')
@@ -289,14 +289,14 @@ class XMLTestRunner(TextTestRunner):
TextTestRunner.__init__(self, stream, descriptions, verbosity)
self.output = output
self.elapsed_times = elapsed_times
-
+
def _make_result(self):
"""Create the TestResult object which will be used to store
information about the executed tests.
"""
return _XMLTestResult(self.stream, self.descriptions, \
self.verbosity, self.elapsed_times)
-
+
def _patch_standard_output(self):
"""Replace the stdout and stderr streams with string-based streams
in order to capture the tests' output.
@@ -304,30 +304,30 @@ class XMLTestRunner(TextTestRunner):
(self.old_stdout, self.old_stderr) = (sys.stdout, sys.stderr)
(sys.stdout, sys.stderr) = (self.stdout, self.stderr) = \
(StringIO(), StringIO())
-
+
def _restore_standard_output(self):
"Restore the stdout and stderr streams."
(sys.stdout, sys.stderr) = (self.old_stdout, self.old_stderr)
-
+
def run(self, test):
"Run the given test case or test suite."
-
+
try:
# Prepare the test execution
self._patch_standard_output()
result = self._make_result()
-
+
# Print a nice header
self.stream.writeln()
self.stream.writeln('Running tests...')
self.stream.writeln(result.separator2)
-
+
# Execute tests
start_time = time.time()
test(result)
stop_time = time.time()
time_taken = stop_time - start_time
-
+
# Print results
result.printErrors()
self.stream.writeln(result.separator2)
@@ -335,7 +335,7 @@ class XMLTestRunner(TextTestRunner):
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", time_taken))
self.stream.writeln()
-
+
# Error traces
if not result.wasSuccessful():
self.stream.write("FAILED (")
@@ -349,12 +349,12 @@ class XMLTestRunner(TextTestRunner):
self.stream.writeln(")")
else:
self.stream.writeln("OK")
-
+
# Generate reports
self.stream.writeln()
self.stream.writeln('Generating XML reports...')
result.generate_reports(self)
finally:
self._restore_standard_output()
-
+
return result
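(A minimal sketch, not part of this diff; only the output keyword and the run() method shown above are relied on, and the report directory name is illustrative.)

    import unittest
    from Cython.Tests.xmlrunner import XMLTestRunner

    class Smoke(unittest.TestCase):
        def test_arith(self):
            self.assertEqual(1 + 1, 2)

    suite = unittest.TestLoader().loadTestsFromTestCase(Smoke)
    XMLTestRunner(output='test-reports').run(suite)   # writes one TEST-*.xml per suite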
diff --git a/Cython/Utils.py b/Cython/Utils.py
index 06c2295dc..abebe312e 100644
--- a/Cython/Utils.py
+++ b/Cython/Utils.py
@@ -11,8 +11,8 @@ def replace_suffix(path, newsuf):
def open_new_file(path):
if os.path.exists(path):
- # Make sure to create a new file here so we can
- # safely hard link the output files.
+ # Make sure to create a new file here so we can
+ # safely hard link the output files.
os.unlink(path)
# we use the ISO-8859-1 encoding here because we only write pure
diff --git a/Cython/__init__.py b/Cython/__init__.py
index b7b6df9c6..080d95f56 100644
--- a/Cython/__init__.py
+++ b/Cython/__init__.py
@@ -1,4 +1,4 @@
__version__ = "0.14.rc0"
-# Void cython.* directives (for case insensitive operating systems).
+# Void cython.* directives (for case insensitive operating systems).
from Cython.Shadow import *
diff --git a/INSTALL.txt b/INSTALL.txt
index 1e208699c..a54a2eee3 100644
--- a/INSTALL.txt
+++ b/INSTALL.txt
@@ -3,7 +3,7 @@ Cython - Installation Instructions
You have two installation options:
-(1) Run the setup.py script in this directory
+(1) Run the setup.py script in this directory
as follows:
python setup.py install
@@ -15,7 +15,7 @@ OR
(2) If you prefer not to modify your Python
installation, arrange for the directory
- containing this file (INSTALL.txt) to be in
+ containing this file (INSTALL.txt) to be in
your PYTHONPATH. On unix, also put the bin
directory on your PATH.
diff --git a/ToDo.txt b/ToDo.txt
index c6d86c69f..cf8cabcf4 100644
--- a/ToDo.txt
+++ b/ToDo.txt
@@ -134,7 +134,7 @@ Recognise #line directives?
Catch floating point exceptions?
-Check that forward-declared non-external extension types
+Check that forward-declared non-external extension types
are defined.
Generate type test when casting from one Python type
diff --git a/USAGE.txt b/USAGE.txt
index 50d06a19c..13d07ad7e 100644
--- a/USAGE.txt
+++ b/USAGE.txt
@@ -57,7 +57,7 @@ The cython command supports the following options:
-----------------------------------------------------------------------------
-v --version Display version number of cython compiler
-l --create-listing Write error messages to a .lis file
- -I --include-dir <directory> Search for include files in named
+ -I --include-dir <directory> Search for include files in named
directory (may be repeated)
-o --output-file <filename> Specify name of generated C file (only
one source file allowed if this is used)
diff --git a/tests/broken/b_extimpinherit.pyx b/tests/broken/b_extimpinherit.pyx
index b775fdbfe..ce6670496 100644
--- a/tests/broken/b_extimpinherit.pyx
+++ b/tests/broken/b_extimpinherit.pyx
@@ -2,6 +2,6 @@ cdef class Parrot:
cdef describe(self):
print "This is a parrot."
-
+
cdef action(self):
print "Polly wants a cracker!"
diff --git a/tests/broken/cdefemptysue.pyx b/tests/broken/cdefemptysue.pyx
index 326637b6b..d2dda1b25 100644
--- a/tests/broken/cdefemptysue.pyx
+++ b/tests/broken/cdefemptysue.pyx
@@ -2,13 +2,13 @@ cdef extern from "cdefemptysue.h":
cdef struct spam:
pass
-
+
ctypedef union eggs:
pass
-
+
cdef enum ham:
pass
-
+
cdef extern spam s
cdef extern eggs e
cdef extern ham h
diff --git a/tests/broken/cdefexternblock.pyx b/tests/broken/cdefexternblock.pyx
index 4d613cd39..89fca223f 100644
--- a/tests/broken/cdefexternblock.pyx
+++ b/tests/broken/cdefexternblock.pyx
@@ -1,12 +1,12 @@
cdef extern from "cheese.h":
ctypedef int camembert
-
+
struct roquefort:
int x
-
+
char *swiss
-
+
void cheddar()
class external.runny [object runny_obj]:
diff --git a/tests/broken/externsue.pyx b/tests/broken/externsue.pyx
index 1ebc3602f..2580ce79b 100644
--- a/tests/broken/externsue.pyx
+++ b/tests/broken/externsue.pyx
@@ -2,10 +2,10 @@ cdef extern from "externsue.h":
enum Eggs:
runny, firm, hard
-
+
struct Spam:
int i
-
+
union Soviet:
char c
diff --git a/tests/broken/getattr.pyx b/tests/broken/getattr.pyx
index 5877a7aff..7ffebe2d8 100644
--- a/tests/broken/getattr.pyx
+++ b/tests/broken/getattr.pyx
@@ -1,5 +1,5 @@
cdef class Spam:
cdef public object eggs
-
+
def __getattr__(self, name):
print "Spam getattr:", name
diff --git a/tests/broken/r_excval.pyx b/tests/broken/r_excval.pyx
index bb1982f03..55eff869e 100644
--- a/tests/broken/r_excval.pyx
+++ b/tests/broken/r_excval.pyx
@@ -2,7 +2,7 @@ cdef int tomato() except -1:
print "Entering tomato"
raise Exception("Eject! Eject! Eject!")
print "Leaving tomato"
-
+
cdef void sandwich():
print "Entering sandwich"
tomato()
diff --git a/tests/broken/r_extcmethod.pyx b/tests/broken/r_extcmethod.pyx
index cf2fe8fcf..5ce4f945f 100644
--- a/tests/broken/r_extcmethod.pyx
+++ b/tests/broken/r_extcmethod.pyx
@@ -7,7 +7,7 @@ cdef class SpamDish:
cdef class FancySpamDish(SpamDish):
cdef int lettuce
-
+
cdef void describe(self):
print "This dish contains", self.spam, "tons of spam",
print "and", self.lettuce, "milligrams of lettuce."
diff --git a/tests/broken/r_extimpinherit.pyx b/tests/broken/r_extimpinherit.pyx
index b454d99cf..3b533e723 100644
--- a/tests/broken/r_extimpinherit.pyx
+++ b/tests/broken/r_extimpinherit.pyx
@@ -5,7 +5,7 @@ cdef class Norwegian(Parrot):
cdef action(self):
print "This parrot is resting."
-
+
cdef plumage(self):
print "Lovely plumage!"
diff --git a/tests/broken/r_extinherit.pyx b/tests/broken/r_extinherit.pyx
index 804e2414c..6723b65d5 100644
--- a/tests/broken/r_extinherit.pyx
+++ b/tests/broken/r_extinherit.pyx
@@ -1,10 +1,10 @@
cdef class Parrot:
cdef object plumage
-
+
def __init__(self):
self.plumage = "yellow"
-
+
def describe(self):
print "This bird has lovely", self.plumage, "plumage."
diff --git a/tests/broken/r_extmember.pyx b/tests/broken/r_extmember.pyx
index 71088fd9a..c3fb83c94 100644
--- a/tests/broken/r_extmember.pyx
+++ b/tests/broken/r_extmember.pyx
@@ -2,11 +2,11 @@ cdef class Spam:
cdef public int tons
cdef readonly float tastiness
cdef int temperature
-
+
def __init__(self, tons, tastiness, temperature):
self.tons = tons
self.tastiness = tastiness
self.temperature = temperature
-
+
def get_temperature(self):
return self.temperature
diff --git a/tests/broken/r_extnumeric2.pyx b/tests/broken/r_extnumeric2.pyx
index 3dd6d4218..e050a01ad 100644
--- a/tests/broken/r_extnumeric2.pyx
+++ b/tests/broken/r_extnumeric2.pyx
@@ -1,9 +1,9 @@
cdef extern from "numeric.h":
-
+
struct PyArray_Descr:
int type_num, elsize
char type
-
+
ctypedef class Numeric.ArrayType [object PyArrayObject]:
cdef char *data
cdef int nd
@@ -11,7 +11,7 @@ cdef extern from "numeric.h":
cdef object base
cdef PyArray_Descr *descr
cdef int flags
-
+
def ogle(ArrayType a):
print "No. of dimensions:", a.nd
print " Dim Value"
diff --git a/tests/broken/r_extproperty.pyx b/tests/broken/r_extproperty.pyx
index d575fe985..31a8de602 100644
--- a/tests/broken/r_extproperty.pyx
+++ b/tests/broken/r_extproperty.pyx
@@ -1,18 +1,18 @@
cdef class CheeseShop:
cdef object cheeses
-
+
def __cinit__(self):
self.cheeses = []
property cheese:
-
+
"A senseless waste of a property."
-
+
def __get__(self):
return "We don't have: %s" % self.cheeses
def __set__(self, value):
self.cheeses.append(value)
-
+
def __del__(self):
del self.cheeses[:]
diff --git a/tests/bugs.txt b/tests/bugs.txt
index 8c69c30d9..5f0b87157 100644
--- a/tests/bugs.txt
+++ b/tests/bugs.txt
@@ -1,5 +1,5 @@
-# This file contains tests corresponding to unresolved bugs,
-# which will be skipped in the normal testing run.
+# This file contains tests corresponding to unresolved bugs,
+# which will be skipped in the normal testing run.
methodmangling_T5
class_attribute_init_values_T18
diff --git a/tests/compile/a_capi.pyx b/tests/compile/a_capi.pyx
index f88ee92e7..4d8bfa95c 100644
--- a/tests/compile/a_capi.pyx
+++ b/tests/compile/a_capi.pyx
@@ -1,6 +1,6 @@
cdef public struct Foo:
int a, b
-
+
ctypedef struct Blarg:
int c, d
diff --git a/tests/compile/c_directives.pyx b/tests/compile/c_directives.pyx
index f29200113..1d505daaa 100644
--- a/tests/compile/c_directives.pyx
+++ b/tests/compile/c_directives.pyx
@@ -17,7 +17,7 @@ def f(object[int, ndim=2] buf):
@cy.boundscheck(True)
def g(object[int, ndim=2] buf):
- # The below line should have no meaning
+ # The below line should have no meaning
# cython: boundscheck = False
# even if the above line doesn't follow indentation.
print buf[3, 2] # bc
@@ -32,7 +32,7 @@ from cython cimport boundscheck as bc
def i(object[int] buf):
with bc(True):
print buf[3] # bs
-
+
from cython cimport warn as my_warn
@my_warn(undeclared=True)
diff --git a/tests/compile/cargdef.pyx b/tests/compile/cargdef.pyx
index c5da39bbe..58421cb89 100644
--- a/tests/compile/cargdef.pyx
+++ b/tests/compile/cargdef.pyx
@@ -1,10 +1,10 @@
def f(obj, int i, float f, char *s1, char s2[]):
pass
-
+
cdef g(obj, int i, float f, char *s1, char s2[]):
pass
-
+
cdef do_g(object (*func)(object, int, float, char*, char*)):
return func(1, 2, 3.14159, "a", "b")
-
+
do_g(&g)
diff --git a/tests/compile/cassign.pyx b/tests/compile/cassign.pyx
index e3e26089d..a6bd7994e 100644
--- a/tests/compile/cassign.pyx
+++ b/tests/compile/cassign.pyx
@@ -8,7 +8,7 @@ cdef void foo():
p1 = p2
obj1 = i1
i1 = obj1
- p1 = obj1
+ p1 = obj1
p1 = "spanish inquisition"
-foo()
+foo()
diff --git a/tests/compile/cast_ctypedef_array_T518.pyx b/tests/compile/cast_ctypedef_array_T518.pyx
index 5d1056114..f67850624 100644
--- a/tests/compile/cast_ctypedef_array_T518.pyx
+++ b/tests/compile/cast_ctypedef_array_T518.pyx
@@ -7,9 +7,9 @@ cdef extern from "cast_ctypedef_array_T518_helper.h":
void foo_clear(foo_t)
cdef foo_t value
-foo_init(value)
-foo_clear(value)
+foo_init(value)
+foo_clear(value)
cdef void *pointer = <void*> value
-foo_init(<foo_t>pointer)
-foo_clear(<foo_t>pointer)
+foo_init(<foo_t>pointer)
+foo_clear(<foo_t>pointer)
diff --git a/tests/compile/cenum.pyx b/tests/compile/cenum.pyx
index 6ebc0b5cc..4b90156ea 100644
--- a/tests/compile/cenum.pyx
+++ b/tests/compile/cenum.pyx
@@ -11,4 +11,4 @@ cdef void eggs():
s1 = c
i = s1
-eggs()
+eggs()
diff --git a/tests/compile/docstrings.pyx b/tests/compile/docstrings.pyx
index a710203e1..1c258f7c0 100644
--- a/tests/compile/docstrings.pyx
+++ b/tests/compile/docstrings.pyx
@@ -5,7 +5,7 @@ def zap(polly, volts):
class Parrot:
"Standard Norwegian Blue."
-
+
def admire_plumage(self):
"Lovely, ain't it?"
diff --git a/tests/compile/extcmethcall.pyx b/tests/compile/extcmethcall.pyx
index a6d2cf643..d9815e356 100644
--- a/tests/compile/extcmethcall.pyx
+++ b/tests/compile/extcmethcall.pyx
@@ -1,7 +1,7 @@
cdef class Spam:
cdef int tons
-
+
cdef void add_tons(self, int x):
pass
diff --git a/tests/compile/extcoerce.pyx b/tests/compile/extcoerce.pyx
index 7a850b602..fb2d58880 100644
--- a/tests/compile/extcoerce.pyx
+++ b/tests/compile/extcoerce.pyx
@@ -5,7 +5,7 @@ cdef class Grail:
cdef class Swallow:
pass
-
+
def f(Grail g):
cdef int i = 0
cdef Swallow s
diff --git a/tests/compile/extdelattr.pyx b/tests/compile/extdelattr.pyx
index 7236bcc11..a77f84e9d 100644
--- a/tests/compile/extdelattr.pyx
+++ b/tests/compile/extdelattr.pyx
@@ -1,4 +1,4 @@
cdef class Spam:
-
+
def __delattr__(self, n):
pass
diff --git a/tests/compile/extdelitem.pyx b/tests/compile/extdelitem.pyx
index e7f4a7f96..c6104ff2d 100644
--- a/tests/compile/extdelitem.pyx
+++ b/tests/compile/extdelitem.pyx
@@ -1,4 +1,4 @@
cdef class Spam:
-
+
def __delitem__(self, i):
pass
diff --git a/tests/compile/extdelslice.pyx b/tests/compile/extdelslice.pyx
index 17e92ff7e..cfdfff607 100644
--- a/tests/compile/extdelslice.pyx
+++ b/tests/compile/extdelslice.pyx
@@ -1,4 +1,4 @@
cdef class Spam:
-
+
def __delslice__(self, Py_ssize_t i, Py_ssize_t j):
pass
diff --git a/tests/compile/extgetattr.pyx b/tests/compile/extgetattr.pyx
index 9ae9ac2bd..c18fa9298 100644
--- a/tests/compile/extgetattr.pyx
+++ b/tests/compile/extgetattr.pyx
@@ -1,4 +1,4 @@
cdef class Spam:
-
+
def __getattr__(self, x):
pass
diff --git a/tests/compile/extinheritdel.pyx b/tests/compile/extinheritdel.pyx
index 359fdc423..c47670eaf 100644
--- a/tests/compile/extinheritdel.pyx
+++ b/tests/compile/extinheritdel.pyx
@@ -2,7 +2,7 @@ cdef class Parrot:
pass
cdef class Norwegian(Parrot):
-
+
def __delitem__(self, i):
pass
diff --git a/tests/compile/extinheritset.pyx b/tests/compile/extinheritset.pyx
index 81fcf3adb..17309f62e 100644
--- a/tests/compile/extinheritset.pyx
+++ b/tests/compile/extinheritset.pyx
@@ -2,7 +2,7 @@ cdef class Parrot:
pass
cdef class Norwegian(Parrot):
-
+
def __setitem__(self, i, x):
pass
diff --git a/tests/compile/extpropertyall.pyx b/tests/compile/extpropertyall.pyx
index ee0ca8f1b..4cfd91609 100644
--- a/tests/compile/extpropertyall.pyx
+++ b/tests/compile/extpropertyall.pyx
@@ -3,13 +3,13 @@ cdef class Spam:
property eggs:
"Ova"
-
+
def __get__(self):
pass
-
+
def __set__(self, x):
pass
-
+
def __del__(self):
pass
diff --git a/tests/compile/extsetattr.pyx b/tests/compile/extsetattr.pyx
index 4ab993a63..73f61eb98 100644
--- a/tests/compile/extsetattr.pyx
+++ b/tests/compile/extsetattr.pyx
@@ -1,4 +1,4 @@
cdef class Spam:
-
+
def __setattr__(self, n, x):
pass
diff --git a/tests/compile/extsetitem.pyx b/tests/compile/extsetitem.pyx
index 20d0f9972..7c4262702 100644
--- a/tests/compile/extsetitem.pyx
+++ b/tests/compile/extsetitem.pyx
@@ -1,4 +1,4 @@
cdef class Spam:
-
+
def __setitem__(self, i, x):
pass
diff --git a/tests/compile/extsetslice.pyx b/tests/compile/extsetslice.pyx
index a64e9e0f7..51b047e5f 100644
--- a/tests/compile/extsetslice.pyx
+++ b/tests/compile/extsetslice.pyx
@@ -1,4 +1,4 @@
cdef class Spam:
-
+
def __setslice__(self, Py_ssize_t i, Py_ssize_t j, x):
pass
diff --git a/tests/compile/for.pyx b/tests/compile/for.pyx
index d56c7a480..ec9eab89b 100644
--- a/tests/compile/for.pyx
+++ b/tests/compile/for.pyx
@@ -6,21 +6,21 @@ def f(a, b, c):
i = 2
break
i = 3
-
+
for i in b:
i = 1
-
+
for a in "spam":
i = 1
-
+
for a[b] in c:
i = 1
-
+
for a,b in c:
i = 1
-
+
for a in b,c:
i = 1
-
-
+
+
diff --git a/tests/compile/fromimport.pyx b/tests/compile/fromimport.pyx
index 873649da1..e2a9e593a 100644
--- a/tests/compile/fromimport.pyx
+++ b/tests/compile/fromimport.pyx
@@ -2,4 +2,4 @@ def f():
from spam import eggs
from spam.morespam import bacon, eggs, ham
from spam import eggs as ova
-
+
diff --git a/tests/compile/gencall.pyx b/tests/compile/gencall.pyx
index 228187278..06030cbbe 100644
--- a/tests/compile/gencall.pyx
+++ b/tests/compile/gencall.pyx
@@ -8,4 +8,4 @@ def z(a, b, c):
f(x = 42, **b)
f(a, *b)
f(a, x = 42, *b, **c)
-
+
diff --git a/tests/compile/huss2.pyx b/tests/compile/huss2.pyx
index 061f25667..92e2d475a 100644
--- a/tests/compile/huss2.pyx
+++ b/tests/compile/huss2.pyx
@@ -6,7 +6,7 @@ cdef enum Color:
cdef void f():
cdef Color e
cdef int i
-
+
i = red
i = red + 1
i = red | 1
diff --git a/tests/compile/ia_cdefblock.pyx b/tests/compile/ia_cdefblock.pyx
index b542466b6..57ecfcca7 100644
--- a/tests/compile/ia_cdefblock.pyx
+++ b/tests/compile/ia_cdefblock.pyx
@@ -4,7 +4,7 @@ cdef:
int i
int priv_i
-
+
void priv_f():
global priv_i
priv_i = 42
@@ -13,12 +13,12 @@ cdef public:
struct PubFoo:
int i
-
+
int pub_v
-
+
void pub_f():
pass
-
+
class PubBlarg [object PubBlargObj, type PubBlargType]:
pass
diff --git a/tests/compile/import.pyx b/tests/compile/import.pyx
index 72981be96..5b677300a 100644
--- a/tests/compile/import.pyx
+++ b/tests/compile/import.pyx
@@ -3,4 +3,4 @@ def f():
import spam.eggs
import spam, eggs, ham
import spam as tasty
-
+
diff --git a/tests/compile/index.pyx b/tests/compile/index.pyx
index 9c2dd6063..38cdde60d 100644
--- a/tests/compile/index.pyx
+++ b/tests/compile/index.pyx
@@ -14,5 +14,5 @@ def f(obj1, obj2, obj3):
array1[obj2] = int3
obj1[int2] = obj3
obj1[obj2] = 42
-
+
f(None, None, None)
diff --git a/tests/compile/jiba3.pyx b/tests/compile/jiba3.pyx
index 4f41ca47c..e550ba73f 100644
--- a/tests/compile/jiba3.pyx
+++ b/tests/compile/jiba3.pyx
@@ -8,7 +8,7 @@ cdef void test(float* f):
cdef class Position:
cdef readonly CoordSyst parent
-
+
cdef class Point(Position):
cdef void bug(self):
test(self.parent._matrix)
diff --git a/tests/compile/libc_signal.pyx b/tests/compile/libc_signal.pyx
index 523359e96..4e162c19d 100644
--- a/tests/compile/libc_signal.pyx
+++ b/tests/compile/libc_signal.pyx
@@ -1,6 +1,6 @@
from libc.signal cimport *
-cdef void sighdl(int signum) nogil:
+cdef void sighdl(int signum) nogil:
pass
cdef sighandler_t h
diff --git a/tests/compile/point.h b/tests/compile/point.h
index 866807c38..8b4452423 100644
--- a/tests/compile/point.h
+++ b/tests/compile/point.h
@@ -2,7 +2,7 @@
#define POINT_H
namespace geometry {
-
+
struct Point
{
double x;
diff --git a/tests/compile/pylong.pyx b/tests/compile/pylong.pyx
index e16b25474..a6c537c41 100644
--- a/tests/compile/pylong.pyx
+++ b/tests/compile/pylong.pyx
@@ -3,18 +3,18 @@ cdef extern from "Python.h":
pass
ctypedef struct PyObject:
- Py_ssize_t ob_refcnt
+ Py_ssize_t ob_refcnt
PyTypeObject *ob_type
cdef extern from "longintrepr.h":
cdef struct _longobject:
- int ob_refcnt
+ int ob_refcnt
PyTypeObject *ob_type
# int ob_size # not in Py3k
unsigned int *ob_digit
def test(temp = long(0)):
- cdef _longobject *l
- l = <_longobject *> temp
+ cdef _longobject *l
+ l = <_longobject *> temp
#print sizeof(l.ob_size) # not in Py3k
print sizeof(l.ob_digit[0])
diff --git a/tests/compile/tryexcept.pyx b/tests/compile/tryexcept.pyx
index b1fcca923..fcfe83cec 100644
--- a/tests/compile/tryexcept.pyx
+++ b/tests/compile/tryexcept.pyx
@@ -1,38 +1,38 @@
def f(a, b, c, x):
cdef int i
a = b + c
-
+
try:
i = 1
raise x
i = 2
except a:
i = 3
-
+
try:
i = 1
except a:
i = 2
except b:
i = 3
-
+
try:
i = 1
except a, b:
i = 2
-
+
try:
i = 1
except a:
i = 2
except:
i = 3
-
+
try:
i = 1
except (a, b), c[42]:
i = 2
-
+
for a in b:
try:
c = x * 42
@@ -47,33 +47,33 @@ def f(a, b, c, x):
def g(a, b, c, x):
cdef int i
a = b + c
-
+
try:
i = 1
raise x
i = 2
except a:
i = 3
-
+
try:
i = 1
except a:
i = 2
except b:
i = 3
-
+
try:
i = 1
except a as b:
i = 2
-
+
try:
i = 1
except a:
i = 2
except:
i = 3
-
+
try:
i = 1
except (a, b) as c:
diff --git a/tests/compile/tryfinally.pyx b/tests/compile/tryfinally.pyx
index 8c418b203..4528607a3 100644
--- a/tests/compile/tryfinally.pyx
+++ b/tests/compile/tryfinally.pyx
@@ -1,13 +1,13 @@
def f(a, b, c, x):
cdef int i
a = b + c
-
+
try:
return
raise a
finally:
c = a - b
-
+
for a in b:
try:
continue
@@ -15,5 +15,5 @@ def f(a, b, c, x):
c = a * b
finally:
i = 42
-
-
+
+
diff --git a/tests/compile/types_and_names.pxd b/tests/compile/types_and_names.pxd
index 22a7b733c..62ec4cdca 100644
--- a/tests/compile/types_and_names.pxd
+++ b/tests/compile/types_and_names.pxd
@@ -3,7 +3,7 @@ cdef struct point:
double y
double z
-cdef foo(int, int i,
- list, list L,
+cdef foo(int, int i,
+ list, list L,
point, point p, point* ps)
-
+
diff --git a/tests/compile/types_and_names.pyx b/tests/compile/types_and_names.pyx
index c54bb321a..e24bb0f19 100644
--- a/tests/compile/types_and_names.pyx
+++ b/tests/compile/types_and_names.pyx
@@ -2,14 +2,14 @@ print sizeof(point*)
cdef foo(int i0, int i, list L0, list L, point p0, point p, point* ps):
pass
-
+
cdef class A:
cdef list
cdef list L
# Possibly empty declarators
cdef point(self, int, int i, list, list L, point, point p, point* ps):
pass
-
+
cdef class B(A):
cdef point(self, o, int i, oo, list L, ooo, point p, point* ps):
pass
diff --git a/tests/compile/while.pyx b/tests/compile/while.pyx
index 8d6990d3a..439cb3d2c 100644
--- a/tests/compile/while.pyx
+++ b/tests/compile/while.pyx
@@ -1,26 +1,26 @@
def f(a, b):
cdef int i = 5
-
+
while a:
x = 1
while a+b:
x = 1
-
+
while i:
x = 1
else:
x = 2
-
+
while i:
x = 1
break
x = 2
else:
x = 3
-
+
while i:
x = 1
continue
x = 2
-
+
diff --git a/tests/errors/e_ass.pyx b/tests/errors/e_ass.pyx
index a49015db8..0d1a0a4a4 100644
--- a/tests/errors/e_ass.pyx
+++ b/tests/errors/e_ass.pyx
@@ -6,7 +6,7 @@ cdef void foo(obj):
p2 = obj # error
obj = p2 # error
-
+
_ERRORS = u"""
5:16: Cannot assign type 'char *' to 'int'
6:17: Cannot convert Python object to 'int *'
diff --git a/tests/errors/e_badtypeuse.pyx b/tests/errors/e_badtypeuse.pyx
index ce29c0550..70c4166fd 100644
--- a/tests/errors/e_badtypeuse.pyx
+++ b/tests/errors/e_badtypeuse.pyx
@@ -19,7 +19,7 @@ cdef f(Grail g, # incomplete argument type
void v, # incomplete argument type
int a[]):
pass
-
+
cdef NoSuchType* ptr
ptr = None # This should not produce another error
diff --git a/tests/errors/e_bufaccess.pyx b/tests/errors/e_bufaccess.pyx
index e6f9e39c6..f93d1107d 100644
--- a/tests/errors/e_bufaccess.pyx
+++ b/tests/errors/e_bufaccess.pyx
@@ -4,7 +4,7 @@ cdef class A:
def f():
cdef object[fakeoption=True] buf1
- cdef object[int, -1] buf1b
+ cdef object[int, -1] buf1b
cdef object[ndim=-1] buf2
cdef object[int, 'a'] buf3
cdef object[int,2,3,4,5,6] buf4
diff --git a/tests/errors/e_cenum.pyx b/tests/errors/e_cenum.pyx
index 142c6b056..70656e105 100644
--- a/tests/errors/e_cenum.pyx
+++ b/tests/errors/e_cenum.pyx
@@ -4,7 +4,7 @@ cdef enum Spam:
cdef void f():
global a
a = 42 # assignment to non-lvalue
-
+
_ERRORS = u"""
6:3: Assignment to non-lvalue 'a'
"""
diff --git a/tests/errors/e_ctypedefornot.pyx b/tests/errors/e_ctypedefornot.pyx
index a0e18237e..632ba780d 100644
--- a/tests/errors/e_ctypedefornot.pyx
+++ b/tests/errors/e_ctypedefornot.pyx
@@ -2,7 +2,7 @@ cdef struct Foo
ctypedef struct Foo:
int i
-
+
ctypedef struct Blarg:
char c
diff --git a/tests/errors/e_slice.pyx b/tests/errors/e_slice.pyx
index ead4380fe..6deda6fb2 100644
--- a/tests/errors/e_slice.pyx
+++ b/tests/errors/e_slice.pyx
@@ -3,7 +3,7 @@ def f(obj2):
obj1 = obj2[ptr1::] # error
obj1 = obj2[:ptr1:] # error
obj1 = obj2[::ptr1] # error
-
+
cdef int a
cdef int* int_ptr
@@ -13,7 +13,7 @@ for a in int_ptr[2:]:
pass
for a in int_ptr[2:2:a]:
pass
-
+
_ERRORS = u"""
3:20: Cannot convert 'int *' to Python object
4:21: Cannot convert 'int *' to Python object
diff --git a/tests/errors/e_tempcast.pyx b/tests/errors/e_tempcast.pyx
index 4e59a41c9..d334f16d7 100644
--- a/tests/errors/e_tempcast.pyx
+++ b/tests/errors/e_tempcast.pyx
@@ -4,7 +4,7 @@ def foo(obj):
cdef void *p
p = <void *>blarg # ok
p = <void *>(obj + blarg) # error - temporary
-
+
_ERRORS = u"""
6:5: Casting temporary Python object to non-numeric non-Python type
"""
diff --git a/tests/errors/encoding.pyx b/tests/errors/encoding.pyx
index 4460cf5cd..39fcdf611 100644
--- a/tests/errors/encoding.pyx
+++ b/tests/errors/encoding.pyx
@@ -1,7 +1,7 @@
# coding=ASCII
"""
-Très bien.
+Très bien.
"""
_ERRORS = u"""
diff --git a/tests/errors/nogil.pyx b/tests/errors/nogil.pyx
index f0c5dfa31..e82856644 100644
--- a/tests/errors/nogil.pyx
+++ b/tests/errors/nogil.pyx
@@ -13,7 +13,7 @@ cdef object p() nogil:
cdef void r() nogil:
q()
-
+
cdef object m():
cdef object x, y, obj
cdef int i, j, k
diff --git a/tests/run/__getattribute_subclasses__.pyx b/tests/run/__getattribute_subclasses__.pyx
index 68906710f..237900895 100644
--- a/tests/run/__getattribute_subclasses__.pyx
+++ b/tests/run/__getattribute_subclasses__.pyx
@@ -1,7 +1,7 @@
__doc__ = u"""
-__getattribute__ and __getattr__ special methods and subclasses.
+__getattribute__ and __getattr__ special methods and subclasses.
-getattr does not override members.
+getattr does not override members.
>>> a = getattr_boring()
>>> a.boring_member
10
@@ -54,7 +54,7 @@ in the inheritance hiarchy they came from.
"""
cdef class boring:
- cdef readonly int boring_member
+ cdef readonly int boring_member
def __init__(self):
self.boring_member = 10
@@ -98,13 +98,13 @@ cdef class boring_getattribute(_getattribute):
cdef readonly int boring_getattribute_member
cdef class boring_boring_getattribute(boring_getattribute):
- cdef readonly int boring_boring_getattribute_member
+ cdef readonly int boring_boring_getattribute_member
cdef class boring_getattr(_getattr):
cdef readonly int boring_getattr_member
cdef class boring_boring_getattr(boring_getattr):
- cdef readonly int boring_boring_getattr_member
+ cdef readonly int boring_boring_getattr_member
cdef class getattribute_boring_boring_getattr(boring_boring_getattr):
def __getattribute__(self,n):
diff --git a/tests/run/autotestdict.pyx b/tests/run/autotestdict.pyx
index 1df98968f..0824affb9 100644
--- a/tests/run/autotestdict.pyx
+++ b/tests/run/autotestdict.pyx
@@ -131,7 +131,7 @@ cdef class MyCdefClass:
cdef class MyOtherCdefClass:
"""
Needs no hack
-
+
>>> True
True
"""
diff --git a/tests/run/autotestdict_all.pyx b/tests/run/autotestdict_all.pyx
index 71deb439f..624a12b2f 100644
--- a/tests/run/autotestdict_all.pyx
+++ b/tests/run/autotestdict_all.pyx
@@ -130,7 +130,7 @@ cdef class MyCdefClass:
cdef class MyOtherCdefClass:
"""
Needs no hack
-
+
>>> True
True
"""
diff --git a/tests/run/autotestdict_cdef.pyx b/tests/run/autotestdict_cdef.pyx
index 9ebd1c539..c4909873c 100644
--- a/tests/run/autotestdict_cdef.pyx
+++ b/tests/run/autotestdict_cdef.pyx
@@ -129,7 +129,7 @@ cdef class MyCdefClass:
cdef class MyOtherCdefClass:
"""
Needs no hack
-
+
>>> True
True
"""
diff --git a/tests/run/bufaccess.pyx b/tests/run/bufaccess.pyx
index bb7072b4d..6f26cf8fb 100644
--- a/tests/run/bufaccess.pyx
+++ b/tests/run/bufaccess.pyx
@@ -21,7 +21,7 @@ __test__ = {}
import sys
import re
exclude = []#re.compile('object').search]
-
+
def testcase(func):
for e in exclude:
if e(func.__name__):
@@ -80,7 +80,7 @@ def acquire_raise(o):
"""
Apparently, doctest won't handle mixed exceptions and print
stats, so need to circumvent this.
-
+
>>> A = IntMockBuffer("A", range(6))
>>> A.resetlog()
>>> acquire_raise(A)
@@ -90,7 +90,7 @@ def acquire_raise(o):
>>> A.printlog()
acquired A
released A
-
+
"""
cdef object[int] buf
buf = o
@@ -251,7 +251,7 @@ def as_argument_defval(object[int] bufarg=IntMockBuffer('default', range(6)), in
0 1 2 3 4 5 END
released A
"""
- cdef int i
+ cdef int i
for i in range(n):
print bufarg[i],
print 'END'
@@ -264,7 +264,7 @@ def cdef_assignment(obj, n):
acquired A
0 1 2 3 4 5 END
released A
-
+
"""
cdef object[int] buf = obj
cdef int i
@@ -350,7 +350,7 @@ def explicitly_release_buffer():
#
# Getting items and index bounds checking
-#
+#
@testcase
def get_int_2d(object[int, ndim=2] buf, int i, int j):
"""
@@ -409,7 +409,7 @@ def set_int_2d(object[int, ndim=2] buf, int i, int j, int value):
"""
Uses get_int_2d to read back the value afterwards. For pure
unit test, one should support reading in MockBuffer instead.
-
+
>>> C = IntMockBuffer("C", range(6), (2,3))
>>> set_int_2d(C, 1, 1, 10)
acquired C
@@ -435,7 +435,7 @@ def set_int_2d(object[int, ndim=2] buf, int i, int j, int value):
acquired C
released C
8
-
+
>>> set_int_2d(C, -2, -3, 9)
acquired C
released C
@@ -453,7 +453,7 @@ def set_int_2d(object[int, ndim=2] buf, int i, int j, int value):
Traceback (most recent call last):
...
IndexError: Out of bounds on buffer access (axis 1)
-
+
"""
buf[i, j] = value
@@ -474,7 +474,7 @@ def no_negative_indices(object[int, negative_indices=False] buf, int idx):
"""
The most interesting thing here is to inspect the C source and
make sure optimal code is produced.
-
+
>>> A = IntMockBuffer(None, range(6))
>>> no_negative_indices(A, 3)
3
@@ -490,7 +490,7 @@ def no_negative_indices(object[int, negative_indices=False] buf, int idx):
def wraparound_directive(object[int] buf, int pos_idx, int neg_idx):
"""
Again, the most interesting thing here is to inspect the C source.
-
+
>>> A = IntMockBuffer(None, range(4))
>>> wraparound_directive(A, 2, -1)
5
@@ -562,12 +562,12 @@ def c_contig(object[int, ndim=1, mode='c'] buf):
['FORMAT', 'ND', 'STRIDES', 'C_CONTIGUOUS']
"""
return buf[2]
-
+
@testcase
def c_contig_2d(object[int, ndim=2, mode='c'] buf):
"""
Multi-dim has seperate implementation
-
+
>>> A = IntMockBuffer(None, range(12), shape=(3,4))
>>> c_contig_2d(A)
7
@@ -591,7 +591,7 @@ def f_contig(object[int, ndim=1, mode='fortran'] buf):
def f_contig_2d(object[int, ndim=2, mode='fortran'] buf):
"""
Must set up strides manually to ensure Fortran ordering.
-
+
>>> A = IntMockBuffer(None, range(12), shape=(4,3), strides=(1, 4))
>>> f_contig_2d(A)
7
@@ -654,7 +654,7 @@ def unsafe_get(object[int] buf, int idx):
def unsafe_get_nonegative(object[int, negative_indices=False] buf, int idx):
"""
Also inspect the C source to see that it is optimal...
-
+
>>> A = IntMockBuffer(None, range(10), shape=(3,), offset=5)
>>> unsafe_get_nonegative(A, -2)
3
@@ -677,14 +677,14 @@ def mixed_get(object[int] buf, int unsafe_idx, int safe_idx):
with cython.boundscheck(True):
two = buf[safe_idx]
return (one, two)
-
+
#
# Coercions
#
@testcase
def coercions(object[unsigned char] uc):
"""
-TODO
+TODO
"""
print type(uc[0])
uc[0] = -1
@@ -714,7 +714,7 @@ def printbuf_int(object[int] buf, shape):
def printbuf_int_2d(o, shape):
"""
Strided:
-
+
>>> printbuf_int_2d(IntMockBuffer("A", range(6), (2,3)), (2,3))
acquired A
0 1 2 END
@@ -816,7 +816,7 @@ def printbuf_td_h_short(object[td_h_short] buf, shape):
Traceback (most recent call last):
...
ValueError: Buffer dtype mismatch, expected 'td_h_short' but got 'int'
- """
+ """
cdef int i
for i in range(shape[0]):
print buf[i],
@@ -911,7 +911,7 @@ def assign_to_object(object[object] buf, int idx, obj):
>>> get_refcount(a), get_refcount(b)
(2, 2)
>>> addref(a)
- >>> A = ObjectMockBuffer(None, [1, a]) # 1, ...,otherwise it thinks nested lists...
+ >>> A = ObjectMockBuffer(None, [1, a]) # 1, ...,otherwise it thinks nested lists...
>>> get_refcount(a), get_refcount(b)
(3, 2)
>>> assign_to_object(A, 1, b)
@@ -920,7 +920,7 @@ def assign_to_object(object[object] buf, int idx, obj):
>>> decref(b)
"""
buf[idx] = obj
-
+
@testcase
def assign_temporary_to_object(object[object] buf):
"""
@@ -936,7 +936,7 @@ def assign_temporary_to_object(object[object] buf):
>>> assign_temporary_to_object(A)
>>> get_refcount(a)
2
-
+
>>> printbuf_object(A, (2,))
{4: 23} 2
{1: 8} 2
@@ -967,7 +967,7 @@ def buffer_cast(object[unsigned int, cast=True] buf, int idx):
def buffer_cast_fails(object[char, cast=True] buf):
"""
Cannot cast between datatype of different sizes.
-
+
>>> buffer_cast_fails(IntMockBuffer(None, [0]))
Traceback (most recent call last):
...
@@ -999,10 +999,10 @@ cdef class MockBuffer:
cdef Py_ssize_t* shape
cdef Py_ssize_t* suboffsets
cdef object label, log
-
+
cdef readonly object recieved_flags, release_ok
cdef public object fail
-
+
def __init__(self, label, data, shape=None, strides=None, format=None, offset=0):
# It is important not to store references to data after the constructor
# as refcounting is checked on object buffers.
@@ -1053,7 +1053,7 @@ cdef class MockBuffer:
self.strides = self.list_to_sizebuf(strides)
self.shape = self.list_to_sizebuf(shape)
-
+
def __dealloc__(self):
stdlib.free(self.strides)
stdlib.free(self.shape)
@@ -1062,7 +1062,7 @@ cdef class MockBuffer:
# must recursively free indirect...
else:
stdlib.free(self.buffer)
-
+
cdef void* create_buffer(self, data):
cdef char* buf = <char*>stdlib.malloc(len(data) * self.itemsize)
cdef char* it = buf
@@ -1098,7 +1098,7 @@ cdef class MockBuffer:
for name, value in available_flags:
if (value & flags) == value:
self.recieved_flags.append(name)
-
+
buffer.buf = <void*>(<char*>self.buffer + (<int>self.offset * self.itemsize))
buffer.obj = self
buffer.len = self.len
@@ -1120,7 +1120,7 @@ cdef class MockBuffer:
self.release_ok = False
if self.label:
msg = "released %s" % self.label
- print msg
+ print msg
self.log += msg + "\n"
def printlog(self):
@@ -1134,7 +1134,7 @@ cdef class MockBuffer:
print "ERROR, not subclassed", self.__class__
cdef get_default_format(self):
print "ERROR, not subclassed", self.__class__
-
+
cdef class CharMockBuffer(MockBuffer):
cdef int write(self, char* buf, object value) except -1:
(<char*>buf)[0] = <int>value
@@ -1194,14 +1194,14 @@ cdef class ObjectMockBuffer(MockBuffer):
cdef get_itemsize(self): return sizeof(void*)
cdef get_default_format(self): return b"@O"
-
+
cdef class IntStridedMockBuffer(IntMockBuffer):
cdef __cythonbufferdefaults__ = {"mode" : "strided"}
-
+
cdef class ErrorBuffer:
cdef object label
-
+
def __init__(self, label):
self.label = label
@@ -1251,7 +1251,7 @@ def bufdefaults1(IntStridedMockBuffer[int, ndim=1] buf):
For IntStridedMockBuffer, mode should be
"strided" by defaults which should show
up in the flags.
-
+
>>> A = IntStridedMockBuffer("A", range(10))
>>> bufdefaults1(A)
acquired A
@@ -1260,7 +1260,7 @@ def bufdefaults1(IntStridedMockBuffer[int, ndim=1] buf):
['FORMAT', 'ND', 'STRIDES']
"""
pass
-
+
#
# Structs
@@ -1287,7 +1287,7 @@ cdef class MyStructMockBuffer(MockBuffer):
s = <MyStruct*>buf;
s.a, s.b, s.c, s.d, s.e = value
return 0
-
+
cdef get_itemsize(self): return sizeof(MyStruct)
cdef get_default_format(self): return b"2bq2i"
@@ -1297,7 +1297,7 @@ cdef class NestedStructMockBuffer(MockBuffer):
s = <NestedStruct*>buf;
s.x.a, s.x.b, s.y.a, s.y.b, s.z = value
return 0
-
+
cdef get_itemsize(self): return sizeof(NestedStruct)
cdef get_default_format(self): return b"2T{ii}i"
@@ -1305,7 +1305,7 @@ cdef class NestedStructMockBuffer(MockBuffer):
def basic_struct(object[MyStruct] buf):
"""
See also buffmt.pyx
-
+
>>> basic_struct(MyStructMockBuffer(None, [(1, 2, 3, 4, 5)]))
1 2 3 4 5
>>> basic_struct(MyStructMockBuffer(None, [(1, 2, 3, 4, 5)], format="bbqii"))
@@ -1317,7 +1317,7 @@ def basic_struct(object[MyStruct] buf):
def nested_struct(object[NestedStruct] buf):
"""
See also buffmt.pyx
-
+
>>> nested_struct(NestedStructMockBuffer(None, [(1, 2, 3, 4, 5)]))
1 2 3 4 5
>>> nested_struct(NestedStructMockBuffer(None, [(1, 2, 3, 4, 5)], format="T{ii}T{2i}i"))
@@ -1335,7 +1335,7 @@ cdef class LongComplexMockBuffer(MockBuffer):
s = <LongComplex*>buf;
s.real, s.imag = value
return 0
-
+
cdef get_itemsize(self): return sizeof(LongComplex)
cdef get_default_format(self): return b"Zg"
diff --git a/tests/run/buffmt.pyx b/tests/run/buffmt.pyx
index c4ec4c0c7..622ca7e70 100644
--- a/tests/run/buffmt.pyx
+++ b/tests/run/buffmt.pyx
@@ -22,7 +22,7 @@ else:
cdef struct align_of_float_helper:
char ch
- float d
+ float d
cdef struct align_of_int_helper:
char ch
int i
@@ -33,19 +33,19 @@ if float_align != 4 or sizeof(float) != 4:
if int_align != 4 or sizeof(int) != 4:
raise RuntimeError("Alignment or size of int is %d on this system, please report to cython-dev for a testcase fix" % int_align)
-
+
cdef class MockBuffer:
cdef Py_ssize_t zero
cdef Py_ssize_t minusone
cdef object format
cdef object itemsize
-
+
def __init__(self, format, itemsize):
self.format = unicode(format).encode(u"ASCII")
self.itemsize = itemsize
self.zero = 0
self.minusone = -1
-
+
def __getbuffer__(self, Py_buffer* info, int flags):
info.buf = NULL
info.strides = &self.zero
@@ -64,7 +64,7 @@ def _int(fmt):
Traceback (most recent call last):
...
ValueError: Buffer dtype mismatch, expected 'int' but got 'char'
-
+
>>> _int("if")
Traceback (most recent call last):
...
@@ -92,7 +92,7 @@ def wrongsize():
...
ValueError: Item size of buffer (1 byte) does not match size of 'float' (4 bytes)
- """
+ """
cdef object[float] buf = MockBuffer("f", 1)
@testcase
@@ -151,7 +151,7 @@ cdef struct UnpackedStruct4:
def char3int(fmt):
"""
>>> char3int("ciii")
- >>> char3int("c1i1i1i")
+ >>> char3int("c1i1i1i")
>>> char3int("c3i")
>>> char3int("ci2i")
@@ -170,7 +170,7 @@ def char3int(fmt):
ValueError: Buffer dtype mismatch; next field is at offset 1 but 4 expected
#TODO char3int("=cxxx@iii")
-
+
Error:
>>> char3int("cii")
Traceback (most recent call last):
@@ -211,16 +211,16 @@ def complex_test(fmt):
>>> complex_test("3Zf")
>>> complex_test("6f")
>>> complex_test("3T{Zf}")
-
+
>>> complex_test("fZfZff")
Traceback (most recent call last):
...
ValueError: Buffer dtype mismatch, expected 'float' but got 'complex float' in 'ComplexFloat.imag'
-
+
"""
cdef object obj = MockBuffer(fmt, sizeof(ComplexTest))
cdef object[ComplexTest] buf1 = obj
-
+
@testcase
def alignment_string(fmt, exc=None):
@@ -263,7 +263,7 @@ cdef struct MixedComplex:
def mixed_complex_struct():
"""
Triggering a specific execution path for this case.
-
+
>>> mixed_complex_struct()
Traceback (most recent call last):
...
@@ -281,13 +281,13 @@ cdef packed struct PackedStruct:
char a
int b
PackedSubStruct sub
-
+
@testcase
def packed_struct(fmt):
"""
Assuming int is four bytes:
-
+
>>> packed_struct("^cici")
>>> packed_struct("=cibi")
@@ -295,7 +295,7 @@ def packed_struct(fmt):
Traceback (most recent call last):
...
ValueError: Buffer packing mode currently only allowed at beginning of format string (this is a defect)
-
+
However aligned access won't work:
>>> packed_struct("@cici")
Traceback (most recent call last):
diff --git a/tests/run/call_crash.pyx b/tests/run/call_crash.pyx
index 74e107435..0d30110d3 100644
--- a/tests/run/call_crash.pyx
+++ b/tests/run/call_crash.pyx
@@ -5,19 +5,19 @@ cdef class A:
"""
cdef int (*func_ptr)(int)
-
+
def __init__(self):
self.func_ptr = &func
cdef int do_it(self, int s):
cdef int r = first_call(self).func_ptr(s) # the temp for first_call(self) not properly freed
return r
-
+
def test(self, s):
return self.do_it(s)
cdef A first_call(A x):
return x
-
+
cdef int func(int s):
return s*s
diff --git a/tests/run/callargs.pyx b/tests/run/callargs.pyx
index d922b4fb7..b8d634790 100644
--- a/tests/run/callargs.pyx
+++ b/tests/run/callargs.pyx
@@ -76,7 +76,7 @@ def h(a, b, c, *args, **kwargs):
TypeError: h() takes at least 3 positional arguments (2 given)
"""
print a, b, c, u'*', len(args), len(kwargs)
-
+
args = (9,8,7)
import sys
@@ -156,7 +156,7 @@ def test_noargs(f):
0
>>> test_noargs(g)
0
-
+
# and some errors:
>>> test_noargs(h)
Traceback (most recent call last):
diff --git a/tests/run/cdefassign.pyx b/tests/run/cdefassign.pyx
index b512af7ef..2e9dd9b25 100644
--- a/tests/run/cdefassign.pyx
+++ b/tests/run/cdefassign.pyx
@@ -11,6 +11,6 @@ def test(x, int y):
cdef object o = int(8)
print a, b, c, p[0], before, g, o
-# Also test that pruning cdefs doesn't hurt
+# Also test that pruning cdefs doesn't hurt
def empty():
cdef int i
diff --git a/tests/run/cdefoptargs.pyx b/tests/run/cdefoptargs.pyx
index 3a601dba4..e79e1879f 100644
--- a/tests/run/cdefoptargs.pyx
+++ b/tests/run/cdefoptargs.pyx
@@ -26,7 +26,7 @@ cdef b(a, b, c=1, d=2):
cdef int foo(int a, int b=1, int c=1):
return a+b*c
-
+
def test_foo():
"""
>>> test_foo()
diff --git a/tests/run/closure_decorators_T478.pyx b/tests/run/closure_decorators_T478.pyx
index e6277d645..38a74a0a6 100644
--- a/tests/run/closure_decorators_T478.pyx
+++ b/tests/run/closure_decorators_T478.pyx
@@ -16,18 +16,18 @@ def print_args(func):
print "args", args, "kwds", kwds
return func(*args, **kwds)
return f
-
+
cdef class Num:
cdef int n
-
+
def __init__(self, n):
self.n = n
-
+
def __repr__(self):
return "Num(%s)" % self.n
-
+
@print_args
def is_prime(self, bint print_factors=False):
if self.n == 2:
diff --git a/tests/run/complex_numbers_T305.pyx b/tests/run/complex_numbers_T305.pyx
index 6343ceb88..66f2cd04c 100644
--- a/tests/run/complex_numbers_T305.pyx
+++ b/tests/run/complex_numbers_T305.pyx
@@ -26,7 +26,7 @@ def test_arithmetic(double complex z, double complex w):
def test_pow(double complex z, double complex w, tol=None):
"""
Various implementations produce slightly different results...
-
+
>>> a = complex(3, 1)
>>> test_pow(a, 1)
(3+1j)
diff --git a/tests/run/cpp_classes.pyx b/tests/run/cpp_classes.pyx
index 8ddcba1ae..7005cec16 100644
--- a/tests/run/cpp_classes.pyx
+++ b/tests/run/cpp_classes.pyx
@@ -11,21 +11,21 @@ cdef extern from "shapes.h" namespace "shapes":
cdef cppclass Shape:
float area()
-
+
cdef cppclass Circle(Shape):
int radius
Circle(int)
-
+
cdef cppclass Rectangle(Shape):
int width
int height
Rectangle()
Rectangle(int, int)
-
+
cdef cppclass Square(Rectangle):
int side
Square(int)
-
+
int constructor_count, destructor_count
def test_new_del():
diff --git a/tests/run/cpp_exceptions.pyx b/tests/run/cpp_exceptions.pyx
index 7d1f16b80..28cc66799 100644
--- a/tests/run/cpp_exceptions.pyx
+++ b/tests/run/cpp_exceptions.pyx
@@ -5,7 +5,7 @@ cdef extern from "cpp_exceptions_helper.h":
cdef int raise_int_raw "raise_int"(bint fire) except +
cdef int raise_int_value "raise_int"(bint fire) except +ValueError
cdef int raise_int_custom "raise_int"(bint fire) except +raise_py_error
-
+
cdef int raise_index_raw "raise_index"(bint fire) except +
cdef int raise_index_value "raise_index"(bint fire) except +ValueError
cdef int raise_index_custom "raise_index"(bint fire) except +raise_py_error
diff --git a/tests/run/cpp_namespaces_helper.h b/tests/run/cpp_namespaces_helper.h
index 1a7f8ace2..af7f9ae8d 100644
--- a/tests/run/cpp_namespaces_helper.h
+++ b/tests/run/cpp_namespaces_helper.h
@@ -1,25 +1,25 @@
namespace outer {
-
+
int x = 10;
-
+
int outer_value = 10;
-
+
namespace inner {
-
+
int x = 100;
-
+
int inner_value = 100;
-
+
}
-
+
}
namespace A {
-
+
typedef int A_t;
-
+
A_t A_func(A_t first, A_t second) {
return first + second;
}
-
+
}
diff --git a/tests/run/cpp_nested_templates.pyx b/tests/run/cpp_nested_templates.pyx
index ff3345dd1..7768a035c 100644
--- a/tests/run/cpp_nested_templates.pyx
+++ b/tests/run/cpp_nested_templates.pyx
@@ -6,7 +6,7 @@ cdef extern from "cpp_templates_helper.h":
void set(T)
T get()
bint operator==(Wrap[T])
-
+
cdef cppclass Pair[T1,T2]:
Pair(T1,T2)
T1 first()
@@ -37,9 +37,9 @@ def test_wrap_pair_pair(int i, int j, double x):
try:
wrap = new Wrap[Pair[int, Pair[int, double]]](
Pair[int, Pair[int, double]](i,Pair[int, double](j, x)))
- return (wrap.get().first(),
+ return (wrap.get().first(),
wrap.get().second().first(),
- wrap.get().second().second(),
+ wrap.get().second().second(),
deref(wrap) == deref(wrap))
finally:
del wrap
diff --git a/tests/run/cpp_operators.pyx b/tests/run/cpp_operators.pyx
index 1b7824862..8cb97f939 100644
--- a/tests/run/cpp_operators.pyx
+++ b/tests/run/cpp_operators.pyx
@@ -92,11 +92,11 @@ def test_binop():
out(t[0] * 1)
out(t[0] / 1)
out(t[0] % 1)
-
+
out(t[0] & 1)
out(t[0] | 1)
out(t[0] ^ 1)
-
+
out(t[0] << 1)
out(t[0] >> 1)
diff --git a/tests/run/cpp_operators_helper.h b/tests/run/cpp_operators_helper.h
index 231f4868b..9b885b6d2 100644
--- a/tests/run/cpp_operators_helper.h
+++ b/tests/run/cpp_operators_helper.h
@@ -14,12 +14,12 @@ public:
UN_OP(~);
UN_OP(!);
UN_OP(&);
-
+
UN_OP(++);
UN_OP(--);
POST_UN_OP(++);
POST_UN_OP(--);
-
+
BIN_OP(+);
BIN_OP(-);
BIN_OP(*);
@@ -28,7 +28,7 @@ public:
BIN_OP(<<);
BIN_OP(>>);
-
+
BIN_OP(|);
BIN_OP(&);
BIN_OP(^);
diff --git a/tests/run/cpp_stl.pyx b/tests/run/cpp_stl.pyx
index 66ecf6885..4d3c0f05b 100644
--- a/tests/run/cpp_stl.pyx
+++ b/tests/run/cpp_stl.pyx
@@ -7,7 +7,7 @@ cdef extern from "vector" namespace "std":
void assign(int, T)
void clear()
int size()
-
+
cppclass iterator:
T operator*()
iterator operator++()
diff --git a/tests/run/cpp_templates.pyx b/tests/run/cpp_templates.pyx
index e29bceae4..12007f4c3 100644
--- a/tests/run/cpp_templates.pyx
+++ b/tests/run/cpp_templates.pyx
@@ -6,7 +6,7 @@ cdef extern from "cpp_templates_helper.h":
void set(T)
T get()
bint operator==(Wrap[T])
-
+
cdef cppclass Pair[T1,T2]:
Pair(T1,T2)
T1 first()
diff --git a/tests/run/ctruthtests.pyx b/tests/run/ctruthtests.pyx
index fdb6e92e5..af7ad3d71 100644
--- a/tests/run/ctruthtests.pyx
+++ b/tests/run/ctruthtests.pyx
@@ -83,7 +83,7 @@ ctypedef union _aux:
cdef class TestExtPtr:
cdef void* p
- def __init__(self, int i):
+ def __init__(self, int i):
cdef _aux aux
aux.i = i
self.p = aux.p
diff --git a/tests/run/cython_includes.pyx b/tests/run/cython_includes.pyx
index 88bf080a0..f18ce2816 100644
--- a/tests/run/cython_includes.pyx
+++ b/tests/run/cython_includes.pyx
@@ -29,4 +29,4 @@ def cpython_cimports():
print PyType_Check2([])
print PyType_Check3(list)
print PyType_Check3([])
-
+
diff --git a/tests/run/dict_getitem.pyx b/tests/run/dict_getitem.pyx
index 9e0ecb702..845ac7ff8 100644
--- a/tests/run/dict_getitem.pyx
+++ b/tests/run/dict_getitem.pyx
@@ -18,7 +18,7 @@ def test(dict d, index):
>>> test(d, Unhashable())
Traceback (most recent call last):
ValueError
-
+
>>> test(None, 1) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...subscriptable...
diff --git a/tests/run/exceptionrefcount.pyx b/tests/run/exceptionrefcount.pyx
index 4c8d645b0..6747393c9 100644
--- a/tests/run/exceptionrefcount.pyx
+++ b/tests/run/exceptionrefcount.pyx
@@ -15,7 +15,7 @@ __doc__ = u"""
... refcount1 = sys.getrefcount(SampleException)
... test_func()
... refcount2 = sys.getrefcount(SampleException)
-...
+...
... assert_refcount(refcount1, refcount2, test_func)
... assert_refcount(initial_refcount, refcount2, test_func)
... refcount3 = sys.getrefcount(SampleException)
diff --git a/tests/run/extcmethod.pyx b/tests/run/extcmethod.pyx
index 4092c4d7f..0f9f09e84 100644
--- a/tests/run/extcmethod.pyx
+++ b/tests/run/extcmethod.pyx
@@ -1,7 +1,7 @@
cdef class Spam:
cdef int tons
-
+
cdef void add_tons(self, int x):
self.tons = self.tons + x
diff --git a/tests/run/extern_builtins_T258.pyx b/tests/run/extern_builtins_T258.pyx
index 344cc895f..902063bc1 100644
--- a/tests/run/extern_builtins_T258.pyx
+++ b/tests/run/extern_builtins_T258.pyx
@@ -25,7 +25,7 @@ def test_list(list L):
def test_tuple(tuple t):
"""
Actual builtin types are restrictive wrt subclassing so optimizations can be safely performed.
-
+
>>> test_tuple((1,2))
2
>>> class tuple_subclass(tuple): pass
diff --git a/tests/run/extpropertyref.pyx b/tests/run/extpropertyref.pyx
index 2efdffac3..c2a01f488 100644
--- a/tests/run/extpropertyref.pyx
+++ b/tests/run/extpropertyref.pyx
@@ -1,7 +1,7 @@
cdef class Spam:
property eggs:
-
+
def __get__(self):
"""
This is the docstring for Spam.eggs.__get__
diff --git a/tests/run/exttype.pyx b/tests/run/exttype.pyx
index 35d7e33f7..8f6e56479 100644
--- a/tests/run/exttype.pyx
+++ b/tests/run/exttype.pyx
@@ -14,10 +14,10 @@ cdef class Spam:
def __cinit__(self, eggs):
self.eggs = eggs
self.ham = 42
-
+
def __dealloc__(self):
self.ham = 0
-
+
def eat(self):
gobble(self.eggs, self.ham)
diff --git a/tests/run/for_decrement.pyx b/tests/run/for_decrement.pyx
index e5a086ca1..57db91619 100644
--- a/tests/run/for_decrement.pyx
+++ b/tests/run/for_decrement.pyx
@@ -16,7 +16,7 @@ cdef int get_step():
def range_loop_indices():
"""
- Optimized integer for loops using range() should follow Python behavior,
+ Optimized integer for loops using range() should follow Python behavior,
and leave the index variable with the last value of the range.
"""
cdef int i, j, k=0, l=10, m=10
@@ -29,7 +29,7 @@ def range_loop_indices():
def from_loop_indices():
"""
- for-from-loops should follow C behavior, and leave the index variable
+ for-from-loops should follow C behavior, and leave the index variable
incremented one step after the last iteration.
"""
cdef int i, j, k
diff --git a/tests/run/function_as_method_T494.pyx b/tests/run/function_as_method_T494.pyx
index 736423526..ee2bc53e1 100644
--- a/tests/run/function_as_method_T494.pyx
+++ b/tests/run/function_as_method_T494.pyx
@@ -1,7 +1,7 @@
__doc__ = """
>>> A.foo = foo
>>> print A().foo()
-
+
"""
class A:
diff --git a/tests/run/function_binding_T494.pyx b/tests/run/function_binding_T494.pyx
index 6823eecb2..820d52ea0 100644
--- a/tests/run/function_binding_T494.pyx
+++ b/tests/run/function_binding_T494.pyx
@@ -4,7 +4,7 @@ class SomeNumber(object):
def __init__(self, n):
self._n = n
-
+
def __repr__(self):
return "SomeNumber(%s)" % self._n
diff --git a/tests/run/hash_T326.pyx b/tests/run/hash_T326.pyx
index 38fe48b7b..32bcc59d0 100644
--- a/tests/run/hash_T326.pyx
+++ b/tests/run/hash_T326.pyx
@@ -10,7 +10,7 @@ __doc__ = u"""
Traceback (most recent call last):
...
TypeError: That's kind of a round number...
-
+
"""
cdef class A:
diff --git a/tests/run/if.pyx b/tests/run/if.pyx
index ddbb1e184..f1c3d5ab8 100644
--- a/tests/run/if.pyx
+++ b/tests/run/if.pyx
@@ -29,7 +29,7 @@ def g(a, b):
elif b:
x = 2
return x
-
+
def h(a, b):
"""
>>> h(1,2)
diff --git a/tests/run/ifelseexpr_T267.pyx b/tests/run/ifelseexpr_T267.pyx
index baf29101d..41f412a60 100644
--- a/tests/run/ifelseexpr_T267.pyx
+++ b/tests/run/ifelseexpr_T267.pyx
@@ -22,7 +22,7 @@ def constants(x):
return a
def temps(x):
- return ident(1) if ident(x) < ident(5) else ident(10)
+ return ident(1) if ident(x) < ident(5) else ident(10)
def nested(x):
return 1 if x == 1 else (2 if x == 2 else 3)
diff --git a/tests/run/importfrom.pyx b/tests/run/importfrom.pyx
index 2cf1e6b56..93d4e5eb0 100644
--- a/tests/run/importfrom.pyx
+++ b/tests/run/importfrom.pyx
@@ -59,7 +59,7 @@ def typed_imports():
import types
cdef long maxunicode
cdef type t
-
+
from sys import maxunicode
print maxunicode == sys.maxunicode
from types import ModuleType as t
diff --git a/tests/run/index.pyx b/tests/run/index.pyx
index f829095bf..22cec2b18 100644
--- a/tests/run/index.pyx
+++ b/tests/run/index.pyx
@@ -65,7 +65,7 @@ def index_object(object o, int i):
return o[i]
-# These make sure that our fast indexing works with large and unsigned types.
+# These make sure that our fast indexing works with large and unsigned types.
def test_unsigned_long():
"""
diff --git a/tests/run/inplace.pyx b/tests/run/inplace.pyx
index a5606f6a0..230cd60a2 100644
--- a/tests/run/inplace.pyx
+++ b/tests/run/inplace.pyx
@@ -93,11 +93,11 @@ def smoketest():
def side_effect(x):
print u"side effect", x
return x
-
+
cdef int c_side_effect(int x):
print u"c side effect", x
return x
-
+
def test_side_effects():
"""
>>> test_side_effects()
@@ -185,7 +185,7 @@ def nested_struct_assignment():
nested.a.value = 2
nested.a.value += 3
assert nested.a.value == 5
-
+
nested.a.inner.x = 5
nested.a.inner.x += 10
assert nested.a.inner.x == 15
@@ -200,7 +200,7 @@ def nested_array_assignment():
array.a[0].value = 2
array.a[c_side_effect(0)].value += 3
assert array.a[0].value == 5
-
+
array.a[1].inner.x = 5
array.a[c_side_effect(1)].inner.x += 10
assert array.a[1].inner.x == 15
diff --git a/tests/run/int_literals.pyx b/tests/run/int_literals.pyx
index 511aef5c2..db9d1bb4f 100644
--- a/tests/run/int_literals.pyx
+++ b/tests/run/int_literals.pyx
@@ -28,9 +28,9 @@ def c_longs():
cdef unsigned long ua = 1UL
cdef long long aa = 0xFFFFFFFFFFFFFFFFLL
cdef unsigned long long uaa = 0xFFFFFFFFFFFFFFFFULL
-
+
return a, ua, aa, uaa
-
+
def py_longs():
return 1, 1L, 100000000000000000000000000000000, -100000000000000000000000000000000
diff --git a/tests/run/knuth_man_or_boy_test.pyx b/tests/run/knuth_man_or_boy_test.pyx
index 5f49fe7a4..ecfc13c83 100644
--- a/tests/run/knuth_man_or_boy_test.pyx
+++ b/tests/run/knuth_man_or_boy_test.pyx
@@ -53,4 +53,4 @@ def a(in_k, x1, x2, x3, x4, x5):
k[0] -= 1
return a(k[0], b, x1, x2, x3, x4)
return compute(x4) + compute(x5) if k[0] <= 0 else b()
-
+
diff --git a/tests/run/large_consts_T237.pyx b/tests/run/large_consts_T237.pyx
index 13d549da2..4eccf30b4 100644
--- a/tests/run/large_consts_T237.pyx
+++ b/tests/run/large_consts_T237.pyx
@@ -6,7 +6,7 @@ def add_large():
"""
>>> add_large() == 2147483647 + 2147483647
True
-
+
#>>> add_large_c() == 2147483647 + 2147483647
#True
"""
diff --git a/tests/run/list_pop.pyx b/tests/run/list_pop.pyx
index d1cf062e4..d28c9a203 100644
--- a/tests/run/list_pop.pyx
+++ b/tests/run/list_pop.pyx
@@ -19,7 +19,7 @@ def simple_pop(L):
[0, 1, 2, 3, 4, 5, 6, 7]
>>> while L:
... _ = simple_pop(L)
-
+
>>> L
[]
>>> simple_pop(L)
@@ -45,7 +45,7 @@ def simple_pop_typed(list L):
[0, 1, 2, 3, 4, 5, 6, 7]
>>> while L:
... _ = simple_pop_typed(L)
-
+
>>> L
[]
>>> simple_pop_typed(L)
@@ -75,13 +75,13 @@ def index_pop(L, int i):
Traceback (most recent call last):
...
IndexError: pop index out of range
-
+
>>> while L:
... _ = index_pop(L, 0)
-
+
>>> L
[]
-
+
>>> index_pop(L, 0)
Traceback (most recent call last):
...
@@ -111,13 +111,13 @@ def index_pop_typed(list L, int i):
Traceback (most recent call last):
...
IndexError: pop index out of range
-
+
>>> while L:
... _ = index_pop_typed(L, 0)
-
+
>>> L
[]
-
+
>>> index_pop_typed(L, 0)
Traceback (most recent call last):
...
diff --git a/tests/run/literals.pyx b/tests/run/literals.pyx
index 388fe6754..0d71625dc 100644
--- a/tests/run/literals.pyx
+++ b/tests/run/literals.pyx
@@ -37,9 +37,9 @@ double quoted string."""
a \three \line
raw string with some backslashes.'''
m = 'Three backslashed ordinaries: \c\g\+'
- n = '''Triple single quoted string
+ n = '''Triple single quoted string
with ' and " quotes'''
- o = """Triple double quoted string
+ o = """Triple double quoted string
with ' and " quotes"""
p = "name_like_string"
q = "NameLikeString2"
diff --git a/tests/run/modbody.pyx b/tests/run/modbody.pyx
index 7ba63a68d..de82137ef 100644
--- a/tests/run/modbody.pyx
+++ b/tests/run/modbody.pyx
@@ -12,7 +12,7 @@ def f():
True
"""
pass
-
+
g = 42
x = u"spam"
y = u"eggs"
diff --git a/tests/run/moduletryexcept.pyx b/tests/run/moduletryexcept.pyx
index 6566db6bc..721f13af5 100644
--- a/tests/run/moduletryexcept.pyx
+++ b/tests/run/moduletryexcept.pyx
@@ -86,8 +86,8 @@ try:
except:
raise
except (AttributeError,
- KeyError,
- IndexError,
+ KeyError,
+ IndexError,
ValueError) as e:
val = e
raise e
diff --git a/tests/run/numpy_common.pxi b/tests/run/numpy_common.pxi
index ad6e7b085..df2aebade 100644
--- a/tests/run/numpy_common.pxi
+++ b/tests/run/numpy_common.pxi
@@ -2,6 +2,6 @@ cdef extern from *:
void import_array()
void import_umath()
-if 0:
+if 0:
import_array()
import_umath()
diff --git a/tests/run/numpy_test.pyx b/tests/run/numpy_test.pyx
index 104670ba0..557f4842c 100644
--- a/tests/run/numpy_test.pyx
+++ b/tests/run/numpy_test.pyx
@@ -34,7 +34,7 @@ try:
[[ 16. 17. 18. 19.]
[ 20. 21. 22. 23.]]]
6.0 0.0 13.0 8.0
-
+
>>> obj_array()
[a 1 {}]
a 1 {}
@@ -115,7 +115,7 @@ try:
Traceback (most recent call last):
...
ValueError: ndarray is not C contiguous
-
+
>>> test_dtype('b', inc1_byte)
>>> test_dtype('B', inc1_ubyte)
>>> test_dtype('h', inc1_short)
@@ -124,7 +124,7 @@ try:
>>> test_dtype('I', inc1_uint)
>>> test_dtype('l', inc1_long)
>>> test_dtype('L', inc1_ulong)
-
+
>>> test_dtype('f', inc1_float)
>>> test_dtype('d', inc1_double)
>>> test_dtype('g', inc1_longdouble)
@@ -154,16 +154,16 @@ try:
Traceback (most recent call last):
...
ValueError: Non-native byte order not supported
-
+
>>> test_recordarray()
-
+
>>> print(test_nested_dtypes(np.zeros((3,), dtype=np.dtype([\
('a', np.dtype('i,i')),\
('b', np.dtype('i,i'))\
]))))
- array([((0, 0), (0, 0)), ((1, 2), (1, 4)), ((1, 2), (1, 4))],
+ array([((0, 0), (0, 0)), ((1, 2), (1, 4)), ((1, 2), (1, 4))],
dtype=[('a', [('f0', '!i4'), ('f1', '!i4')]), ('b', [('f0', '!i4'), ('f1', '!i4')])])
>>> print(test_nested_dtypes(np.zeros((3,), dtype=np.dtype([\
@@ -175,10 +175,10 @@ try:
ValueError: Buffer dtype mismatch, expected 'int' but got 'float' in 'DoubleInt.y'
>>> print(test_packed_align(np.zeros((1,), dtype=np.dtype('b,i', align=False))))
- array([(22, 23)],
+ array([(22, 23)],
dtype=[('f0', '|i1'), ('f1', '!i4')])
>>> print(test_unpacked_align(np.zeros((1,), dtype=np.dtype('b,i', align=True))))
- array([(22, 23)],
+ array([(22, 23)],
dtype=[('f0', '|i1'), ('', '|V3'), ('f1', '!i4')])
>>> print(test_packed_align(np.zeros((1,), dtype=np.dtype('b,i', align=True))))
@@ -205,9 +205,9 @@ try:
8,16
>>> test_point_record()
- array([(0.0, 0.0), (1.0, -1.0), (2.0, -2.0)],
+ array([(0.0, 0.0), (1.0, -1.0), (2.0, -2.0)],
dtype=[('x', '!f8'), ('y', '!f8')])
-
+
"""
except:
__doc__ = u""
@@ -232,7 +232,7 @@ def ndarray_str(arr):
Since Py2.3 doctest don't support <BLANKLINE>, manually replace blank lines
with <_BLANKLINE_>
"""
- return unicode(arr).replace(u'\n\n', u'\n<_BLANKLINE_>\n')
+ return unicode(arr).replace(u'\n\n', u'\n<_BLANKLINE_>\n')
def basic():
cdef object[int, ndim=2] buf = np.arange(10, dtype=b'i').reshape((2, 5))
@@ -295,7 +295,7 @@ def inc1_clongdouble(np.ndarray[long double complex] arr): arr[1] = arr[1] + (1
def inc1_cfloat_struct(np.ndarray[np.cfloat_t] arr):
arr[1].real += 1
arr[1].imag += 1
-
+
def inc1_cdouble_struct(np.ndarray[np.cdouble_t] arr):
arr[1].real += 1
arr[1].imag += 1
@@ -324,7 +324,7 @@ def inc1_uintp_t(np.ndarray[np.uintp_t] arr): arr[1] += 1
def inc1_int32_t(np.ndarray[np.int32_t] arr): arr[1] += 1
def inc1_float64_t(np.ndarray[np.float64_t] arr): arr[1] += 1
-
+
def test_dtype(dtype, inc1):
if dtype in ("g", np.longdouble,
"G", np.clongdouble):
@@ -407,7 +407,7 @@ def test_packed_align(np.ndarray[PackedStruct] arr):
def test_unpacked_align(np.ndarray[UnpackedStruct] arr):
arr[0].a = 22
- arr[0].b = 23
+ arr[0].b = 23
return repr(arr).replace('<', '!').replace('>', '!')
def test_complextypes():
diff --git a/tests/run/pinard5.pyx b/tests/run/pinard5.pyx
index 86aa53f8d..0d3f07174 100644
--- a/tests/run/pinard5.pyx
+++ b/tests/run/pinard5.pyx
@@ -1,7 +1,7 @@
cdef class Tri:
def test(self):
return 1
-
+
cdef class Curseur:
cdef Tri tri
def detail(self):
diff --git a/tests/run/pure.pyx b/tests/run/pure.pyx
index 4ed263e87..424cbd15c 100644
--- a/tests/run/pure.pyx
+++ b/tests/run/pure.pyx
@@ -37,7 +37,7 @@ def test_declare(n):
i = sizeof(xx)
ptr = cython.declare(cython.p_int, cython.address(y))
return y, ptr[0]
-
+
@cython.locals(x=cython.double, n=cython.int)
def test_cast(x):
"""
@@ -50,7 +50,7 @@ def test_cast(x):
"""
n = cython.cast(cython.int, x)
return n
-
+
@cython.locals(x=cython.int, y=cython.p_int)
def test_address(x):
"""
diff --git a/tests/run/pure_py.py b/tests/run/pure_py.py
index 3c91b0987..3adfb3716 100644
--- a/tests/run/pure_py.py
+++ b/tests/run/pure_py.py
@@ -39,7 +39,7 @@ def test_sizeof():
## i = sizeof(xx)
## ptr = cython.declare(cython.p_int, cython.address(y))
## return y, ptr[0]
-
+
@cython.locals(x=cython.double, n=cython.int)
def test_cast(x):
"""
@@ -48,7 +48,7 @@ def test_cast(x):
"""
n = cython.cast(cython.int, x)
return n
-
+
@cython.locals(x=cython.int, y=cython.p_int)
def test_address(x):
"""
diff --git a/tests/run/r_extcomplex2.pyx b/tests/run/r_extcomplex2.pyx
index fa1ef2323..a4e849522 100644
--- a/tests/run/r_extcomplex2.pyx
+++ b/tests/run/r_extcomplex2.pyx
@@ -8,14 +8,14 @@ __doc__ = u"""
"""
cdef extern from "complexobject.h":
-
+
struct Py_complex:
double real
double imag
-
+
ctypedef class __builtin__.complex [object PyComplexObject]:
cdef Py_complex cval
-
+
def spam(complex c):
print u"Real:", c.cval.real
print u"Imag:", c.cval.imag
diff --git a/tests/run/r_forloop.pyx b/tests/run/r_forloop.pyx
index f0bdebf0d..699912606 100644
--- a/tests/run/r_forloop.pyx
+++ b/tests/run/r_forloop.pyx
@@ -139,7 +139,7 @@ def go_list_ret():
for i in l:
if i > 1:
return i
-
+
def go_tuple():
"""
>>> go_tuple()
diff --git a/tests/run/r_huss3.pyx b/tests/run/r_huss3.pyx
index 7fc830103..5e47bf8aa 100644
--- a/tests/run/r_huss3.pyx
+++ b/tests/run/r_huss3.pyx
@@ -3,7 +3,7 @@ __doc__ = u"""
... foo()
... except Exception, e:
... print("%s: %s" % (e.__class__.__name__, e))
-ValueError:
+ValueError:
>>> try:
... bar()
... except Exception, e:
diff --git a/tests/run/r_pythonapi.pyx b/tests/run/r_pythonapi.pyx
index 8557d17f4..daeb95caf 100644
--- a/tests/run/r_pythonapi.pyx
+++ b/tests/run/r_pythonapi.pyx
@@ -12,7 +12,7 @@ cdef extern from "string.h":
void memcpy(char *d, char *s, int n)
from cpython cimport PyUnicode_DecodeUTF8
-
+
def spam():
cdef char buf[12]
memcpy(buf, "Ftang\0Ftang!", sizeof(buf))
diff --git a/tests/run/r_spamtype.pyx b/tests/run/r_spamtype.pyx
index cfa94fbb1..328956ed7 100644
--- a/tests/run/r_spamtype.pyx
+++ b/tests/run/r_spamtype.pyx
@@ -12,15 +12,15 @@ __doc__ = u"""
cdef class Spam:
cdef int tons
-
+
def __cinit__(self):
self.tons = 17
-
+
def __dealloc__(self):
print self.tons, u"tons of spam is history."
-
+
def get_tons(self):
return self.tons
-
+
def set_tons(self, x):
self.tons = x
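
The Spam extension type touched above pairs __cinit__ (run during C-level allocation) with __dealloc__ (run when the object is freed). A rough plain-Python analogue of that lifecycle, using __init__ and __del__ in place of the C-level hooks (names and values mirror the test, but this is not the cdef class itself):

    class Spam:
        def __init__(self):          # stands in for __cinit__
            self.tons = 17
        def __del__(self):           # stands in for __dealloc__
            print(self.tons, "tons of spam is history.")
        def get_tons(self):
            return self.tons
        def set_tons(self, x):
            self.tons = x

    s = Spam()
    s.set_tons(3)
    del s   # in CPython the refcount drops to zero and __del__ prints immediately
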
diff --git a/tests/run/shapes.h b/tests/run/shapes.h
index aa24ba3b5..6623c6b79 100644
--- a/tests/run/shapes.h
+++ b/tests/run/shapes.h
@@ -2,15 +2,15 @@
#define SHAPES_H
namespace shapes {
-
+
int constructor_count = 0;
int destructor_count = 0;
-
+
class Shape
{
public:
virtual float area() = 0;
- Shape() { constructor_count++; }
+ Shape() { constructor_count++; }
virtual ~Shape() { destructor_count++; }
};
@@ -18,7 +18,7 @@ namespace shapes {
{
public:
Rectangle() { }
- Rectangle(int width, int height)
+ Rectangle(int width, int height)
{
this->width = width;
this->height = height;
diff --git a/tests/run/simpcall.pyx b/tests/run/simpcall.pyx
index 6d0957e2a..2d62caa7b 100644
--- a/tests/run/simpcall.pyx
+++ b/tests/run/simpcall.pyx
@@ -20,7 +20,7 @@ def z(a, b, c):
f(a, b,)
g(1, 2.0, "spam")
g(a, b, c)
-
+
def fail0(a, b):
"""
>>> fail0(1,2)
@@ -28,7 +28,7 @@ def fail0(a, b):
TypeError: f() takes exactly 2 positional arguments (0 given)
"""
f()
-
+
def fail1(a, b):
"""
>>> fail1(1,2)
diff --git a/tests/run/slice3.pyx b/tests/run/slice3.pyx
index 928be777e..fde53aadd 100644
--- a/tests/run/slice3.pyx
+++ b/tests/run/slice3.pyx
@@ -49,4 +49,4 @@ def select(obj1, obj2, obj3, obj4, obj5):
obj1 = obj2[:obj4:obj5]
obj1 = obj2[obj3:obj4:obj5]
obj1 = obj2[int3:int4:int5]
-
+
diff --git a/tests/run/slice_ptr.pyx b/tests/run/slice_ptr.pyx
index 6da040b46..242ef833a 100644
--- a/tests/run/slice_ptr.pyx
+++ b/tests/run/slice_ptr.pyx
@@ -10,7 +10,7 @@ def double_ptr_slice(x, L, int a, int b):
>>> double_ptr_slice(0, L, 3, 7)
>>> double_ptr_slice(5, L, 3, 7)
>>> double_ptr_slice(9, L, 3, 7)
-
+
>>> double_ptr_slice(EqualsEvens(), L, 0, 10)
>>> double_ptr_slice(EqualsEvens(), L, 1, 10)
"""
diff --git a/tests/run/special_methods_T561.pyx b/tests/run/special_methods_T561.pyx
index 1d37d2083..0f84eba34 100644
--- a/tests/run/special_methods_T561.pyx
+++ b/tests/run/special_methods_T561.pyx
@@ -3,11 +3,11 @@
# generate its own wrapper. (This wrapper would be used, for instance,
# when using the special method as a bound method.)
-# To test this, we go through and verify that each affected special
+# To test this, we go through and verify that each affected special
# method works as a bound method.
# Special methods that are treated the same under Python 2 and 3 are
-# tested here; see also special_methods_T561_py2.pyx and
+# tested here; see also special_methods_T561_py2.pyx and
# special_methods_T561_py3.pyx for tests of the differences between
# Python 2 and 3.
@@ -165,7 +165,7 @@ __doc__ = u"""
>>> vs0_len()
VS __len__ 0
0
- >>> # If you define either setitem or delitem, you get wrapper objects
+ >>> # If you define either setitem or delitem, you get wrapper objects
>>> # for both methods. (This behavior is unchanged by #561.)
>>> si_setitem = SetItem().__setitem__
>>> si_setitem('foo', 'bar')
@@ -212,7 +212,7 @@ __doc__ = u"""
>>> g11 = object.__getattribute__(GetAttribute(), '__getattribute__')
>>> g11('attr')
GetAttribute getattribute 'attr'
- >>> # If you define either setattr or delattr, you get wrapper objects
+ >>> # If you define either setattr or delattr, you get wrapper objects
>>> # for both methods. (This behavior is unchanged by #561.)
>>> sa_setattr = SetAttr().__setattr__
>>> sa_setattr('foo', 'bar')
@@ -267,7 +267,7 @@ __doc__ = u"""
>>> vs0_get = vs0.__get__
>>> vs0_get('instance', 'owner')
VS __get__ 0 'instance' 'owner'
- >>> # If you define either set or delete, you get wrapper objects
+ >>> # If you define either set or delete, you get wrapper objects
>>> # for both methods. (This behavior is unchanged by #561.)
>>> s_set = Set().__set__
>>> s_set('instance', 'val')
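
The doctests edited above all verify the same property: each affected special method works when pulled off an instance and called as a bound method. A plain-Python rendition of that pattern (this class is illustrative; the real coverage comes from the cdef classes in the test file):

    class SetItem:
        def __setitem__(self, key, value):
            print("SetItem setitem %r %r" % (key, value))

    si_setitem = SetItem().__setitem__   # a bound method; no explicit self needed
    si_setitem('foo', 'bar')             # prints: SetItem setitem 'foo' 'bar'
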
diff --git a/tests/run/special_methods_T561_py2.pyx b/tests/run/special_methods_T561_py2.pyx
index 3b3da12b0..1031976ce 100644
--- a/tests/run/special_methods_T561_py2.pyx
+++ b/tests/run/special_methods_T561_py2.pyx
@@ -49,7 +49,7 @@ __doc__ = u"""
>>> vs0_getslice(13, 42)
VS __getslice__ 0 13 42
>>> # Cython supports setslice and delslice only for Python 2.
- >>> # If you define either setslice or delslice, you get wrapper objects
+ >>> # If you define either setslice or delslice, you get wrapper objects
>>> # for both methods. (This behavior is unchanged by #561.)
>>> ss_setslice = SetSlice().__setslice__
>>> ss_setslice(13, 42, 'foo')
diff --git a/tests/run/strfunction.pyx b/tests/run/strfunction.pyx
index 47c6366ff..dc6adaf79 100644
--- a/tests/run/strfunction.pyx
+++ b/tests/run/strfunction.pyx
@@ -19,7 +19,7 @@ class subs(str):
"""
>>> subs('testing a subtype')
'testing a subtype'
-
+
# >>> csub('testing a subtype')
# 'testing a subtype'
# >>> csubs('testing a subtype')
diff --git a/tests/run/strliterals.pyx b/tests/run/strliterals.pyx
index 1a344ac10..aca670c6e 100644
--- a/tests/run/strliterals.pyx
+++ b/tests/run/strliterals.pyx
@@ -103,7 +103,7 @@ __doc__ = ur"""
True
>>> len(u6)
7
-
+
>>> newlines == "Aaa\n"
True
"""
diff --git a/tests/run/struct_conversion.pyx b/tests/run/struct_conversion.pyx
index aa8100d12..b1542e75e 100644
--- a/tests/run/struct_conversion.pyx
+++ b/tests/run/struct_conversion.pyx
@@ -14,7 +14,7 @@ def test_constructor(x, y, color):
"""
cdef Point p = Point(x, y, color)
return p
-
+
def test_constructor_kwds(x, y, color):
"""
>>> test_constructor_kwds(1.25, 2.5, 128)
@@ -26,7 +26,7 @@ def test_constructor_kwds(x, y, color):
"""
cdef Point p = Point(x=x, y=y, color=color)
return p
-
+
def test_dict_construction(x, y, color):
"""
>>> test_dict_construction(4, 5, 64)
@@ -47,7 +47,7 @@ cdef struct with_pointers:
bint is_integral
int_or_float data
void* ptr
-
+
def test_pointers(int n, double x):
"""
>>> test_pointers(100, 2.71828)
diff --git a/tests/run/subop.pyx b/tests/run/subop.pyx
index 5166b183a..c95633faf 100644
--- a/tests/run/subop.pyx
+++ b/tests/run/subop.pyx
@@ -13,7 +13,7 @@ def f():
int1 = int2 - int3
obj1 = obj2 - int3
return int1, obj1
-
+
def p():
"""
>>> p()
diff --git a/tests/run/temps_corner1.pyx b/tests/run/temps_corner1.pyx
index da97e75cf..55951f271 100644
--- a/tests/run/temps_corner1.pyx
+++ b/tests/run/temps_corner1.pyx
@@ -4,7 +4,7 @@ cdef class A:
cdef int bitsize(A a):
return 1
-
+
coeffs = [A()]
class B:
diff --git a/tests/run/type_inference.pyx b/tests/run/type_inference.pyx
index 09b996a69..5ffe197dd 100644
--- a/tests/run/type_inference.pyx
+++ b/tests/run/type_inference.pyx
@@ -161,7 +161,7 @@ def unary_operators():
assert typeof(c) == "Python object", typeof(c)
d = -int(5)
assert typeof(d) == "Python object", typeof(d)
-
+
def builtin_type_operations():
"""
@@ -198,7 +198,7 @@ def builtin_type_operations():
assert typeof(T1) == "tuple object", typeof(T1)
T2 = () * 2
assert typeof(T2) == "tuple object", typeof(T2)
-
+
def cascade():
"""
>>> cascade()
@@ -382,7 +382,7 @@ def safe_only():
assert typeof(d) == "long", typeof(d)
# we special-case inference to type str, see
- # trac #553
+ # trac #553
s = "abc"
assert typeof(s) == "Python object", typeof(s)
cdef str t = "def"
diff --git a/tests/run/typedfieldbug_T303.pyx b/tests/run/typedfieldbug_T303.pyx
index 6415f1da1..aeb5c93a6 100644
--- a/tests/run/typedfieldbug_T303.pyx
+++ b/tests/run/typedfieldbug_T303.pyx
@@ -22,7 +22,7 @@ cdef class MyClass:
double actual_double
DoubleTypedef float_isreally_double
LongDoubleTypedef float_isreally_longdouble
-
+
def __init__(self):
self.actual_double = 42.0
self.float_isreally_double = 42.0
diff --git a/tests/run/typeof.pyx b/tests/run/typeof.pyx
index ced4c871b..5bdf2a16f 100644
--- a/tests/run/typeof.pyx
+++ b/tests/run/typeof.pyx
@@ -41,7 +41,7 @@ def simple():
print typeof(x)
print typeof(None)
used = i, l, ll, <long>iptr, <long>iptrptr, a, b, x
-
+
def expression():
"""
>>> expression()
diff --git a/tests/run/unicodemethods.pyx b/tests/run/unicodemethods.pyx
index 12da08446..28e16be91 100644
--- a/tests/run/unicodemethods.pyx
+++ b/tests/run/unicodemethods.pyx
@@ -53,11 +53,11 @@ def split_sep(unicode s, sep):
>>> print_all( text.split(sep) )
ab jd
sdflk as sa
- sadas asdas fsdf
+ sadas asdas fsdf
>>> print_all( split_sep(text, sep) )
ab jd
sdflk as sa
- sadas asdas fsdf
+ sadas asdas fsdf
"""
return s.split(sep)
@@ -71,10 +71,10 @@ def split_sep_max(unicode s, sep, max):
"""
>>> print_all( text.split(sep, 1) )
ab jd
- sdflk as sa sadas asdas fsdf
+ sdflk as sa sadas asdas fsdf
>>> print_all( split_sep_max(text, sep, 1) )
ab jd
- sdflk as sa sadas asdas fsdf
+ sdflk as sa sadas asdas fsdf
"""
return s.split(sep, max)
@@ -87,10 +87,10 @@ def split_sep_max_int(unicode s, sep):
"""
>>> print_all( text.split(sep, 1) )
ab jd
- sdflk as sa sadas asdas fsdf
+ sdflk as sa sadas asdas fsdf
>>> print_all( split_sep_max_int(text, sep) )
ab jd
- sdflk as sa sadas asdas fsdf
+ sdflk as sa sadas asdas fsdf
"""
return s.split(sep, 1)
@@ -106,13 +106,13 @@ def splitlines(unicode s):
>>> print_all( multiline_text.splitlines() )
ab jd
sdflk as sa
- sadas asdas fsdf
+ sadas asdas fsdf
>>> len(splitlines(multiline_text))
3
>>> print_all( splitlines(multiline_text) )
ab jd
sdflk as sa
- sadas asdas fsdf
+ sadas asdas fsdf
"""
return s.splitlines()
@@ -127,7 +127,7 @@ def splitlines_keep(unicode s, keep):
<BLANKLINE>
sdflk as sa
<BLANKLINE>
- sadas asdas fsdf
+ sadas asdas fsdf
>>> len(splitlines_keep(multiline_text, True))
3
>>> print_all( splitlines_keep(multiline_text, True) )
@@ -135,7 +135,7 @@ def splitlines_keep(unicode s, keep):
<BLANKLINE>
sdflk as sa
<BLANKLINE>
- sadas asdas fsdf
+ sadas asdas fsdf
"""
return s.splitlines(keep)
@@ -153,11 +153,11 @@ def splitlines_keep_bint(unicode s):
<BLANKLINE>
sdflk as sa
<BLANKLINE>
- sadas asdas fsdf
+ sadas asdas fsdf
>>> print_all( multiline_text.splitlines(False) )
ab jd
sdflk as sa
- sadas asdas fsdf
+ sadas asdas fsdf
>>> len(splitlines_keep_bint(multiline_text))
7
>>> print_all( splitlines_keep_bint(multiline_text) )
@@ -165,11 +165,11 @@ def splitlines_keep_bint(unicode s):
<BLANKLINE>
sdflk as sa
<BLANKLINE>
- sadas asdas fsdf
+ sadas asdas fsdf
--
ab jd
sdflk as sa
- sadas asdas fsdf
+ sadas asdas fsdf
"""
return s.splitlines(True) + ['--'] + s.splitlines(False)
@@ -471,9 +471,9 @@ def count_start_end(unicode s, substring, start, end):
def replace(unicode s, substring, repl):
"""
>>> print( text.replace('sa', 'SA') )
- ab jd sdflk as SA SAdas asdas fsdf
+ ab jd sdflk as SA SAdas asdas fsdf
>>> print( replace(text, 'sa', 'SA') )
- ab jd sdflk as SA SAdas asdas fsdf
+ ab jd sdflk as SA SAdas asdas fsdf
"""
return s.replace(substring, repl)
@@ -485,8 +485,8 @@ def replace(unicode s, substring, repl):
def replace_maxcount(unicode s, substring, repl, maxcount):
"""
>>> print( text.replace('sa', 'SA', 1) )
- ab jd sdflk as SA sadas asdas fsdf
+ ab jd sdflk as SA sadas asdas fsdf
>>> print( replace_maxcount(text, 'sa', 'SA', 1) )
- ab jd sdflk as SA sadas asdas fsdf
+ ab jd sdflk as SA sadas asdas fsdf
"""
return s.replace(substring, repl, maxcount)
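
The unicodemethods doctests above run each builtin string method and its typed Cython counterpart side by side, so they effectively pin down the stock behavior of split(), splitlines() and replace(). A short plain-Python restatement of the properties being checked (the text value here is made up for illustration; the test module defines its own text and sep):

    text = "ab jd\nsdflk as sa\nsadas asdas fsdf"
    assert text.split("\n") == ["ab jd", "sdflk as sa", "sadas asdas fsdf"]
    assert text.split("\n", 1)[1] == "sdflk as sa\nsadas asdas fsdf"   # maxsplit caps the number of splits
    assert text.splitlines(True)[0] == "ab jd\n"                       # keepends=True retains the newline
    assert len(text.splitlines(False)) == 3
    assert text.replace("sa", "SA", 1) == "ab jd\nsdflk as SA\nsadas asdas fsdf"  # maxcount caps replacements
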
diff --git a/tests/run/unsignedbehaviour_T184.pyx b/tests/run/unsignedbehaviour_T184.pyx
index 21cf84e7e..225de2f36 100644
--- a/tests/run/unsignedbehaviour_T184.pyx
+++ b/tests/run/unsignedbehaviour_T184.pyx
@@ -29,7 +29,7 @@ def loop():
for x in range(-i,i):
times += 1
return times
-
+
def rangelist():
cdef unsigned int i = 3
return list(range(-i, i))
diff --git a/tests/run/withstat.pyx b/tests/run/withstat.pyx
index fad2eaff1..ad037e288 100644
--- a/tests/run/withstat.pyx
+++ b/tests/run/withstat.pyx
@@ -22,7 +22,7 @@ class ContextManager(object):
def __exit__(self, a, b, tb):
print u"exit", typename(a), typename(b), typename(tb)
return self.exit_ret
-
+
def __enter__(self):
print u"enter"
return self.value
@@ -36,7 +36,7 @@ def no_as():
"""
with ContextManager(u"value"):
print u"hello"
-
+
def basic():
"""
>>> basic()
@@ -46,7 +46,7 @@ def basic():
"""
with ContextManager(u"value") as x:
print x
-
+
def with_pass():
"""
>>> with_pass()
@@ -55,7 +55,7 @@ def with_pass():
"""
with ContextManager(u"value") as x:
pass
-
+
def with_return():
"""
>>> with_return()
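
The withstat tests above drive an ordinary context manager: __enter__ supplies the value bound by "as", and __exit__ receives the exception type, value and traceback (all None when the block exits normally). A minimal plain-Python sketch of the same protocol (the Reporting class is illustrative, not the ContextManager class from the test):

    class Reporting(object):
        def __init__(self, value):
            self.value = value
        def __enter__(self):
            print("enter")
            return self.value          # becomes the target of "with ... as x"
        def __exit__(self, exc_type, exc, tb):
            print("exit", exc_type)
            return False               # False: do not suppress a pending exception

    with Reporting("value") as x:
        print(x)                       # prints: enter / value / exit None
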
diff --git a/tests/wrappers/cpp_overload_wrapper_lib.cpp b/tests/wrappers/cpp_overload_wrapper_lib.cpp
index a031f975e..2f7af2f22 100644
--- a/tests/wrappers/cpp_overload_wrapper_lib.cpp
+++ b/tests/wrappers/cpp_overload_wrapper_lib.cpp
@@ -12,7 +12,7 @@ double doublefunc (double a, double b, double c)
DoubleKeeper::DoubleKeeper ()
- : number (1.0)
+ : number (1.0)
{
}
diff --git a/tests/wrappers/cpp_overload_wrapper_lib.h b/tests/wrappers/cpp_overload_wrapper_lib.h
index ca34afd5f..cdd347eb8 100644
--- a/tests/wrappers/cpp_overload_wrapper_lib.h
+++ b/tests/wrappers/cpp_overload_wrapper_lib.h
@@ -7,12 +7,12 @@ double doublefunc (double a, double b, double c);
class DoubleKeeper
{
double number;
-
+
public:
DoubleKeeper ();
DoubleKeeper (double number);
virtual ~DoubleKeeper ();
-
+
void set_number (double num);
void set_number (void);
double get_number () const;
diff --git a/tests/wrappers/cpp_references.pyx b/tests/wrappers/cpp_references.pyx
index a4c2edd29..ec19a5a2e 100644
--- a/tests/wrappers/cpp_references.pyx
+++ b/tests/wrappers/cpp_references.pyx
@@ -3,7 +3,7 @@ cimport cython
cdef extern from "cpp_references_helper.h":
cdef int& ref_func(int&)
-
+
cdef int ref_var_value
cdef int& ref_var
diff --git a/tests/wrappers/cppwrap_lib.h b/tests/wrappers/cppwrap_lib.h
index 94145d63c..dcca0bf51 100644
--- a/tests/wrappers/cppwrap_lib.h
+++ b/tests/wrappers/cppwrap_lib.h
@@ -7,11 +7,11 @@ double doublefunc (double a, double b, double c);
class DoubleKeeper
{
double number;
-
+
public:
DoubleKeeper (double number);
virtual ~DoubleKeeper ();
-
+
void set_number (double num);
double get_number () const;
virtual double transmogrify (double value) const;