summaryrefslogtreecommitdiff
path: root/Cython/Compiler/Nodes.py
diff options
context:
space:
mode:
Diffstat (limited to 'Cython/Compiler/Nodes.py')
-rw-r--r--Cython/Compiler/Nodes.py2384
1 files changed, 1595 insertions, 789 deletions
diff --git a/Cython/Compiler/Nodes.py b/Cython/Compiler/Nodes.py
index 457ae94ad..230226298 100644
--- a/Cython/Compiler/Nodes.py
+++ b/Cython/Compiler/Nodes.py
@@ -5,6 +5,7 @@
from __future__ import absolute_import
import cython
+
cython.declare(sys=object, os=object, copy=object,
Builtin=object, error=object, warning=object, Naming=object, PyrexTypes=object,
py_object_type=object, ModuleScope=object, LocalScope=object, ClosureScope=object,
@@ -12,17 +13,18 @@ cython.declare(sys=object, os=object, copy=object,
CppClassScope=object, UtilityCode=object, EncodedString=object,
error_type=object, _py_int_types=object)
-import sys, os, copy
+import sys, copy
from itertools import chain
from . import Builtin
-from .Errors import error, warning, InternalError, CompileError
+from .Errors import error, warning, InternalError, CompileError, CannotSpecialize
from . import Naming
from . import PyrexTypes
from . import TypeSlots
from .PyrexTypes import py_object_type, error_type
-from .Symtab import (ModuleScope, LocalScope, ClosureScope,
- StructOrUnionScope, PyClassScope, CppClassScope, TemplateScope)
+from .Symtab import (ModuleScope, LocalScope, ClosureScope, PropertyScope,
+ StructOrUnionScope, PyClassScope, CppClassScope, TemplateScope, GeneratorExpressionScope,
+ CppScopedEnumScope, punycodify_name)
from .Code import UtilityCode
from .StringEncoding import EncodedString
from . import Future
@@ -38,6 +40,9 @@ else:
_py_int_types = (int, long)
+IMPLICIT_CLASSMETHODS = {"__init_subclass__", "__class_getitem__"}
+
+
def relative_position(pos):
return (pos[0].get_filenametable_entry(), pos[1])
@@ -68,53 +73,6 @@ def embed_position(pos, docstring):
return doc
-def analyse_type_annotation(annotation, env, assigned_value=None):
- base_type = None
- is_ambiguous = False
- explicit_pytype = explicit_ctype = False
- if annotation.is_dict_literal:
- warning(annotation.pos,
- "Dicts should no longer be used as type annotations. Use 'cython.int' etc. directly.")
- for name, value in annotation.key_value_pairs:
- if not name.is_string_literal:
- continue
- if name.value in ('type', b'type'):
- explicit_pytype = True
- if not explicit_ctype:
- annotation = value
- elif name.value in ('ctype', b'ctype'):
- explicit_ctype = True
- annotation = value
- if explicit_pytype and explicit_ctype:
- warning(annotation.pos, "Duplicate type declarations found in signature annotation")
- arg_type = annotation.analyse_as_type(env)
- if annotation.is_name and not annotation.cython_attribute and annotation.name in ('int', 'long', 'float'):
- # Map builtin numeric Python types to C types in safe cases.
- if assigned_value is not None and arg_type is not None and not arg_type.is_pyobject:
- assigned_type = assigned_value.infer_type(env)
- if assigned_type and assigned_type.is_pyobject:
- # C type seems unsafe, e.g. due to 'None' default value => ignore annotation type
- is_ambiguous = True
- arg_type = None
- # ignore 'int' and require 'cython.int' to avoid unsafe integer declarations
- if arg_type in (PyrexTypes.c_long_type, PyrexTypes.c_int_type, PyrexTypes.c_float_type):
- arg_type = PyrexTypes.c_double_type if annotation.name == 'float' else py_object_type
- elif arg_type is not None and annotation.is_string_literal:
- warning(annotation.pos,
- "Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.")
- if arg_type is not None:
- if explicit_pytype and not explicit_ctype and not arg_type.is_pyobject:
- warning(annotation.pos,
- "Python type declaration in signature annotation does not refer to a Python type")
- base_type = CAnalysedBaseTypeNode(
- annotation.pos, type=arg_type, is_arg=True)
- elif is_ambiguous:
- warning(annotation.pos, "Ambiguous types in annotation, ignoring")
- else:
- warning(annotation.pos, "Unknown type declaration in annotation, ignoring")
- return base_type, arg_type
-
-
def write_func_call(func, codewriter_class):
def f(*args, **kwds):
if len(args) > 1 and isinstance(args[1], codewriter_class):
@@ -125,19 +83,16 @@ def write_func_call(func, codewriter_class):
' ' * code.call_level,
node.__class__.__name__,
func.__name__,
- node.pos[1:])
- pristine = code.buffer.stream.tell()
- code.putln(marker)
+ node.pos[1:],
+ )
+ insertion_point = code.insertion_point()
start = code.buffer.stream.tell()
code.call_level += 4
res = func(*args, **kwds)
code.call_level -= 4
- if start == code.buffer.stream.tell():
- # no code written => undo writing marker
- code.buffer.stream.truncate(pristine)
- else:
- marker = marker.replace('->', '<-', 1)
- code.putln(marker)
+ if start != code.buffer.stream.tell():
+ code.putln(marker.replace('->', '<-', 1))
+ insertion_point.putln(marker)
return res
else:
return func(*args, **kwds)
@@ -160,9 +115,11 @@ class VerboseCodeWriter(type):
class CheckAnalysers(type):
"""Metaclass to check that type analysis functions return a node.
"""
- methods = set(['analyse_types',
- 'analyse_expressions',
- 'analyse_target_types'])
+ methods = frozenset({
+ 'analyse_types',
+ 'analyse_expressions',
+ 'analyse_target_types',
+ })
def __new__(cls, name, bases, attrs):
from types import FunctionType
@@ -200,6 +157,8 @@ class Node(object):
is_literal = 0
is_terminator = 0
is_wrapper = False # is a DefNode wrapper for a C function
+ is_cproperty = False
+ is_templated_type_node = False
temps = None
# All descendants should set child_attrs to a list of the attributes
@@ -254,7 +213,7 @@ class Node(object):
#
- # There are 3 phases of parse tree processing, applied in order to
+ # There are 3 main phases of parse tree processing, applied in order to
# all the statements in a given scope-block:
#
# (0) analyse_declarations
@@ -266,25 +225,25 @@ class Node(object):
# Determine the result types of expressions and fill in the
# 'type' attribute of each ExprNode. Insert coercion nodes into the
# tree where needed to convert to and from Python objects.
- # Allocate temporary locals for intermediate results. Fill
- # in the 'result_code' attribute of each ExprNode with a C code
- # fragment.
+ # Replace tree nodes with more appropriate implementations found by
+ # the type analysis.
#
# (2) generate_code
# Emit C code for all declarations, statements and expressions.
- # Recursively applies the 3 processing phases to the bodies of
- # functions.
+ #
+ # These phases are triggered by tree transformations.
+ # See the full pipeline in Pipeline.py.
#
def analyse_declarations(self, env):
pass
def analyse_expressions(self, env):
- raise InternalError("analyse_expressions not implemented for %s" % \
+ raise InternalError("analyse_expressions not implemented for %s" %
self.__class__.__name__)
def generate_code(self, code):
- raise InternalError("generate_code not implemented for %s" % \
+ raise InternalError("generate_code not implemented for %s" %
self.__class__.__name__)
def annotate(self, code):
@@ -360,6 +319,7 @@ class Node(object):
return u'"%s":%d:%d\n%s\n' % (
source_desc.get_escaped_description(), line, col, u''.join(lines))
+
class CompilerDirectivesNode(Node):
"""
Sets compiler directives for the children nodes
@@ -402,6 +362,7 @@ class CompilerDirectivesNode(Node):
self.body.annotate(code)
code.globalstate.directives = old
+
class BlockNode(object):
# Mixin class for nodes representing a declaration block.
@@ -415,14 +376,15 @@ class BlockNode(object):
for node in env.lambda_defs:
node.generate_function_definitions(env, code)
+
class StatListNode(Node):
# stats a list of StatNode
child_attrs = ["stats"]
@staticmethod
- def create_analysed(pos, env, *args, **kw):
- node = StatListNode(pos, *args, **kw)
+ def create_analysed(pos, env, **kw):
+ node = StatListNode(pos, **kw)
return node # No node-specific analysis needed
def analyse_declarations(self, env):
@@ -469,7 +431,7 @@ class StatNode(Node):
pass
def generate_execution_code(self, code):
- raise InternalError("generate_execution_code not implemented for %s" % \
+ raise InternalError("generate_execution_code not implemented for %s" %
self.__class__.__name__)
@@ -499,8 +461,13 @@ class CDefExternNode(StatNode):
env.add_include_file(self.include_file, self.verbatim_include, late)
def analyse_expressions(self, env):
+ # Allow C properties, inline methods, etc. also in external types.
+ self.body = self.body.analyse_expressions(env)
return self
+ def generate_function_definitions(self, env, code):
+ self.body.generate_function_definitions(env, code)
+
def generate_execution_code(self, code):
pass
@@ -525,6 +492,9 @@ class CDeclaratorNode(Node):
calling_convention = ""
+ def declared_name(self):
+ return None
+
def analyse_templates(self):
# Only C++ functions have templates.
return None
@@ -539,6 +509,9 @@ class CNameDeclaratorNode(CDeclaratorNode):
default = None
+ def declared_name(self):
+ return self.name
+
def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
if nonempty and self.name == '':
# May have mistaken the name for the type.
@@ -551,7 +524,12 @@ class CNameDeclaratorNode(CDeclaratorNode):
base_type = py_object_type
if base_type.is_fused and env.fused_to_specific:
- base_type = base_type.specialize(env.fused_to_specific)
+ try:
+ base_type = base_type.specialize(env.fused_to_specific)
+ except CannotSpecialize:
+ error(self.pos,
+ "'%s' cannot be specialized since its type is not a fused argument to this function" %
+ self.name)
self.type = base_type
return self, base_type
@@ -562,6 +540,9 @@ class CPtrDeclaratorNode(CDeclaratorNode):
child_attrs = ["base"]
+ def declared_name(self):
+ return self.base.declared_name()
+
def analyse_templates(self):
return self.base.analyse_templates()
@@ -572,14 +553,17 @@ class CPtrDeclaratorNode(CDeclaratorNode):
return self.base.analyse(ptr_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
-class CReferenceDeclaratorNode(CDeclaratorNode):
- # base CDeclaratorNode
-
+class _CReferenceDeclaratorBaseNode(CDeclaratorNode):
child_attrs = ["base"]
+ def declared_name(self):
+ return self.base.declared_name()
+
def analyse_templates(self):
return self.base.analyse_templates()
+
+class CReferenceDeclaratorNode(_CReferenceDeclaratorBaseNode):
def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
if base_type.is_pyobject:
error(self.pos, "Reference base type cannot be a Python object")
@@ -587,6 +571,14 @@ class CReferenceDeclaratorNode(CDeclaratorNode):
return self.base.analyse(ref_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+class CppRvalueReferenceDeclaratorNode(_CReferenceDeclaratorBaseNode):
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ if base_type.is_pyobject:
+ error(self.pos, "Rvalue-reference base type cannot be a Python object")
+ ref_type = PyrexTypes.cpp_rvalue_ref_type(base_type)
+ return self.base.analyse(ref_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+
+
class CArrayDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
# dimension ExprNode
@@ -594,7 +586,9 @@ class CArrayDeclaratorNode(CDeclaratorNode):
child_attrs = ["base", "dimension"]
def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
- if (base_type.is_cpp_class and base_type.is_template_type()) or base_type.is_cfunction:
+ if ((base_type.is_cpp_class and base_type.is_template_type()) or
+ base_type.is_cfunction or
+ base_type.python_type_constructor_name):
from .ExprNodes import TupleNode
if isinstance(self.dimension, TupleNode):
args = self.dimension.args
@@ -606,7 +600,7 @@ class CArrayDeclaratorNode(CDeclaratorNode):
error(args[ix].pos, "Template parameter not a type")
base_type = error_type
else:
- base_type = base_type.specialize_here(self.pos, values)
+ base_type = base_type.specialize_here(self.pos, env, values)
return self.base.analyse(base_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
if self.dimension:
self.dimension = self.dimension.analyse_const_expression(env)
@@ -636,8 +630,8 @@ class CFuncDeclaratorNode(CDeclaratorNode):
# args [CArgDeclNode]
# templates [TemplatePlaceholderType]
# has_varargs boolean
- # exception_value ConstNode
- # exception_check boolean True if PyErr_Occurred check needed
+    # exception_value ConstNode or NameNode    NameNode when it names a C++ exception conversion function
+ # exception_check boolean or "+" True if PyErr_Occurred check needed, "+" for a c++ check
# nogil boolean Can be called without gil
# with_gil boolean Acquire gil around function body
# is_const_method boolean Whether this is a const method
@@ -649,6 +643,9 @@ class CFuncDeclaratorNode(CDeclaratorNode):
is_const_method = 0
templates = None
+ def declared_name(self):
+ return self.base.declared_name()
+
def analyse_templates(self):
if isinstance(self.base, CArrayDeclaratorNode):
from .ExprNodes import TupleNode, NameNode
@@ -718,6 +715,12 @@ class CFuncDeclaratorNode(CDeclaratorNode):
env.add_include_file('new') # for std::bad_alloc
env.add_include_file('stdexcept')
env.add_include_file('typeinfo') # for std::bad_cast
+ elif return_type.is_pyobject and self.exception_check:
+ # Functions in pure Python mode default to always check return values for exceptions
+ # (equivalent to the "except*" declaration). In this case, the exception clause
+ # is silently ignored for functions returning a Python object.
+ self.exception_check = False
+
if (return_type.is_pyobject
and (self.exception_value or self.exception_check)
and self.exception_check != '+'):
@@ -727,15 +730,21 @@ class CFuncDeclaratorNode(CDeclaratorNode):
# Use an explicit exception return value to speed up exception checks.
# Even if it is not declared, we can use the default exception value of the return type,
# unless the function is some kind of external function that we do not control.
- if return_type.exception_value is not None and (visibility != 'extern' and not in_pxd):
- # Extension types are more difficult because the signature must match the base type signature.
- if not env.is_c_class_scope:
+ if (return_type.exception_value is not None and (visibility != 'extern' and not in_pxd)):
+ # - We skip this optimization for extension types; they are more difficult because
+ # the signature must match the base type signature.
+ # - Same for function pointers, as we want them to be able to match functions
+ # with any exception value.
+ # - Ideally the function-pointer test would be better after self.base is analysed
+ # however that is hard to do with the current implementation so it lives here
+ # for now.
+ if not env.is_c_class_scope and not isinstance(self.base, CPtrDeclaratorNode):
from .ExprNodes import ConstNode
self.exception_value = ConstNode(
self.pos, value=return_type.exception_value, type=return_type)
if self.exception_value:
- self.exception_value = self.exception_value.analyse_const_expression(env)
if self.exception_check == '+':
+ self.exception_value = self.exception_value.analyse_const_expression(env)
exc_val_type = self.exception_value.type
if (not exc_val_type.is_error
and not exc_val_type.is_pyobject
@@ -745,19 +754,28 @@ class CFuncDeclaratorNode(CDeclaratorNode):
and not (exc_val_type == PyrexTypes.c_char_type
and self.exception_value.value == '*')):
error(self.exception_value.pos,
- "Exception value must be a Python exception or cdef function with no arguments or *.")
+ "Exception value must be a Python exception, or C++ function with no arguments, or *.")
exc_val = self.exception_value
else:
- self.exception_value = self.exception_value.coerce_to(
+ self.exception_value = self.exception_value.analyse_types(env).coerce_to(
return_type, env).analyse_const_expression(env)
exc_val = self.exception_value.get_constant_c_result_code()
if exc_val is None:
- raise InternalError(
- "get_constant_c_result_code not implemented for %s" %
- self.exception_value.__class__.__name__)
+ error(self.exception_value.pos, "Exception value must be constant")
if not return_type.assignable_from(self.exception_value.type):
error(self.exception_value.pos,
"Exception value incompatible with function return type")
+ if (visibility != 'extern'
+ and (return_type.is_int or return_type.is_float)
+ and self.exception_value.has_constant_result()):
+ try:
+ type_default_value = float(return_type.default_value)
+ except ValueError:
+ pass
+ else:
+ if self.exception_value.constant_result == type_default_value:
+ warning(self.pos, "Ambiguous exception value, same as default return value: %r" %
+ self.exception_value.constant_result)
exc_check = self.exception_check
if return_type.is_cfunction:
error(self.pos, "Function cannot return a function")
@@ -789,6 +807,13 @@ class CFuncDeclaratorNode(CDeclaratorNode):
error(self.pos, "cannot have both '%s' and '%s' "
"calling conventions" % (current, callspec))
func_type.calling_convention = callspec
+
+ if func_type.return_type.is_rvalue_reference:
+ warning(self.pos, "Rvalue-reference as function return type not supported", 1)
+ for arg in func_type.args:
+ if arg.type.is_rvalue_reference and not arg.is_forwarding_reference():
+ warning(self.pos, "Rvalue-reference as function argument not supported", 1)
+
return self.base.analyse(func_type, env, visibility=visibility, in_pxd=in_pxd)
def declare_optional_arg_struct(self, func_type, env, fused_cname=None):
@@ -850,8 +875,12 @@ class CArgDeclNode(Node):
# annotation ExprNode or None Py3 function arg annotation
# is_self_arg boolean Is the "self" arg of an extension type method
# is_type_arg boolean Is the "class" arg of an extension type classmethod
- # is_kw_only boolean Is a keyword-only argument
+ # kw_only boolean Is a keyword-only argument
# is_dynamic boolean Non-literal arg stored inside CyFunction
+ # pos_only boolean Is a positional-only argument
+ #
+ # name_cstring property that converts the name to a cstring taking care of unicode
+ # and quoting it
child_attrs = ["base_type", "declarator", "default", "annotation"]
outer_attrs = ["default", "annotation"]
@@ -859,7 +888,9 @@ class CArgDeclNode(Node):
is_self_arg = 0
is_type_arg = 0
is_generic = 1
+ is_special_method_optional = False
kw_only = 0
+ pos_only = 0
not_none = 0
or_none = 0
type = None
@@ -868,59 +899,106 @@ class CArgDeclNode(Node):
annotation = None
is_dynamic = 0
+ def declared_name(self):
+ return self.declarator.declared_name()
+
+ @property
+ def name_cstring(self):
+ return self.name.as_c_string_literal()
+
+ @property
+ def hdr_cname(self):
+ # done lazily - needs self.entry to be set to get the class-mangled
+ # name, which means it has to be generated relatively late
+ if self.needs_conversion:
+ return punycodify_name(Naming.arg_prefix + self.entry.name)
+ else:
+ return punycodify_name(Naming.var_prefix + self.entry.name)
+
+
def analyse(self, env, nonempty=0, is_self_arg=False):
if is_self_arg:
- self.base_type.is_self_arg = self.is_self_arg = True
- if self.type is None:
- # The parser may misinterpret names as types. We fix that here.
- if isinstance(self.declarator, CNameDeclaratorNode) and self.declarator.name == '':
- if nonempty:
- if self.base_type.is_basic_c_type:
- # char, short, long called "int"
- type = self.base_type.analyse(env, could_be_name=True)
- arg_name = type.empty_declaration_code()
- else:
- arg_name = self.base_type.name
- self.declarator.name = EncodedString(arg_name)
- self.base_type.name = None
- self.base_type.is_basic_c_type = False
- could_be_name = True
- else:
- could_be_name = False
- self.base_type.is_arg = True
- base_type = self.base_type.analyse(env, could_be_name=could_be_name)
- if hasattr(self.base_type, 'arg_name') and self.base_type.arg_name:
- self.declarator.name = self.base_type.arg_name
-
- # The parser is unable to resolve the ambiguity of [] as part of the
- # type (e.g. in buffers) or empty declarator (as with arrays).
- # This is only arises for empty multi-dimensional arrays.
- if (base_type.is_array
- and isinstance(self.base_type, TemplatedTypeNode)
- and isinstance(self.declarator, CArrayDeclaratorNode)):
- declarator = self.declarator
- while isinstance(declarator.base, CArrayDeclaratorNode):
- declarator = declarator.base
- declarator.base = self.base_type.array_declarator
- base_type = base_type.base_type
+ self.base_type.is_self_arg = self.is_self_arg = is_self_arg
+ if self.type is not None:
+ return self.name_declarator, self.type
- # inject type declaration from annotations
- # this is called without 'env' by AdjustDefByDirectives transform before declaration analysis
- if self.annotation and env and env.directives['annotation_typing'] and self.base_type.name is None:
- arg_type = self.inject_type_from_annotations(env)
- if arg_type is not None:
- base_type = arg_type
- return self.declarator.analyse(base_type, env, nonempty=nonempty)
+ # The parser may misinterpret names as types. We fix that here.
+ if isinstance(self.declarator, CNameDeclaratorNode) and self.declarator.name == '':
+ if nonempty:
+ if self.base_type.is_basic_c_type:
+ # char, short, long called "int"
+ type = self.base_type.analyse(env, could_be_name=True)
+ arg_name = type.empty_declaration_code()
+ else:
+ arg_name = self.base_type.name
+ self.declarator.name = EncodedString(arg_name)
+ self.base_type.name = None
+ self.base_type.is_basic_c_type = False
+ could_be_name = True
else:
- return self.name_declarator, self.type
+ could_be_name = False
+ self.base_type.is_arg = True
+ base_type = self.base_type.analyse(env, could_be_name=could_be_name)
+ base_arg_name = getattr(self.base_type, 'arg_name', None)
+ if base_arg_name:
+ self.declarator.name = base_arg_name
+
+ # The parser is unable to resolve the ambiguity of [] as part of the
+ # type (e.g. in buffers) or empty declarator (as with arrays).
+        # This only arises for empty multi-dimensional arrays.
+ if (base_type.is_array
+ and isinstance(self.base_type, TemplatedTypeNode)
+ and isinstance(self.declarator, CArrayDeclaratorNode)):
+ declarator = self.declarator
+ while isinstance(declarator.base, CArrayDeclaratorNode):
+ declarator = declarator.base
+ declarator.base = self.base_type.array_declarator
+ base_type = base_type.base_type
+
+ # inject type declaration from annotations
+ # this is called without 'env' by AdjustDefByDirectives transform before declaration analysis
+ if (self.annotation and env and env.directives['annotation_typing']
+ # CSimpleBaseTypeNode has a name attribute; CAnalysedBaseTypeNode
+ # (and maybe other options) doesn't
+ and getattr(self.base_type, "name", None) is None):
+ arg_type = self.inject_type_from_annotations(env)
+ if arg_type is not None:
+ base_type = arg_type
+ return self.declarator.analyse(base_type, env, nonempty=nonempty)
def inject_type_from_annotations(self, env):
annotation = self.annotation
if not annotation:
return None
- base_type, arg_type = analyse_type_annotation(annotation, env, assigned_value=self.default)
- if base_type is not None:
- self.base_type = base_type
+
+ modifiers, arg_type = annotation.analyse_type_annotation(env, assigned_value=self.default)
+ if arg_type is not None:
+ self.base_type = CAnalysedBaseTypeNode(
+ annotation.pos, type=arg_type, is_arg=True)
+
+ if arg_type:
+ if "typing.Optional" in modifiers:
+ # "x: Optional[...]" => explicitly allow 'None'
+ arg_type = arg_type.resolve()
+ if arg_type and not arg_type.is_pyobject:
+ # We probably already reported this as "cannot be applied to non-Python type".
+ # error(annotation.pos, "Only Python type arguments can use typing.Optional[...]")
+ pass
+ else:
+ self.or_none = True
+ elif arg_type is py_object_type:
+ # exclude ": object" from the None check - None is a generic object.
+ self.or_none = True
+ elif self.default and self.default.is_none and (arg_type.is_pyobject or arg_type.equivalent_type):
+ # "x: ... = None" => implicitly allow 'None'
+ if not arg_type.is_pyobject:
+ arg_type = arg_type.equivalent_type
+ if not self.or_none:
+ warning(self.pos, "PEP-484 recommends 'typing.Optional[...]' for arguments that can be None.")
+ self.or_none = True
+ elif arg_type.is_pyobject and not self.or_none:
+ self.not_none = True
+
return arg_type
def calculate_default_value_code(self, code):
@@ -947,8 +1025,7 @@ class CArgDeclNode(Node):
default.make_owned_reference(code)
result = default.result() if overloaded_assignment else default.result_as(self.type)
code.putln("%s = %s;" % (target, result))
- if self.type.is_pyobject:
- code.put_giveref(default.result())
+ code.put_giveref(default.result(), self.type)
default.generate_post_assignment_code(code)
default.free_temps(code)
@@ -989,6 +1066,7 @@ class CSimpleBaseTypeNode(CBaseTypeNode):
module_path = []
is_basic_c_type = False
complex = False
+ is_self_arg = False
def analyse(self, env, could_be_name=False):
# Return type descriptor.
@@ -1009,22 +1087,31 @@ class CSimpleBaseTypeNode(CBaseTypeNode):
else:
type = py_object_type
else:
+ scope = env
if self.module_path:
# Maybe it's a nested C++ class.
- scope = env
for item in self.module_path:
entry = scope.lookup(item)
- if entry is not None and entry.is_cpp_class:
+ if entry is not None and (
+ entry.is_cpp_class or
+ entry.is_type and entry.type.is_cpp_class
+ ):
scope = entry.type.scope
+ elif entry and entry.as_module:
+ scope = entry.as_module
else:
scope = None
break
-
+ if scope is None and len(self.module_path) == 1:
+ # (may be possible to handle longer module paths?)
+ # TODO: probably not the best place to declare it?
+ from .Builtin import get_known_standard_library_module_scope
+ found_entry = env.lookup(self.module_path[0])
+ if found_entry and found_entry.known_standard_library_import:
+ scope = get_known_standard_library_module_scope(found_entry.known_standard_library_import)
if scope is None:
# Maybe it's a cimport.
scope = env.find_imported_module(self.module_path, self.pos)
- else:
- scope = env
if scope:
if scope.is_c_class_scope:
@@ -1043,7 +1130,7 @@ class CSimpleBaseTypeNode(CBaseTypeNode):
self.arg_name = EncodedString(self.name)
else:
if self.templates:
- if not self.name in self.templates:
+ if self.name not in self.templates:
error(self.pos, "'%s' is not a type identifier" % self.name)
type = PyrexTypes.TemplatePlaceholderType(self.name)
else:
@@ -1063,10 +1150,9 @@ class CSimpleBaseTypeNode(CBaseTypeNode):
type = PyrexTypes.c_double_complex_type
type.create_declaration_utility_code(env)
self.complex = True
- if type:
- return type
- else:
- return PyrexTypes.error_type
+ if not type:
+ type = PyrexTypes.error_type
+ return type
class MemoryViewSliceTypeNode(CBaseTypeNode):
@@ -1135,29 +1221,56 @@ class TemplatedTypeNode(CBaseTypeNode):
child_attrs = ["base_type_node", "positional_args",
"keyword_args", "dtype_node"]
+ is_templated_type_node = True
dtype_node = None
-
name = None
+ def _analyse_template_types(self, env, base_type):
+ require_python_types = base_type.python_type_constructor_name in (
+ 'typing.Optional',
+ 'dataclasses.ClassVar',
+ )
+ in_c_type_context = env.in_c_type_context and not require_python_types
+
+ template_types = []
+ for template_node in self.positional_args:
+ # CBaseTypeNode -> allow C type declarations in a 'cdef' context again
+ with env.new_c_type_context(in_c_type_context or isinstance(template_node, CBaseTypeNode)):
+ ttype = template_node.analyse_as_type(env)
+ if ttype is None:
+ if base_type.is_cpp_class:
+ error(template_node.pos, "unknown type in template argument")
+ ttype = error_type
+ # For Python generics we can be a bit more flexible and allow None.
+ elif require_python_types and not ttype.is_pyobject:
+ if ttype.equivalent_type and not template_node.as_cython_attribute():
+ ttype = ttype.equivalent_type
+ else:
+ error(template_node.pos, "%s[...] cannot be applied to non-Python type %s" % (
+ base_type.python_type_constructor_name,
+ ttype,
+ ))
+ ttype = error_type
+ template_types.append(ttype)
+
+ return template_types
+
def analyse(self, env, could_be_name=False, base_type=None):
if base_type is None:
base_type = self.base_type_node.analyse(env)
if base_type.is_error: return base_type
- if base_type.is_cpp_class and base_type.is_template_type():
- # Templated class
+ if ((base_type.is_cpp_class and base_type.is_template_type()) or
+ base_type.python_type_constructor_name):
+ # Templated class, Python generics, etc.
if self.keyword_args and self.keyword_args.key_value_pairs:
- error(self.pos, "c++ templates cannot take keyword arguments")
+ tp = "c++ templates" if base_type.is_cpp_class else "indexed types"
+ error(self.pos, "%s cannot take keyword arguments" % tp)
self.type = PyrexTypes.error_type
- else:
- template_types = []
- for template_node in self.positional_args:
- type = template_node.analyse_as_type(env)
- if type is None:
- error(template_node.pos, "unknown type in template argument")
- type = error_type
- template_types.append(type)
- self.type = base_type.specialize_here(self.pos, template_types)
+ return self.type
+
+ template_types = self._analyse_template_types(env, base_type)
+ self.type = base_type.specialize_here(self.pos, env, template_types)
elif base_type.is_pyobject:
# Buffer
@@ -1198,11 +1311,29 @@ class TemplatedTypeNode(CBaseTypeNode):
dimension=dimension)
self.type = self.array_declarator.analyse(base_type, env)[1]
- if self.type.is_fused and env.fused_to_specific:
- self.type = self.type.specialize(env.fused_to_specific)
+ if self.type and self.type.is_fused and env.fused_to_specific:
+ try:
+ self.type = self.type.specialize(env.fused_to_specific)
+ except CannotSpecialize:
+ error(self.pos,
+ "'%s' cannot be specialized since its type is not a fused argument to this function" %
+ self.name)
return self.type
+ def analyse_pytyping_modifiers(self, env):
+ # Check for declaration modifiers, e.g. "typing.Optional[...]" or "dataclasses.InitVar[...]"
+ # TODO: somehow bring this together with IndexNode.analyse_pytyping_modifiers()
+ modifiers = []
+ modifier_node = self
+ while modifier_node.is_templated_type_node and modifier_node.base_type_node and len(modifier_node.positional_args) == 1:
+ modifier_type = self.base_type_node.analyse_as_type(env)
+ if modifier_type.python_type_constructor_name and modifier_type.modifier_name:
+ modifiers.append(modifier_type.modifier_name)
+ modifier_node = modifier_node.positional_args[0]
+
+ return modifiers
+
class CComplexBaseTypeNode(CBaseTypeNode):
# base_type CBaseTypeNode
@@ -1273,8 +1404,10 @@ class FusedTypeNode(CBaseTypeNode):
return PyrexTypes.FusedType(types, name=self.name)
-class CConstTypeNode(CBaseTypeNode):
+class CConstOrVolatileTypeNode(CBaseTypeNode):
# base_type CBaseTypeNode
+ # is_const boolean
+ # is_volatile boolean
child_attrs = ["base_type"]
@@ -1282,8 +1415,8 @@ class CConstTypeNode(CBaseTypeNode):
base = self.base_type.analyse(env, could_be_name)
if base.is_pyobject:
error(self.pos,
- "Const base type cannot be a Python object")
- return PyrexTypes.c_const_type(base)
+ "Const/volatile base type cannot be a Python object")
+ return PyrexTypes.c_const_or_volatile_type(base, self.is_const, self.is_volatile)
class CVarDefNode(StatNode):
@@ -1328,6 +1461,11 @@ class CVarDefNode(StatNode):
base_type = self.base_type.analyse(env)
+ # Check for declaration modifiers, e.g. "typing.Optional[...]" or "dataclasses.InitVar[...]"
+ modifiers = None
+ if self.base_type.is_templated_type_node:
+ modifiers = self.base_type.analyse_pytyping_modifiers(env)
+
if base_type.is_fused and not self.in_pxd and (env.is_c_class_scope or
env.is_module_scope):
error(self.pos, "Fused types not allowed here")
@@ -1369,6 +1507,8 @@ class CVarDefNode(StatNode):
return
if type.is_reference and self.visibility != 'extern':
error(declarator.pos, "C++ references cannot be declared; use a pointer instead")
+ if type.is_rvalue_reference and self.visibility != 'extern':
+ error(declarator.pos, "C++ rvalue-references cannot be declared")
if type.is_cfunction:
if 'staticmethod' in env.directives:
type.is_static_method = True
@@ -1383,14 +1523,13 @@ class CVarDefNode(StatNode):
self.entry.create_wrapper = True
else:
if self.overridable:
- warning(self.pos, "cpdef variables will not be supported in Cython 3; "
- "currently they are no different from cdef variables", 2)
+ error(self.pos, "Variables cannot be declared with 'cpdef'. Use 'cdef' instead.")
if self.directive_locals:
error(self.pos, "Decorators can only be followed by functions")
self.entry = dest_scope.declare_var(
name, type, declarator.pos,
cname=cname, visibility=visibility, in_pxd=self.in_pxd,
- api=self.api, is_cdef=1)
+ api=self.api, is_cdef=True, pytyping_modifiers=modifiers)
if Options.docstrings:
self.entry.doc = embed_position(self.pos, self.doc)
@@ -1499,6 +1638,9 @@ class CppClassNode(CStructOrUnionDefNode, BlockNode):
elif isinstance(attr, CompilerDirectivesNode):
for sub_attr in func_attributes(attr.body.stats):
yield sub_attr
+ elif isinstance(attr, CppClassNode) and attr.attributes is not None:
+ for sub_attr in func_attributes(attr.attributes):
+ yield sub_attr
if self.attributes is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
@@ -1529,36 +1671,62 @@ class CppClassNode(CStructOrUnionDefNode, BlockNode):
class CEnumDefNode(StatNode):
- # name string or None
- # cname string or None
- # items [CEnumDefItemNode]
- # typedef_flag boolean
- # visibility "public" or "private" or "extern"
- # api boolean
- # in_pxd boolean
- # create_wrapper boolean
- # entry Entry
-
- child_attrs = ["items"]
+ # name string or None
+ # cname string or None
+ # scoped boolean Is a C++ scoped enum
+ # underlying_type CSimpleBaseTypeNode The underlying value type (int or C++ type)
+ # items [CEnumDefItemNode]
+ # typedef_flag boolean
+ # visibility "public" or "private" or "extern"
+ # api boolean
+ # in_pxd boolean
+ # create_wrapper boolean
+ # entry Entry
+ # doc EncodedString or None Doc string
+
+ child_attrs = ["items", "underlying_type"]
+ doc = None
def declare(self, env):
- self.entry = env.declare_enum(
- self.name, self.pos,
- cname=self.cname, typedef_flag=self.typedef_flag,
- visibility=self.visibility, api=self.api,
- create_wrapper=self.create_wrapper)
+ doc = None
+ if Options.docstrings:
+ doc = embed_position(self.pos, self.doc)
+
+ self.entry = env.declare_enum(
+ self.name, self.pos,
+ cname=self.cname,
+ scoped=self.scoped,
+ typedef_flag=self.typedef_flag,
+ visibility=self.visibility, api=self.api,
+ create_wrapper=self.create_wrapper, doc=doc)
def analyse_declarations(self, env):
+ scope = None
+ underlying_type = self.underlying_type.analyse(env)
+
+ if not underlying_type.is_int:
+ error(self.underlying_type.pos, "underlying type is not an integral type")
+
+ self.entry.type.underlying_type = underlying_type
+
+ if self.scoped and self.items is not None:
+ scope = CppScopedEnumScope(self.name, env)
+ scope.type = self.entry.type
+ else:
+ scope = env
+
if self.items is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
for item in self.items:
- item.analyse_declarations(env, self.entry)
+ item.analyse_declarations(scope, self.entry)
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
+ if self.scoped:
+ return # nothing to do here for C++ enums
if self.visibility == 'public' or self.api:
code.mark_pos(self.pos)
temp = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
@@ -1567,7 +1735,7 @@ class CEnumDefNode(StatNode):
temp,
item.cname,
code.error_goto_if_null(temp, item.pos)))
- code.put_gotref(temp)
+ code.put_gotref(temp, PyrexTypes.py_object_type)
code.putln('if (PyDict_SetItemString(%s, "%s", %s) < 0) %s' % (
Naming.moddict_cname,
item.name,
@@ -1590,9 +1758,15 @@ class CEnumDefItemNode(StatNode):
if not self.value.type.is_int:
self.value = self.value.coerce_to(PyrexTypes.c_int_type, env)
self.value = self.value.analyse_const_expression(env)
+
+ if enum_entry.type.is_cpp_enum:
+ cname = "%s::%s" % (enum_entry.cname, self.name)
+ else:
+ cname = self.cname
+
entry = env.declare_const(
self.name, enum_entry.type,
- self.value, self.pos, cname=self.cname,
+ self.value, self.pos, cname=cname,
visibility=enum_entry.visibility, api=enum_entry.api,
create_wrapper=enum_entry.create_wrapper and enum_entry.name is None)
enum_entry.enum_values.append(entry)
@@ -1659,6 +1833,9 @@ class FuncDefNode(StatNode, BlockNode):
needs_outer_scope = False
pymethdef_required = False
is_generator = False
+ is_generator_expression = False # this can be True alongside is_generator
+ is_coroutine = False
+ is_asyncgen = False
is_generator_body = False
is_async_def = False
modifiers = []
@@ -1667,6 +1844,9 @@ class FuncDefNode(StatNode, BlockNode):
starstar_arg = None
is_cyfunction = False
code_object = None
+ return_type_annotation = None
+
+ outer_attrs = None # overridden by some derived classes - to be visited outside the node's scope
def analyse_default_values(self, env):
default_seen = 0
@@ -1676,6 +1856,10 @@ class FuncDefNode(StatNode, BlockNode):
if arg.is_generic:
arg.default = arg.default.analyse_types(env)
arg.default = arg.default.coerce_to(arg.type, env)
+ elif arg.is_special_method_optional:
+ if not arg.default.is_none:
+ error(arg.pos, "This argument cannot have a non-None default value")
+ arg.default = None
else:
error(arg.pos, "This argument cannot have a default value")
arg.default = None
@@ -1684,18 +1868,12 @@ class FuncDefNode(StatNode, BlockNode):
elif default_seen:
error(arg.pos, "Non-default argument following default argument")
- def analyse_annotation(self, env, annotation):
- # Annotations can not only contain valid Python expressions but arbitrary type references.
- if annotation is None:
- return None
- if not env.directives['annotation_typing'] or annotation.analyse_as_type(env) is None:
- annotation = annotation.analyse_types(env)
- return annotation
-
def analyse_annotations(self, env):
for arg in self.args:
if arg.annotation:
- arg.annotation = self.analyse_annotation(env, arg.annotation)
+ arg.annotation = arg.annotation.analyse_types(env)
+ if self.return_type_annotation:
+ self.return_type_annotation = self.return_type_annotation.analyse_types(env)
def align_argument_type(self, env, arg):
# @cython.locals()
@@ -1718,6 +1896,9 @@ class FuncDefNode(StatNode, BlockNode):
error(type_node.pos, "Previous declaration here")
else:
arg.type = other_type
+ if arg.type.is_complex:
+ # utility code for complex types is special-cased and also important to ensure that it's run
+ arg.type.create_declaration_utility_code(env)
return arg
def need_gil_acquisition(self, lenv):
@@ -1728,7 +1909,8 @@ class FuncDefNode(StatNode, BlockNode):
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
if self.needs_closure:
- lenv = ClosureScope(name=self.entry.name,
+ cls = GeneratorExpressionScope if self.is_generator_expression else ClosureScope
+ lenv = cls(name=self.entry.name,
outer_scope=genv,
parent_scope=env,
scope_name=self.entry.cname)
@@ -1749,8 +1931,6 @@ class FuncDefNode(StatNode, BlockNode):
def generate_function_definitions(self, env, code):
from . import Buffer
- if self.return_type.is_memoryviewslice:
- from . import MemoryView
lenv = self.local_scope
if lenv.is_closure_scope and not lenv.is_passthrough:
@@ -1778,6 +1958,8 @@ class FuncDefNode(StatNode, BlockNode):
profile = code.globalstate.directives['profile']
linetrace = code.globalstate.directives['linetrace']
if profile or linetrace:
+ if linetrace:
+ code.use_fast_gil_utility_code()
code.globalstate.use_utility_code(
UtilityCode.load_cached("Profile", "Profile.c"))
@@ -1823,14 +2005,20 @@ class FuncDefNode(StatNode, BlockNode):
# Initialize the return variable __pyx_r
init = ""
- if not self.return_type.is_void:
- if self.return_type.is_pyobject:
+ return_type = self.return_type
+ if return_type.is_cv_qualified and return_type.is_const:
+ # Within this function body, we want to be able to set this
+ # variable, even though the function itself needs to return
+ # a const version
+ return_type = return_type.cv_base_type
+ if not return_type.is_void:
+ if return_type.is_pyobject:
init = " = NULL"
- elif self.return_type.is_memoryviewslice:
- init = ' = ' + MemoryView.memslice_entry_init
+ elif return_type.is_memoryviewslice:
+ init = ' = ' + return_type.literal_code(return_type.default_value)
code.putln("%s%s;" % (
- self.return_type.declaration_code(Naming.retval_cname),
+ return_type.declaration_code(Naming.retval_cname),
init))
tempvardecl_code = code.insertion_point()
@@ -1862,11 +2050,12 @@ class FuncDefNode(StatNode, BlockNode):
use_refnanny = not lenv.nogil or lenv.has_with_gil_block
+ gilstate_decl = None
if acquire_gil or acquire_gil_for_var_decls_only:
code.put_ensure_gil()
code.funcstate.gil_owned = True
- elif lenv.nogil and lenv.has_with_gil_block:
- code.declare_gilstate()
+ else:
+ gilstate_decl = code.insertion_point()
if profile or linetrace:
if not self.is_generator:
@@ -1908,7 +2097,7 @@ class FuncDefNode(StatNode, BlockNode):
code.put_incref("Py_None", py_object_type)
code.putln(code.error_goto(self.pos))
code.putln("} else {")
- code.put_gotref(Naming.cur_scope_cname)
+ code.put_gotref(Naming.cur_scope_cname, lenv.scope_class.type)
code.putln("}")
# Note that it is unsafe to decref the scope at this point.
if self.needs_outer_scope:
@@ -1927,7 +2116,7 @@ class FuncDefNode(StatNode, BlockNode):
elif self.needs_closure:
# inner closures own a reference to their outer parent
code.put_incref(outer_scope_cname, cenv.scope_class.type)
- code.put_giveref(outer_scope_cname)
+ code.put_giveref(outer_scope_cname, cenv.scope_class.type)
# ----- Trace function call
if profile or linetrace:
# this looks a bit late, but if we don't get here due to a
@@ -1945,20 +2134,19 @@ class FuncDefNode(StatNode, BlockNode):
self.generate_argument_parsing_code(env, code)
# If an argument is assigned to in the body, we must
# incref it to properly keep track of refcounts.
- is_cdef = isinstance(self, CFuncDefNode)
for entry in lenv.arg_entries:
- if entry.type.is_pyobject:
- if (acquire_gil or len(entry.cf_assignments) > 1) and not entry.in_closure:
+ if not entry.type.is_memoryviewslice:
+ if (acquire_gil or entry.cf_is_reassigned) and not entry.in_closure:
code.put_var_incref(entry)
-
# Note: defaults are always incref-ed. For def functions, we
# we acquire arguments from object conversion, so we have
# new references. If we are a cdef function, we need to
# incref our arguments
- elif is_cdef and entry.type.is_memoryviewslice and len(entry.cf_assignments) > 1:
- code.put_incref_memoryviewslice(entry.cname, have_gil=code.funcstate.gil_owned)
+ elif entry.cf_is_reassigned and not entry.in_closure:
+ code.put_var_incref_memoryviewslice(entry,
+ have_gil=code.funcstate.gil_owned)
for entry in lenv.var_entries:
- if entry.is_arg and len(entry.cf_assignments) > 1 and not entry.in_closure:
+ if entry.is_arg and entry.cf_is_reassigned and not entry.in_closure:
if entry.xdecref_cleanup:
code.put_var_xincref(entry)
else:
@@ -1989,27 +2177,45 @@ class FuncDefNode(StatNode, BlockNode):
code.putln("")
code.putln("/* function exit code */")
+ gil_owned = {
+ 'success': code.funcstate.gil_owned,
+ 'error': code.funcstate.gil_owned,
+ 'gil_state_declared': gilstate_decl is None,
+ }
+ def assure_gil(code_path, code=code):
+ if not gil_owned[code_path]:
+ if not gil_owned['gil_state_declared']:
+ gilstate_decl.declare_gilstate()
+ gil_owned['gil_state_declared'] = True
+ code.put_ensure_gil(declare_gilstate=False)
+ gil_owned[code_path] = True
+
# ----- Default return value
+ return_type = self.return_type
if not self.body.is_terminator:
- if self.return_type.is_pyobject:
- #if self.return_type.is_extension_type:
+ if return_type.is_pyobject:
+ #if return_type.is_extension_type:
# lhs = "(PyObject *)%s" % Naming.retval_cname
#else:
lhs = Naming.retval_cname
- code.put_init_to_py_none(lhs, self.return_type)
- else:
- val = self.return_type.default_value
+ assure_gil('success')
+ code.put_init_to_py_none(lhs, return_type)
+ elif not return_type.is_memoryviewslice:
+ # memory view structs receive their default value on initialisation
+ val = return_type.default_value
if val:
code.putln("%s = %s;" % (Naming.retval_cname, val))
- elif not self.return_type.is_void:
+ elif not return_type.is_void:
code.putln("__Pyx_pretend_to_initialize(&%s);" % Naming.retval_cname)
+
# ----- Error cleanup
- if code.error_label in code.labels_used:
+ if code.label_used(code.error_label):
if not self.body.is_terminator:
code.put_goto(code.return_label)
code.put_label(code.error_label)
for cname, type in code.funcstate.all_managed_temps():
- code.put_xdecref(cname, type, have_gil=not lenv.nogil)
+ assure_gil('error')
+ code.put_xdecref(cname, type, have_gil=gil_owned['error'])
# Clean up buffers -- this calls a Python function
# so need to save and restore error state
@@ -2019,6 +2225,7 @@ class FuncDefNode(StatNode, BlockNode):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln("{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;")
code.putln("__Pyx_PyThreadState_declare")
+ assure_gil('error')
code.putln("__Pyx_PyThreadState_assign")
code.putln("__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);")
for entry in used_buffer_entries:
@@ -2026,7 +2233,8 @@ class FuncDefNode(StatNode, BlockNode):
#code.putln("%s = 0;" % entry.cname)
code.putln("__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}")
- if self.return_type.is_memoryviewslice:
+ if return_type.is_memoryviewslice:
+ from . import MemoryView
MemoryView.put_init_entry(Naming.retval_cname, code)
err_val = Naming.retval_cname
else:
@@ -2038,104 +2246,136 @@ class FuncDefNode(StatNode, BlockNode):
# code.globalstate.use_utility_code(get_exception_tuple_utility_code)
# code.put_trace_exception()
- if lenv.nogil and not lenv.has_with_gil_block:
- code.putln("{")
- code.put_ensure_gil()
-
+ assure_gil('error')
+ if code.funcstate.error_without_exception:
+ tempvardecl_code.putln(
+ "int %s = 0; /* StopIteration */" % Naming.error_without_exception_cname
+ )
+ code.putln("if (!%s) {" % Naming.error_without_exception_cname)
code.put_add_traceback(self.entry.qualified_name)
-
- if lenv.nogil and not lenv.has_with_gil_block:
- code.put_release_ensured_gil()
+ if code.funcstate.error_without_exception:
code.putln("}")
else:
warning(self.entry.pos,
"Unraisable exception in function '%s'." %
self.entry.qualified_name, 0)
- code.put_unraisable(self.entry.qualified_name, lenv.nogil)
- default_retval = self.return_type.default_value
+ assure_gil('error')
+ code.put_unraisable(self.entry.qualified_name)
+ default_retval = return_type.default_value
if err_val is None and default_retval:
err_val = default_retval
if err_val is not None:
if err_val != Naming.retval_cname:
code.putln("%s = %s;" % (Naming.retval_cname, err_val))
- elif not self.return_type.is_void:
+ elif not return_type.is_void:
code.putln("__Pyx_pretend_to_initialize(&%s);" % Naming.retval_cname)
if is_getbuffer_slot:
+ assure_gil('error')
self.getbuffer_error_cleanup(code)
+ def align_error_path_gil_to_success_path(code=code.insertion_point()):
+ # align error and success GIL state when both join
+ if gil_owned['success']:
+ assure_gil('error', code=code)
+ elif gil_owned['error']:
+ code.put_release_ensured_gil()
+ gil_owned['error'] = False
+ assert gil_owned['error'] == gil_owned['success'], "%s: error path %s != success path %s" % (
+ self.pos, gil_owned['error'], gil_owned['success'])
+
# If we are using the non-error cleanup section we should
# jump past it if we have an error. The if-test below determine
# whether this section is used.
- if buffers_present or is_getbuffer_slot or self.return_type.is_memoryviewslice:
+ if buffers_present or is_getbuffer_slot or return_type.is_memoryviewslice:
+ # In the buffer cases, we already called assure_gil('error') and own the GIL.
+ assert gil_owned['error'] or return_type.is_memoryviewslice
code.put_goto(code.return_from_error_cleanup_label)
+ else:
+ # Adapt the GIL state to the success path right now.
+ align_error_path_gil_to_success_path()
+ else:
+ # No error path, no need to adapt the GIL state.
+ def align_error_path_gil_to_success_path(): pass
# ----- Non-error return cleanup
- code.put_label(code.return_label)
- for entry in used_buffer_entries:
- Buffer.put_release_buffer_code(code, entry)
- if is_getbuffer_slot:
- self.getbuffer_normal_cleanup(code)
+ if code.label_used(code.return_label) or not code.label_used(code.error_label):
+ code.put_label(code.return_label)
- if self.return_type.is_memoryviewslice:
- # See if our return value is uninitialized on non-error return
- # from . import MemoryView
- # MemoryView.err_if_nogil_initialized_check(self.pos, env)
- cond = code.unlikely(self.return_type.error_condition(Naming.retval_cname))
- code.putln(
- 'if (%s) {' % cond)
- if env.nogil:
- code.put_ensure_gil()
- code.putln(
- 'PyErr_SetString(PyExc_TypeError, "Memoryview return value is not initialized");')
- if env.nogil:
- code.put_release_ensured_gil()
- code.putln(
- '}')
+ for entry in used_buffer_entries:
+ assure_gil('success')
+ Buffer.put_release_buffer_code(code, entry)
+ if is_getbuffer_slot:
+ assure_gil('success')
+ self.getbuffer_normal_cleanup(code)
+
+ if return_type.is_memoryviewslice:
+ # See if our return value is uninitialized on non-error return
+ # from . import MemoryView
+ # MemoryView.err_if_nogil_initialized_check(self.pos, env)
+ cond = code.unlikely(return_type.error_condition(Naming.retval_cname))
+ code.putln(
+ 'if (%s) {' % cond)
+ if not gil_owned['success']:
+ code.put_ensure_gil()
+ code.putln(
+ 'PyErr_SetString(PyExc_TypeError, "Memoryview return value is not initialized");')
+ if not gil_owned['success']:
+ code.put_release_ensured_gil()
+ code.putln(
+ '}')
# ----- Return cleanup for both error and no-error return
- code.put_label(code.return_from_error_cleanup_label)
+ if code.label_used(code.return_from_error_cleanup_label):
+ align_error_path_gil_to_success_path()
+ code.put_label(code.return_from_error_cleanup_label)
for entry in lenv.var_entries:
if not entry.used or entry.in_closure:
continue
- if entry.type.is_memoryviewslice:
- code.put_xdecref_memoryviewslice(entry.cname, have_gil=not lenv.nogil)
- elif entry.type.is_pyobject:
- if not entry.is_arg or len(entry.cf_assignments) > 1:
- if entry.xdecref_cleanup:
- code.put_var_xdecref(entry)
- else:
- code.put_var_decref(entry)
+ if entry.type.is_pyobject:
+ if entry.is_arg and not entry.cf_is_reassigned:
+ continue
+ if entry.type.needs_refcounting:
+ assure_gil('success')
+ # FIXME ideally use entry.xdecref_cleanup but this currently isn't reliable
+ code.put_var_xdecref(entry, have_gil=gil_owned['success'])
# Decref any increfed args
for entry in lenv.arg_entries:
- if entry.type.is_pyobject:
- if (acquire_gil or len(entry.cf_assignments) > 1) and not entry.in_closure:
- code.put_var_decref(entry)
- elif (entry.type.is_memoryviewslice and
- (not is_cdef or len(entry.cf_assignments) > 1)):
+ if entry.in_closure:
+ continue
+ if entry.type.is_memoryviewslice:
# decref slices of def functions and acquired slices from cdef
# functions, but not borrowed slices from cdef functions.
- code.put_xdecref_memoryviewslice(entry.cname,
- have_gil=not lenv.nogil)
+ if not entry.cf_is_reassigned:
+ continue
+ else:
+ if not acquire_gil and not entry.cf_is_reassigned:
+ continue
+ if entry.type.needs_refcounting:
+ assure_gil('success')
+
+ # FIXME use entry.xdecref_cleanup - del arg seems to be the problem
+ code.put_var_xdecref(entry, have_gil=gil_owned['success'])
if self.needs_closure:
+ assure_gil('success')
code.put_decref(Naming.cur_scope_cname, lenv.scope_class.type)
# ----- Return
# This code is duplicated in ModuleNode.generate_module_init_func
if not lenv.nogil:
- default_retval = self.return_type.default_value
+ default_retval = return_type.default_value
err_val = self.error_value()
if err_val is None and default_retval:
err_val = default_retval # FIXME: why is err_val not used?
- if self.return_type.is_pyobject:
- code.put_xgiveref(self.return_type.as_pyobject(Naming.retval_cname))
+ code.put_xgiveref(Naming.retval_cname, return_type)
if self.entry.is_special and self.entry.name == "__hash__":
# Returning -1 for __hash__ is supposed to signal an error
# We do as Python instances and coerce -1 into -2.
+ assure_gil('success') # in special methods, the GIL is owned anyway
code.putln("if (unlikely(%s == -1) && !PyErr_Occurred()) %s = -2;" % (
Naming.retval_cname, Naming.retval_cname))
@@ -2143,23 +2383,22 @@ class FuncDefNode(StatNode, BlockNode):
code.funcstate.can_trace = False
if not self.is_generator:
# generators are traced when iterated, not at creation
- if self.return_type.is_pyobject:
+ if return_type.is_pyobject:
code.put_trace_return(
- Naming.retval_cname, nogil=not code.funcstate.gil_owned)
+ Naming.retval_cname, nogil=not gil_owned['success'])
else:
code.put_trace_return(
- "Py_None", nogil=not code.funcstate.gil_owned)
+ "Py_None", nogil=not gil_owned['success'])
- if not lenv.nogil:
- # GIL holding function
- code.put_finish_refcount_context()
+ if use_refnanny:
+ code.put_finish_refcount_context(nogil=not gil_owned['success'])
- if acquire_gil or (lenv.nogil and lenv.has_with_gil_block):
+ if acquire_gil or (lenv.nogil and gil_owned['success']):
# release the GIL (note that with-gil blocks acquire it on exit in their EnsureGILNode)
code.put_release_ensured_gil()
code.funcstate.gil_owned = False
- if not self.return_type.is_void:
+ if not return_type.is_void:
code.putln("return %s;" % Naming.retval_cname)
code.putln("}")
@@ -2194,11 +2433,11 @@ class FuncDefNode(StatNode, BlockNode):
typeptr_cname = arg.type.typeptr_cname
arg_code = "((PyObject *)%s)" % arg.entry.cname
code.putln(
- 'if (unlikely(!__Pyx_ArgTypeTest(%s, %s, %d, "%s", %s))) %s' % (
+ 'if (unlikely(!__Pyx_ArgTypeTest(%s, %s, %d, %s, %s))) %s' % (
arg_code,
typeptr_cname,
arg.accept_none,
- arg.name,
+ arg.name_cstring,
arg.type.is_builtin_type and arg.type.require_exact,
code.error_goto(arg.pos)))
else:
@@ -2212,8 +2451,8 @@ class FuncDefNode(StatNode, BlockNode):
cname = arg.entry.cname
code.putln('if (unlikely(((PyObject *)%s) == Py_None)) {' % cname)
- code.putln('''PyErr_Format(PyExc_TypeError, "Argument '%%.%ds' must not be None", "%s"); %s''' % (
- max(200, len(arg.name)), arg.name,
+ code.putln('''PyErr_Format(PyExc_TypeError, "Argument '%%.%ds' must not be None", %s); %s''' % (
+ max(200, len(arg.name_cstring)), arg.name_cstring,
code.error_goto(arg.pos)))
code.putln('}')
@@ -2223,9 +2462,11 @@ class FuncDefNode(StatNode, BlockNode):
def generate_execution_code(self, code):
code.mark_pos(self.pos)
# Evaluate and store argument default values
- for arg in self.args:
- if not arg.is_dynamic:
- arg.generate_assignment_code(code)
+ # skip this for wrappers since it's done by wrapped function
+ if not self.is_wrapper:
+ for arg in self.args:
+ if not arg.is_dynamic:
+ arg.generate_assignment_code(code)
#
# Special code for the __getbuffer__ function
@@ -2249,7 +2490,7 @@ class FuncDefNode(StatNode, BlockNode):
def getbuffer_check(self, code):
py_buffer, _ = self._get_py_buffer_info()
view = py_buffer.cname
- code.putln("if (%s == NULL) {" % view)
+ code.putln("if (unlikely(%s == NULL)) {" % view)
code.putln("PyErr_SetString(PyExc_BufferError, "
"\"PyObject_GetBuffer: view==NULL argument is obsolete\");")
code.putln("return -1;")
@@ -2260,7 +2501,7 @@ class FuncDefNode(StatNode, BlockNode):
view = py_buffer.cname
if obj_type and obj_type.is_pyobject:
code.put_init_to_py_none("%s->obj" % view, obj_type)
- code.put_giveref("%s->obj" % view) # Do not refnanny object within structs
+ code.put_giveref("%s->obj" % view, obj_type) # Do not refnanny object within structs
else:
code.putln("%s->obj = NULL;" % view)
@@ -2269,7 +2510,7 @@ class FuncDefNode(StatNode, BlockNode):
view = py_buffer.cname
if obj_type and obj_type.is_pyobject:
code.putln("if (%s->obj != NULL) {" % view)
- code.put_gotref("%s->obj" % view)
+ code.put_gotref("%s->obj" % view, obj_type)
code.put_decref_clear("%s->obj" % view, obj_type)
code.putln("}")
else:
@@ -2280,7 +2521,7 @@ class FuncDefNode(StatNode, BlockNode):
view = py_buffer.cname
if obj_type and obj_type.is_pyobject:
code.putln("if (%s->obj == Py_None) {" % view)
- code.put_gotref("%s->obj" % view)
+ code.put_gotref("%s->obj" % view, obj_type)
code.put_decref_clear("%s->obj" % view, obj_type)
code.putln("}")
@@ -2288,7 +2529,7 @@ class FuncDefNode(StatNode, BlockNode):
if not self.entry.is_special:
return None
name = self.entry.name
- slot = TypeSlots.method_name_to_slot.get(name)
+ slot = TypeSlots.get_slot_table(self.local_scope.directives).get_slot_by_method_name(name)
if not slot:
return None
if name == '__long__' and not self.entry.scope.lookup_here('__int__'):
@@ -2322,7 +2563,8 @@ class CFuncDefNode(FuncDefNode):
# is_static_method whether this is a static method
# is_c_class_method whether this is a cclass method
- child_attrs = ["base_type", "declarator", "body", "py_func_stat"]
+ child_attrs = ["base_type", "declarator", "body", "decorators", "py_func_stat"]
+ outer_attrs = ["decorators", "py_func_stat"]
inline_in_pxd = False
decorators = None
@@ -2336,6 +2578,9 @@ class CFuncDefNode(FuncDefNode):
def unqualified_name(self):
return self.entry.name
+ def declared_name(self):
+ return self.declarator.declared_name()
+
@property
def code_object(self):
# share the CodeObject with the cpdef wrapper (if available)
@@ -2356,20 +2601,20 @@ class CFuncDefNode(FuncDefNode):
self.is_static_method = 'staticmethod' in env.directives and not env.lookup_here('staticmethod')
# The 2 here is because we need both function and argument names.
if isinstance(self.declarator, CFuncDeclaratorNode):
- name_declarator, type = self.declarator.analyse(
+ name_declarator, typ = self.declarator.analyse(
base_type, env, nonempty=2 * (self.body is not None),
directive_locals=self.directive_locals, visibility=self.visibility)
else:
- name_declarator, type = self.declarator.analyse(
+ name_declarator, typ = self.declarator.analyse(
base_type, env, nonempty=2 * (self.body is not None), visibility=self.visibility)
- if not type.is_cfunction:
+ if not typ.is_cfunction:
error(self.pos, "Suite attached to non-function declaration")
# Remember the actual type according to the function header
# written here, because the type in the symbol table entry
# may be different if we're overriding a C method inherited
# from the base type of an extension type.
- self.type = type
- type.is_overridable = self.overridable
+ self.type = typ
+ typ.is_overridable = self.overridable
declarator = self.declarator
while not hasattr(declarator, 'args'):
declarator = declarator.base
@@ -2382,11 +2627,18 @@ class CFuncDefNode(FuncDefNode):
error(self.cfunc_declarator.pos,
"Function with optional arguments may not be declared public or api")
- if type.exception_check == '+' and self.visibility != 'extern':
- warning(self.cfunc_declarator.pos,
+ if typ.exception_check == '+' and self.visibility != 'extern':
+ if typ.exception_value and typ.exception_value.is_name:
+ # it really is impossible to reason about what the user wants to happen
+ # if they've specified a C++ exception translation function. Therefore,
+ # raise an error.
+ error(self.pos,
"Only extern functions can throw C++ exceptions.")
+ else:
+ warning(self.pos,
+ "Only extern functions can throw C++ exceptions.", 2)
- for formal_arg, type_arg in zip(self.args, type.args):
+ for formal_arg, type_arg in zip(self.args, typ.args):
self.align_argument_type(env, type_arg)
formal_arg.type = type_arg.type
formal_arg.name = type_arg.name
@@ -2407,20 +2659,21 @@ class CFuncDefNode(FuncDefNode):
elif 'inline' in self.modifiers:
warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
- self._validate_type_visibility(type.return_type, self.pos, env)
+ self._validate_type_visibility(typ.return_type, self.pos, env)
name = name_declarator.name
cname = name_declarator.cname
- type.is_const_method = self.is_const_method
- type.is_static_method = self.is_static_method
+ typ.is_const_method = self.is_const_method
+ typ.is_static_method = self.is_static_method
+
self.entry = env.declare_cfunction(
- name, type, self.pos,
+ name, typ, self.pos,
cname=cname, visibility=self.visibility, api=self.api,
defining=self.body is not None, modifiers=self.modifiers,
overridable=self.overridable)
self.entry.inline_func_in_pxd = self.inline_in_pxd
- self.return_type = type.return_type
+ self.return_type = typ.return_type
if self.return_type.is_array and self.visibility != 'extern':
error(self.pos, "Function cannot return an array")
if self.return_type.is_cpp_class:
@@ -2435,38 +2688,45 @@ class CFuncDefNode(FuncDefNode):
self.create_local_scope(env)
def declare_cpdef_wrapper(self, env):
- if self.overridable:
- if self.is_static_method:
- # TODO(robertwb): Finish this up, perhaps via more function refactoring.
- error(self.pos, "static cpdef methods not yet supported")
- name = self.entry.name
- py_func_body = self.call_self_node(is_module_scope=env.is_module_scope)
- if self.is_static_method:
- from .ExprNodes import NameNode
- decorators = [DecoratorNode(self.pos, decorator=NameNode(self.pos, name='staticmethod'))]
- decorators[0].decorator.analyse_types(env)
+ if not self.overridable:
+ return
+ if self.is_static_method:
+ # TODO(robertwb): Finish this up, perhaps via more function refactoring.
+ error(self.pos, "static cpdef methods not yet supported")
+
+ name = self.entry.name
+ py_func_body = self.call_self_node(is_module_scope=env.is_module_scope)
+ if self.is_static_method:
+ from .ExprNodes import NameNode
+ decorators = [DecoratorNode(self.pos, decorator=NameNode(self.pos, name=EncodedString('staticmethod')))]
+ decorators[0].decorator.analyse_types(env)
+ else:
+ decorators = []
+ self.py_func = DefNode(pos=self.pos,
+ name=self.entry.name,
+ args=self.args,
+ star_arg=None,
+ starstar_arg=None,
+ doc=self.doc,
+ body=py_func_body,
+ decorators=decorators,
+ is_wrapper=1)
+ self.py_func.is_module_scope = env.is_module_scope
+ self.py_func.analyse_declarations(env)
+ self.py_func.entry.is_overridable = True
+ self.py_func_stat = StatListNode(self.pos, stats=[self.py_func])
+ self.py_func.type = PyrexTypes.py_object_type
+ self.entry.as_variable = self.py_func.entry
+ self.entry.used = self.entry.as_variable.used = True
+ # Reset scope entry the above cfunction
+ env.entries[name] = self.entry
+ if (not self.entry.is_final_cmethod and
+ (not env.is_module_scope or Options.lookup_module_cpdef)):
+ if self.override:
+ # This is a hack: we shouldn't create the wrapper twice, but we do for fused functions.
+ assert self.entry.is_fused_specialized # should not happen for non-fused cpdef functions
+ self.override.py_func = self.py_func
else:
- decorators = []
- self.py_func = DefNode(pos=self.pos,
- name=self.entry.name,
- args=self.args,
- star_arg=None,
- starstar_arg=None,
- doc=self.doc,
- body=py_func_body,
- decorators=decorators,
- is_wrapper=1)
- self.py_func.is_module_scope = env.is_module_scope
- self.py_func.analyse_declarations(env)
- self.py_func.entry.is_overridable = True
- self.py_func_stat = StatListNode(self.pos, stats=[self.py_func])
- self.py_func.type = PyrexTypes.py_object_type
- self.entry.as_variable = self.py_func.entry
- self.entry.used = self.entry.as_variable.used = True
- # Reset scope entry the above cfunction
- env.entries[name] = self.entry
- if (not self.entry.is_final_cmethod and
- (not env.is_module_scope or Options.lookup_module_cpdef)):
self.override = OverrideCheckNode(self.pos, py_func=self.py_func)
self.body = StatListNode(self.pos, stats=[self.override, self.body])
@@ -2585,7 +2845,7 @@ class CFuncDefNode(FuncDefNode):
header = self.return_type.declaration_code(entity, dll_linkage=dll_linkage)
#print (storage_class, modifiers, header)
- needs_proto = self.is_c_class_method
+ needs_proto = self.is_c_class_method or self.entry.is_cproperty
if self.template_declaration:
if needs_proto:
code.globalstate.parts['module_declarations'].putln(self.template_declaration)
@@ -2638,7 +2898,7 @@ class CFuncDefNode(FuncDefNode):
if entry.in_closure and not arg.default:
code.putln('%s = %s;' % (entry.cname, entry.original_cname))
if entry.type.is_memoryviewslice:
- code.put_incref_memoryviewslice(entry.cname, have_gil=True)
+ entry.type.generate_incref_memoryviewslice(code, entry.cname, True)
else:
code.put_var_incref(entry)
code.put_var_giveref(entry)
@@ -2670,7 +2930,6 @@ class CFuncDefNode(FuncDefNode):
if self.return_type.is_pyobject:
return "0"
else:
- #return None
return self.entry.type.exception_value
def caller_will_check_exceptions(self):
@@ -2769,7 +3028,7 @@ class DefNode(FuncDefNode):
self_in_stararg = 0
py_cfunc_node = None
requires_classobj = False
- defaults_struct = None # Dynamic kwrds structure name
+    defaults_struct = None  # Dynamic kwds structure name
doc = None
fused_py_func = False
@@ -2782,14 +3041,17 @@ class DefNode(FuncDefNode):
def __init__(self, pos, **kwds):
FuncDefNode.__init__(self, pos, **kwds)
- k = rk = r = 0
+ p = k = rk = r = 0
for arg in self.args:
+ if arg.pos_only:
+ p += 1
if arg.kw_only:
k += 1
if not arg.default:
rk += 1
if not arg.default:
r += 1
+ self.num_posonly_args = p
self.num_kwonly_args = k
self.num_required_kw_args = rk
self.num_required_args = r
@@ -2887,8 +3149,18 @@ class DefNode(FuncDefNode):
# staticmethod() was overridden - not much we can do here ...
self.is_staticmethod = False
- if self.name == '__new__' and env.is_py_class_scope:
- self.is_staticmethod = 1
+ if env.is_py_class_scope or env.is_c_class_scope:
+ if self.name == '__new__' and env.is_py_class_scope:
+ self.is_staticmethod = True
+ elif self.name == '__init_subclass__' and env.is_c_class_scope:
+ error(self.pos, "'__init_subclass__' is not supported by extension class")
+ elif self.name in IMPLICIT_CLASSMETHODS and not self.is_classmethod:
+ self.is_classmethod = True
+ # TODO: remove the need to generate a real decorator here, is_classmethod=True should suffice.
+ from .ExprNodes import NameNode
+ self.decorators = self.decorators or []
+ self.decorators.insert(0, DecoratorNode(
+ self.pos, decorator=NameNode(self.pos, name=EncodedString('classmethod'))))
self.analyse_argument_types(env)
if self.name == '<lambda>':
@@ -2901,7 +3173,7 @@ class DefNode(FuncDefNode):
# if a signature annotation provides a more specific return object type, use it
if self.return_type is py_object_type and self.return_type_annotation:
if env.directives['annotation_typing'] and not self.entry.is_special:
- _, return_type = analyse_type_annotation(self.return_type_annotation, env)
+ _, return_type = self.return_type_annotation.analyse_type_annotation(env)
if return_type and return_type.is_pyobject:
self.return_type = return_type
@@ -2941,9 +3213,6 @@ class DefNode(FuncDefNode):
arg.name = name_declarator.name
arg.type = type
- if type.is_fused:
- self.has_fused_arguments = True
-
self.align_argument_type(env, arg)
if name_declarator and name_declarator.cname:
error(self.pos, "Python function argument cannot have C name specification")
@@ -2968,12 +3237,15 @@ class DefNode(FuncDefNode):
else:
# probably just a plain 'object'
arg.accept_none = True
- else:
- arg.accept_none = True # won't be used, but must be there
+ elif not arg.type.is_error:
+ arg.accept_none = True # won't be used, but must be there
if arg.not_none:
error(arg.pos, "Only Python type arguments can have 'not None'")
if arg.or_none:
error(arg.pos, "Only Python type arguments can have 'or None'")
+
+ if arg.type.is_fused:
+ self.has_fused_arguments = True
env.fused_to_specific = f2s
if has_np_pythran(env):
@@ -2986,8 +3258,10 @@ class DefNode(FuncDefNode):
if self.decorators:
error(self.pos, "special functions of cdef classes cannot have decorators")
self.entry.trivial_signature = len(self.args) == 1 and not (self.star_arg or self.starstar_arg)
- elif not env.directives['always_allow_keywords'] and not (self.star_arg or self.starstar_arg):
- # Use the simpler calling signature for zero- and one-argument functions.
+ elif not (self.star_arg or self.starstar_arg) and (
+ not env.directives['always_allow_keywords']
+ or all([arg.pos_only for arg in self.args])):
+ # Use the simpler calling signature for zero- and one-argument pos-only functions.
if self.entry.signature is TypeSlots.pyfunction_signature:
if len(self.args) == 0:
self.entry.signature = TypeSlots.pyfunction_noargs
@@ -3002,18 +3276,19 @@ class DefNode(FuncDefNode):
self.entry.signature = TypeSlots.ibinaryfunc
sig = self.entry.signature
- nfixed = sig.num_fixed_args()
+ nfixed = sig.max_num_fixed_args()
+ min_nfixed = sig.min_num_fixed_args()
if (sig is TypeSlots.pymethod_signature and nfixed == 1
and len(self.args) == 0 and self.star_arg):
# this is the only case where a diverging number of
# arguments is not an error - when we have no explicit
# 'self' parameter as in method(*args)
- sig = self.entry.signature = TypeSlots.pyfunction_signature # self is not 'really' used
+ sig = self.entry.signature = TypeSlots.pyfunction_signature # self is not 'really' used
self.self_in_stararg = 1
- nfixed = 0
+ nfixed = min_nfixed = 0
if self.is_staticmethod and env.is_c_class_scope:
- nfixed = 0
+ nfixed = min_nfixed = 0
self.self_in_stararg = True # FIXME: why for staticmethods?
self.entry.signature = sig = copy.copy(sig)
@@ -3028,6 +3303,8 @@ class DefNode(FuncDefNode):
for i in range(min(nfixed, len(self.args))):
arg = self.args[i]
arg.is_generic = 0
+ if i >= min_nfixed:
+ arg.is_special_method_optional = True
if sig.is_self_arg(i) and not self.is_staticmethod:
if self.is_classmethod:
arg.is_type_arg = 1
@@ -3043,12 +3320,8 @@ class DefNode(FuncDefNode):
arg.needs_type_test = 1
else:
arg.needs_conversion = 1
- if arg.needs_conversion:
- arg.hdr_cname = Naming.arg_prefix + arg.name
- else:
- arg.hdr_cname = Naming.var_prefix + arg.name
- if nfixed > len(self.args):
+ if min_nfixed > len(self.args):
self.bad_signature()
return
elif nfixed < len(self.args):
@@ -3058,11 +3331,38 @@ class DefNode(FuncDefNode):
if arg.is_generic and (arg.type.is_extension_type or arg.type.is_builtin_type):
arg.needs_type_test = 1
+ # Decide whether to use METH_FASTCALL
+ # 1. If we use METH_NOARGS or METH_O, keep that. We can only change
+ # METH_VARARGS to METH_FASTCALL
+    # 2. Special methods like __call__ always use the METH_VARARGS
+ # calling convention
+ mf = sig.method_flags()
+ if mf and TypeSlots.method_varargs in mf and not self.entry.is_special:
+ # 3. If the function uses the full args tuple, it's more
+ # efficient to use METH_VARARGS. This happens when the function
+ # takes *args but no other positional arguments (apart from
+ # possibly self). We don't do the analogous check for keyword
+ # arguments since the kwargs dict is copied anyway.
+ if self.star_arg:
+ uses_args_tuple = True
+ for arg in self.args:
+ if (arg.is_generic and not arg.kw_only and
+ not arg.is_self_arg and not arg.is_type_arg):
+ # Other positional argument
+ uses_args_tuple = False
+ else:
+ uses_args_tuple = False
+
+ if not uses_args_tuple:
+ sig = self.entry.signature = sig.with_fastcall()
+
def bad_signature(self):
sig = self.entry.signature
- expected_str = "%d" % sig.num_fixed_args()
+ expected_str = "%d" % sig.min_num_fixed_args()
if sig.has_generic_args:
expected_str += " or more"
+ elif sig.optional_object_arg_count:
+ expected_str += " to %d" % sig.max_num_fixed_args()
name = self.name
if name.startswith("__") and name.endswith("__"):
desc = "Special method"
@@ -3083,16 +3383,16 @@ class DefNode(FuncDefNode):
entry = env.declare_pyfunction(name, self.pos, allow_redefine=not self.is_wrapper)
self.entry = entry
prefix = env.next_id(env.scope_prefix)
- self.entry.pyfunc_cname = Naming.pyfunc_prefix + prefix + name
+ self.entry.pyfunc_cname = punycodify_name(Naming.pyfunc_prefix + prefix + name)
if Options.docstrings:
entry.doc = embed_position(self.pos, self.doc)
- entry.doc_cname = Naming.funcdoc_prefix + prefix + name
+ entry.doc_cname = punycodify_name(Naming.funcdoc_prefix + prefix + name)
if entry.is_special:
if entry.name in TypeSlots.invisible or not entry.doc or (
entry.name in '__getattr__' and env.directives['fast_getattr']):
entry.wrapperbase_cname = None
else:
- entry.wrapperbase_cname = Naming.wrapperbase_prefix + prefix + name
+ entry.wrapperbase_cname = punycodify_name(Naming.wrapperbase_prefix + prefix + name)
else:
entry.doc = None
@@ -3135,8 +3435,6 @@ class DefNode(FuncDefNode):
self.local_scope.directives = env.directives
self.analyse_default_values(env)
self.analyse_annotations(env)
- if self.return_type_annotation:
- self.return_type_annotation = self.analyse_annotation(env, self.return_type_annotation)
if not self.needs_assignment_synthesis(env) and self.decorators:
for decorator in self.decorators[::-1]:
@@ -3237,8 +3535,20 @@ class DefNode(FuncDefNode):
# Move arguments into closure if required
def put_into_closure(entry):
if entry.in_closure:
- code.putln('%s = %s;' % (entry.cname, entry.original_cname))
- if entry.xdecref_cleanup:
+ if entry.type.is_array:
+ # This applies to generator expressions that iterate over C arrays (and need to
+ # capture them by value), under most other circumstances C array arguments are dropped to
+ # pointers so this copy isn't used
+ assert entry.type.size is not None
+ code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
+ code.putln("memcpy({0}, {1}, sizeof({0}));".format(entry.cname, entry.original_cname))
+ else:
+ code.putln('%s = %s;' % (entry.cname, entry.original_cname))
+ if entry.type.is_memoryviewslice:
+ # TODO - at some point reference count of memoryviews should
+ # genuinely be unified with PyObjects
+ entry.type.generate_incref_memoryviewslice(code, entry.cname, True)
+ elif entry.xdecref_cleanup:
# mostly applies to the starstar arg - this can sometimes be NULL
# so must be xincrefed instead
code.put_var_xincref(entry)
@@ -3260,10 +3570,11 @@ class DefNodeWrapper(FuncDefNode):
# DefNode python wrapper code generator
defnode = None
- target = None # Target DefNode
+ target = None # Target DefNode
def __init__(self, *args, **kwargs):
FuncDefNode.__init__(self, *args, **kwargs)
+ self.num_posonly_args = self.target.num_posonly_args
self.num_kwonly_args = self.target.num_kwonly_args
self.num_required_kw_args = self.target.num_required_kw_args
self.num_required_args = self.target.num_required_args
@@ -3274,8 +3585,8 @@ class DefNodeWrapper(FuncDefNode):
target_entry = self.target.entry
name = self.name
prefix = env.next_id(env.scope_prefix)
- target_entry.func_cname = Naming.pywrap_prefix + prefix + name
- target_entry.pymethdef_cname = Naming.pymethdef_prefix + prefix + name
+ target_entry.func_cname = punycodify_name(Naming.pywrap_prefix + prefix + name)
+ target_entry.pymethdef_cname = punycodify_name(Naming.pymethdef_prefix + prefix + name)
self.signature = target_entry.signature
@@ -3289,10 +3600,10 @@ class DefNodeWrapper(FuncDefNode):
for arg in self.args:
if not arg.type.is_pyobject:
if not arg.type.create_from_py_utility_code(env):
- pass # will fail later
+ pass # will fail later
elif arg.hdr_type and not arg.hdr_type.is_pyobject:
if not arg.hdr_type.create_to_py_utility_code(env):
- pass # will fail later
+ pass # will fail later
if self.starstar_arg and not self.starstar_arg.entry.cf_used:
# we will set the kwargs argument to NULL instead of a new dict
@@ -3319,7 +3630,13 @@ class DefNodeWrapper(FuncDefNode):
if self.signature.has_dummy_arg:
args.append(Naming.self_cname)
for arg in self.args:
- if arg.hdr_type and not (arg.type.is_memoryviewslice or
+ if arg.type.is_cpp_class:
+ # it's safe to move converted C++ types because they aren't
+ # used again afterwards
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("MoveIfSupported", "CppSupport.cpp"))
+ args.append("__PYX_STD_MOVE_IF_SUPPORTED(%s)" % arg.entry.cname)
+ elif arg.hdr_type and not (arg.type.is_memoryviewslice or
arg.type.is_struct or
arg.type.is_complex):
args.append(arg.type.cast_code(arg.entry.cname))
@@ -3363,7 +3680,7 @@ class DefNodeWrapper(FuncDefNode):
self.return_type.declaration_code(Naming.retval_cname),
retval_init))
code.put_declare_refcount_context()
- code.put_setup_refcount_context('%s (wrapper)' % self.name)
+ code.put_setup_refcount_context(EncodedString('%s (wrapper)' % self.name))
self.generate_argument_parsing_code(lenv, code)
self.generate_argument_type_tests(code)
@@ -3389,8 +3706,20 @@ class DefNodeWrapper(FuncDefNode):
# ----- Non-error return cleanup
code.put_label(code.return_label)
for entry in lenv.var_entries:
- if entry.is_arg and entry.type.is_pyobject:
- code.put_var_decref(entry)
+ if entry.is_arg:
+ # mainly captures the star/starstar args
+ if entry.xdecref_cleanup:
+ code.put_var_xdecref(entry)
+ else:
+ code.put_var_decref(entry)
+ for arg in self.args:
+ if not arg.type.is_pyobject:
+ # This captures anything that's been converted from a PyObject.
+ # Primarily memoryviews at the moment
+ if arg.entry.xdecref_cleanup:
+ code.put_var_xdecref(arg.entry)
+ else:
+ code.put_var_decref(arg.entry)
code.put_finish_refcount_context()
if not self.return_type.is_void:
@@ -3420,12 +3749,20 @@ class DefNodeWrapper(FuncDefNode):
entry = self.target.entry
if not entry.is_special and sig.method_flags() == [TypeSlots.method_noargs]:
arg_code_list.append("CYTHON_UNUSED PyObject *unused")
- if entry.scope.is_c_class_scope and entry.name == "__ipow__":
- arg_code_list.append("CYTHON_UNUSED PyObject *unused")
if sig.has_generic_args:
- arg_code_list.append(
- "PyObject *%s, PyObject *%s" % (
- Naming.args_cname, Naming.kwds_cname))
+ varargs_args = "PyObject *%s, PyObject *%s" % (
+ Naming.args_cname, Naming.kwds_cname)
+ if sig.use_fastcall:
+ fastcall_args = "PyObject *const *%s, Py_ssize_t %s, PyObject *%s" % (
+ Naming.args_cname, Naming.nargs_cname, Naming.kwds_cname)
+ arg_code_list.append(
+ "\n#if CYTHON_METH_FASTCALL\n%s\n#else\n%s\n#endif\n" % (
+ fastcall_args, varargs_args))
+ else:
+ arg_code_list.append(varargs_args)
+ if entry.is_special:
+ for n in range(len(self.args), sig.max_num_fixed_args()):
+ arg_code_list.append("CYTHON_UNUSED PyObject *unused_arg_%s" % n)
arg_code = ", ".join(arg_code_list)
# Prevent warning: unused function '__pyx_pw_5numpy_7ndarray_1__getbuffer__'
@@ -3436,7 +3773,7 @@ class DefNodeWrapper(FuncDefNode):
with_pymethdef = False
dc = self.return_type.declaration_code(entry.func_cname)
- header = "static %s%s(%s)" % (mf, dc, arg_code)
+ header = "%sstatic %s(%s)" % (mf, dc, arg_code)
code.putln("%s; /*proto*/" % header)
if proto_only:
@@ -3459,7 +3796,7 @@ class DefNodeWrapper(FuncDefNode):
docstr = docstr.as_utf8_string()
if not (entry.is_special and entry.name in ('__getbuffer__', '__releasebuffer__')):
- code.putln('static char %s[] = %s;' % (
+ code.putln('PyDoc_STRVAR(%s, %s);' % (
entry.doc_cname,
docstr.as_c_string_literal()))
@@ -3486,6 +3823,23 @@ class DefNodeWrapper(FuncDefNode):
if entry.is_arg:
code.put_var_declaration(entry)
+ # Assign nargs variable as len(args), but avoid an "unused" warning in the few cases where we don't need it.
+ if self.signature_has_generic_args():
+ nargs_code = "CYTHON_UNUSED const Py_ssize_t %s = PyTuple_GET_SIZE(%s);" % (
+ Naming.nargs_cname, Naming.args_cname)
+ if self.signature.use_fastcall:
+ code.putln("#if !CYTHON_METH_FASTCALL")
+ code.putln(nargs_code)
+ code.putln("#endif")
+ else:
+ code.putln(nargs_code)
+
+ # Array containing the values of keyword arguments when using METH_FASTCALL.
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("fastcall", "FunctionArguments.c"))
+ code.putln('CYTHON_UNUSED PyObject *const *%s = __Pyx_KwValues_%s(%s, %s);' % (
+ Naming.kwvalues_cname, self.signature.fastvar, Naming.args_cname, Naming.nargs_cname))
+
def generate_argument_parsing_code(self, env, code):
# Generate fast equivalent of PyArg_ParseTuple call for
# generic arguments, if any, including args/kwargs
@@ -3509,6 +3863,8 @@ class DefNodeWrapper(FuncDefNode):
elif not self.signature_has_nongeneric_args():
# func(*args) or func(**kw) or func(*args, **kw)
+ # possibly with a "self" argument but no other non-star
+ # arguments
self.generate_stararg_copy_code(code)
else:
@@ -3526,6 +3882,11 @@ class DefNodeWrapper(FuncDefNode):
code.put_var_xdecref_clear(self.starstar_arg.entry)
else:
code.put_var_decref_clear(self.starstar_arg.entry)
+ for arg in self.args:
+ if not arg.type.is_pyobject and arg.type.needs_refcounting:
+ # at the moment this just catches memoryviewslices, but in future
+ # other non-PyObject reference counted types might need cleanup
+ code.put_var_xdecref(arg.entry)
code.put_add_traceback(self.target.entry.qualified_name)
code.put_finish_refcount_context()
code.putln("return %s;" % self.error_value())
@@ -3544,10 +3905,9 @@ class DefNodeWrapper(FuncDefNode):
if not self.star_arg:
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
- code.putln("if (unlikely(PyTuple_GET_SIZE(%s) > 0)) {" %
- Naming.args_cname)
- code.put('__Pyx_RaiseArgtupleInvalid("%s", 1, 0, 0, PyTuple_GET_SIZE(%s)); return %s;' % (
- self.name, Naming.args_cname, self.error_value()))
+ code.putln("if (unlikely(%s > 0)) {" % Naming.nargs_cname)
+ code.put('__Pyx_RaiseArgtupleInvalid(%s, 1, 0, 0, %s); return %s;' % (
+ self.name.as_c_string_literal(), Naming.nargs_cname, self.error_value()))
code.putln("}")
if self.starstar_arg:
@@ -3556,69 +3916,66 @@ class DefNodeWrapper(FuncDefNode):
else:
kwarg_check = "%s" % Naming.kwds_cname
else:
- kwarg_check = "unlikely(%s) && unlikely(PyDict_Size(%s) > 0)" % (
- Naming.kwds_cname, Naming.kwds_cname)
+ kwarg_check = "unlikely(%s) && __Pyx_NumKwargs_%s(%s)" % (
+ Naming.kwds_cname, self.signature.fastvar, Naming.kwds_cname)
code.globalstate.use_utility_code(
UtilityCode.load_cached("KeywordStringCheck", "FunctionArguments.c"))
code.putln(
- "if (%s && unlikely(!__Pyx_CheckKeywordStrings(%s, \"%s\", %d))) return %s;" % (
- kwarg_check, Naming.kwds_cname, self.name,
+ "if (%s && unlikely(!__Pyx_CheckKeywordStrings(%s, %s, %d))) return %s;" % (
+ kwarg_check, Naming.kwds_cname, self.name.as_c_string_literal(),
bool(self.starstar_arg), self.error_value()))
if self.starstar_arg and self.starstar_arg.entry.cf_used:
- if all(ref.node.allow_null for ref in self.starstar_arg.entry.cf_references):
- code.putln("if (%s) {" % kwarg_check)
- code.putln("%s = PyDict_Copy(%s); if (unlikely(!%s)) return %s;" % (
- self.starstar_arg.entry.cname,
- Naming.kwds_cname,
- self.starstar_arg.entry.cname,
- self.error_value()))
- code.put_gotref(self.starstar_arg.entry.cname)
- code.putln("} else {")
- code.putln("%s = NULL;" % (self.starstar_arg.entry.cname,))
- code.putln("}")
- self.starstar_arg.entry.xdecref_cleanup = 1
- else:
- code.put("%s = (%s) ? PyDict_Copy(%s) : PyDict_New(); " % (
- self.starstar_arg.entry.cname,
- Naming.kwds_cname,
- Naming.kwds_cname))
- code.putln("if (unlikely(!%s)) return %s;" % (
- self.starstar_arg.entry.cname, self.error_value()))
- self.starstar_arg.entry.xdecref_cleanup = 0
- code.put_gotref(self.starstar_arg.entry.cname)
+ code.putln("if (%s) {" % kwarg_check)
+ code.putln("%s = __Pyx_KwargsAsDict_%s(%s, %s);" % (
+ self.starstar_arg.entry.cname,
+ self.signature.fastvar,
+ Naming.kwds_cname,
+ Naming.kwvalues_cname))
+ code.putln("if (unlikely(!%s)) return %s;" % (
+ self.starstar_arg.entry.cname, self.error_value()))
+ code.put_gotref(self.starstar_arg.entry.cname, py_object_type)
+ code.putln("} else {")
+ code.putln("%s = PyDict_New();" % (self.starstar_arg.entry.cname,))
+ code.putln("if (unlikely(!%s)) return %s;" % (
+ self.starstar_arg.entry.cname, self.error_value()))
+ code.put_var_gotref(self.starstar_arg.entry)
+ self.starstar_arg.entry.xdecref_cleanup = False
+ code.putln("}")
if self.self_in_stararg and not self.target.is_staticmethod:
+ assert not self.signature.use_fastcall
# need to create a new tuple with 'self' inserted as first item
- code.put("%s = PyTuple_New(PyTuple_GET_SIZE(%s)+1); if (unlikely(!%s)) " % (
+ code.put("%s = PyTuple_New(%s + 1); if (unlikely(!%s)) " % (
self.star_arg.entry.cname,
- Naming.args_cname,
+ Naming.nargs_cname,
self.star_arg.entry.cname))
if self.starstar_arg and self.starstar_arg.entry.cf_used:
code.putln("{")
- code.put_xdecref_clear(self.starstar_arg.entry.cname, py_object_type)
+ code.put_var_xdecref_clear(self.starstar_arg.entry)
code.putln("return %s;" % self.error_value())
code.putln("}")
else:
code.putln("return %s;" % self.error_value())
- code.put_gotref(self.star_arg.entry.cname)
+ code.put_var_gotref(self.star_arg.entry)
code.put_incref(Naming.self_cname, py_object_type)
- code.put_giveref(Naming.self_cname)
+ code.put_giveref(Naming.self_cname, py_object_type)
code.putln("PyTuple_SET_ITEM(%s, 0, %s);" % (
self.star_arg.entry.cname, Naming.self_cname))
temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
- code.putln("for (%s=0; %s < PyTuple_GET_SIZE(%s); %s++) {" % (
- temp, temp, Naming.args_cname, temp))
+ code.putln("for (%s=0; %s < %s; %s++) {" % (
+ temp, temp, Naming.nargs_cname, temp))
code.putln("PyObject* item = PyTuple_GET_ITEM(%s, %s);" % (
Naming.args_cname, temp))
code.put_incref("item", py_object_type)
- code.put_giveref("item")
+ code.put_giveref("item", py_object_type)
code.putln("PyTuple_SET_ITEM(%s, %s+1, item);" % (
self.star_arg.entry.cname, temp))
code.putln("}")
code.funcstate.release_temp(temp)
self.star_arg.entry.xdecref_cleanup = 0
elif self.star_arg:
+ assert not self.signature.use_fastcall
code.put_incref(Naming.args_cname, py_object_type)
code.putln("%s = %s;" % (
self.star_arg.entry.cname,
@@ -3626,11 +3983,17 @@ class DefNodeWrapper(FuncDefNode):
self.star_arg.entry.xdecref_cleanup = 0
def generate_tuple_and_keyword_parsing_code(self, args, success_label, code):
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("fastcall", "FunctionArguments.c"))
+
+ self_name_csafe = self.name.as_c_string_literal()
+
argtuple_error_label = code.new_label("argtuple_error")
positional_args = []
required_kw_only_args = []
optional_kw_only_args = []
+ num_pos_only_args = 0
for arg in args:
if arg.is_generic:
if arg.default:
@@ -3643,6 +4006,8 @@ class DefNodeWrapper(FuncDefNode):
required_kw_only_args.append(arg)
elif not arg.is_self_arg and not arg.is_type_arg:
positional_args.append(arg)
+ if arg.pos_only:
+ num_pos_only_args += 1
# sort required kw-only args before optional ones to avoid special
# cases in the unpacking code
@@ -3661,10 +4026,12 @@ class DefNodeWrapper(FuncDefNode):
code.putln('{')
all_args = tuple(positional_args) + tuple(kw_only_args)
- code.putln("static PyObject **%s[] = {%s,0};" % (
+ non_posonly_args = [arg for arg in all_args if not arg.pos_only]
+ non_pos_args_id = ','.join(
+ ['&%s' % code.intern_identifier(arg.entry.name) for arg in non_posonly_args] + ['0'])
+ code.putln("PyObject **%s[] = {%s};" % (
Naming.pykwdlist_cname,
- ','.join(['&%s' % code.intern_identifier(arg.name)
- for arg in all_args])))
+ non_pos_args_id))
# Before being converted and assigned to the target variables,
# borrowed references to all unpacked argument values are
@@ -3676,14 +4043,43 @@ class DefNodeWrapper(FuncDefNode):
# was passed for them.
self.generate_argument_values_setup_code(all_args, code)
+ # If all args are positional-only, we can raise an error
+ # straight away if we receive a non-empty kw-dict.
+ # This requires a PyDict_Size call. This call is wasteful
+ # for functions which do accept kw-args, so we do not generate
+ # the PyDict_Size call unless all args are positional-only.
+ accept_kwd_args = non_posonly_args or self.starstar_arg
+ if accept_kwd_args:
+ kw_unpacking_condition = Naming.kwds_cname
+ else:
+ kw_unpacking_condition = "%s && __Pyx_NumKwargs_%s(%s) > 0" % (
+ Naming.kwds_cname, self.signature.fastvar, Naming.kwds_cname)
+
+ if self.num_required_kw_args > 0:
+ kw_unpacking_condition = "likely(%s)" % kw_unpacking_condition
+
# --- optimised code when we receive keyword arguments
- code.putln("if (%s(%s)) {" % (
- (self.num_required_kw_args > 0) and "likely" or "unlikely",
- Naming.kwds_cname))
- self.generate_keyword_unpacking_code(
- min_positional_args, max_positional_args,
- has_fixed_positional_count, has_kw_only_args,
- all_args, argtuple_error_label, code)
+ code.putln("if (%s) {" % kw_unpacking_condition)
+
+ if accept_kwd_args:
+ self.generate_keyword_unpacking_code(
+ min_positional_args, max_positional_args,
+ has_fixed_positional_count, has_kw_only_args, all_args, argtuple_error_label, code)
+ else:
+ # Here we do not accept kw-args but we are passed a non-empty kw-dict.
+ # We call ParseOptionalKeywords which will raise an appropriate error if
+ # the kw-args dict passed is non-empty (which it will be, since kw_unpacking_condition is true)
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ParseKeywords", "FunctionArguments.c"))
+ code.putln('if (likely(__Pyx_ParseOptionalKeywords(%s, %s, %s, %s, %s, %s, %s) < 0)) %s' % (
+ Naming.kwds_cname,
+ Naming.kwvalues_cname,
+ Naming.pykwdlist_cname,
+ self.starstar_arg.entry.cname if self.starstar_arg else 0,
+ 'values',
+ 0,
+ self_name_csafe,
+ code.error_goto(self.pos)))
# --- optimised code when we do not receive any keyword arguments
if (self.num_required_kw_args and min_positional_args > 0) or min_positional_args == max_positional_args:
@@ -3693,20 +4089,20 @@ class DefNodeWrapper(FuncDefNode):
compare = '!='
else:
compare = '<'
- code.putln('} else if (PyTuple_GET_SIZE(%s) %s %d) {' % (
- Naming.args_cname, compare, min_positional_args))
+ code.putln('} else if (unlikely(%s %s %d)) {' % (
+ Naming.nargs_cname, compare, min_positional_args))
code.put_goto(argtuple_error_label)
if self.num_required_kw_args:
# pure error case: keywords required but not passed
if max_positional_args > min_positional_args and not self.star_arg:
- code.putln('} else if (PyTuple_GET_SIZE(%s) > %d) {' % (
- Naming.args_cname, max_positional_args))
+ code.putln('} else if (unlikely(%s > %d)) {' % (
+ Naming.nargs_cname, max_positional_args))
code.put_goto(argtuple_error_label)
code.putln('} else {')
for i, arg in enumerate(kw_only_args):
if not arg.default:
- pystring_cname = code.intern_identifier(arg.name)
+ pystring_cname = code.intern_identifier(arg.entry.name)
# required keyword-only argument missing
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
@@ -3723,11 +4119,12 @@ class DefNodeWrapper(FuncDefNode):
# parse the exact number of positional arguments from
# the args tuple
for i, arg in enumerate(positional_args):
- code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
+ code.putln("values[%d] = __Pyx_Arg_%s(%s, %d);" % (
+ i, self.signature.fastvar, Naming.args_cname, i))
else:
# parse the positional arguments from the variable length
# args tuple and reject illegal argument tuple sizes
- code.putln('switch (PyTuple_GET_SIZE(%s)) {' % Naming.args_cname)
+ code.putln('switch (%s) {' % Naming.nargs_cname)
if self.star_arg:
code.putln('default:')
reversed_args = list(enumerate(positional_args))[::-1]
@@ -3736,7 +4133,8 @@ class DefNodeWrapper(FuncDefNode):
if i != reversed_args[0][0]:
code.putln('CYTHON_FALLTHROUGH;')
code.put('case %2d: ' % (i+1))
- code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
+ code.putln("values[%d] = __Pyx_Arg_%s(%s, %d);" % (
+ i, self.signature.fastvar, Naming.args_cname, i))
if min_positional_args == 0:
code.putln('CYTHON_FALLTHROUGH;')
code.put('case 0: ')
@@ -3751,7 +4149,7 @@ class DefNodeWrapper(FuncDefNode):
code.put_goto(argtuple_error_label)
code.putln('}')
- code.putln('}') # end of the conditional unpacking blocks
+ code.putln('}') # end of the conditional unpacking blocks
# Convert arg values to their final type and assign them.
# Also inject non-Python default arguments, which cannot
@@ -3759,17 +4157,17 @@ class DefNodeWrapper(FuncDefNode):
for i, arg in enumerate(all_args):
self.generate_arg_assignment(arg, "values[%d]" % i, code)
- code.putln('}') # end of the whole argument unpacking block
+ code.putln('}') # end of the whole argument unpacking block
if code.label_used(argtuple_error_label):
code.put_goto(success_label)
code.put_label(argtuple_error_label)
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
- code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, PyTuple_GET_SIZE(%s)); ' % (
- self.name, has_fixed_positional_count,
+ code.put('__Pyx_RaiseArgtupleInvalid(%s, %d, %d, %d, %s); ' % (
+ self_name_csafe, has_fixed_positional_count,
min_positional_args, max_positional_args,
- Naming.args_cname))
+ Naming.nargs_cname))
code.putln(code.error_goto(self.pos))
def generate_arg_assignment(self, arg, item, code):
@@ -3792,8 +4190,7 @@ class DefNodeWrapper(FuncDefNode):
arg.entry.cname,
arg.calculate_default_value_code(code)))
if arg.type.is_memoryviewslice:
- code.put_incref_memoryviewslice(arg.entry.cname,
- have_gil=True)
+ code.put_var_incref_memoryviewslice(arg.entry, have_gil=True)
code.putln('}')
else:
error(arg.pos, "Cannot convert Python object argument to type '%s'" % arg.type)
@@ -3805,26 +4202,30 @@ class DefNodeWrapper(FuncDefNode):
self.starstar_arg.entry.cname,
self.starstar_arg.entry.cname,
self.error_value()))
- code.put_gotref(self.starstar_arg.entry.cname)
+ code.put_var_gotref(self.starstar_arg.entry)
if self.star_arg:
self.star_arg.entry.xdecref_cleanup = 0
- code.putln('if (PyTuple_GET_SIZE(%s) > %d) {' % (
- Naming.args_cname,
- max_positional_args))
- code.putln('%s = PyTuple_GetSlice(%s, %d, PyTuple_GET_SIZE(%s));' % (
- self.star_arg.entry.cname, Naming.args_cname,
- max_positional_args, Naming.args_cname))
- code.putln("if (unlikely(!%s)) {" % self.star_arg.entry.cname)
- if self.starstar_arg:
- code.put_decref_clear(self.starstar_arg.entry.cname, py_object_type)
- code.put_finish_refcount_context()
- code.putln('return %s;' % self.error_value())
- code.putln('}')
- code.put_gotref(self.star_arg.entry.cname)
- code.putln('} else {')
- code.put("%s = %s; " % (self.star_arg.entry.cname, Naming.empty_tuple))
- code.put_incref(Naming.empty_tuple, py_object_type)
- code.putln('}')
+ if max_positional_args == 0:
+ # If there are no positional arguments, use the args tuple
+ # directly
+ assert not self.signature.use_fastcall
+ code.put_incref(Naming.args_cname, py_object_type)
+ code.putln("%s = %s;" % (self.star_arg.entry.cname, Naming.args_cname))
+ else:
+ # It is possible that this is a slice of "negative" length,
+ # as in args[5:3]. That's not a problem, the function below
+ # handles that efficiently and returns the empty tuple.
+ code.putln('%s = __Pyx_ArgsSlice_%s(%s, %d, %s);' % (
+ self.star_arg.entry.cname, self.signature.fastvar,
+ Naming.args_cname, max_positional_args, Naming.nargs_cname))
+ code.putln("if (unlikely(!%s)) {" %
+ self.star_arg.entry.type.nullcheck_string(self.star_arg.entry.cname))
+ if self.starstar_arg:
+ code.put_var_decref_clear(self.starstar_arg.entry)
+ code.put_finish_refcount_context()
+ code.putln('return %s;' % self.error_value())
+ code.putln('}')
+ code.put_var_gotref(self.star_arg.entry)
def generate_argument_values_setup_code(self, args, code):
max_args = len(args)
@@ -3846,22 +4247,45 @@ class DefNodeWrapper(FuncDefNode):
code.putln('values[%d] = %s;' % (i, arg.type.as_pyobject(default_value)))
def generate_keyword_unpacking_code(self, min_positional_args, max_positional_args,
- has_fixed_positional_count, has_kw_only_args,
- all_args, argtuple_error_label, code):
+ has_fixed_positional_count,
+ has_kw_only_args, all_args, argtuple_error_label, code):
+ # First we count how many arguments must be passed as positional
+ num_required_posonly_args = num_pos_only_args = 0
+ for i, arg in enumerate(all_args):
+ if arg.pos_only:
+ num_pos_only_args += 1
+ if not arg.default:
+ num_required_posonly_args += 1
+
code.putln('Py_ssize_t kw_args;')
- code.putln('const Py_ssize_t pos_args = PyTuple_GET_SIZE(%s);' % Naming.args_cname)
# copy the values from the args tuple and check that it's not too long
- code.putln('switch (pos_args) {')
+ code.putln('switch (%s) {' % Naming.nargs_cname)
if self.star_arg:
code.putln('default:')
- for i in range(max_positional_args-1, -1, -1):
+
+ for i in range(max_positional_args-1, num_required_posonly_args-1, -1):
code.put('case %2d: ' % (i+1))
- code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (
- i, Naming.args_cname, i))
+ code.putln("values[%d] = __Pyx_Arg_%s(%s, %d);" % (
+ i, self.signature.fastvar, Naming.args_cname, i))
code.putln('CYTHON_FALLTHROUGH;')
- code.putln('case 0: break;')
+ if num_required_posonly_args > 0:
+ code.put('case %2d: ' % num_required_posonly_args)
+ for i in range(num_required_posonly_args-1, -1, -1):
+ code.putln("values[%d] = __Pyx_Arg_%s(%s, %d);" % (
+ i, self.signature.fastvar, Naming.args_cname, i))
+ code.putln('break;')
+ for i in range(num_required_posonly_args-2, -1, -1):
+ code.put('case %2d: ' % (i+1))
+ code.putln('CYTHON_FALLTHROUGH;')
+
+ code.put('case 0: ')
+ if num_required_posonly_args == 0:
+ code.putln('break;')
+ else:
+ # catch-all for not enough pos-only args passed
+ code.put_goto(argtuple_error_label)
if not self.star_arg:
- code.put('default: ') # more arguments than allowed
+ code.put('default: ') # more arguments than allowed
code.put_goto(argtuple_error_label)
code.putln('}')
@@ -3874,7 +4298,10 @@ class DefNodeWrapper(FuncDefNode):
# If we received kwargs, fill up the positional/required
# arguments with values from the kw dict
- code.putln('kw_args = PyDict_Size(%s);' % Naming.kwds_cname)
+ self_name_csafe = self.name.as_c_string_literal()
+
+ code.putln('kw_args = __Pyx_NumKwargs_%s(%s);' % (
+ self.signature.fastvar, Naming.kwds_cname))
if self.num_required_args or max_positional_args > 0:
last_required_arg = -1
for i, arg in enumerate(all_args):
@@ -3882,30 +4309,32 @@ class DefNodeWrapper(FuncDefNode):
last_required_arg = i
if last_required_arg < max_positional_args:
last_required_arg = max_positional_args-1
- if max_positional_args > 0:
- code.putln('switch (pos_args) {')
- for i, arg in enumerate(all_args[:last_required_arg+1]):
- if max_positional_args > 0 and i <= max_positional_args:
- if i != 0:
+ if max_positional_args > num_pos_only_args:
+ code.putln('switch (%s) {' % Naming.nargs_cname)
+ for i, arg in enumerate(all_args[num_pos_only_args:last_required_arg+1], num_pos_only_args):
+ if max_positional_args > num_pos_only_args and i <= max_positional_args:
+ if i != num_pos_only_args:
code.putln('CYTHON_FALLTHROUGH;')
if self.star_arg and i == max_positional_args:
code.putln('default:')
else:
code.putln('case %2d:' % i)
- pystring_cname = code.intern_identifier(arg.name)
+ pystring_cname = code.intern_identifier(arg.entry.name)
if arg.default:
if arg.kw_only:
# optional kw-only args are handled separately below
continue
code.putln('if (kw_args > 0) {')
# don't overwrite default argument
- code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, %s);' % (
- Naming.kwds_cname, pystring_cname))
+ code.putln('PyObject* value = __Pyx_GetKwValue_%s(%s, %s, %s);' % (
+ self.signature.fastvar, Naming.kwds_cname, Naming.kwvalues_cname, pystring_cname))
code.putln('if (value) { values[%d] = value; kw_args--; }' % i)
+ code.putln('else if (unlikely(PyErr_Occurred())) %s' % code.error_goto(self.pos))
code.putln('}')
else:
- code.putln('if (likely((values[%d] = __Pyx_PyDict_GetItemStr(%s, %s)) != 0)) kw_args--;' % (
- i, Naming.kwds_cname, pystring_cname))
+ code.putln('if (likely((values[%d] = __Pyx_GetKwValue_%s(%s, %s, %s)) != 0)) kw_args--;' % (
+ i, self.signature.fastvar, Naming.kwds_cname, Naming.kwvalues_cname, pystring_cname))
+ code.putln('else if (unlikely(PyErr_Occurred())) %s' % code.error_goto(self.pos))
if i < min_positional_args:
if i == 0:
# special case: we know arg 0 is missing
@@ -3918,8 +4347,8 @@ class DefNodeWrapper(FuncDefNode):
code.putln('else {')
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
- code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, %d); ' % (
- self.name, has_fixed_positional_count,
+ code.put('__Pyx_RaiseArgtupleInvalid(%s, %d, %d, %d, %d); ' % (
+ self_name_csafe, has_fixed_positional_count,
min_positional_args, max_positional_args, i))
code.putln(code.error_goto(self.pos))
code.putln('}')
@@ -3927,11 +4356,11 @@ class DefNodeWrapper(FuncDefNode):
code.putln('else {')
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
- code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
- self.name, pystring_cname))
+ code.put('__Pyx_RaiseKeywordRequired(%s, %s); ' % (
+ self_name_csafe, pystring_cname))
code.putln(code.error_goto(self.pos))
code.putln('}')
- if max_positional_args > 0:
+ if max_positional_args > num_pos_only_args:
code.putln('}')
if has_kw_only_args:
@@ -3947,34 +4376,69 @@ class DefNodeWrapper(FuncDefNode):
# arguments, this will always do the right thing for unpacking
# keyword arguments, so that we can concentrate on optimising
# common cases above.
+ #
+ # ParseOptionalKeywords() needs to know how many of the arguments
+ # that could be passed as keywords have in fact been passed as
+ # positional args.
+ if num_pos_only_args > 0:
+ # There are positional-only arguments which we don't want to count,
+ # since they cannot be keyword arguments. Subtract the number of
+ # pos-only arguments from the number of positional arguments we got.
+ # If we get a negative number then none of the keyword arguments were
+ # passed as positional args.
+ code.putln('const Py_ssize_t kwd_pos_args = (unlikely(%s < %d)) ? 0 : %s - %d;' % (
+ Naming.nargs_cname, num_pos_only_args,
+ Naming.nargs_cname, num_pos_only_args,
+ ))
+ elif max_positional_args > 0:
+ code.putln('const Py_ssize_t kwd_pos_args = %s;' % Naming.nargs_cname)
+
if max_positional_args == 0:
pos_arg_count = "0"
elif self.star_arg:
- code.putln("const Py_ssize_t used_pos_args = (pos_args < %d) ? pos_args : %d;" % (
- max_positional_args, max_positional_args))
+ # If there is a *arg, the number of used positional args could be larger than
+ # the number of possible keyword arguments. But ParseOptionalKeywords() uses the
+ # number of positional args as an index into the keyword argument name array,
+ # if this is larger than the number of kwd args we get a segfault. So round
+ # this down to max_positional_args - num_pos_only_args (= num possible kwd args).
+ code.putln("const Py_ssize_t used_pos_args = (kwd_pos_args < %d) ? kwd_pos_args : %d;" % (
+ max_positional_args - num_pos_only_args, max_positional_args - num_pos_only_args))
pos_arg_count = "used_pos_args"
else:
- pos_arg_count = "pos_args"
+ pos_arg_count = "kwd_pos_args"
+ if num_pos_only_args < len(all_args):
+ values_array = 'values + %d' % num_pos_only_args
+ else:
+ values_array = 'values'
code.globalstate.use_utility_code(
UtilityCode.load_cached("ParseKeywords", "FunctionArguments.c"))
- code.putln('if (unlikely(__Pyx_ParseOptionalKeywords(%s, %s, %s, values, %s, "%s") < 0)) %s' % (
+ code.putln('if (unlikely(__Pyx_ParseOptionalKeywords(%s, %s, %s, %s, %s, %s, %s) < 0)) %s' % (
Naming.kwds_cname,
+ Naming.kwvalues_cname,
Naming.pykwdlist_cname,
self.starstar_arg and self.starstar_arg.entry.cname or '0',
+ values_array,
pos_arg_count,
- self.name,
+ self_name_csafe,
code.error_goto(self.pos)))
code.putln('}')
def generate_optional_kwonly_args_unpacking_code(self, all_args, code):
optional_args = []
first_optional_arg = -1
+ num_posonly_args = 0
for i, arg in enumerate(all_args):
+ if arg.pos_only:
+ num_posonly_args += 1
if not arg.kw_only or not arg.default:
continue
if not optional_args:
first_optional_arg = i
optional_args.append(arg.name)
+ if num_posonly_args > 0:
+ posonly_correction = '-%d' % num_posonly_args
+ else:
+ posonly_correction = ''
if optional_args:
if len(optional_args) > 1:
# if we receive more than the named kwargs, we either have **kwargs
@@ -3990,9 +4454,14 @@ class DefNodeWrapper(FuncDefNode):
else:
code.putln('if (kw_args == 1) {')
code.putln('const Py_ssize_t index = %d;' % first_optional_arg)
- code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, *%s[index]);' % (
- Naming.kwds_cname, Naming.pykwdlist_cname))
+ code.putln('PyObject* value = __Pyx_GetKwValue_%s(%s, %s, *%s[index%s]);' % (
+ self.signature.fastvar,
+ Naming.kwds_cname,
+ Naming.kwvalues_cname,
+ Naming.pykwdlist_cname,
+ posonly_correction))
code.putln('if (value) { values[index] = value; kw_args--; }')
+ code.putln('else if (unlikely(PyErr_Occurred())) %s' % code.error_goto(self.pos))
if len(optional_args) > 1:
code.putln('}')
code.putln('}')
@@ -4061,6 +4530,36 @@ class DefNodeWrapper(FuncDefNode):
arg.type.is_buffer or
arg.type.is_memoryviewslice):
self.generate_arg_none_check(arg, code)
+ if self.target.entry.is_special:
+ for n in reversed(range(len(self.args), self.signature.max_num_fixed_args())):
+ # for special functions with optional args (e.g. power which can
+ # take 2 or 3 args), unused args are None since this is what the
+            # compiler sets
+ if self.target.entry.name == "__ipow__":
+ # Bug in Python < 3.8 - __ipow__ is used as a binary function
+ # and attempts to access the third argument will always fail
+ code.putln("#if PY_VERSION_HEX >= 0x03080000")
+ code.putln("if (unlikely(unused_arg_%s != Py_None)) {" % n)
+ code.putln(
+ 'PyErr_SetString(PyExc_TypeError, '
+ '"%s() takes %s arguments but %s were given");' % (
+ self.target.entry.qualified_name, self.signature.max_num_fixed_args(), n))
+ code.putln("%s;" % code.error_goto(self.pos))
+ code.putln("}")
+ if self.target.entry.name == "__ipow__":
+ code.putln("#endif /*PY_VERSION_HEX >= 0x03080000*/")
+ if self.target.entry.name == "__ipow__" and len(self.args) != 2:
+ # It's basically impossible to safely support it:
+ # Class().__ipow__(1) is guaranteed to crash.
+ # Therefore, raise an error.
+ # Use "if" instead of "#if" to avoid warnings about unused variables
+ code.putln("if ((PY_VERSION_HEX < 0x03080000)) {")
+ code.putln(
+ 'PyErr_SetString(PyExc_NotImplementedError, '
+ '"3-argument %s cannot be used in Python<3.8");' % (
+ self.target.entry.qualified_name))
+ code.putln("%s;" % code.error_goto(self.pos))
+ code.putln('}')
def error_value(self):
return self.signature.error_value
@@ -4073,9 +4572,7 @@ class GeneratorDefNode(DefNode):
#
is_generator = True
- is_coroutine = False
is_iterable_coroutine = False
- is_asyncgen = False
gen_type_name = 'Generator'
needs_closure = True
@@ -4110,7 +4607,7 @@ class GeneratorDefNode(DefNode):
code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
classobj_cname, Naming.self_cname))
code.put_incref(classobj_cname, py_object_type)
- code.put_giveref(classobj_cname)
+ code.put_giveref(classobj_cname, py_object_type)
code.put_finish_refcount_context()
code.putln('return (PyObject *) gen;')
code.putln('}')
@@ -4234,7 +4731,7 @@ class GeneratorBodyDefNode(DefNode):
code.putln("%s = %s; %s" % (
Naming.retval_cname, comp_init,
code.error_goto_if_null(Naming.retval_cname, self.pos)))
- code.put_gotref(Naming.retval_cname)
+ code.put_gotref(Naming.retval_cname, py_object_type)
# ----- Function body
self.generate_function_body(env, code)
@@ -4280,7 +4777,7 @@ class GeneratorBodyDefNode(DefNode):
# ----- Non-error return cleanup
code.put_label(code.return_label)
if self.is_inlined:
- code.put_xgiveref(Naming.retval_cname)
+ code.put_xgiveref(Naming.retval_cname, py_object_type)
else:
code.put_xdecref_clear(Naming.retval_cname, py_object_type)
# For Py3.7, clearing is already done below.
@@ -4357,7 +4854,10 @@ class OverrideCheckNode(StatNode):
return self
def generate_execution_code(self, code):
- interned_attr_cname = code.intern_identifier(self.py_func.entry.name)
+ # For fused functions, look up the dispatch function, not the specialisation.
+ method_entry = self.py_func.fused_py_func.entry if self.py_func.fused_py_func else self.py_func.entry
+ interned_attr_cname = code.intern_identifier(method_entry.name)
+
# Check to see if we are an extension type
if self.py_func.is_module_scope:
self_arg = "((PyObject *)%s)" % Naming.module_cname
@@ -4369,8 +4869,8 @@ class OverrideCheckNode(StatNode):
if self.py_func.is_module_scope:
code.putln("else {")
else:
- code.putln("else if (unlikely((Py_TYPE(%s)->tp_dictoffset != 0)"
- " || (Py_TYPE(%s)->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))) {" % (
+ code.putln("else if (unlikely((Py_TYPE(%s)->tp_dictoffset != 0) || "
+ "__Pyx_PyType_HasFeature(Py_TYPE(%s), (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))) {" % (
self_arg, self_arg))
code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS")
@@ -4396,12 +4896,16 @@ class OverrideCheckNode(StatNode):
err = code.error_goto_if_null(func_node_temp, self.pos)
code.putln("%s = __Pyx_PyObject_GetAttrStr(%s, %s); %s" % (
func_node_temp, self_arg, interned_attr_cname, err))
- code.put_gotref(func_node_temp)
+ code.put_gotref(func_node_temp, py_object_type)
- is_builtin_function_or_method = "PyCFunction_Check(%s)" % func_node_temp
is_overridden = "(PyCFunction_GET_FUNCTION(%s) != (PyCFunction)(void*)%s)" % (
- func_node_temp, self.py_func.entry.func_cname)
- code.putln("if (!%s || %s) {" % (is_builtin_function_or_method, is_overridden))
+ func_node_temp, method_entry.func_cname)
+ code.putln("#ifdef __Pyx_CyFunction_USED")
+ code.putln("if (!__Pyx_IsCyOrPyCFunction(%s)" % func_node_temp)
+ code.putln("#else")
+ code.putln("if (!PyCFunction_Check(%s)" % func_node_temp)
+ code.putln("#endif")
+ code.putln(" || %s) {" % is_overridden)
self.body.generate_execution_code(code)
code.putln("}")
@@ -4443,25 +4947,31 @@ class PyClassDefNode(ClassDefNode):
# A Python class definition.
#
# name EncodedString Name of the class
- # doc string or None
+ # doc string or None The class docstring
# body StatNode Attribute definition code
# entry Symtab.Entry
# scope PyClassScope
# decorators [DecoratorNode] list of decorators or None
+ # bases ExprNode Expression that evaluates to a tuple of base classes
#
# The following subnodes are constructed internally:
#
+ # doc_node NameNode '__doc__' name that is made available to the class body
# dict DictNode Class dictionary or Py3 namespace
# classobj ClassNode Class object
# target NameNode Variable to assign class object to
+ # orig_bases None or ExprNode "bases" before transformation by PEP560 __mro_entries__,
+ # used to create the __orig_bases__ attribute
- child_attrs = ["body", "dict", "metaclass", "mkw", "bases", "class_result",
- "target", "class_cell", "decorators"]
+ child_attrs = ["doc_node", "body", "dict", "metaclass", "mkw", "bases", "class_result",
+ "target", "class_cell", "decorators", "orig_bases"]
decorators = None
class_result = None
is_py3_style_class = False # Python3 style class (kwargs)
metaclass = None
mkw = None
+ doc_node = None
+ orig_bases = None
def __init__(self, pos, name, bases, doc, body, decorators=None,
keyword_args=None, force_py3_semantics=False):
@@ -4475,6 +4985,7 @@ class PyClassDefNode(ClassDefNode):
if self.doc and Options.docstrings:
doc = embed_position(self.pos, self.doc)
doc_node = ExprNodes.StringNode(pos, value=doc)
+ self.doc_node = ExprNodes.NameNode(name=EncodedString('__doc__'), type=py_object_type, pos=pos)
else:
doc_node = None
@@ -4523,7 +5034,9 @@ class PyClassDefNode(ClassDefNode):
self.classobj = ExprNodes.Py3ClassNode(
pos, name=name, class_def_node=self, doc=doc_node,
calculate_metaclass=needs_metaclass_calculation,
- allow_py2_metaclass=allow_py2_metaclass)
+ allow_py2_metaclass=allow_py2_metaclass,
+ force_type=force_py3_semantics,
+ )
else:
# no bases, no metaclass => old style class creation
self.dict = ExprNodes.DictNode(pos, key_value_pairs=[])
@@ -4560,7 +5073,7 @@ class PyClassDefNode(ClassDefNode):
return cenv
def analyse_declarations(self, env):
- class_result = self.classobj
+ unwrapped_class_result = class_result = self.classobj
if self.decorators:
from .ExprNodes import SimpleCallNode
for decorator in self.decorators[::-1]:
@@ -4579,9 +5092,27 @@ class PyClassDefNode(ClassDefNode):
cenv = self.create_scope(env)
cenv.directives = env.directives
cenv.class_obj_cname = self.target.entry.cname
+ if self.doc_node:
+ self.doc_node.analyse_target_declaration(cenv)
self.body.analyse_declarations(cenv)
+ unwrapped_class_result.analyse_annotations(cenv)
+
+ update_bases_functype = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("bases", PyrexTypes.py_object_type, None)
+ ])
def analyse_expressions(self, env):
+ if self.bases and not (self.bases.is_sequence_constructor and len(self.bases.args) == 0):
+ from .ExprNodes import PythonCapiCallNode, CloneNode
+ # handle the Python 3.7 __mro_entries__ transformation
+ orig_bases = self.bases.analyse_expressions(env)
+ self.bases = PythonCapiCallNode(orig_bases.pos,
+ function_name="__Pyx_PEP560_update_bases",
+ func_type=self.update_bases_functype,
+ utility_code=UtilityCode.load_cached('Py3UpdateBases', 'ObjectHandling.c'),
+ args=[CloneNode(orig_bases)])
+ self.orig_bases = orig_bases
if self.bases:
self.bases = self.bases.analyse_expressions(env)
if self.mkw:
@@ -4592,7 +5123,7 @@ class PyClassDefNode(ClassDefNode):
self.class_result = self.class_result.analyse_expressions(env)
cenv = self.scope
self.body = self.body.analyse_expressions(cenv)
- self.target.analyse_target_expression(env, self.classobj)
+ self.target = self.target.analyse_target_expression(env, self.classobj)
self.class_cell = self.class_cell.analyse_expressions(cenv)
return self
@@ -4604,6 +5135,8 @@ class PyClassDefNode(ClassDefNode):
code.mark_pos(self.pos)
code.pyclass_stack.append(self)
cenv = self.scope
+ if self.orig_bases:
+ self.orig_bases.generate_evaluation_code(code)
if self.bases:
self.bases.generate_evaluation_code(code)
if self.mkw:
@@ -4611,6 +5144,17 @@ class PyClassDefNode(ClassDefNode):
if self.metaclass:
self.metaclass.generate_evaluation_code(code)
self.dict.generate_evaluation_code(code)
+ if self.orig_bases:
+ # update __orig_bases__ if needed
+ code.putln("if (%s != %s) {" % (self.bases.result(), self.orig_bases.result()))
+ code.putln(
+ code.error_goto_if_neg('PyDict_SetItemString(%s, "__orig_bases__", %s)' % (
+ self.dict.result(), self.orig_bases.result()),
+ self.pos
+ ))
+ code.putln("}")
+ self.orig_bases.generate_disposal_code(code)
+ self.orig_bases.free_temps(code)
cenv.namespace_cname = cenv.class_obj_cname = self.dict.result()
class_cell = self.class_cell
@@ -4677,6 +5221,10 @@ class CClassDefNode(ClassDefNode):
decorators = None
shadow = False
+ @property
+ def punycode_class_name(self):
+ return punycodify_name(self.class_name)
+
def buffer_defaults(self, env):
if not hasattr(self, '_buffer_defaults'):
from . import Buffer
@@ -4713,6 +5261,8 @@ class CClassDefNode(ClassDefNode):
api=self.api,
buffer_defaults=self.buffer_defaults(env),
shadow=self.shadow)
+ if self.bases and len(self.bases.args) > 1:
+ self.entry.type.multiple_bases = True
def analyse_declarations(self, env):
#print "CClassDefNode.analyse_declarations:", self.class_name
@@ -4757,7 +5307,8 @@ class CClassDefNode(ClassDefNode):
error(base.pos, "Base class '%s' of type '%s' is final" % (
base_type, self.class_name))
elif base_type.is_builtin_type and \
- base_type.name in ('tuple', 'str', 'bytes'):
+ base_type.name in ('tuple', 'bytes'):
+ # str in Py2 is also included in this, but now checked at run-time
error(base.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
% base_type.name)
else:
@@ -4783,7 +5334,7 @@ class CClassDefNode(ClassDefNode):
if self.visibility == 'extern':
if (self.module_name == '__builtin__' and
self.class_name in Builtin.builtin_types and
- env.qualified_name[:8] != 'cpython.'): # allow overloaded names for cimporting from cpython
+ env.qualified_name[:8] != 'cpython.'): # allow overloaded names for cimporting from cpython
warning(self.pos, "%s already a builtin Cython type" % self.class_name, 1)
self.entry = home_scope.declare_c_class(
@@ -4801,6 +5352,8 @@ class CClassDefNode(ClassDefNode):
api=self.api,
buffer_defaults=self.buffer_defaults(env),
shadow=self.shadow)
+ if self.bases and len(self.bases.args) > 1:
+ self.entry.type.multiple_bases = True
if self.shadow:
home_scope.lookup(self.class_name).as_variable = self.entry
@@ -4809,6 +5362,15 @@ class CClassDefNode(ClassDefNode):
self.scope = scope = self.entry.type.scope
if scope is not None:
scope.directives = env.directives
+ if "dataclasses.dataclass" in env.directives:
+ is_frozen = False
+ # Retrieve the @dataclass config (args, kwargs), as passed into the decorator.
+ dataclass_config = env.directives["dataclasses.dataclass"]
+ if dataclass_config:
+ decorator_kwargs = dataclass_config[1]
+ frozen_flag = decorator_kwargs.get('frozen')
+ is_frozen = frozen_flag and frozen_flag.is_literal and frozen_flag.value
+ scope.is_c_dataclass_scope = "frozen" if is_frozen else True
if self.doc and Options.docstrings:
scope.doc = embed_position(self.pos, self.doc)
@@ -4868,71 +5430,206 @@ class CClassDefNode(ClassDefNode):
# This is needed to generate evaluation code for
# default values of method arguments.
code.mark_pos(self.pos)
- if self.body:
- self.body.generate_execution_code(code)
if not self.entry.type.early_init:
+ bases = None
if self.type_init_args:
+ # Extract bases tuple and validate 'best base' by actually calling 'type()'.
+ bases = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
+
self.type_init_args.generate_evaluation_code(code)
- bases = "PyTuple_GET_ITEM(%s, 1)" % self.type_init_args.result()
+ code.putln("%s = PyTuple_GET_ITEM(%s, 1);" % (bases, self.type_init_args.result()))
+ code.put_incref(bases, PyrexTypes.py_object_type)
+
first_base = "((PyTypeObject*)PyTuple_GET_ITEM(%s, 0))" % bases
# Let Python do the base types compatibility checking.
- trial_type = code.funcstate.allocate_temp(PyrexTypes.py_object_type, True)
- code.putln("%s = PyType_Type.tp_new(&PyType_Type, %s, NULL);" % (
+ trial_type = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
+ code.putln("%s = __Pyx_PyType_GetSlot(&PyType_Type, tp_new, newfunc)(&PyType_Type, %s, NULL);" % (
trial_type, self.type_init_args.result()))
code.putln(code.error_goto_if_null(trial_type, self.pos))
- code.put_gotref(trial_type)
- code.putln("if (((PyTypeObject*) %s)->tp_base != %s) {" % (
+ code.put_gotref(trial_type, py_object_type)
+ code.putln("if (__Pyx_PyType_GetSlot((PyTypeObject*) %s, tp_base, PyTypeObject*) != %s) {" % (
trial_type, first_base))
- code.putln("PyErr_Format(PyExc_TypeError, \"best base '%s' must be equal to first base '%s'\",")
- code.putln(" ((PyTypeObject*) %s)->tp_base->tp_name, %s->tp_name);" % (
- trial_type, first_base))
+ trial_type_base = "__Pyx_PyType_GetSlot((PyTypeObject*) %s, tp_base, PyTypeObject*)" % trial_type
+ code.putln("__Pyx_TypeName base_name = __Pyx_PyType_GetName(%s);" % trial_type_base)
+ code.putln("__Pyx_TypeName type_name = __Pyx_PyType_GetName(%s);" % first_base)
+ code.putln("PyErr_Format(PyExc_TypeError, "
+ "\"best base '\" __Pyx_FMT_TYPENAME \"' must be equal to first base '\" __Pyx_FMT_TYPENAME \"'\",")
+ code.putln(" base_name, type_name);")
+ code.putln("__Pyx_DECREF_TypeName(base_name);")
+ code.putln("__Pyx_DECREF_TypeName(type_name);")
code.putln(code.error_goto(self.pos))
code.putln("}")
- code.funcstate.release_temp(trial_type)
- code.put_incref(bases, PyrexTypes.py_object_type)
- code.put_giveref(bases)
- code.putln("%s.tp_bases = %s;" % (self.entry.type.typeobj_cname, bases))
+
code.put_decref_clear(trial_type, PyrexTypes.py_object_type)
+ code.funcstate.release_temp(trial_type)
+
self.type_init_args.generate_disposal_code(code)
self.type_init_args.free_temps(code)
- self.generate_type_ready_code(self.entry, code, True)
+ self.generate_type_ready_code(self.entry, code, bases_tuple_cname=bases, check_heap_type_bases=True)
+ if bases is not None:
+ code.put_decref_clear(bases, PyrexTypes.py_object_type)
+ code.funcstate.release_temp(bases)
+
+ if self.body:
+ self.body.generate_execution_code(code)
# Also called from ModuleNode for early init types.
@staticmethod
- def generate_type_ready_code(entry, code, heap_type_bases=False):
+ def generate_type_ready_code(entry, code, bases_tuple_cname=None, check_heap_type_bases=False):
# Generate a call to PyType_Ready for an extension
# type defined in this module.
type = entry.type
- typeobj_cname = type.typeobj_cname
+ typeptr_cname = type.typeptr_cname
scope = type.scope
if not scope: # could be None if there was an error
return
- if entry.visibility != 'extern':
- for slot in TypeSlots.slot_table:
- slot.generate_dynamic_init_code(scope, code)
- if heap_type_bases:
- code.globalstate.use_utility_code(
- UtilityCode.load_cached('PyType_Ready', 'ExtensionTypes.c'))
- readyfunc = "__Pyx_PyType_Ready"
+ if entry.visibility == 'extern':
+ # Generate code to initialise the typeptr of an external extension
+ # type defined in this module to point to its type object.
+ if type.typeobj_cname:
+ # FIXME: this should not normally be set :-?
+ assert not type.typeobj_cname
+ code.putln("%s = &%s;" % (
+ type.typeptr_cname,
+ type.typeobj_cname,
+ ))
+ return
+ # TODO: remove 'else:' and dedent
+ else:
+ assert typeptr_cname
+ assert type.typeobj_cname
+ typespec_cname = "%s_spec" % type.typeobj_cname
+ code.putln("#if CYTHON_USE_TYPE_SPECS")
+ tuple_temp = None
+ if not bases_tuple_cname and scope.parent_type.base_type:
+ tuple_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ code.putln("%s = PyTuple_Pack(1, (PyObject *)%s); %s" % (
+ tuple_temp,
+ scope.parent_type.base_type.typeptr_cname,
+ code.error_goto_if_null(tuple_temp, entry.pos),
+ ))
+ code.put_gotref(tuple_temp, py_object_type)
+
+ if bases_tuple_cname or tuple_temp:
+ if check_heap_type_bases:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('ValidateBasesTuple', 'ExtensionTypes.c'))
+ code.put_error_if_neg(entry.pos, "__Pyx_validate_bases_tuple(%s.name, %s, %s)" % (
+ typespec_cname,
+ TypeSlots.get_slot_by_name("tp_dictoffset", scope.directives).slot_code(scope),
+ bases_tuple_cname or tuple_temp,
+ ))
+
+ code.putln("%s = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(%s, &%s, %s);" % (
+ typeptr_cname,
+ Naming.module_cname,
+ typespec_cname,
+ bases_tuple_cname or tuple_temp,
+ ))
+ if tuple_temp:
+ code.put_xdecref_clear(tuple_temp, type=py_object_type)
+ code.funcstate.release_temp(tuple_temp)
+ code.putln(code.error_goto_if_null(typeptr_cname, entry.pos))
else:
- readyfunc = "PyType_Ready"
- code.putln(
- "if (%s(&%s) < 0) %s" % (
- readyfunc,
- typeobj_cname,
- code.error_goto(entry.pos)))
- # Don't inherit tp_print from builtin types, restoring the
+ code.putln(
+ "%s = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(%s, &%s, NULL); %s" % (
+ typeptr_cname,
+ Naming.module_cname,
+ typespec_cname,
+ code.error_goto_if_null(typeptr_cname, entry.pos),
+ ))
+
+ # The buffer interface is not currently supported by PyType_FromSpec().
+ buffer_slot = TypeSlots.get_slot_by_name("tp_as_buffer", code.globalstate.directives)
+ if not buffer_slot.is_empty(scope):
+ code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API")
+ code.putln("%s->%s = %s;" % (
+ typeptr_cname,
+ buffer_slot.slot_name,
+ buffer_slot.slot_code(scope),
+ ))
+ # Still need to inherit buffer methods since PyType_Ready() didn't do it for us.
+ for buffer_method_name in ("__getbuffer__", "__releasebuffer__"):
+ buffer_slot = TypeSlots.get_slot_table(
+ code.globalstate.directives).get_slot_by_method_name(buffer_method_name)
+ if buffer_slot.slot_code(scope) == "0" and not TypeSlots.get_base_slot_function(scope, buffer_slot):
+ code.putln("if (!%s->tp_as_buffer->%s &&"
+ " %s->tp_base->tp_as_buffer &&"
+ " %s->tp_base->tp_as_buffer->%s) {" % (
+ typeptr_cname, buffer_slot.slot_name,
+ typeptr_cname,
+ typeptr_cname, buffer_slot.slot_name,
+ ))
+ code.putln("%s->tp_as_buffer->%s = %s->tp_base->tp_as_buffer->%s;" % (
+ typeptr_cname, buffer_slot.slot_name,
+ typeptr_cname, buffer_slot.slot_name,
+ ))
+ code.putln("}")
+ code.putln("#elif defined(Py_bf_getbuffer) && defined(Py_bf_releasebuffer)")
+ code.putln("/* PY_VERSION_HEX >= 0x03090000 || Py_LIMITED_API >= 0x030B0000 */")
+ code.putln("#elif defined(_MSC_VER)")
+ code.putln("#pragma message (\"The buffer protocol is not supported in the Limited C-API < 3.11.\")")
+ code.putln("#else")
+ code.putln("#warning \"The buffer protocol is not supported in the Limited C-API < 3.11.\"")
+ code.putln("#endif")
+
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("FixUpExtensionType", "ExtensionTypes.c"))
+ code.put_error_if_neg(entry.pos, "__Pyx_fix_up_extension_type_from_spec(&%s, %s)" % (
+ typespec_cname, typeptr_cname))
+
+ code.putln("#else")
+ if bases_tuple_cname:
+ code.put_incref(bases_tuple_cname, py_object_type)
+ code.put_giveref(bases_tuple_cname, py_object_type)
+ code.putln("%s.tp_bases = %s;" % (type.typeobj_cname, bases_tuple_cname))
+ code.putln("%s = &%s;" % (
+ typeptr_cname,
+ type.typeobj_cname,
+ ))
+ code.putln("#endif") # if CYTHON_USE_TYPE_SPECS
+
+ base_type = type.base_type
+ while base_type:
+ if base_type.is_external and not base_type.objstruct_cname == "PyTypeObject":
+ # 'type' is special-cased because it is actually based on PyHeapTypeObject
+ # Variable length bases are allowed if the current class doesn't grow
+ code.putln("if (sizeof(%s%s) != sizeof(%s%s)) {" % (
+ "" if type.typedef_flag else "struct ", type.objstruct_cname,
+ "" if base_type.typedef_flag else "struct ", base_type.objstruct_cname))
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ValidateExternBase", "ExtensionTypes.c"))
+ code.put_error_if_neg(entry.pos, "__Pyx_validate_extern_base(%s)" % (
+ type.base_type.typeptr_cname))
+ code.putln("}")
+ break
+ base_type = base_type.base_type
+
+ code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API")
+ # FIXME: these still need to get initialised even with the limited-API
+ for slot in TypeSlots.get_slot_table(code.globalstate.directives):
+ slot.generate_dynamic_init_code(scope, code)
+ code.putln("#endif")
+
+ code.putln("#if !CYTHON_USE_TYPE_SPECS")
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('PyType_Ready', 'ExtensionTypes.c'))
+ code.put_error_if_neg(entry.pos, "__Pyx_PyType_Ready(%s)" % typeptr_cname)
+ code.putln("#endif")
+
+ # Don't inherit tp_print from builtin types in Python 2, restoring the
# behavior of using tp_repr or tp_str instead.
# ("tp_print" was renamed to "tp_vectorcall_offset" in Py3.8b1)
- code.putln("#if PY_VERSION_HEX < 0x030800B1")
- code.putln("%s.tp_print = 0;" % typeobj_cname)
+ code.putln("#if PY_MAJOR_VERSION < 3")
+ code.putln("%s->tp_print = 0;" % typeptr_cname)
code.putln("#endif")
# Use specialised attribute lookup for types with generic lookup but no instance dict.
getattr_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_getattro')
dictoffset_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_dictoffset')
if getattr_slot_func == '0' and dictoffset_slot_func == '0':
+ code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API") # FIXME
if type.is_final_type:
py_cfunc = "__Pyx_PyObject_GenericGetAttrNoDict" # grepable
utility_func = "PyObject_GenericGetAttrNoDict"
@@ -4942,11 +5639,12 @@ class CClassDefNode(ClassDefNode):
code.globalstate.use_utility_code(UtilityCode.load_cached(utility_func, "ObjectHandling.c"))
code.putln("if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) &&"
- " likely(!%s.tp_dictoffset && %s.tp_getattro == PyObject_GenericGetAttr)) {" % (
- typeobj_cname, typeobj_cname))
- code.putln("%s.tp_getattro = %s;" % (
- typeobj_cname, py_cfunc))
+ " likely(!%s->tp_dictoffset && %s->tp_getattro == PyObject_GenericGetAttr)) {" % (
+ typeptr_cname, typeptr_cname))
+ code.putln("%s->tp_getattro = %s;" % (
+ typeptr_cname, py_cfunc))
code.putln("}")
+ code.putln("#endif") # if !CYTHON_COMPILING_IN_LIMITED_API
# Fix special method docstrings. This is a bit of a hack, but
# unless we let PyType_Ready create the slot wrappers we have
@@ -4955,19 +5653,20 @@ class CClassDefNode(ClassDefNode):
is_buffer = func.name in ('__getbuffer__', '__releasebuffer__')
if (func.is_special and Options.docstrings and
func.wrapperbase_cname and not is_buffer):
- slot = TypeSlots.method_name_to_slot.get(func.name)
+ slot = TypeSlots.get_slot_table(
+ entry.type.scope.directives).get_slot_by_method_name(func.name)
preprocessor_guard = slot.preprocessor_guard_code() if slot else None
if preprocessor_guard:
code.putln(preprocessor_guard)
code.putln('#if CYTHON_UPDATE_DESCRIPTOR_DOC')
code.putln("{")
code.putln(
- 'PyObject *wrapper = PyObject_GetAttrString((PyObject *)&%s, "%s"); %s' % (
- typeobj_cname,
+ 'PyObject *wrapper = PyObject_GetAttrString((PyObject *)%s, "%s"); %s' % (
+ typeptr_cname,
func.name,
code.error_goto_if_null('wrapper', entry.pos)))
code.putln(
- "if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) {")
+ "if (__Pyx_IS_TYPE(wrapper, &PyWrapperDescr_Type)) {")
code.putln(
"%s = *((PyWrapperDescrObject *)wrapper)->d_base;" % (
func.wrapperbase_cname))
@@ -4981,34 +5680,34 @@ class CClassDefNode(ClassDefNode):
code.putln('#endif')
if preprocessor_guard:
code.putln('#endif')
+
if type.vtable_cname:
code.globalstate.use_utility_code(
UtilityCode.load_cached('SetVTable', 'ImportExport.c'))
- code.putln(
- "if (__Pyx_SetVtable(%s.tp_dict, %s) < 0) %s" % (
- typeobj_cname,
- type.vtabptr_cname,
- code.error_goto(entry.pos)))
- if heap_type_bases:
- code.globalstate.use_utility_code(
- UtilityCode.load_cached('MergeVTables', 'ImportExport.c'))
- code.putln("if (__Pyx_MergeVtables(&%s) < 0) %s" % (
- typeobj_cname,
- code.error_goto(entry.pos)))
+ code.put_error_if_neg(entry.pos, "__Pyx_SetVtable(%s, %s)" % (
+ typeptr_cname,
+ type.vtabptr_cname,
+ ))
+ # TODO: find a way to make this work with the Limited API!
+ code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API")
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('MergeVTables', 'ImportExport.c'))
+ code.put_error_if_neg(entry.pos, "__Pyx_MergeVtables(%s)" % typeptr_cname)
+ code.putln("#endif")
if not type.scope.is_internal and not type.scope.directives.get('internal'):
# scope.is_internal is set for types defined by
# Cython (such as closures), the 'internal'
# directive is set by users
- code.putln(
- 'if (PyObject_SetAttr(%s, %s, (PyObject *)&%s) < 0) %s' % (
- Naming.module_cname,
- code.intern_identifier(scope.class_name),
- typeobj_cname,
- code.error_goto(entry.pos)))
+ code.put_error_if_neg(entry.pos, "PyObject_SetAttr(%s, %s, (PyObject *) %s)" % (
+ Naming.module_cname,
+ code.intern_identifier(scope.class_name),
+ typeptr_cname,
+ ))
+
weakref_entry = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None
if weakref_entry:
if weakref_entry.type is py_object_type:
- tp_weaklistoffset = "%s.tp_weaklistoffset" % typeobj_cname
+ tp_weaklistoffset = "%s->tp_weaklistoffset" % typeptr_cname
if type.typedef_flag:
objstruct = type.objstruct_cname
else:
@@ -5020,21 +5719,16 @@ class CClassDefNode(ClassDefNode):
weakref_entry.cname))
else:
error(weakref_entry.pos, "__weakref__ slot must be of type 'object'")
+
if scope.lookup_here("__reduce_cython__") if not scope.is_closure_class_scope else None:
# Unfortunately, we cannot reliably detect whether a
# superclass defined __reduce__ at compile time, so we must
# do so at runtime.
code.globalstate.use_utility_code(
UtilityCode.load_cached('SetupReduce', 'ExtensionTypes.c'))
- code.putln('if (__Pyx_setup_reduce((PyObject*)&%s) < 0) %s' % (
- typeobj_cname,
- code.error_goto(entry.pos)))
- # Generate code to initialise the typeptr of an extension
- # type defined in this module to point to its type object.
- if type.typeobj_cname:
- code.putln(
- "%s = &%s;" % (
- type.typeptr_cname, type.typeobj_cname))
+ code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API") # FIXME
+ code.put_error_if_neg(entry.pos, "__Pyx_setup_reduce((PyObject *) %s)" % typeptr_cname)
+ code.putln("#endif")
def annotate(self, code):
if self.type_init_args:
@@ -5048,14 +5742,13 @@ class PropertyNode(StatNode):
#
# name string
# doc EncodedString or None Doc string
- # entry Symtab.Entry
+ # entry Symtab.Entry The Entry of the property attribute
# body StatListNode
child_attrs = ["body"]
def analyse_declarations(self, env):
self.entry = env.declare_property(self.name, self.doc, self.pos)
- self.entry.scope.directives = env.directives
self.body.analyse_declarations(self.entry.scope)
def analyse_expressions(self, env):
@@ -5072,6 +5765,44 @@ class PropertyNode(StatNode):
self.body.annotate(code)
+class CPropertyNode(StatNode):
+ """Definition of a C property, backed by a CFuncDefNode getter.
+ """
+ # name string
+ # doc EncodedString or None Doc string of the property
+ # entry Symtab.Entry The Entry of the property attribute
+ # body StatListNode[CFuncDefNode] (for compatibility with PropertyNode)
+
+ child_attrs = ["body"]
+ is_cproperty = True
+
+ @property
+ def cfunc(self):
+ stats = self.body.stats
+ assert stats and isinstance(stats[0], CFuncDefNode), stats
+ return stats[0]
+
+ def analyse_declarations(self, env):
+ scope = PropertyScope(self.name, class_scope=env)
+ self.body.analyse_declarations(scope)
+ entry = self.entry = env.declare_property(
+ self.name, self.doc, self.pos, ctype=self.cfunc.return_type, property_scope=scope)
+ entry.getter_cname = self.cfunc.entry.cname
+
+ def analyse_expressions(self, env):
+ self.body = self.body.analyse_expressions(env)
+ return self
+
+ def generate_function_definitions(self, env, code):
+ self.body.generate_function_definitions(env, code)
+
+ def generate_execution_code(self, code):
+ pass
+
+ def annotate(self, code):
+ self.body.annotate(code)
+
+
class GlobalNode(StatNode):
# Global variable declaration.
#
@@ -5207,12 +5938,14 @@ class SingleAssignmentNode(AssignmentNode):
# rhs ExprNode Right hand side
# first bool Is this guaranteed the first assignment to lhs?
# is_overloaded_assignment bool Is this assignment done via an overloaded operator=
+ # is_assignment_expression bool Internally SingleAssignmentNode is used to implement assignment expressions
# exception_check
# exception_value
child_attrs = ["lhs", "rhs"]
first = False
is_overloaded_assignment = False
+ is_assignment_expression = False
declaration_only = False
def analyse_declarations(self, env):
@@ -5297,7 +6030,17 @@ class SingleAssignmentNode(AssignmentNode):
if self.declaration_only:
return
else:
- self.lhs.analyse_target_declaration(env)
+ if self.is_assignment_expression:
+ self.lhs.analyse_assignment_expression_target_declaration(env)
+ else:
+ self.lhs.analyse_target_declaration(env)
+ # if an entry doesn't exist that just implies that lhs isn't made up purely
+ # of AttributeNodes and NameNodes - it isn't useful as a known path to
+ # a standard library module
+ if (self.lhs.is_attribute or self.lhs.is_name) and self.lhs.entry and not self.lhs.entry.known_standard_library_import:
+ stdlib_import_name = self.rhs.get_known_standard_library_import()
+ if stdlib_import_name:
+ self.lhs.entry.known_standard_library_import = stdlib_import_name
def analyse_types(self, env, use_temp=0):
from . import ExprNodes
@@ -5320,8 +6063,8 @@ class SingleAssignmentNode(AssignmentNode):
elif self.lhs.type.is_array:
if not isinstance(self.lhs, ExprNodes.SliceIndexNode):
# cannot assign to C array, only to its full slice
- self.lhs = ExprNodes.SliceIndexNode(self.lhs.pos, base=self.lhs, start=None, stop=None)
- self.lhs = self.lhs.analyse_target_types(env)
+ lhs = ExprNodes.SliceIndexNode(self.lhs.pos, base=self.lhs, start=None, stop=None)
+ self.lhs = lhs.analyse_target_types(env)
if self.lhs.type.is_cpp_class:
op = env.lookup_operator_for_types(self.pos, '=', [self.lhs.type, self.rhs.type])
@@ -5821,7 +6564,7 @@ class ExecStatNode(StatNode):
arg.free_temps(code)
code.putln(
code.error_goto_if_null(temp_result, self.pos))
- code.put_gotref(temp_result)
+ code.put_gotref(temp_result, py_object_type)
code.put_decref_clear(temp_result, py_object_type)
code.funcstate.release_temp(temp_result)
@@ -6062,9 +6805,15 @@ class RaiseStatNode(StatNode):
# exc_value ExprNode or None
# exc_tb ExprNode or None
# cause ExprNode or None
+ #
+ # set in FlowControl
+ # in_try_block bool
child_attrs = ["exc_type", "exc_value", "exc_tb", "cause"]
is_terminator = True
+ builtin_exc_name = None
+ wrap_tuple_value = False
+ in_try_block = False
def analyse_expressions(self, env):
if self.exc_type:
@@ -6072,6 +6821,12 @@ class RaiseStatNode(StatNode):
self.exc_type = exc_type.coerce_to_pyobject(env)
if self.exc_value:
exc_value = self.exc_value.analyse_types(env)
+ if self.wrap_tuple_value:
+ if exc_value.type is Builtin.tuple_type or not exc_value.type.is_builtin_type:
+ # prevent tuple values from being interpreted as argument value tuples
+ from .ExprNodes import TupleNode
+ exc_value = TupleNode(exc_value.pos, args=[exc_value.coerce_to_pyobject(env)], slow=True)
+ exc_value = exc_value.analyse_types(env, skip_children=True)
self.exc_value = exc_value.coerce_to_pyobject(env)
if self.exc_tb:
exc_tb = self.exc_tb.analyse_types(env)
@@ -6080,7 +6835,6 @@ class RaiseStatNode(StatNode):
cause = self.cause.analyse_types(env)
self.cause = cause.coerce_to_pyobject(env)
# special cases for builtin exceptions
- self.builtin_exc_name = None
if self.exc_type and not self.exc_value and not self.exc_tb:
exc = self.exc_type
from . import ExprNodes
@@ -6088,9 +6842,19 @@ class RaiseStatNode(StatNode):
not (exc.args or (exc.arg_tuple is not None and exc.arg_tuple.args))):
exc = exc.function # extract the exception type
if exc.is_name and exc.entry.is_builtin:
+ from . import Symtab
self.builtin_exc_name = exc.name
if self.builtin_exc_name == 'MemoryError':
- self.exc_type = None # has a separate implementation
+ self.exc_type = None # has a separate implementation
+ elif (self.builtin_exc_name == 'StopIteration' and
+ env.is_local_scope and env.name == "__next__" and
+ env.parent_scope and env.parent_scope.is_c_class_scope and
+ not self.in_try_block):
+ # tp_iternext is allowed to return NULL without raising StopIteration.
+ # For the sake of simplicity, only allow this to happen when not in
+ # a try block
+ self.exc_type = None
+
return self
nogil_check = Node.gil_error
@@ -6101,6 +6865,11 @@ class RaiseStatNode(StatNode):
if self.builtin_exc_name == 'MemoryError':
code.putln('PyErr_NoMemory(); %s' % code.error_goto(self.pos))
return
+ elif self.builtin_exc_name == 'StopIteration' and not self.exc_type:
+ code.putln('%s = 1;' % Naming.error_without_exception_cname)
+ code.putln('%s;' % code.error_goto(None))
+ code.funcstate.error_without_exception = True
+ return
if self.exc_type:
self.exc_type.generate_evaluation_code(code)
@@ -6175,10 +6944,10 @@ class ReraiseStatNode(StatNode):
vars = code.funcstate.exc_vars
if vars:
code.globalstate.use_utility_code(restore_exception_utility_code)
- code.put_giveref(vars[0])
- code.put_giveref(vars[1])
+ code.put_giveref(vars[0], py_object_type)
+ code.put_giveref(vars[1], py_object_type)
# fresh exceptions may not have a traceback yet (-> finally!)
- code.put_xgiveref(vars[2])
+ code.put_xgiveref(vars[2], py_object_type)
code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % tuple(vars))
for varname in vars:
code.put("%s = 0; " % varname)
@@ -6189,65 +6958,55 @@ class ReraiseStatNode(StatNode):
UtilityCode.load_cached("ReRaiseException", "Exceptions.c"))
code.putln("__Pyx_ReraiseException(); %s" % code.error_goto(self.pos))
+
class AssertStatNode(StatNode):
# assert statement
#
- # cond ExprNode
- # value ExprNode or None
+ # condition ExprNode
+ # value ExprNode or None
+ # exception (Raise/GIL)StatNode created from 'value' in PostParse transform
- child_attrs = ["cond", "value"]
+ child_attrs = ["condition", "value", "exception"]
+ exception = None
+
+ def analyse_declarations(self, env):
+ assert self.value is None, "Message should have been replaced in PostParse()"
+ assert self.exception is not None, "Message should have been replaced in PostParse()"
+ self.exception.analyse_declarations(env)
def analyse_expressions(self, env):
- self.cond = self.cond.analyse_boolean_expression(env)
- if self.value:
- value = self.value.analyse_types(env)
- if value.type is Builtin.tuple_type or not value.type.is_builtin_type:
- # prevent tuple values from being interpreted as argument value tuples
- from .ExprNodes import TupleNode
- value = TupleNode(value.pos, args=[value], slow=True)
- self.value = value.analyse_types(env, skip_children=True).coerce_to_pyobject(env)
- else:
- self.value = value.coerce_to_pyobject(env)
+ self.condition = self.condition.analyse_temp_boolean_expression(env)
+ self.exception = self.exception.analyse_expressions(env)
return self
- nogil_check = Node.gil_error
- gil_message = "Raising exception"
-
def generate_execution_code(self, code):
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("AssertionsEnabled", "Exceptions.c"))
code.putln("#ifndef CYTHON_WITHOUT_ASSERTIONS")
- code.putln("if (unlikely(!Py_OptimizeFlag)) {")
+ code.putln("if (unlikely(__pyx_assertions_enabled())) {")
code.mark_pos(self.pos)
- self.cond.generate_evaluation_code(code)
- code.putln(
- "if (unlikely(!%s)) {" % self.cond.result())
- if self.value:
- self.value.generate_evaluation_code(code)
- code.putln(
- "PyErr_SetObject(PyExc_AssertionError, %s);" % self.value.py_result())
- self.value.generate_disposal_code(code)
- self.value.free_temps(code)
- else:
- code.putln(
- "PyErr_SetNone(PyExc_AssertionError);")
+ self.condition.generate_evaluation_code(code)
code.putln(
- code.error_goto(self.pos))
+ "if (unlikely(!%s)) {" % self.condition.result())
+ self.exception.generate_execution_code(code)
code.putln(
"}")
- self.cond.generate_disposal_code(code)
- self.cond.free_temps(code)
+ self.condition.generate_disposal_code(code)
+ self.condition.free_temps(code)
code.putln(
"}")
+ code.putln("#else")
+ # avoid unused labels etc.
+ code.putln("if ((1)); else %s" % code.error_goto(self.pos, used=False))
code.putln("#endif")
def generate_function_definitions(self, env, code):
- self.cond.generate_function_definitions(env, code)
- if self.value is not None:
- self.value.generate_function_definitions(env, code)
+ self.condition.generate_function_definitions(env, code)
+ self.exception.generate_function_definitions(env, code)
def annotate(self, code):
- self.cond.annotate(code)
- if self.value:
- self.value.annotate(code)
+ self.condition.annotate(code)
+ self.exception.annotate(code)
class IfStatNode(StatNode):
@@ -6274,13 +7033,9 @@ class IfStatNode(StatNode):
code.mark_pos(self.pos)
end_label = code.new_label()
last = len(self.if_clauses)
- if self.else_clause:
- # If the 'else' clause is 'unlikely', then set the preceding 'if' clause to 'likely' to reflect that.
- self._set_branch_hint(self.if_clauses[-1], self.else_clause, inverse=True)
- else:
+ if not self.else_clause:
last -= 1 # avoid redundant goto at end of last if-clause
for i, if_clause in enumerate(self.if_clauses):
- self._set_branch_hint(if_clause, if_clause.body)
if_clause.generate_execution_code(code, end_label, is_last=i == last)
if self.else_clause:
code.mark_pos(self.else_clause.pos)
@@ -6289,21 +7044,6 @@ class IfStatNode(StatNode):
code.putln("}")
code.put_label(end_label)
- def _set_branch_hint(self, clause, statements_node, inverse=False):
- if not statements_node.is_terminator:
- return
- if not isinstance(statements_node, StatListNode) or not statements_node.stats:
- return
- # Anything that unconditionally raises exceptions should be considered unlikely.
- if isinstance(statements_node.stats[-1], (RaiseStatNode, ReraiseStatNode)):
- if len(statements_node.stats) > 1:
- # Allow simple statements before the 'raise', but no conditions, loops, etc.
- non_branch_nodes = (ExprStatNode, AssignmentNode, DelStatNode, GlobalNode, NonlocalNode)
- for node in statements_node.stats[:-1]:
- if not isinstance(node, non_branch_nodes):
- return
- clause.branch_hint = 'likely' if inverse else 'unlikely'
-
def generate_function_definitions(self, env, code):
for clause in self.if_clauses:
clause.generate_function_definitions(env, code)
@@ -6589,7 +7329,7 @@ class DictIterationNextNode(Node):
# evaluate all coercions before the assignments
for var, result, target in assignments:
- code.put_gotref(var.result())
+ var.generate_gotref(code)
for var, result, target in assignments:
result.generate_evaluation_code(code)
for var, result, target in assignments:
@@ -6651,7 +7391,7 @@ class SetIterationNextNode(Node):
code.funcstate.release_temp(result_temp)
# evaluate all coercions before the assignments
- code.put_gotref(value_ref.result())
+ value_ref.generate_gotref(code)
self.coerced_value_var.generate_evaluation_code(code)
self.value_target.generate_assignment_code(self.coerced_value_var, code)
value_ref.release(code)
@@ -6719,39 +7459,33 @@ class _ForInStatNode(LoopNode, StatNode):
code.mark_pos(self.pos)
code.put_label(code.continue_label)
code.putln("}")
- break_label = code.break_label
+
+ # clean up before we enter the 'else:' branch
+ self.iterator.generate_disposal_code(code)
+
+ else_label = code.new_label("for_else") if self.else_clause else None
+ end_label = code.new_label("for_end")
+ label_intercepts = code.label_interceptor(
+ [code.break_label],
+ [end_label],
+ skip_to_label=else_label or end_label,
+ pos=self.pos,
+ )
+
+ code.mark_pos(self.pos)
+ for _ in label_intercepts:
+ self.iterator.generate_disposal_code(code)
+
code.set_loop_labels(old_loop_labels)
+ self.iterator.free_temps(code)
if self.else_clause:
- # In nested loops, the 'else' block can contain 'continue' or 'break'
- # statements for the outer loop, but we may need to generate cleanup code
- # before taking those paths, so we intercept them here.
- orig_exit_labels = (code.continue_label, code.break_label)
- code.continue_label = code.new_label('outer_continue')
- code.break_label = code.new_label('outer_break')
-
code.putln("/*else*/ {")
+ code.put_label(else_label)
self.else_clause.generate_execution_code(code)
code.putln("}")
- needs_goto_end = not self.else_clause.is_terminator
- for exit_label, orig_exit_label in zip([code.continue_label, code.break_label], orig_exit_labels):
- if not code.label_used(exit_label):
- continue
- if needs_goto_end:
- code.put_goto(break_label)
- needs_goto_end = False
- code.mark_pos(self.pos)
- code.put_label(exit_label)
- self.iterator.generate_disposal_code(code)
- code.put_goto(orig_exit_label)
- code.set_loop_labels(old_loop_labels)
-
- code.mark_pos(self.pos)
- if code.label_used(break_label):
- code.put_label(break_label)
- self.iterator.generate_disposal_code(code)
- self.iterator.free_temps(code)
+ code.put_label(end_label)
def generate_function_definitions(self, env, code):
self.target.generate_function_definitions(env, code)
@@ -6969,7 +7703,7 @@ class ForFromStatNode(LoopNode, StatNode):
target_node.result(),
interned_cname,
code.error_goto_if_null(target_node.result(), self.target.pos)))
- code.put_gotref(target_node.result())
+ target_node.generate_gotref(code)
else:
target_node = self.target
from_py_node = ExprNodes.CoerceFromPyTypeNode(
@@ -7105,7 +7839,7 @@ class WithStatNode(StatNode):
code.intern_identifier(EncodedString('__aexit__' if self.is_async else '__exit__')),
code.error_goto_if_null(self.exit_var, self.pos),
))
- code.put_gotref(self.exit_var)
+ code.put_gotref(self.exit_var, py_object_type)
# need to free exit_var in the face of exceptions during setup
old_error_label = code.new_error_label()
@@ -7249,17 +7983,17 @@ class TryExceptStatNode(StatNode):
save_exc.putln("__Pyx_ExceptionSave(%s);" % (
', '.join(['&%s' % var for var in exc_save_vars])))
for var in exc_save_vars:
- save_exc.put_xgotref(var)
+ save_exc.put_xgotref(var, py_object_type)
def restore_saved_exception():
for name in exc_save_vars:
- code.put_xgiveref(name)
+ code.put_xgiveref(name, py_object_type)
code.putln("__Pyx_ExceptionReset(%s);" %
', '.join(exc_save_vars))
else:
# try block cannot raise exceptions, but we had to allocate the temps above,
# so just keep the C compiler from complaining about them being unused
- mark_vars_used = ["(void)%s;" % var for var in exc_save_vars]
+ mark_vars_used = ["(void)%s;" % var for var in exc_save_vars]
save_exc.putln("%s /* mark used */" % ' '.join(mark_vars_used))
def restore_saved_exception():
@@ -7297,19 +8031,17 @@ class TryExceptStatNode(StatNode):
if not self.has_default_clause:
code.put_goto(except_error_label)
- for exit_label, old_label in [(except_error_label, old_error_label),
- (try_break_label, old_break_label),
- (try_continue_label, old_continue_label),
- (try_return_label, old_return_label),
- (except_return_label, old_return_label)]:
- if code.label_used(exit_label):
- if not normal_case_terminates and not code.label_used(try_end_label):
- code.put_goto(try_end_label)
- code.put_label(exit_label)
- code.mark_pos(self.pos, trace=False)
- if can_raise:
- restore_saved_exception()
- code.put_goto(old_label)
+ label_intercepts = code.label_interceptor(
+ [except_error_label, try_break_label, try_continue_label, try_return_label, except_return_label],
+ [old_error_label, old_break_label, old_continue_label, old_return_label, old_return_label],
+ skip_to_label=try_end_label if not normal_case_terminates and not code.label_used(try_end_label) else None,
+ pos=self.pos,
+ trace=False,
+ )
+
+ for _ in label_intercepts:
+ if can_raise:
+ restore_saved_exception()
if code.label_used(except_end_label):
if not normal_case_terminates and not code.label_used(try_end_label):
@@ -7402,17 +8134,40 @@ class ExceptClauseNode(Node):
for _ in range(3)]
code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c"))
code.putln("__Pyx_ErrFetch(&%s, &%s, &%s);" % tuple(exc_vars))
- code.globalstate.use_utility_code(UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
- exc_test_func = "__Pyx_PyErr_GivenExceptionMatches(%s, %%s)" % exc_vars[0]
+ exc_type = exc_vars[0]
else:
- exc_vars = ()
- code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrExceptionMatches", "Exceptions.c"))
- exc_test_func = "__Pyx_PyErr_ExceptionMatches(%s)"
+ exc_vars = exc_type = None
- exc_tests = []
for pattern in self.pattern:
pattern.generate_evaluation_code(code)
- exc_tests.append(exc_test_func % pattern.py_result())
+ patterns = [pattern.py_result() for pattern in self.pattern]
+
+ exc_tests = []
+ if exc_type:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
+ if len(patterns) == 2:
+ exc_tests.append("__Pyx_PyErr_GivenExceptionMatches2(%s, %s, %s)" % (
+ exc_type, patterns[0], patterns[1],
+ ))
+ else:
+ exc_tests.extend(
+ "__Pyx_PyErr_GivenExceptionMatches(%s, %s)" % (exc_type, pattern)
+ for pattern in patterns
+ )
+ elif len(patterns) == 2:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
+ exc_tests.append("__Pyx_PyErr_ExceptionMatches2(%s, %s)" % (
+ patterns[0], patterns[1],
+ ))
+ else:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyErrExceptionMatches", "Exceptions.c"))
+ exc_tests.extend(
+ "__Pyx_PyErr_ExceptionMatches(%s)" % pattern
+ for pattern in patterns
+ )
match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = %s;" % (match_flag, ' || '.join(exc_tests)))
@@ -7420,7 +8175,7 @@ class ExceptClauseNode(Node):
pattern.generate_disposal_code(code)
pattern.free_temps(code)
- if has_non_literals:
+ if exc_vars:
code.putln("__Pyx_ErrRestore(%s, %s, %s);" % tuple(exc_vars))
code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
for temp in exc_vars:
@@ -7455,7 +8210,7 @@ class ExceptClauseNode(Node):
code.putln("if (__Pyx_GetException(%s) < 0) %s" % (
exc_args, code.error_goto(self.pos)))
for var in exc_vars:
- code.put_gotref(var)
+ code.put_gotref(var, py_object_type)
if self.target:
self.exc_value.set_var(exc_vars[1])
self.exc_value.generate_evaluation_code(code)
@@ -7464,9 +8219,7 @@ class ExceptClauseNode(Node):
for tempvar, node in zip(exc_vars, self.excinfo_target.args):
node.set_var(tempvar)
- old_break_label, old_continue_label = code.break_label, code.continue_label
- code.break_label = code.new_label('except_break')
- code.continue_label = code.new_label('except_continue')
+ old_loop_labels = code.new_loop_labels("except_")
old_exc_vars = code.funcstate.exc_vars
code.funcstate.exc_vars = exc_vars
@@ -7480,15 +8233,11 @@ class ExceptClauseNode(Node):
code.put_xdecref_clear(var, py_object_type)
code.put_goto(end_label)
- for new_label, old_label in [(code.break_label, old_break_label),
- (code.continue_label, old_continue_label)]:
- if code.label_used(new_label):
- code.put_label(new_label)
- for var in exc_vars:
- code.put_decref_clear(var, py_object_type)
- code.put_goto(old_label)
- code.break_label = old_break_label
- code.continue_label = old_continue_label
+ for _ in code.label_interceptor(code.get_loop_labels(), old_loop_labels):
+ for var in exc_vars:
+ code.put_decref_clear(var, py_object_type)
+
+ code.set_loop_labels(old_loop_labels)
for temp in exc_vars:
code.funcstate.release_temp(temp)
@@ -7644,12 +8393,8 @@ class TryFinallyStatNode(StatNode):
code.funcstate.release_temp(exc_filename_cname)
code.put_goto(old_error_label)
- for new_label, old_label in zip(code.get_all_labels(), finally_old_labels):
- if not code.label_used(new_label):
- continue
- code.put_label(new_label)
+ for _ in code.label_interceptor(code.get_all_labels(), finally_old_labels):
self.put_error_cleaner(code, exc_vars)
- code.put_goto(old_label)
for cname in exc_vars:
code.funcstate.release_temp(cname)
@@ -7659,6 +8404,7 @@ class TryFinallyStatNode(StatNode):
return_label = code.return_label
exc_vars = ()
+ # TODO: use code.label_interceptor()?
for i, (new_label, old_label) in enumerate(zip(new_labels, old_labels)):
if not code.label_used(new_label):
continue
@@ -7737,7 +8483,7 @@ class TryFinallyStatNode(StatNode):
" unlikely(__Pyx_GetException(&%s, &%s, &%s) < 0)) "
"__Pyx_ErrFetch(&%s, &%s, &%s);" % (exc_vars[:3] * 2))
for var in exc_vars:
- code.put_xgotref(var)
+ code.put_xgotref(var, py_object_type)
if exc_lineno_cnames:
code.putln("%s = %s; %s = %s; %s = %s;" % (
exc_lineno_cnames[0], Naming.lineno_cname,
@@ -7758,11 +8504,11 @@ class TryFinallyStatNode(StatNode):
# unused utility functions and/or temps
code.putln("if (PY_MAJOR_VERSION >= 3) {")
for var in exc_vars[3:]:
- code.put_xgiveref(var)
+ code.put_xgiveref(var, py_object_type)
code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
code.putln("}")
for var in exc_vars[:3]:
- code.put_xgiveref(var)
+ code.put_xgiveref(var, py_object_type)
code.putln("__Pyx_ErrRestore(%s, %s, %s);" % exc_vars[:3])
if self.is_try_finally_in_nogil:
@@ -7784,7 +8530,7 @@ class TryFinallyStatNode(StatNode):
# unused utility functions and/or temps
code.putln("if (PY_MAJOR_VERSION >= 3) {")
for var in exc_vars[3:]:
- code.put_xgiveref(var)
+ code.put_xgiveref(var, py_object_type)
code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
code.putln("}")
for var in exc_vars[:3]:
@@ -7811,11 +8557,16 @@ class GILStatNode(NogilTryFinallyStatNode):
# 'with gil' or 'with nogil' statement
#
# state string 'gil' or 'nogil'
+ # scope_gil_state_known bool For nogil functions this can be False, since they can also be run with gil
+ # set to False by GilCheck transform
+ child_attrs = ["condition"] + NogilTryFinallyStatNode.child_attrs
state_temp = None
+ scope_gil_state_known = True
- def __init__(self, pos, state, body):
+ def __init__(self, pos, state, body, condition=None):
self.state = state
+ self.condition = condition
self.create_state_temp_if_needed(pos, state, body)
TryFinallyStatNode.__init__(
self, pos,
@@ -7842,11 +8593,18 @@ class GILStatNode(NogilTryFinallyStatNode):
if self.state == 'gil':
env.has_with_gil_block = True
+ if self.condition is not None:
+ self.condition.analyse_declarations(env)
+
return super(GILStatNode, self).analyse_declarations(env)
def analyse_expressions(self, env):
env.use_utility_code(
UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
+
+ if self.condition is not None:
+ self.condition = self.condition.analyse_expressions(env)
+
was_nogil = env.nogil
env.nogil = self.state == 'nogil'
node = TryFinallyStatNode.analyse_expressions(self, env)
@@ -7867,7 +8625,7 @@ class GILStatNode(NogilTryFinallyStatNode):
code.put_ensure_gil(variable=variable)
code.funcstate.gil_owned = True
else:
- code.put_release_gil(variable=variable)
+ code.put_release_gil(variable=variable, unknown_gil_state=not self.scope_gil_state_known)
code.funcstate.gil_owned = False
TryFinallyStatNode.generate_execution_code(self, code)
@@ -7884,10 +8642,13 @@ class GILExitNode(StatNode):
Used as the 'finally' block in a GILStatNode
state string 'gil' or 'nogil'
+ # scope_gil_state_known bool For nogil functions this can be False, since they can also be run with gil
+ # set to False by GilCheck transform
"""
child_attrs = []
state_temp = None
+ scope_gil_state_known = True
def analyse_expressions(self, env):
return self
@@ -7901,7 +8662,7 @@ class GILExitNode(StatNode):
if self.state == 'gil':
code.put_release_ensured_gil(variable)
else:
- code.put_acquire_gil(variable)
+ code.put_acquire_gil(variable, unknown_gil_state=not self.scope_gil_state_known)
class EnsureGILNode(GILExitNode):
@@ -7933,6 +8694,31 @@ utility_code_for_imports = {
'inspect': ("__Pyx_patch_inspect", "PatchInspect", "Coroutine.c"),
}
+def cimport_numpy_check(node, code):
+ # shared code between CImportStatNode and FromCImportStatNode
+ # check to ensure that import_array is called
+ for mod in code.globalstate.module_node.scope.cimported_modules:
+ if mod.name != node.module_name:
+ continue
+ # there are sometimes several cimported modules with the same name
+ # so complete the loop if necessary
+ import_array = mod.lookup_here("import_array")
+ _import_array = mod.lookup_here("_import_array")
+ # at least one entry used
+ used = (import_array and import_array.used) or (_import_array and _import_array.used)
+ if ((import_array or _import_array) # at least one entry found
+ and not used):
+ # sanity check that this is actually numpy and not a user pxd called "numpy"
+ if _import_array and _import_array.type.is_cfunction:
+ # warning is mainly for the sake of testing
+ warning(node.pos, "'numpy.import_array()' has been added automatically "
+ "since 'numpy' was cimported but 'numpy.import_array' was not called.", 0)
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("NumpyImportArray", "NumpyImportArray.c")
+ )
+ return # no need to continue once the utility code is added
+
+
class CImportStatNode(StatNode):
# cimport statement
@@ -7966,7 +8752,8 @@ class CImportStatNode(StatNode):
env.declare_module(top_name, top_module_scope, self.pos)
else:
name = self.as_name or self.module_name
- env.declare_module(name, module_scope, self.pos)
+ entry = env.declare_module(name, module_scope, self.pos)
+ entry.known_standard_library_import = self.module_name
if self.module_name in utility_code_for_cimports:
env.use_utility_code(utility_code_for_cimports[self.module_name]())
@@ -7974,7 +8761,8 @@ class CImportStatNode(StatNode):
return self
def generate_execution_code(self, code):
- pass
+ if self.module_name == "numpy":
+ cimport_numpy_check(self, code)
class FromCImportStatNode(StatNode):
@@ -7982,7 +8770,7 @@ class FromCImportStatNode(StatNode):
#
# module_name string Qualified name of module
# relative_level int or None Relative import: number of dots before module_name
- # imported_names [(pos, name, as_name, kind)] Names to be imported
+ # imported_names [(pos, name, as_name)] Names to be imported
child_attrs = []
module_name = None
@@ -7993,44 +8781,43 @@ class FromCImportStatNode(StatNode):
if not env.is_module_scope:
error(self.pos, "cimport only allowed at module level")
return
- if self.relative_level and self.relative_level > env.qualified_name.count('.'):
- error(self.pos, "relative cimport beyond main package is not allowed")
- return
+ qualified_name_components = env.qualified_name.count('.') + 1
+ if self.relative_level:
+ if self.relative_level > qualified_name_components:
+ # 1. case: importing beyond package: from .. import pkg
+ error(self.pos, "relative cimport beyond main package is not allowed")
+ return
+ elif self.relative_level == qualified_name_components and not env.is_package:
+ # 2. case: importing from same level but current dir is not package: from . import module
+ error(self.pos, "relative cimport from non-package directory is not allowed")
+ return
module_scope = env.find_module(self.module_name, self.pos, relative_level=self.relative_level)
module_name = module_scope.qualified_name
env.add_imported_module(module_scope)
- for pos, name, as_name, kind in self.imported_names:
+ for pos, name, as_name in self.imported_names:
if name == "*":
for local_name, entry in list(module_scope.entries.items()):
env.add_imported_entry(local_name, entry, pos)
else:
entry = module_scope.lookup(name)
if entry:
- if kind and not self.declaration_matches(entry, kind):
- entry.redeclared(pos)
entry.used = 1
else:
- if kind == 'struct' or kind == 'union':
- entry = module_scope.declare_struct_or_union(
- name, kind=kind, scope=None, typedef_flag=0, pos=pos)
- elif kind == 'class':
- entry = module_scope.declare_c_class(name, pos=pos, module_name=module_name)
+ submodule_scope = env.context.find_module(
+ name, relative_to=module_scope, pos=self.pos, absolute_fallback=False)
+ if submodule_scope.parent_module is module_scope:
+ env.declare_module(as_name or name, submodule_scope, self.pos)
else:
- submodule_scope = env.context.find_module(
- name, relative_to=module_scope, pos=self.pos, absolute_fallback=False)
- if submodule_scope.parent_module is module_scope:
- env.declare_module(as_name or name, submodule_scope, self.pos)
- else:
- error(pos, "Name '%s' not declared in module '%s'" % (name, module_name))
+ error(pos, "Name '%s' not declared in module '%s'" % (name, module_name))
if entry:
local_name = as_name or name
env.add_imported_entry(local_name, entry, pos)
- if module_name.startswith('cpython') or module_name.startswith('cython'): # enough for now
+ if module_name.startswith('cpython') or module_name.startswith('cython'): # enough for now
if module_name in utility_code_for_cimports:
env.use_utility_code(utility_code_for_cimports[module_name]())
- for _, name, _, _ in self.imported_names:
+ for _, name, _ in self.imported_names:
fqname = '%s.%s' % (module_name, name)
if fqname in utility_code_for_cimports:
env.use_utility_code(utility_code_for_cimports[fqname]())
@@ -8053,7 +8840,8 @@ class FromCImportStatNode(StatNode):
return self
def generate_execution_code(self, code):
- pass
+ if self.module_name == "numpy":
+ cimport_numpy_check(self, code)
class FromImportStatNode(StatNode):
@@ -8078,6 +8866,14 @@ class FromImportStatNode(StatNode):
self.import_star = 1
else:
target.analyse_target_declaration(env)
+ if target.entry:
+ if target.get_known_standard_library_import() is None:
+ target.entry.known_standard_library_import = EncodedString(
+ "%s.%s" % (self.module.module_name.value, name))
+ else:
+ # it isn't unambiguous
+ target.entry.known_standard_library_import = ""
+
def analyse_expressions(self, env):
from . import ExprNodes
@@ -8135,7 +8931,7 @@ class FromImportStatNode(StatNode):
self.module.py_result(),
code.intern_identifier(name),
code.error_goto_if_null(item_temp, self.pos)))
- code.put_gotref(item_temp)
+ code.put_gotref(item_temp, py_object_type)
if coerced_item is None:
target.generate_assignment_code(self.item, code)
else:
@@ -8251,7 +9047,7 @@ class ParallelStatNode(StatNode, ParallelNode):
seen.add(dictitem.key.value)
if dictitem.key.value == 'num_threads':
if not dictitem.value.is_none:
- self.num_threads = dictitem.value
+ self.num_threads = dictitem.value
elif self.is_prange and dictitem.key.value == 'chunksize':
if not dictitem.value.is_none:
self.chunksize = dictitem.value
@@ -8512,11 +9308,7 @@ class ParallelStatNode(StatNode, ParallelNode):
if self.is_parallel and not self.is_nested_prange:
code.putln("/* Clean up any temporaries */")
for temp, type in sorted(self.temps):
- if type.is_memoryviewslice:
- code.put_xdecref_memoryviewslice(temp, have_gil=False)
- elif type.is_pyobject:
- code.put_xdecref(temp, type)
- code.putln("%s = NULL;" % temp)
+ code.put_xdecref_clear(temp, type, have_gil=False)
def setup_parallel_control_flow_block(self, code):
"""
@@ -8549,7 +9341,7 @@ class ParallelStatNode(StatNode, ParallelNode):
self.old_return_label = code.return_label
code.return_label = code.new_label(name="return")
- code.begin_block() # parallel control flow block
+ code.begin_block() # parallel control flow block
self.begin_of_parallel_control_block_point = code.insertion_point()
self.begin_of_parallel_control_block_point_after_decls = code.insertion_point()
@@ -8679,7 +9471,7 @@ class ParallelStatNode(StatNode, ParallelNode):
code.putln_openmp("#pragma omp critical(%s)" % section_name)
ParallelStatNode.critical_section_counter += 1
- code.begin_block() # begin critical section
+ code.begin_block() # begin critical section
c = self.begin_of_parallel_control_block_point
@@ -8688,7 +9480,10 @@ class ParallelStatNode(StatNode, ParallelNode):
if not lastprivate or entry.type.is_pyobject:
continue
- type_decl = entry.type.empty_declaration_code()
+ if entry.type.is_cpp_class and not entry.type.is_fake_reference and code.globalstate.directives['cpp_locals']:
+ type_decl = entry.type.cpp_optional_declaration_code("")
+ else:
+ type_decl = entry.type.empty_declaration_code()
temp_cname = "__pyx_parallel_temp%d" % temp_count
private_cname = entry.cname
@@ -8702,12 +9497,19 @@ class ParallelStatNode(StatNode, ParallelNode):
# Declare the parallel private in the outer block
c.putln("%s %s%s;" % (type_decl, temp_cname, init))
+ self.parallel_private_temps.append((temp_cname, private_cname, entry.type))
+
+ if entry.type.is_cpp_class:
+ # moving is fine because we're quitting the loop and so won't be directly accessing the variable again
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("MoveIfSupported", "CppSupport.cpp"))
+ private_cname = "__PYX_STD_MOVE_IF_SUPPORTED(%s)" % private_cname
# Initialize before escaping
code.putln("%s = %s;" % (temp_cname, private_cname))
- self.parallel_private_temps.append((temp_cname, private_cname))
- code.end_block() # end critical section
+
+ code.end_block() # end critical section
def fetch_parallel_exception(self, code):
"""
@@ -8747,7 +9549,7 @@ class ParallelStatNode(StatNode, ParallelNode):
pos_info = chain(*zip(self.parallel_pos_info, self.pos_info))
code.funcstate.uses_error_indicator = True
code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
- code.put_gotref(Naming.parallel_exc_type)
+ code.put_gotref(Naming.parallel_exc_type, py_object_type)
code.putln(
"}")
@@ -8760,7 +9562,7 @@ class ParallelStatNode(StatNode, ParallelNode):
code.begin_block()
code.put_ensure_gil(declare_gilstate=True)
- code.put_giveref(Naming.parallel_exc_type)
+ code.put_giveref(Naming.parallel_exc_type, py_object_type)
code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % self.parallel_exc)
pos_info = chain(*zip(self.pos_info, self.parallel_pos_info))
code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
@@ -8826,7 +9628,10 @@ class ParallelStatNode(StatNode, ParallelNode):
code.putln(
"if (%s) {" % Naming.parallel_why)
- for temp_cname, private_cname in self.parallel_private_temps:
+ for temp_cname, private_cname, temp_type in self.parallel_private_temps:
+ if temp_type.is_cpp_class:
+ # utility code was loaded earlier
+ temp_cname = "__PYX_STD_MOVE_IF_SUPPORTED(%s)" % temp_cname
code.putln("%s = %s;" % (private_cname, temp_cname))
code.putln("switch (%s) {" % Naming.parallel_why)
@@ -8848,11 +9653,11 @@ class ParallelStatNode(StatNode, ParallelNode):
self.restore_parallel_exception(code)
code.put_goto(code.error_label)
- code.putln("}") # end switch
+ code.putln("}") # end switch
code.putln(
- "}") # end if
+ "}") # end if
- code.end_block() # end parallel control flow block
+ code.end_block() # end parallel control flow block
self.redef_builtin_expect_apple_gcc_bug(code)
# FIXME: improve with version number for OS X Lion
@@ -8971,9 +9776,6 @@ class ParallelRangeNode(ParallelStatNode):
else:
self.start, self.stop, self.step = self.args
- if hasattr(self.schedule, 'decode'):
- self.schedule = self.schedule.decode('ascii')
-
if self.schedule not in (None, 'static', 'dynamic', 'guided', 'runtime'):
error(self.pos, "Invalid schedule argument to prange: %s" % (self.schedule,))
@@ -9028,7 +9830,8 @@ class ParallelRangeNode(ParallelStatNode):
# ensure lastprivate behaviour and propagation. If the target index is
# not a NameNode, it won't have an entry, and an error was issued by
# ParallelRangeTransform
- if hasattr(self.target, 'entry'):
+ target_entry = getattr(self.target, 'entry', None)
+ if target_entry:
self.assignments[self.target.entry] = self.target.pos, None
node = super(ParallelRangeNode, self).analyse_expressions(env)
@@ -9141,9 +9944,12 @@ class ParallelRangeNode(ParallelStatNode):
# TODO: check if the step is 0 and if so, raise an exception in a
# 'with gil' block. For now, just abort
- code.putln("if ((%(step)s == 0)) abort();" % fmt_dict)
+ if self.step is not None and self.step.has_constant_result() and self.step.constant_result == 0:
+ error(node.pos, "Iteration with step 0 is invalid.")
+ elif not fmt_dict['step'].isdigit() or int(fmt_dict['step']) == 0:
+ code.putln("if (((%(step)s) == 0)) abort();" % fmt_dict)
- self.setup_parallel_control_flow_block(code) # parallel control flow block
+ self.setup_parallel_control_flow_block(code) # parallel control flow block
# Note: nsteps is private in an outer scope if present
code.putln("%(nsteps)s = (%(stop)s - %(start)s + %(step)s - %(step)s/abs(%(step)s)) / %(step)s;" % fmt_dict)
@@ -9155,9 +9961,9 @@ class ParallelRangeNode(ParallelStatNode):
# erroneously believes that nsteps may be <= 0, leaving the private
# target index uninitialized
code.putln("if (%(nsteps)s > 0)" % fmt_dict)
- code.begin_block() # if block
+ code.begin_block() # if block
self.generate_loop(code, fmt_dict)
- code.end_block() # end if block
+ code.end_block() # end if block
self.restore_labels(code)
@@ -9165,13 +9971,13 @@ class ParallelRangeNode(ParallelStatNode):
if self.breaking_label_used:
code.put("if (%s < 2)" % Naming.parallel_why)
- code.begin_block() # else block
+ code.begin_block() # else block
code.putln("/* else */")
self.else_clause.generate_execution_code(code)
- code.end_block() # end else block
+ code.end_block() # end else block
# ------ cleanup ------
- self.end_parallel_control_flow_block(code) # end parallel control flow block
+ self.end_parallel_control_flow_block(code) # end parallel control flow block
# And finally, release our privates and write back any closure
# variables
@@ -9202,7 +10008,7 @@ class ParallelRangeNode(ParallelStatNode):
code.putln("")
code.putln("#endif /* _OPENMP */")
- code.begin_block() # pragma omp parallel begin block
+ code.begin_block() # pragma omp parallel begin block
# Initialize the GIL if needed for this thread
self.begin_parallel_block(code)