summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
authorThomas Green <tomgreen66@hotmail.com>2021-12-08 11:57:10 +0000
committerGitHub <noreply@github.com>2021-12-08 11:57:10 +0000
commitdc766fc1abb546ab883f76ef4e405e99e9287ab6 (patch)
tree9e7c7748ba8bfbb2ba5224633b0725909712d2fa /numpy
parent1cfdac82ac793061d8ca4b07c046fc6b21ee7e54 (diff)
parentab7a1927353ab9dd52e3f2f7a1a889ae790667b9 (diff)
downloadnumpy-dc766fc1abb546ab883f76ef4e405e99e9287ab6.tar.gz
Merge branch 'numpy:main' into armcompiler
Diffstat (limited to 'numpy')
-rw-r--r--numpy/__init__.pyi6
-rw-r--r--numpy/array_api/_array_object.py14
-rw-r--r--numpy/array_api/_statistical_functions.py4
-rw-r--r--numpy/array_api/tests/test_array_object.py21
-rw-r--r--numpy/compat/py3k.py4
-rw-r--r--numpy/core/_add_newdocs.py5
-rw-r--r--numpy/core/code_generators/cversions.txt1
-rw-r--r--numpy/core/code_generators/ufunc_docstrings.py1
-rw-r--r--numpy/core/fromnumeric.py59
-rw-r--r--numpy/core/function_base.pyi184
-rw-r--r--numpy/core/include/numpy/experimental_dtype_api.h6
-rw-r--r--numpy/core/include/numpy/numpyconfig.h15
-rw-r--r--numpy/core/numeric.py8
-rw-r--r--numpy/core/setup.py2
-rw-r--r--numpy/core/setup_common.py1
-rw-r--r--numpy/core/src/multiarray/alloc.c45
-rw-r--r--numpy/core/src/multiarray/methods.c2
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c2
-rw-r--r--numpy/core/src/multiarray/nditer_pywrap.c20
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src10
-rw-r--r--numpy/core/src/umath/_operand_flag_tests.c (renamed from numpy/core/src/umath/_operand_flag_tests.c.src)0
-rw-r--r--numpy/core/src/umath/dispatching.c410
-rw-r--r--numpy/core/src/umath/dispatching.h14
-rw-r--r--numpy/core/src/umath/legacy_array_method.c34
-rw-r--r--numpy/core/src/umath/loops_exponent_log.dispatch.c.src18
-rw-r--r--numpy/core/src/umath/ufunc_object.c85
-rw-r--r--numpy/core/tests/test_datetime.py8
-rw-r--r--numpy/core/tests/test_deprecations.py27
-rw-r--r--numpy/core/tests/test_multiarray.py36
-rw-r--r--numpy/core/tests/test_ufunc.py63
-rw-r--r--numpy/core/tests/test_umath.py2
-rw-r--r--numpy/distutils/ccompiler_opt.py4
-rw-r--r--numpy/distutils/checks/cpu_asimdfhm.c4
-rw-r--r--numpy/distutils/misc_util.py27
-rw-r--r--numpy/f2py/__init__.py2
-rw-r--r--numpy/f2py/cfuncs.py59
-rw-r--r--numpy/f2py/tests/test_abstract_interface.py29
-rw-r--r--numpy/f2py/tests/test_array_from_pyobj.py384
-rw-r--r--numpy/f2py/tests/test_assumed_shape.py19
-rw-r--r--numpy/f2py/tests/test_block_docstring.py5
-rw-r--r--numpy/f2py/tests/test_callback.py62
-rw-r--r--numpy/f2py/tests/test_common.py13
-rw-r--r--numpy/f2py/tests/test_compile_function.py85
-rw-r--r--numpy/f2py/tests/test_crackfortran.py82
-rw-r--r--numpy/f2py/tests/test_kind.py26
-rw-r--r--numpy/f2py/tests/test_mixed.py12
-rw-r--r--numpy/f2py/tests/test_module_doc.py22
-rw-r--r--numpy/f2py/tests/test_parameter.py33
-rw-r--r--numpy/f2py/tests/test_quoted_character.py6
-rw-r--r--numpy/f2py/tests/test_regression.py23
-rw-r--r--numpy/f2py/tests/test_return_character.py42
-rw-r--r--numpy/f2py/tests/test_return_complex.py56
-rw-r--r--numpy/f2py/tests/test_return_integer.py36
-rw-r--r--numpy/f2py/tests/test_return_logical.py33
-rw-r--r--numpy/f2py/tests/test_return_real.py51
-rw-r--r--numpy/f2py/tests/test_semicolon_split.py27
-rw-r--r--numpy/f2py/tests/test_size.py6
-rw-r--r--numpy/f2py/tests/test_string.py93
-rw-r--r--numpy/f2py/tests/test_symbolic.py459
-rw-r--r--numpy/f2py/tests/util.py161
-rw-r--r--numpy/lib/index_tricks.py4
-rw-r--r--numpy/lib/npyio.py24
-rw-r--r--numpy/lib/recfunctions.py21
-rw-r--r--numpy/lib/scimath.py9
-rw-r--r--numpy/lib/scimath.pyi101
-rw-r--r--numpy/lib/shape_base.pyi4
-rw-r--r--numpy/lib/tests/test_io.py4
-rw-r--r--numpy/lib/type_check.py74
-rw-r--r--numpy/lib/type_check.pyi3
-rw-r--r--numpy/lib/utils.py13
-rw-r--r--numpy/linalg/tests/test_build.py53
-rw-r--r--numpy/ma/core.py10
-rw-r--r--numpy/random/_examples/cython/setup.py1
-rw-r--r--numpy/random/_mt19937.pyx2
-rw-r--r--numpy/random/mtrand.pyx17
-rw-r--r--numpy/random/tests/test_extending.py8
-rw-r--r--numpy/random/tests/test_randomstate_regression.py13
-rw-r--r--numpy/testing/_private/utils.py4
-rw-r--r--numpy/typing/tests/data/fail/array_constructors.pyi6
-rw-r--r--numpy/typing/tests/data/fail/shape_base.pyi8
-rw-r--r--numpy/typing/tests/data/reveal/array_constructors.pyi22
-rw-r--r--numpy/typing/tests/data/reveal/emath.pyi52
-rw-r--r--numpy/typing/tests/data/reveal/ndarray_misc.pyi3
-rw-r--r--numpy/typing/tests/test_typing.py8
84 files changed, 2079 insertions, 1353 deletions
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index e01df7c90..eb1e81c6a 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -2445,11 +2445,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
@overload
def __ior__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
- @overload
- def __ior__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ...
- @overload
+
def __dlpack__(self: NDArray[number[Any]], *, stream: None = ...) -> _PyCapsule: ...
- @overload
def __dlpack_device__(self) -> Tuple[int, L[0]]: ...
# Keep `dtype` at the bottom to avoid name conflicts with `np.dtype`
@@ -4342,4 +4339,3 @@ class _SupportsDLPack(Protocol[_T_contra]):
def __dlpack__(self, *, stream: None | _T_contra = ...) -> _PyCapsule: ...
def _from_dlpack(__obj: _SupportsDLPack[None]) -> NDArray[Any]: ...
-
diff --git a/numpy/array_api/_array_object.py b/numpy/array_api/_array_object.py
index 8794c5ea5..75baf34b0 100644
--- a/numpy/array_api/_array_object.py
+++ b/numpy/array_api/_array_object.py
@@ -33,6 +33,7 @@ from typing import TYPE_CHECKING, Optional, Tuple, Union, Any
if TYPE_CHECKING:
from ._typing import Any, PyCapsule, Device, Dtype
+ import numpy.typing as npt
import numpy as np
@@ -108,6 +109,17 @@ class Array:
mid = np.array2string(self._array, separator=', ', prefix=prefix, suffix=suffix)
return prefix + mid + suffix
+ # This function is not required by the spec, but we implement it here for
+ # convenience so that np.asarray(np.array_api.Array) will work.
+ def __array__(self, dtype: None | np.dtype[Any] = None) -> npt.NDArray[Any]:
+ """
+ Warning: this method is NOT part of the array API spec. Implementers
+ of other libraries need not include it, and users should not assume it
+ will be present in other implementations.
+
+ """
+ return np.asarray(self._array, dtype=dtype)
+
# These are various helper functions to make the array behavior match the
# spec in places where it either deviates from or is more strict than
# NumPy behavior
@@ -1072,4 +1084,4 @@ class Array:
# https://data-apis.org/array-api/latest/API_specification/array_object.html#t
if self.ndim != 2:
raise ValueError("x.T requires x to have 2 dimensions. Use x.mT to transpose stacks of matrices and permute_dims() to permute dimensions.")
- return self._array.T
+ return self.__class__._new(self._array.T)
diff --git a/numpy/array_api/_statistical_functions.py b/numpy/array_api/_statistical_functions.py
index 7bee3f4db..5bc831ac2 100644
--- a/numpy/array_api/_statistical_functions.py
+++ b/numpy/array_api/_statistical_functions.py
@@ -65,8 +65,8 @@ def prod(
# Note: sum() and prod() always upcast float32 to float64 for dtype=None
# We need to do so here before computing the product to avoid overflow
if dtype is None and x.dtype == float32:
- x = asarray(x, dtype=float64)
- return Array._new(np.prod(x._array, axis=axis, keepdims=keepdims))
+ dtype = float64
+ return Array._new(np.prod(x._array, dtype=dtype, axis=axis, keepdims=keepdims))
def std(
diff --git a/numpy/array_api/tests/test_array_object.py b/numpy/array_api/tests/test_array_object.py
index 12479d765..b980bacca 100644
--- a/numpy/array_api/tests/test_array_object.py
+++ b/numpy/array_api/tests/test_array_object.py
@@ -4,6 +4,7 @@ from numpy.testing import assert_raises
import numpy as np
from .. import ones, asarray, result_type, all, equal
+from .._array_object import Array
from .._dtypes import (
_all_dtypes,
_boolean_dtypes,
@@ -301,3 +302,23 @@ def test_device_property():
assert all(equal(asarray(a, device='cpu'), a))
assert_raises(ValueError, lambda: asarray(a, device='gpu'))
+
+def test_array_properties():
+ a = ones((1, 2, 3))
+ b = ones((2, 3))
+ assert_raises(ValueError, lambda: a.T)
+
+ assert isinstance(b.T, Array)
+ assert b.T.shape == (3, 2)
+
+ assert isinstance(a.mT, Array)
+ assert a.mT.shape == (1, 3, 2)
+ assert isinstance(b.mT, Array)
+ assert b.mT.shape == (3, 2)
+
+def test___array__():
+ a = ones((2, 3), dtype=int16)
+ assert np.asarray(a) is a._array
+ b = np.asarray(a, dtype=np.float64)
+ assert np.all(np.equal(b, np.ones((2, 3), dtype=np.float64)))
+ assert b.dtype == np.float64
diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py
index 1fa17621a..3d10bb988 100644
--- a/numpy/compat/py3k.py
+++ b/numpy/compat/py3k.py
@@ -107,7 +107,9 @@ class contextlib_nullcontext:
def npy_load_module(name, fn, info=None):
"""
- Load a module.
+ Load a module. Uses ``load_module`` which will be deprecated in python
+ 3.12. An alternative that uses ``exec_module`` is in
+ numpy.distutils.misc_util.exec_mod_from_location
.. versionadded:: 1.11.2
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 078c58976..7d009ad9f 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -2658,8 +2658,9 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
See Also
--------
- numpy.reshape : similar function
- ndarray.reshape : similar method
+ numpy.shape : Equivalent getter function.
+ numpy.reshape : Function similar to setting ``shape``.
+ ndarray.reshape : Method similar to setting ``shape``.
"""))
diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt
index e7b3ef697..e1ee8a860 100644
--- a/numpy/core/code_generators/cversions.txt
+++ b/numpy/core/code_generators/cversions.txt
@@ -61,4 +61,5 @@
0x0000000e = 17a0f366e55ec05e5c5c149123478452
# Version 15 (NumPy 1.22) Configurable memory allocations
+# Version 15 (NumPy 1.23) No change.
0x0000000f = b8783365b873681cd204be50cdfb448d
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index c9be94569..cd584eea7 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -3827,6 +3827,7 @@ add_newdoc('numpy.core.umath', 'sqrt',
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
+ Note: 0.0 and -0.0 are handled differently for complex inputs.
Notes
-----
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 3242124ac..f26f306fa 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -17,7 +17,7 @@ _dt_ = nt.sctype2char
# functions that are methods
__all__ = [
- 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
+ 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
@@ -1980,25 +1980,27 @@ def shape(a):
See Also
--------
- len
+ len : ``len(a)`` is equivalent to ``np.shape(a)[0]`` for N-D arrays with
+ ``N>=1``.
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
- >>> np.shape([[1, 2]])
+ >>> np.shape([[1, 3]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
- >>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+ >>> a = np.array([(1, 2), (3, 4), (5, 6)],
+ ... dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
- (2,)
+ (3,)
>>> a.shape
- (2,)
+ (3,)
"""
try:
@@ -2917,51 +2919,6 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
keepdims=keepdims, initial=initial, where=where)
-def _alen_dispathcer(a):
- return (a,)
-
-
-@array_function_dispatch(_alen_dispathcer)
-def alen(a):
- """
- Return the length of the first dimension of the input array.
-
- .. deprecated:: 1.18
- `numpy.alen` is deprecated, use `len` instead.
-
- Parameters
- ----------
- a : array_like
- Input array.
-
- Returns
- -------
- alen : int
- Length of the first dimension of `a`.
-
- See Also
- --------
- shape, size
-
- Examples
- --------
- >>> a = np.zeros((7,4,5))
- >>> a.shape[0]
- 7
- >>> np.alen(a)
- 7
-
- """
- # NumPy 1.18.0, 2019-08-02
- warnings.warn(
- "`np.alen` is deprecated, use `len` instead",
- DeprecationWarning, stacklevel=2)
- try:
- return len(a)
- except TypeError:
- return len(array(a, ndmin=1))
-
-
def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
initial=None, where=None):
return (a, out)
diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi
index 68d3b3a98..6e0843a0e 100644
--- a/numpy/core/function_base.pyi
+++ b/numpy/core/function_base.pyi
@@ -1,55 +1,195 @@
-from typing import overload, Tuple, Union, Sequence, Any, SupportsIndex, Literal, List
+from typing import (
+ Literal as L,
+ overload,
+ Tuple,
+ Union,
+ Any,
+ SupportsIndex,
+ List,
+ Type,
+ TypeVar,
+)
-from numpy import ndarray
-from numpy.typing import ArrayLike, DTypeLike, _SupportsArray, _NumberLike_co
+from numpy import floating, complexfloating, generic, dtype
+from numpy.typing import (
+ NDArray,
+ ArrayLike,
+ DTypeLike,
+ _SupportsDType,
+ _SupportsArray,
+ _NumberLike_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+)
-# TODO: wait for support for recursive types
-_ArrayLikeNested = Sequence[Sequence[Any]]
-_ArrayLikeNumber = Union[
- _NumberLike_co, Sequence[_NumberLike_co], ndarray, _SupportsArray, _ArrayLikeNested
+_SCT = TypeVar("_SCT", bound=generic)
+
+_DTypeLike = Union[
+ dtype[_SCT],
+ Type[_SCT],
+ _SupportsDType[dtype[_SCT]],
]
__all__: List[str]
@overload
def linspace(
- start: _ArrayLikeNumber,
- stop: _ArrayLikeNumber,
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[False] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[False] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[False] = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
num: SupportsIndex = ...,
endpoint: bool = ...,
- retstep: Literal[False] = ...,
+ retstep: L[False] = ...,
dtype: DTypeLike = ...,
axis: SupportsIndex = ...,
-) -> ndarray: ...
+) -> NDArray[Any]: ...
@overload
def linspace(
- start: _ArrayLikeNumber,
- stop: _ArrayLikeNumber,
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
num: SupportsIndex = ...,
endpoint: bool = ...,
- retstep: Literal[True] = ...,
+ retstep: L[True] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> Tuple[NDArray[floating[Any]], floating[Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[True] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> Tuple[NDArray[complexfloating[Any, Any]], complexfloating[Any, Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[True] = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> Tuple[NDArray[_SCT], _SCT]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[True] = ...,
dtype: DTypeLike = ...,
axis: SupportsIndex = ...,
-) -> Tuple[ndarray, Any]: ...
+) -> Tuple[NDArray[Any], Any]: ...
+@overload
def logspace(
- start: _ArrayLikeNumber,
- stop: _ArrayLikeNumber,
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
num: SupportsIndex = ...,
endpoint: bool = ...,
- base: _ArrayLikeNumber = ...,
+ base: _ArrayLikeFloat_co = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ base: _ArrayLikeComplex_co = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ base: _ArrayLikeComplex_co = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ base: _ArrayLikeComplex_co = ...,
dtype: DTypeLike = ...,
axis: SupportsIndex = ...,
-) -> ndarray: ...
+) -> NDArray[Any]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
def geomspace(
- start: _ArrayLikeNumber,
- stop: _ArrayLikeNumber,
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
num: SupportsIndex = ...,
endpoint: bool = ...,
dtype: DTypeLike = ...,
axis: SupportsIndex = ...,
-) -> ndarray: ...
+) -> NDArray[Any]: ...
# Re-exported to `np.lib.function_base`
def add_newdoc(
diff --git a/numpy/core/include/numpy/experimental_dtype_api.h b/numpy/core/include/numpy/experimental_dtype_api.h
index 554c7fb6c..effa66baf 100644
--- a/numpy/core/include/numpy/experimental_dtype_api.h
+++ b/numpy/core/include/numpy/experimental_dtype_api.h
@@ -181,6 +181,12 @@ typedef PyObject *_ufunc_addloop_fromspec_func(
/*
* Type of the C promoter function, which must be wrapped into a
* PyCapsule with name "numpy._ufunc_promoter".
+ *
+ * Note that currently the output dtypes are always NULL unless they are
+ * also part of the signature. This is an implementation detail and could
+ * change in the future. However, in general promoters should not have a
+ * need for output dtypes.
+ * (There are potential use-cases, these are currently unsupported.)
*/
typedef int promoter_function(PyObject *ufunc,
PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
diff --git a/numpy/core/include/numpy/numpyconfig.h b/numpy/core/include/numpy/numpyconfig.h
index f761555b9..b2e7c458e 100644
--- a/numpy/core/include/numpy/numpyconfig.h
+++ b/numpy/core/include/numpy/numpyconfig.h
@@ -23,12 +23,18 @@
#undef NPY_SIZEOF_LONGDOUBLE
#undef NPY_SIZEOF_COMPLEX_LONGDOUBLE
- #ifdef __x86_64
- #define NPY_SIZEOF_LONGDOUBLE 16
- #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
- #elif defined(__arm64__)
+ #if defined(__arm64__)
#define NPY_SIZEOF_LONGDOUBLE 8
#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16
+ #elif defined(__x86_64)
+ #define NPY_SIZEOF_LONGDOUBLE 16
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
+ #elif defined (__i386)
+ #define NPY_SIZEOF_LONGDOUBLE 12
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 24
+ #elif defined(__ppc__) || defined (__ppc64__)
+ #define NPY_SIZEOF_LONGDOUBLE 16
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
#else
#error "unknown architecture"
#endif
@@ -57,5 +63,6 @@
#define NPY_1_20_API_VERSION 0x0000000e
#define NPY_1_21_API_VERSION 0x0000000e
#define NPY_1_22_API_VERSION 0x0000000f
+#define NPY_1_23_API_VERSION 0x0000000f
#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ */
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 344d40d93..014fa0a39 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -1829,6 +1829,14 @@ def fromfunction(function, shape, *, dtype=float, like=None, **kwargs):
Examples
--------
+ >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float)
+ array([[0., 0.],
+ [1., 1.]])
+
+ >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float)
+ array([[0., 1.],
+ [0., 1.]])
+
>>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
array([[ True, False, False],
[False, True, False],
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index a5f423d8f..17fbd99af 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -1065,7 +1065,7 @@ def configuration(parent_package='',top_path=None):
#######################################################################
config.add_extension('_operand_flag_tests',
- sources=[join('src', 'umath', '_operand_flag_tests.c.src')])
+ sources=[join('src', 'umath', '_operand_flag_tests.c')])
#######################################################################
# SIMD module #
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 70e8fc897..772c87c96 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -44,6 +44,7 @@ C_ABI_VERSION = 0x01000009
# 0x0000000e - 1.20.x
# 0x0000000e - 1.21.x
# 0x0000000f - 1.22.x
+# 0x0000000f - 1.23.x
C_API_VERSION = 0x0000000f
class MismatchCAPIWarning(Warning):
diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c
index 0a694cf62..94a7daa83 100644
--- a/numpy/core/src/multiarray/alloc.c
+++ b/numpy/core/src/multiarray/alloc.c
@@ -186,6 +186,24 @@ npy_free_cache_dim(void * p, npy_uintp sz)
&PyArray_free);
}
+/* Similar to array_dealloc in arrayobject.c */
+static NPY_INLINE void
+WARN_NO_RETURN(PyObject* warning, const char * msg) {
+ if (PyErr_WarnEx(warning, msg, 1) < 0) {
+ PyObject * s;
+
+ s = PyUnicode_FromString("PyDataMem_UserFREE");
+ if (s) {
+ PyErr_WriteUnraisable(s);
+ Py_DECREF(s);
+ }
+ else {
+ PyErr_WriteUnraisable(Py_None);
+ }
+ }
+}
+
+
/* malloc/free/realloc hook */
NPY_NO_EXPORT PyDataMem_EventHookFunc *_PyDataMem_eventhook = NULL;
@@ -210,6 +228,8 @@ NPY_NO_EXPORT void *_PyDataMem_eventhook_user_data = NULL;
* operations that might cause new allocation events (such as the
* creation/destruction numpy objects, or creating/destroying Python
* objects which might cause a gc)
+ *
+ * Deprecated in 1.23
*/
NPY_NO_EXPORT PyDataMem_EventHookFunc *
PyDataMem_SetEventHook(PyDataMem_EventHookFunc *newhook,
@@ -218,6 +238,10 @@ PyDataMem_SetEventHook(PyDataMem_EventHookFunc *newhook,
PyDataMem_EventHookFunc *temp;
NPY_ALLOW_C_API_DEF
NPY_ALLOW_C_API
+ /* 2021-11-18, 1.23 */
+ WARN_NO_RETURN(PyExc_DeprecationWarning,
+ "PyDataMem_SetEventHook is deprecated, use tracemalloc "
+ "and the 'np.lib.tracemalloc_domain' domain");
temp = _PyDataMem_eventhook;
_PyDataMem_eventhook = newhook;
if (old_data != NULL) {
@@ -435,33 +459,14 @@ PyDataMem_UserNEW_ZEROED(size_t nmemb, size_t size, PyObject *mem_handler)
return result;
}
-/* Similar to array_dealloc in arrayobject.c */
-static NPY_INLINE void
-WARN_IN_FREE(PyObject* warning, const char * msg) {
- if (PyErr_WarnEx(warning, msg, 1) < 0) {
- PyObject * s;
-
- s = PyUnicode_FromString("PyDataMem_UserFREE");
- if (s) {
- PyErr_WriteUnraisable(s);
- Py_DECREF(s);
- }
- else {
- PyErr_WriteUnraisable(Py_None);
- }
- }
-}
-
-
NPY_NO_EXPORT void
PyDataMem_UserFREE(void *ptr, size_t size, PyObject *mem_handler)
{
PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler");
if (handler == NULL) {
- WARN_IN_FREE(PyExc_RuntimeWarning,
+ WARN_NO_RETURN(PyExc_RuntimeWarning,
"Could not get pointer to 'mem_handler' from PyCapsule");
- PyErr_Clear();
return;
}
PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr);
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index 627096b3c..b0b6f42f1 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -2246,7 +2246,7 @@ array_dumps(PyArrayObject *self, PyObject *args, PyObject *kwds)
static PyObject *
-array_sizeof(PyArrayObject *self)
+array_sizeof(PyArrayObject *self, PyObject *NPY_UNUSED(args))
{
/* object + dimension and strides */
Py_ssize_t nbytes = Py_TYPE(self)->tp_basicsize +
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index dbf5ab161..cf0160a2b 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -4212,7 +4212,7 @@ normalize_axis_index(PyObject *NPY_UNUSED(self),
static PyObject *
-_reload_guard(PyObject *NPY_UNUSED(self)) {
+_reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) {
static int initialized = 0;
#if !defined(PYPY_VERSION)
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index 8e072d5f4..2675496ab 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -1190,7 +1190,7 @@ npyiter_resetbasepointers(NewNpyArrayIterObject *self)
}
static PyObject *
-npyiter_reset(NewNpyArrayIterObject *self)
+npyiter_reset(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
if (self->iter == NULL) {
PyErr_SetString(PyExc_ValueError,
@@ -1227,7 +1227,7 @@ npyiter_reset(NewNpyArrayIterObject *self)
* copied.
*/
static PyObject *
-npyiter_copy(NewNpyArrayIterObject *self)
+npyiter_copy(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
NewNpyArrayIterObject *iter;
@@ -1263,7 +1263,7 @@ npyiter_copy(NewNpyArrayIterObject *self)
}
static PyObject *
-npyiter_iternext(NewNpyArrayIterObject *self)
+npyiter_iternext(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
if (self->iter != NULL && self->iternext != NULL &&
!self->finished && self->iternext(self->iter)) {
@@ -1320,7 +1320,8 @@ npyiter_remove_axis(NewNpyArrayIterObject *self, PyObject *args)
}
static PyObject *
-npyiter_remove_multi_index(NewNpyArrayIterObject *self)
+npyiter_remove_multi_index(
+ NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
if (self->iter == NULL) {
PyErr_SetString(PyExc_ValueError,
@@ -1345,7 +1346,8 @@ npyiter_remove_multi_index(NewNpyArrayIterObject *self)
}
static PyObject *
-npyiter_enable_external_loop(NewNpyArrayIterObject *self)
+npyiter_enable_external_loop(
+ NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
if (self->iter == NULL) {
PyErr_SetString(PyExc_ValueError,
@@ -1370,7 +1372,7 @@ npyiter_enable_external_loop(NewNpyArrayIterObject *self)
}
static PyObject *
-npyiter_debug_print(NewNpyArrayIterObject *self)
+npyiter_debug_print(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
if (self->iter != NULL) {
NpyIter_DebugPrint(self->iter);
@@ -2315,7 +2317,7 @@ npyiter_ass_subscript(NewNpyArrayIterObject *self, PyObject *op,
}
static PyObject *
-npyiter_enter(NewNpyArrayIterObject *self)
+npyiter_enter(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
if (self->iter == NULL) {
PyErr_SetString(PyExc_RuntimeError, "operation on non-initialized iterator");
@@ -2326,7 +2328,7 @@ npyiter_enter(NewNpyArrayIterObject *self)
}
static PyObject *
-npyiter_close(NewNpyArrayIterObject *self)
+npyiter_close(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
NpyIter *iter = self->iter;
int ret;
@@ -2347,7 +2349,7 @@ static PyObject *
npyiter_exit(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
/* even if called via exception handling, writeback any data */
- return npyiter_close(self);
+ return npyiter_close(self, NULL);
}
static PyMethodDef npyiter_methods[] = {
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index bbbc5bfa2..db1e49db8 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -229,7 +229,7 @@ gentype_multiply(PyObject *m1, PyObject *m2)
* #convert = Long*8, LongLong*2#
*/
static PyObject *
-@type@_bit_count(PyObject *self)
+@type@_bit_count(PyObject *self, PyObject *NPY_UNUSED(args))
{
@type@ scalar = PyArrayScalar_VAL(self, @Name@);
uint8_t count = npy_popcount@c@(scalar);
@@ -1160,7 +1160,7 @@ gentype_size_get(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored))
}
static PyObject *
-gentype_sizeof(PyObject *self)
+gentype_sizeof(PyObject *self, PyObject *NPY_UNUSED(args))
{
Py_ssize_t nbytes;
PyObject * isz = gentype_itemsize_get(self, NULL);
@@ -1918,7 +1918,7 @@ static PyObject *
*/
/* Heavily copied from the builtin float.as_integer_ratio */
static PyObject *
-@name@_as_integer_ratio(PyObject *self)
+@name@_as_integer_ratio(PyObject *self, PyObject *NPY_UNUSED(args))
{
#if @is_half@
npy_double val = npy_half_to_double(PyArrayScalar_VAL(self, @Name@));
@@ -1999,7 +1999,7 @@ error:
* #c = f, f, , l#
*/
static PyObject *
-@name@_is_integer(PyObject *self)
+@name@_is_integer(PyObject *self, PyObject *NPY_UNUSED(args))
{
#if @is_half@
npy_double val = npy_half_to_double(PyArrayScalar_VAL(self, @Name@));
@@ -2022,7 +2022,7 @@ static PyObject *
/**end repeat**/
static PyObject *
-integer_is_integer(PyObject *self) {
+integer_is_integer(PyObject *self, PyObject *NPY_UNUSED(args)) {
Py_RETURN_TRUE;
}
diff --git a/numpy/core/src/umath/_operand_flag_tests.c.src b/numpy/core/src/umath/_operand_flag_tests.c
index c59e13baf..c59e13baf 100644
--- a/numpy/core/src/umath/_operand_flag_tests.c.src
+++ b/numpy/core/src/umath/_operand_flag_tests.c
diff --git a/numpy/core/src/umath/dispatching.c b/numpy/core/src/umath/dispatching.c
index 8e99c0420..4c6b09b80 100644
--- a/numpy/core/src/umath/dispatching.c
+++ b/numpy/core/src/umath/dispatching.c
@@ -46,19 +46,23 @@
#include "dispatching.h"
#include "dtypemeta.h"
+#include "common_dtype.h"
#include "npy_hashtable.h"
#include "legacy_array_method.h"
#include "ufunc_object.h"
#include "ufunc_type_resolution.h"
+#define PROMOTION_DEBUG_TRACING 0
+
+
/* forward declaration */
static NPY_INLINE PyObject *
promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
PyArrayObject *const ops[],
PyArray_DTypeMeta *signature[],
PyArray_DTypeMeta *op_dtypes[],
- npy_bool allow_legacy_promotion, npy_bool cache);
+ npy_bool allow_legacy_promotion);
/**
@@ -147,6 +151,23 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate)
* (Based on `isinstance()`, the knowledge that non-abstract DTypes cannot
* be subclassed is used, however.)
*
+ * NOTE: This currently does not take into account output dtypes which do not
+ * have to match. The possible extension here is that if an output
+ * is given (and thus an output dtype), but not part of the signature
+ * we could ignore it for matching, but *prefer* a loop that matches
+ * better.
+ * Why is this not done currently? First, it seems a niche feature that
+ * loops can only be distinguished based on the output dtype. Second,
+ * there are some nasty theoretical things because:
+ *
+ * np.add(f4, f4, out=f8)
+ * np.add(f4, f4, out=f8, dtype=f8)
+ *
+ * are different, the first uses the f4 loop, the second the f8 loop.
+ * The problem is, that the current cache only uses the op_dtypes and
+ * both are `(f4, f4, f8)`. The cache would need to store also which
+ * output was provided by `dtype=`/`signature=`.
+ *
* @param ufunc
* @param op_dtypes The DTypes that are either passed in (defined by an
* operand) or defined by the `signature` as also passed in as
@@ -159,17 +180,35 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate)
*/
static int
resolve_implementation_info(PyUFuncObject *ufunc,
- PyArray_DTypeMeta *op_dtypes[], PyObject **out_info)
+ PyArray_DTypeMeta *op_dtypes[], npy_bool only_promoters,
+ PyObject **out_info)
{
int nin = ufunc->nin, nargs = ufunc->nargs;
Py_ssize_t size = PySequence_Length(ufunc->_loops);
PyObject *best_dtypes = NULL;
PyObject *best_resolver_info = NULL;
+#if PROMOTION_DEBUG_TRACING
+ printf("Promoting for '%s' promoters only: %d\n",
+ ufunc->name ? ufunc->name : "<unknown>", (int)only_promoters);
+ printf(" DTypes: ");
+    PyObject *tmp = PyArray_TupleFromItems(
+            ufunc->nargs, (PyObject **)op_dtypes, 1);
+    PyObject_Print(tmp, stdout, 0);
+    printf("\n");
+    Py_DECREF(tmp);
+#endif
+
for (Py_ssize_t res_idx = 0; res_idx < size; res_idx++) {
/* Test all resolvers */
PyObject *resolver_info = PySequence_Fast_GET_ITEM(
ufunc->_loops, res_idx);
+
+ if (only_promoters && PyObject_TypeCheck(
+ PyTuple_GET_ITEM(resolver_info, 1), &PyArrayMethod_Type)) {
+ continue;
+ }
+
PyObject *curr_dtypes = PyTuple_GET_ITEM(resolver_info, 0);
/*
* Test if the current resolver matches, it could make sense to
@@ -179,20 +218,31 @@ resolve_implementation_info(PyUFuncObject *ufunc,
npy_bool matches = NPY_TRUE;
/*
- * NOTE: We check also the output DType. In principle we do not
- * have to strictly match it (unless it is provided by the
- * `signature`). This assumes that a (fallback) promoter will
- * unset the output DType if no exact match is found.
+     * NOTE: We currently match the output dtype exactly here, this is
+     *       actually only necessary if the signature includes the output.
+     *       Currently, we rely on op_dtypes[nin:nargs] being NULLed if not.
*/
for (Py_ssize_t i = 0; i < nargs; i++) {
PyArray_DTypeMeta *given_dtype = op_dtypes[i];
PyArray_DTypeMeta *resolver_dtype = (
(PyArray_DTypeMeta *)PyTuple_GET_ITEM(curr_dtypes, i));
assert((PyObject *)given_dtype != Py_None);
- if (given_dtype == NULL && i >= nin) {
- /* Unspecified out always matches (see below for inputs) */
- continue;
+ if (given_dtype == NULL) {
+ if (i >= nin) {
+ /* Unspecified out always matches (see below for inputs) */
+ continue;
+ }
+ /*
+             * This is a reduce-like operation, which always has the form
+ * `(res_DType, op_DType, res_DType)`. If the first and last
+ * dtype of the loops match, this should be reduce-compatible.
+ */
+ if (PyTuple_GET_ITEM(curr_dtypes, 0)
+ == PyTuple_GET_ITEM(curr_dtypes, 2)) {
+ continue;
+ }
}
+
if (resolver_dtype == (PyArray_DTypeMeta *)Py_None) {
/* always matches */
continue;
@@ -204,24 +254,7 @@ resolve_implementation_info(PyUFuncObject *ufunc,
matches = NPY_FALSE;
break;
}
- if (given_dtype == NULL) {
- /*
- * If an input was not specified, this is a reduce-like
- * operation: reductions use `(operand_DType, NULL, out_DType)`
- * as they only have a single operand. This allows special
- * reduce promotion rules useful for example for sum/product.
- * E.g. `np.add.reduce([True, True])` promotes to integer.
- *
- * Continuing here allows a promoter to handle reduce-like
- * promotions explicitly if necessary.
- * TODO: The `!NPY_DT_is_abstract(resolver_dtype)` currently
- * ensures that this is a promoter. If we allow
- * `ArrayMethods` to use abstract DTypes, we may have to
- * reject it here or the `ArrayMethod` has to implement
- * the reduce promotion.
- */
- continue;
- }
+
int subclass = PyObject_IsSubclass(
(PyObject *)given_dtype, (PyObject *)resolver_dtype);
if (subclass < 0) {
@@ -254,8 +287,12 @@ resolve_implementation_info(PyUFuncObject *ufunc,
* In all cases, we give up resolution, since it would be
* necessary to compare to two "best" cases.
*/
- int unambiguously_equally_good = 1;
for (Py_ssize_t i = 0; i < nargs; i++) {
+ if (i == ufunc->nin && current_best != -1) {
+ /* inputs prefer one loop and outputs have lower priority */
+ break;
+ }
+
int best;
PyObject *prev_dtype = PyTuple_GET_ITEM(best_dtypes, i);
@@ -265,50 +302,18 @@ resolve_implementation_info(PyUFuncObject *ufunc,
/* equivalent, so this entry does not matter */
continue;
}
- /*
- * TODO: Even if the input is not specified, if we have
- * abstract DTypes and one is a subclass of the other,
- * the subclass should be considered a better match
- * (subclasses are always more specific).
- */
- /* Whether this (normally output) dtype was specified at all */
if (op_dtypes[i] == NULL) {
/*
- * When DType is completely unspecified, prefer abstract
- * over concrete, assuming it will resolve.
- * Furthermore, we cannot decide which abstract/None
- * is "better", only concrete ones which are subclasses
- * of Abstract ones are defined as worse.
+                 * If a dtype is NULL it always matches, so there is no
+ * point in defining one as more precise than the other.
*/
- npy_bool prev_is_concrete = NPY_FALSE;
- npy_bool new_is_concrete = NPY_FALSE;
- if ((prev_dtype != Py_None) &&
- !NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype)) {
- prev_is_concrete = NPY_TRUE;
- }
- if ((new_dtype != Py_None) &&
- !NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) {
- new_is_concrete = NPY_TRUE;
- }
- if (prev_is_concrete == new_is_concrete) {
- best = -1;
- }
- else if (prev_is_concrete) {
- unambiguously_equally_good = 0;
- best = 1;
- }
- else {
- unambiguously_equally_good = 0;
- best = 0;
- }
+ continue;
}
/* If either is None, the other is strictly more specific */
- else if (prev_dtype == Py_None) {
- unambiguously_equally_good = 0;
+ if (prev_dtype == Py_None) {
best = 1;
}
else if (new_dtype == Py_None) {
- unambiguously_equally_good = 0;
best = 0;
}
/*
@@ -318,20 +323,25 @@ resolve_implementation_info(PyUFuncObject *ufunc,
else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype) &&
!NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) {
/*
- * Ambiguous unless the are identical (checked above),
- * but since they are concrete it does not matter which
- * best to compare.
+ * Ambiguous unless they are identical (checked above),
+ * or one matches exactly.
*/
- best = -1;
+ if (prev_dtype == (PyObject *)op_dtypes[i]) {
+ best = 0;
+ }
+ else if (new_dtype == (PyObject *)op_dtypes[i]) {
+ best = 1;
+ }
+ else {
+ best = -1;
+ }
}
else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)prev_dtype)) {
/* old is not abstract, so better (both not possible) */
- unambiguously_equally_good = 0;
best = 0;
}
else if (!NPY_DT_is_abstract((PyArray_DTypeMeta *)new_dtype)) {
/* new is not abstract, so better (both not possible) */
- unambiguously_equally_good = 0;
best = 1;
}
/*
@@ -349,6 +359,10 @@ resolve_implementation_info(PyUFuncObject *ufunc,
return -1;
}
+ if (best == -1) {
+ /* no new info, nothing to update */
+ continue;
+ }
if ((current_best != -1) && (current_best != best)) {
/*
* We need a clear best, this could be tricky, unless
@@ -367,15 +381,34 @@ resolve_implementation_info(PyUFuncObject *ufunc,
if (current_best == -1) {
/*
- * TODO: It would be nice to have a "diagnostic mode" that
- * informs if this happens! (An immediate error currently
- * blocks later legacy resolution, but may work in the
- * future.)
+ * We could not find a best loop, but promoters should be
+ * designed in a way to disambiguate such scenarios, so we
+ * retry the whole lookup using only promoters.
+ * (There is a small chance we already got two promoters.
+ * We just redo it anyway for simplicity.)
*/
- if (unambiguously_equally_good) {
- /* unset the best resolver to indicate this */
- best_resolver_info = NULL;
- continue;
+ if (!only_promoters) {
+ return resolve_implementation_info(ufunc,
+ op_dtypes, NPY_TRUE, out_info);
+ }
+ /*
+ * If this is already the retry, we are out of luck. Promoters
+ * should be designed in a way that this cannot happen!
+ * (It should be noted, that the retry might not find anything
+ * and we still do a legacy lookup later.)
+ */
+ PyObject *given = PyArray_TupleFromItems(
+ ufunc->nargs, (PyObject **)op_dtypes, 1);
+ if (given != NULL) {
+ PyErr_Format(PyExc_RuntimeError,
+ "Could not find a loop for the inputs:\n %S\n"
+ "The two promoters %S and %S matched the input "
+ "equally well. Promoters must be designed "
+ "to be unambiguous. NOTE: This indicates an error "
+ "in NumPy or an extending library and should be "
+ "reported.",
+ given, best_dtypes, curr_dtypes);
+ Py_DECREF(given);
}
*out_info = NULL;
return 0;
@@ -457,10 +490,9 @@ call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *promoter,
if (Py_EnterRecursiveCall(" during ufunc promotion.") != 0) {
goto finish;
}
- /* TODO: The caching logic here may need revising: */
resolved_info = promote_and_get_info_and_ufuncimpl(ufunc,
operands, signature, new_op_dtypes,
- /* no legacy promotion */ NPY_FALSE, /* cache */ NPY_TRUE);
+ /* no legacy promotion */ NPY_FALSE);
Py_LeaveRecursiveCall();
@@ -625,7 +657,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
PyArrayObject *const ops[],
PyArray_DTypeMeta *signature[],
PyArray_DTypeMeta *op_dtypes[],
- npy_bool allow_legacy_promotion, npy_bool cache)
+ npy_bool allow_legacy_promotion)
{
/*
* Fetch the dispatching info which consists of the implementation and
@@ -644,11 +676,12 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
}
/*
- * If `info == NULL`, the caching failed, repeat using the full resolution
- * in `resolve_implementation_info`.
+ * If `info == NULL`, loading from cache failed, use the full resolution
+ * in `resolve_implementation_info` (which caches its result on success).
*/
if (info == NULL) {
- if (resolve_implementation_info(ufunc, op_dtypes, &info) < 0) {
+ if (resolve_implementation_info(ufunc,
+ op_dtypes, NPY_FALSE, &info) < 0) {
return NULL;
}
if (info != NULL && PyObject_TypeCheck(
@@ -657,41 +690,12 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
* Found the ArrayMethod and NOT promoter. Before returning it
* add it to the cache for faster lookup in the future.
*/
- if (cache && PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache,
+ if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache,
(PyObject **)op_dtypes, info, 0) < 0) {
return NULL;
}
return info;
}
- else if (info == NULL && op_dtypes[0] == NULL) {
- /*
- * If we have a reduction, fill in the unspecified input/array
- * assuming it should have the same dtype as the operand input
- * (or the output one if given).
- * Then, try again. In some cases, this will choose different
- * paths, such as `ll->?` instead of an `??->?` loop for `np.equal`
- * when the input is `.l->.` (`.` meaning undefined). This will
- * then cause an error. But cast to `?` would always lose
- * information, and in many cases important information:
- *
- * ```python
- * from operator import eq
- * from functools import reduce
- *
- * reduce(eq, [1, 2, 3]) != reduce(eq, [True, True, True])
- * ```
- *
- * The special cases being `logical_(and|or|xor)` which can always
- * cast to boolean ahead of time and still give the right answer
- * (unsafe cast to bool is fine here). We special case these at
- * the time of this comment (NumPy 1.21).
- */
- assert(ufunc->nin == 2 && ufunc->nout == 1);
- op_dtypes[0] = op_dtypes[2] != NULL ? op_dtypes[2] : op_dtypes[1];
- Py_INCREF(op_dtypes[0]);
- return promote_and_get_info_and_ufuncimpl(ufunc,
- ops, signature, op_dtypes, allow_legacy_promotion, 1);
- }
}
/*
@@ -707,6 +711,11 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
return NULL;
}
else if (info != NULL) {
+ /* Add result to the cache using the original types: */
+ if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache,
+ (PyObject **)op_dtypes, info, 0) < 0) {
+ return NULL;
+ }
return info;
}
}
@@ -730,7 +739,12 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
return NULL;
}
info = promote_and_get_info_and_ufuncimpl(ufunc,
- ops, signature, new_op_dtypes, NPY_FALSE, cacheable);
+ ops, signature, new_op_dtypes, NPY_FALSE);
+ /* Add this to the cache using the original types: */
+ if (cacheable && PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache,
+ (PyObject **)op_dtypes, info, 0) < 0) {
+ return NULL;
+ }
for (int i = 0; i < ufunc->nargs; i++) {
             Py_XDECREF(new_op_dtypes[i]);
}
@@ -745,6 +759,14 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
* only work with DType (classes/types). This is because it has to ensure
* that legacy (value-based promotion) is used when necessary.
*
+ * NOTE: The machinery here currently ignores output arguments unless
+ * they are part of the signature. This slightly limits unsafe loop
+ * specializations, which is important for the `ensure_reduce_compatible`
+ * fallback mode.
+ * To fix this, the caching mechanism (and dispatching) can be extended.
+ * When/if that happens, the `ensure_reduce_compatible` could be
+ *       deprecated (it should never kick in because promoters kick in first).
+ *
* @param ufunc The ufunc object, used mainly for the fallback.
* @param ops The array operands (used only for the fallback).
* @param signature As input, the DType signature fixed explicitly by the user.
@@ -754,9 +776,16 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc,
* either by the `signature` or by an `operand`.
* (outputs and the second input can be NULL for reductions).
* NOTE: In some cases, the promotion machinery may currently modify
- * these.
+ * these including clearing the output.
* @param force_legacy_promotion If set, we have to use the old type resolution
* to implement value-based promotion/casting.
+ * @param ensure_reduce_compatible Must be set for reductions, in which case
+ * the found implementation is checked for reduce-like compatibility.
+ * If it is *not* compatible and `signature[2] != NULL`, we assume its
+ * output DType is correct (see NOTE above).
+ * If removed, promotion may require information about whether this
+ * is a reduction, so the more likely case is to always keep fixing this
+ * when necessary, but push down the handling so it can be cached.
*/
NPY_NO_EXPORT PyArrayMethodObject *
promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
@@ -764,9 +793,10 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
PyArray_DTypeMeta *signature[],
PyArray_DTypeMeta *op_dtypes[],
npy_bool force_legacy_promotion,
- npy_bool allow_legacy_promotion)
+ npy_bool allow_legacy_promotion,
+ npy_bool ensure_reduce_compatible)
{
- int nargs = ufunc->nargs;
+ int nin = ufunc->nin, nargs = ufunc->nargs;
/*
* Get the actual DTypes we operate with by mixing the operand array
@@ -782,6 +812,15 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
Py_XSETREF(op_dtypes[i], signature[i]);
assert(i >= ufunc->nin || !NPY_DT_is_abstract(signature[i]));
}
+ else if (i >= nin) {
+ /*
+ * We currently just ignore outputs if not in signature, this will
+ * always give the/a correct result (limits registering specialized
+ * loops which include the cast).
+ * (See also comment in resolve_implementation_info.)
+ */
+ Py_CLEAR(op_dtypes[i]);
+ }
}
if (force_legacy_promotion) {
@@ -798,7 +837,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
}
PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc,
- ops, signature, op_dtypes, allow_legacy_promotion, NPY_TRUE);
+ ops, signature, op_dtypes, allow_legacy_promotion);
if (info == NULL) {
if (!PyErr_Occurred()) {
@@ -809,8 +848,26 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1);
- /* Fill `signature` with final DTypes used by the ArrayMethod/inner-loop */
+ /*
+ * In certain cases (only the logical ufuncs really), the loop we found may
+ * not be reduce-compatible. Since the machinery can't distinguish a
+ * reduction with an output from a normal ufunc call, we have to assume
+ * the result DType is correct and force it for the input (if not forced
+ * already).
+ * NOTE: This does assume that all loops are "safe" see the NOTE in this
+ * comment. That could be relaxed, in which case we may need to
+ * cache if a call was for a reduction.
+ */
PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0);
+ if (ensure_reduce_compatible && signature[0] == NULL &&
+ PyTuple_GET_ITEM(all_dtypes, 0) != PyTuple_GET_ITEM(all_dtypes, 2)) {
+ signature[0] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM(all_dtypes, 2);
+ Py_INCREF(signature[0]);
+ return promote_and_get_ufuncimpl(ufunc,
+ ops, signature, op_dtypes,
+ force_legacy_promotion, allow_legacy_promotion, NPY_FALSE);
+ }
+
for (int i = 0; i < nargs; i++) {
if (signature[i] == NULL) {
signature[i] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM(all_dtypes, i);
@@ -826,6 +883,112 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
/*
+ * Generic promoter used as a final fallback on ufuncs. Most operations are
+ * homogeneous, so we can try to find the homogeneous dtype on the inputs
+ * and use that.
+ * We need to special case the reduction case, where op_dtypes[0] == NULL
+ * is possible.
+ */
+NPY_NO_EXPORT int
+default_ufunc_promoter(PyUFuncObject *ufunc,
+ PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[])
+{
+ if (ufunc->type_resolver == &PyUFunc_SimpleBinaryComparisonTypeResolver
+ && signature[0] == NULL && signature[1] == NULL
+ && signature[2] != NULL && signature[2]->type_num != NPY_BOOL) {
+ /* bail out, this is _only_ to give future/deprecation warning! */
+ return -1;
+ }
+
+ /* If nin < 2 promotion is a no-op, so it should not be registered */
+ assert(ufunc->nin > 1);
+ if (op_dtypes[0] == NULL) {
+ assert(ufunc->nin == 2 && ufunc->nout == 1); /* must be reduction */
+ Py_INCREF(op_dtypes[1]);
+ new_op_dtypes[0] = op_dtypes[1];
+ Py_INCREF(op_dtypes[1]);
+ new_op_dtypes[1] = op_dtypes[1];
+ Py_INCREF(op_dtypes[1]);
+ new_op_dtypes[2] = op_dtypes[1];
+ return 0;
+ }
+ PyArray_DTypeMeta *common = NULL;
+ /*
+ * If a signature is used and homogeneous in its outputs use that
+ * (Could/should likely be rather applied to inputs also, although outs
+ * only could have some advantage and input dtypes are rarely enforced.)
+ */
+ for (int i = ufunc->nin; i < ufunc->nargs; i++) {
+ if (signature[i] != NULL) {
+ if (common == NULL) {
+ Py_INCREF(signature[i]);
+ common = signature[i];
+ }
+ else if (common != signature[i]) {
+ Py_CLEAR(common); /* Not homogeneous, unset common */
+ break;
+ }
+ }
+ }
+ /* Otherwise, use the common DType of all input operands */
+ if (common == NULL) {
+ common = PyArray_PromoteDTypeSequence(ufunc->nin, op_dtypes);
+ if (common == NULL) {
+ if (PyErr_ExceptionMatches(PyExc_TypeError)) {
+ PyErr_Clear(); /* Do not propagate normal promotion errors */
+ }
+ return -1;
+ }
+ }
+
+ for (int i = 0; i < ufunc->nargs; i++) {
+ PyArray_DTypeMeta *tmp = common;
+ if (signature[i]) {
+ tmp = signature[i]; /* never replace a fixed one. */
+ }
+ Py_INCREF(tmp);
+ new_op_dtypes[i] = tmp;
+ }
+ for (int i = ufunc->nin; i < ufunc->nargs; i++) {
+ Py_XINCREF(op_dtypes[i]);
+ new_op_dtypes[i] = op_dtypes[i];
+ }
+
+ Py_DECREF(common);
+ return 0;
+}
+
+
+/*
+ * In some cases, we assume that there will only ever be object loops,
+ * and the object loop should *always* be chosen.
+ * (in those cases more specific loops should not really be registered, but
+ * we do not check that.)
+ *
+ * We default to this for "old-style" ufuncs which have exactly one loop
+ * consisting only of objects (during registration time, numba mutates this,
+ * but that is presumably fine).
+ */
+NPY_NO_EXPORT int
+object_only_ufunc_promoter(PyUFuncObject *ufunc,
+ PyArray_DTypeMeta *NPY_UNUSED(op_dtypes[]),
+ PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[])
+{
+ PyArray_DTypeMeta *object_DType = PyArray_DTypeFromTypeNum(NPY_OBJECT);
+
+ for (int i = 0; i < ufunc->nargs; i++) {
+ if (signature[i] == NULL) {
+ Py_INCREF(object_DType);
+ new_op_dtypes[i] = object_DType;
+ }
+ }
+ Py_DECREF(object_DType);
+ return 0;
+}
+
+/*
* Special promoter for the logical ufuncs. The logical ufuncs can always
* use the ??->? and still get the correct output (as long as the output
* is not supposed to be `object`).
@@ -843,6 +1006,12 @@ logical_ufunc_promoter(PyUFuncObject *NPY_UNUSED(ufunc),
*/
int force_object = 0;
+ if (signature[0] == NULL && signature[1] == NULL
+ && signature[2] != NULL && signature[2]->type_num != NPY_BOOL) {
+ /* bail out, this is _only_ to give future/deprecation warning! */
+ return -1;
+ }
+
for (int i = 0; i < 3; i++) {
PyArray_DTypeMeta *item;
if (signature[i] != NULL) {
@@ -913,4 +1082,3 @@ install_logical_ufunc_promoter(PyObject *ufunc)
return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0);
}
-
diff --git a/numpy/core/src/umath/dispatching.h b/numpy/core/src/umath/dispatching.h
index 2f314615d..a7e9e88d0 100644
--- a/numpy/core/src/umath/dispatching.h
+++ b/numpy/core/src/umath/dispatching.h
@@ -20,13 +20,25 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc,
PyArray_DTypeMeta *signature[],
PyArray_DTypeMeta *op_dtypes[],
npy_bool force_legacy_promotion,
- npy_bool allow_legacy_promotion);
+ npy_bool allow_legacy_promotion,
+ npy_bool ensure_reduce_compatible);
NPY_NO_EXPORT PyObject *
add_and_return_legacy_wrapping_ufunc_loop(PyUFuncObject *ufunc,
PyArray_DTypeMeta *operation_dtypes[], int ignore_duplicate);
NPY_NO_EXPORT int
+default_ufunc_promoter(PyUFuncObject *ufunc,
+ PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[]);
+
+NPY_NO_EXPORT int
+object_only_ufunc_promoter(PyUFuncObject *ufunc,
+ PyArray_DTypeMeta *NPY_UNUSED(op_dtypes[]),
+ PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[]);
+
+NPY_NO_EXPORT int
install_logical_ufunc_promoter(PyObject *ufunc);
diff --git a/numpy/core/src/umath/legacy_array_method.c b/numpy/core/src/umath/legacy_array_method.c
index a423823d4..99de63aac 100644
--- a/numpy/core/src/umath/legacy_array_method.c
+++ b/numpy/core/src/umath/legacy_array_method.c
@@ -123,10 +123,40 @@ simple_legacy_resolve_descriptors(
PyArray_Descr **given_descrs,
PyArray_Descr **output_descrs)
{
+ int i = 0;
int nin = method->nin;
int nout = method->nout;
- for (int i = 0; i < nin + nout; i++) {
+ if (nin == 2 && nout == 1 && given_descrs[2] != NULL
+ && dtypes[0] == dtypes[2]) {
+ /*
+ * Could be a reduction, which requires `descr[0] is descr[2]`
+ * (identity) at least currently. This is because `op[0] is op[2]`.
+ * (If the output descriptor is not passed, the below works.)
+ */
+ output_descrs[2] = ensure_dtype_nbo(given_descrs[2]);
+ if (output_descrs[2] == NULL) {
+ Py_CLEAR(output_descrs[2]);
+ return -1;
+ }
+ Py_INCREF(output_descrs[2]);
+ output_descrs[0] = output_descrs[2];
+ if (dtypes[1] == dtypes[2]) {
+ /* Same for the second one (accumulation is stricter) */
+ Py_INCREF(output_descrs[2]);
+ output_descrs[1] = output_descrs[2];
+ }
+ else {
+ output_descrs[1] = ensure_dtype_nbo(given_descrs[1]);
+ if (output_descrs[1] == NULL) {
+ i = 2;
+ goto fail;
+ }
+ }
+ return NPY_NO_CASTING;
+ }
+
+ for (; i < nin + nout; i++) {
if (given_descrs[i] != NULL) {
output_descrs[i] = ensure_dtype_nbo(given_descrs[i]);
}
@@ -146,7 +176,7 @@ simple_legacy_resolve_descriptors(
return NPY_NO_CASTING;
fail:
- for (int i = 0; i < nin + nout; i++) {
+ for (; i >= 0; i--) {
Py_CLEAR(output_descrs[i]);
}
return -1;
diff --git a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
index 95cce553a..2dd43fb85 100644
--- a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
+++ b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
@@ -386,7 +386,7 @@ avx512_permute_x8var_pd(__m512d t0, __m512d t1, __m512d t2, __m512d t3,
* #and_masks =_mm256_and_ps, _mm512_kand#
* #xor_masks =_mm256_xor_ps, _mm512_kxor#
* #fmadd = _mm256_fmadd_ps, _mm512_fmadd_ps#
- * #mask_to_int = _mm256_movemask_ps, #
+ * #mask_to_int = _mm256_movemask_ps, npyv_tobits_b32#
* #full_mask= 0xFF, 0xFFFF#
* #masked_store = _mm256_maskstore_ps, _mm512_mask_storeu_ps#
* #cvtps_epi32 = _mm256_cvtps_epi32, #
@@ -833,11 +833,19 @@ AVX512F_exp_DOUBLE(npy_double * op,
op += num_lanes;
num_remaining_elements -= num_lanes;
}
- if (overflow_mask) {
+ /*
+ * Don't count on the compiler for cast between mask and int registers.
+ * On gcc7 with flags -march>=nocona -O3 can cause FP stack overflow
+ * which may lead to putting NaN into certain HW/FP calculations.
+ *
+ * For more details, please check the comments in:
+ * - https://github.com/numpy/numpy/issues/20356
+ */
+ if (npyv_tobits_b64(overflow_mask)) {
npy_set_floatstatus_overflow();
}
- if (underflow_mask) {
+ if (npyv_tobits_b64(underflow_mask)) {
npy_set_floatstatus_underflow();
}
}
@@ -1062,10 +1070,10 @@ AVX512F_log_DOUBLE(npy_double * op,
num_remaining_elements -= num_lanes;
}
- if (invalid_mask) {
+ if (npyv_tobits_b64(invalid_mask)) {
npy_set_floatstatus_invalid();
}
- if (divide_by_zero_mask) {
+ if (npyv_tobits_b64(divide_by_zero_mask)) {
npy_set_floatstatus_divbyzero();
}
}
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 186f18a62..9107323b0 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -998,10 +998,6 @@ convert_ufunc_arguments(PyUFuncObject *ufunc,
}
if (*allow_legacy_promotion && (!all_scalar && any_scalar)) {
*force_legacy_promotion = should_use_min_scalar(nin, out_op, 0, NULL);
- /*
- * TODO: if this is False, we end up in a "very slow" path that should
- * be avoided. This makes `int_arr + 0.` ~40% slower.
- */
}
/* Convert and fill in output arguments */
@@ -2717,11 +2713,11 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc,
char *method)
{
/*
- * Note that the `ops` is not realy correct. But legacy resolution
+ * Note that the `ops` is not really correct. But legacy resolution
* cannot quite handle the correct ops (e.g. a NULL first item if `out`
- * is NULL), and it should only matter in very strange cases.
+ * is NULL) so we pass `arr` instead in that case.
*/
- PyArrayObject *ops[3] = {arr, arr, NULL};
+ PyArrayObject *ops[3] = {out ? out : arr, arr, out};
/*
* TODO: If `out` is not provided, arguably `initial` could define
* the first DType (and maybe also the out one), that way
@@ -2741,11 +2737,12 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc,
}
PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
- ops, signature, operation_DTypes, NPY_FALSE, NPY_TRUE);
- Py_DECREF(operation_DTypes[1]);
+ ops, signature, operation_DTypes, NPY_FALSE, NPY_FALSE, NPY_TRUE);
+ /* Output can currently get cleared, others XDECREF in case of error */
+ Py_XDECREF(operation_DTypes[1]);
if (out != NULL) {
- Py_DECREF(operation_DTypes[0]);
- Py_DECREF(operation_DTypes[2]);
+ Py_XDECREF(operation_DTypes[0]);
+ Py_XDECREF(operation_DTypes[2]);
}
if (ufuncimpl == NULL) {
return NULL;
@@ -2771,8 +2768,10 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc,
if (out_descrs[0] != out_descrs[2] || (
enforce_uniform_args && out_descrs[0] != out_descrs[1])) {
PyErr_Format(PyExc_TypeError,
- "the resolved dtypes are not compatible with %s.%s",
- ufunc_get_name_cstr(ufunc), method);
+ "the resolved dtypes are not compatible with %s.%s. "
+ "Resolved (%R, %R, %R)",
+ ufunc_get_name_cstr(ufunc), method,
+ out_descrs[0], out_descrs[1], out_descrs[2]);
goto fail;
}
/* TODO: This really should _not_ be unsafe casting (same above)! */
@@ -4852,7 +4851,8 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc,
*/
PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
operands, signature,
- operand_DTypes, force_legacy_promotion, allow_legacy_promotion);
+ operand_DTypes, force_legacy_promotion, allow_legacy_promotion,
+ NPY_FALSE);
if (ufuncimpl == NULL) {
goto fail;
}
@@ -5190,6 +5190,61 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi
info = add_and_return_legacy_wrapping_ufunc_loop(ufunc, op_dtypes, 1);
if (info == NULL) {
+ Py_DECREF(ufunc);
+ return NULL;
+ }
+ }
+
+ PyObject *promoter = NULL;
+ if (ufunc->ntypes == 1) {
+ npy_bool all_object = NPY_TRUE;
+ for (int i = 0; i < ufunc->nargs; i++) {
+ if (ufunc->types[i] != NPY_OBJECT) {
+ all_object = NPY_FALSE;
+ break;
+ }
+ }
+ if (all_object) {
+ promoter = PyCapsule_New(&object_only_ufunc_promoter,
+ "numpy._ufunc_promoter", NULL);
+ if (promoter == NULL) {
+ Py_DECREF(ufunc);
+ return NULL;
+ }
+ }
+ }
+ if (promoter == NULL && ufunc->nin > 1) {
+ promoter = PyCapsule_New(&default_ufunc_promoter,
+ "numpy._ufunc_promoter", NULL);
+ if (promoter == NULL) {
+ Py_DECREF(ufunc);
+ return NULL;
+ }
+ }
+ if (promoter != NULL) {
+ /* Always install default promoter using the common DType */
+ PyObject *dtype_tuple = PyTuple_New(ufunc->nargs);
+ if (dtype_tuple == NULL) {
+ Py_DECREF(promoter);
+ Py_DECREF(ufunc);
+ return NULL;
+ }
+ for (int i = 0; i < ufunc->nargs; i++) {
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(dtype_tuple, i, Py_None);
+ }
+ PyObject *info = PyTuple_Pack(2, dtype_tuple, promoter);
+ Py_DECREF(dtype_tuple);
+ Py_DECREF(promoter);
+ if (info == NULL) {
+ Py_DECREF(ufunc);
+ return NULL;
+ }
+
+ int res = PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0);
+ Py_DECREF(info);
+ if (res < 0) {
+ Py_DECREF(ufunc);
return NULL;
}
}
@@ -5963,7 +6018,7 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc,
operands, signature, operand_DTypes,
- force_legacy_promotion, allow_legacy_promotion);
+ force_legacy_promotion, allow_legacy_promotion, NPY_FALSE);
if (ufuncimpl == NULL) {
goto fail;
}
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index b95d669a8..50da7b800 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -2033,15 +2033,15 @@ class TestDateTime:
# subtracting two datetime64 works, but we cannot reduce it, since
# the result of that subtraction will have a different dtype.
arr = np.array(["2021-12-02", "2019-05-12"], dtype="M8[ms]")
- msg = r"the resolved dtypes are not compatible with subtract\."
+ msg = r"ufunc 'subtract' did not contain a loop with signature "
- with pytest.raises(TypeError, match=msg + "reduce"):
+ with pytest.raises(TypeError, match=msg):
np.subtract.reduce(arr)
- with pytest.raises(TypeError, match=msg + "accumulate"):
+ with pytest.raises(TypeError, match=msg):
np.subtract.accumulate(arr)
- with pytest.raises(TypeError, match=msg + "reduceat"):
+ with pytest.raises(TypeError, match=msg):
np.subtract.reduceat(arr, [0])
def test_datetime_busday_offset(self):
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 94583a5ee..a269eb519 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -13,7 +13,8 @@ import sys
import numpy as np
from numpy.testing import (
- assert_raises, assert_warns, assert_, assert_array_equal, SkipTest, KnownFailureException
+ assert_raises, assert_warns, assert_, assert_array_equal, SkipTest,
+ KnownFailureException, break_cycles,
)
from numpy.core._multiarray_tests import fromstring_null_term_c_api
@@ -426,11 +427,6 @@ class TestBincount(_DeprecationTestCase):
self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
-class TestAlen(_DeprecationTestCase):
- # 2019-08-02, 1.18.0
- def test_alen(self):
- self.assert_deprecated(lambda: np.alen(np.array([1, 2, 3])))
-
class TestGeneratorSum(_DeprecationTestCase):
# 2018-02-25, 1.15.0
@@ -1250,3 +1246,22 @@ class TestQuantileInterpolationDeprecation(_DeprecationTestCase):
warnings.simplefilter("always", DeprecationWarning)
with pytest.raises(TypeError):
func([0., 1.], 0., interpolation="nearest", method="nearest")
+
+
+class TestMemEventHook(_DeprecationTestCase):
+ # Deprecated 2021-11-18, NumPy 1.23
+ def test_mem_seteventhook(self):
+ # The actual tests are within the C code in
+ # multiarray/_multiarray_tests.c.src
+ import numpy.core._multiarray_tests as ma_tests
+ with pytest.warns(DeprecationWarning,
+ match='PyDataMem_SetEventHook is deprecated'):
+ ma_tests.test_pydatamem_seteventhook_start()
+ # force an allocation and free of a numpy array
+        # needs to be larger than the limit of the small memory cacher in ctors.c
+ a = np.zeros(1000)
+ del a
+ break_cycles()
+ with pytest.warns(DeprecationWarning,
+ match='PyDataMem_SetEventHook is deprecated'):
+ ma_tests.test_pydatamem_seteventhook_end()
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 4413cd0d0..9d728afa4 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -6888,26 +6888,6 @@ class TestInner:
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
-class TestAlen:
- def test_basic(self):
- with pytest.warns(DeprecationWarning):
- m = np.array([1, 2, 3])
- assert_equal(np.alen(m), 3)
-
- m = np.array([[1, 2, 3], [4, 5, 7]])
- assert_equal(np.alen(m), 2)
-
- m = [1, 2, 3]
- assert_equal(np.alen(m), 3)
-
- m = [[1, 2, 3], [4, 5, 7]]
- assert_equal(np.alen(m), 2)
-
- def test_singleton(self):
- with pytest.warns(DeprecationWarning):
- assert_equal(np.alen(5), 1)
-
-
class TestChoose:
def setup(self):
self.x = 2*np.ones((3,), dtype=int)
@@ -7832,9 +7812,9 @@ class TestArrayCreationCopyArgument(object):
pyscalar = arr.item(0)
# Test never-copy raises error:
- assert_raises(ValueError, np.array, scalar,
+ assert_raises(ValueError, np.array, scalar,
copy=np._CopyMode.NEVER)
- assert_raises(ValueError, np.array, pyscalar,
+ assert_raises(ValueError, np.array, pyscalar,
copy=np._CopyMode.NEVER)
assert_raises(ValueError, np.array, pyscalar,
copy=self.RaiseOnBool())
@@ -8187,18 +8167,6 @@ def test_scalar_element_deletion():
assert_raises(ValueError, a[0].__delitem__, 'x')
-class TestMemEventHook:
- def test_mem_seteventhook(self):
- # The actual tests are within the C code in
- # multiarray/_multiarray_tests.c.src
- _multiarray_tests.test_pydatamem_seteventhook_start()
- # force an allocation and free of a numpy array
- # needs to be larger then limit of small memory cacher in ctors.c
- a = np.zeros(1000)
- del a
- break_cycles()
- _multiarray_tests.test_pydatamem_seteventhook_end()
-
class TestMapIter:
def test_mapiter(self):
# The actual tests are within the C code in
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index ef0bac957..76e4cdcfd 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -1762,12 +1762,15 @@ class TestUfunc:
result = _rational_tests.test_add(a, b)
assert_equal(result, target)
- # But since we use the old type resolver, this may not work
- # for dtype variations unless the output dtype is given:
+        # This works even more generally, as long as the default common-dtype
+        # promoter works out:
result = _rational_tests.test_add(a, b.astype(np.uint16), out=c)
assert_equal(result, target)
+
+ # But, it can be fooled, e.g. (use scalars, which forces legacy
+ # type resolution to kick in, which then fails):
with assert_raises(TypeError):
- _rational_tests.test_add(a, b.astype(np.uint16))
+ _rational_tests.test_add(a, np.uint16(2))
def test_operand_flags(self):
a = np.arange(16, dtype='l').reshape(4, 4)
@@ -2123,6 +2126,17 @@ class TestUfunc:
c = np.array([1., 2.])
assert_array_equal(ufunc(a, c), ufunc([True, True], True))
assert ufunc.reduce(a) == True
+ # check that the output has no effect:
+ out = np.zeros(2, dtype=np.int32)
+ expected = ufunc([True, True], True).astype(out.dtype)
+ assert_array_equal(ufunc(a, c, out=out), expected)
+ out = np.zeros((), dtype=np.int32)
+ assert ufunc.reduce(a, out=out) == True
+        # Last check, test reduction when out and a match (the complexity here
+        # is that the "i,i->?" may seem right, but should not match).
+ a = np.array([3], dtype="i")
+ out = np.zeros((), dtype=a.dtype)
+ assert ufunc.reduce(a, out=out) == 1
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
@@ -2134,6 +2148,49 @@ class TestUfunc:
# It would be safe, but not equiv casting:
ufunc(a, c, out=out, casting="equiv")
+ def test_reducelike_out_promotes(self):
+ # Check that the out argument to reductions is considered for
+ # promotion. See also gh-20455.
+ # Note that these paths could prefer `initial=` in the future and
+ # do not up-cast to the default integer for add and prod
+ arr = np.ones(1000, dtype=np.uint8)
+ out = np.zeros((), dtype=np.uint16)
+ assert np.add.reduce(arr, out=out) == 1000
+ arr[:10] = 2
+ assert np.multiply.reduce(arr, out=out) == 2**10
+
+ # For legacy dtypes, the signature currently has to be forced if `out=`
+ # is passed. The two paths below should differ, without `dtype=` the
+ # expected result should be: `np.prod(arr.astype("f8")).astype("f4")`!
+ arr = np.full(5, 2**25-1, dtype=np.int64)
+
+ # float32 and int64 promote to float64:
+ res = np.zeros((), dtype=np.float32)
+ # If `dtype=` is passed, the calculation is forced to float32:
+ single_res = np.zeros((), dtype=np.float32)
+ np.multiply.reduce(arr, out=single_res, dtype=np.float32)
+ assert single_res != res
+
+ def test_reducelike_output_needs_identical_cast(self):
+        # Checks that the case of a simple byte-swap cast works; mainly
+        # tests that this is not rejected outright.
+ # (interesting because we require descriptor identity in reducelikes).
+ arr = np.ones(20, dtype="f8")
+ out = np.empty((), dtype=arr.dtype.newbyteorder())
+ expected = np.add.reduce(arr)
+ np.add.reduce(arr, out=out)
+ assert_array_equal(expected, out)
+ # Check reduceat:
+ out = np.empty(2, dtype=arr.dtype.newbyteorder())
+ expected = np.add.reduceat(arr, [0, 1])
+ np.add.reduceat(arr, [0, 1], out=out)
+ assert_array_equal(expected, out)
+ # And accumulate:
+ out = np.empty(arr.shape, dtype=arr.dtype.newbyteorder())
+ expected = np.add.accumulate(arr)
+ np.add.accumulate(arr, out=out)
+ assert_array_equal(expected, out)
+
def test_reduce_noncontig_output(self):
# Check that reduction deals with non-contiguous output arrays
# appropriately.
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index fc7c592f0..c0b26e75b 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -3433,7 +3433,7 @@ class TestComplexFunctions:
x_series = np.logspace(-20, -3.001, 200)
x_basic = np.logspace(-2.999, 0, 10, endpoint=False)
- if glibc_older_than("2.19") and dtype is np.longcomplex:
+ if dtype is np.longcomplex:
if (platform.machine() == 'aarch64' and bad_arcsinh()):
pytest.skip("Trig functions of np.longcomplex values known "
"to be inaccurate on aarch64 for some compilation "
diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py
index 39847c20f..b38e47c13 100644
--- a/numpy/distutils/ccompiler_opt.py
+++ b/numpy/distutils/ccompiler_opt.py
@@ -654,9 +654,9 @@ class _Distutils:
@staticmethod
def dist_load_module(name, path):
"""Load a module from file, required by the abstract class '_Cache'."""
- from numpy.compat import npy_load_module
+ from .misc_util import exec_mod_from_location
try:
- return npy_load_module(name, path)
+ return exec_mod_from_location(name, path)
except Exception as e:
_Distutils.dist_log(e, stderr=True)
return None
diff --git a/numpy/distutils/checks/cpu_asimdfhm.c b/numpy/distutils/checks/cpu_asimdfhm.c
index bb437aa40..cb49751c4 100644
--- a/numpy/distutils/checks/cpu_asimdfhm.c
+++ b/numpy/distutils/checks/cpu_asimdfhm.c
@@ -10,8 +10,8 @@ int main(void)
float32x4_t vf = vdupq_n_f32(1.0f);
float32x2_t vlf = vdup_n_f32(1.0f);
- int ret = (int)vget_lane_f32(vfmlal_low_u32(vlf, vlhp, vlhp), 0);
- ret += (int)vgetq_lane_f32(vfmlslq_high_u32(vf, vhp, vhp), 0);
+ int ret = (int)vget_lane_f32(vfmlal_low_f16(vlf, vlhp, vlhp), 0);
+ ret += (int)vgetq_lane_f32(vfmlslq_high_f16(vf, vhp, vhp), 0);
return ret;
}
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index f0f9b4bd7..513be75db 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -31,8 +31,6 @@ def clean_up_temporary_directory():
atexit.register(clean_up_temporary_directory)
-from numpy.compat import npy_load_module
-
__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dict_append', 'appendpath', 'generate_config_py',
'get_cmd', 'allpath', 'get_mathlibs',
@@ -44,7 +42,8 @@ __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dot_join', 'get_frame', 'minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
'get_build_architecture', 'get_info', 'get_pkg_info',
- 'get_num_build_jobs', 'sanitize_cxx_flags']
+ 'get_num_build_jobs', 'sanitize_cxx_flags',
+ 'exec_mod_from_location']
class InstallableLib:
"""
@@ -945,9 +944,8 @@ class Configuration:
try:
setup_name = os.path.splitext(os.path.basename(setup_py))[0]
n = dot_join(self.name, subpackage_name, setup_name)
- setup_module = npy_load_module('_'.join(n.split('.')),
- setup_py,
- ('.py', 'U', 1))
+ setup_module = exec_mod_from_location(
+ '_'.join(n.split('.')), setup_py)
if not hasattr(setup_module, 'configuration'):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
@@ -1993,8 +1991,8 @@ class Configuration:
name = os.path.splitext(os.path.basename(fn))[0]
n = dot_join(self.name, name)
try:
- version_module = npy_load_module('_'.join(n.split('.')),
- fn, info)
+ version_module = exec_mod_from_location(
+ '_'.join(n.split('.')), fn)
except ImportError as e:
self.warn(str(e))
version_module = None
@@ -2481,7 +2479,7 @@ def get_build_architecture():
return get_build_architecture()
-_cxx_ignore_flags = {'-Werror=implicit-function-declaration'}
+_cxx_ignore_flags = {'-Werror=implicit-function-declaration', '-std=c99'}
def sanitize_cxx_flags(cxxflags):
@@ -2491,3 +2489,14 @@ def sanitize_cxx_flags(cxxflags):
return [flag for flag in cxxflags if flag not in _cxx_ignore_flags]
+def exec_mod_from_location(modname, modfile):
+ '''
+ Use importlib machinery to import a module `modname` from the file
+ `modfile`. Depending on the `spec.loader`, the module may not be
+ registered in sys.modules.
+ '''
+ spec = importlib.util.spec_from_file_location(modname, modfile)
+ foo = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(foo)
+ return foo
+
diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py
index f147f1b97..b1cb74fae 100644
--- a/numpy/f2py/__init__.py
+++ b/numpy/f2py/__init__.py
@@ -145,7 +145,7 @@ def get_include():
Notes
-----
- .. versionadded:: 1.22.0
+ .. versionadded:: 1.21.1
Unless the build system you are using has specific support for f2py,
building a Python extension using a ``.pyf`` signature file is a two-step
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index 1d9236dcd..528c4adee 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -845,20 +845,26 @@ int_from_pyobj(int* v, PyObject *obj, const char *errmess)
return !(*v == -1 && PyErr_Occurred());
}
- if (PyComplex_Check(obj))
+ if (PyComplex_Check(obj)) {
+ PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
- else if (PyBytes_Check(obj) || PyUnicode_Check(obj))
+ }
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
- else if (PySequence_Check(obj))
+ }
+ else if (PySequence_Check(obj)) {
+ PyErr_Clear();
tmp = PySequence_GetItem(obj, 0);
+ }
+
if (tmp) {
- PyErr_Clear();
if (int_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
+
{
PyObject* err = PyErr_Occurred();
if (err == NULL) {
@@ -888,15 +894,19 @@ long_from_pyobj(long* v, PyObject *obj, const char *errmess) {
return !(*v == -1 && PyErr_Occurred());
}
- if (PyComplex_Check(obj))
+ if (PyComplex_Check(obj)) {
+ PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
- else if (PyBytes_Check(obj) || PyUnicode_Check(obj))
+ }
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
- else if (PySequence_Check(obj))
- tmp = PySequence_GetItem(obj,0);
+ }
+ else if (PySequence_Check(obj)) {
+ PyErr_Clear();
+ tmp = PySequence_GetItem(obj, 0);
+ }
if (tmp) {
- PyErr_Clear();
if (long_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
@@ -934,14 +944,19 @@ long_long_from_pyobj(long_long* v, PyObject *obj, const char *errmess)
return !(*v == -1 && PyErr_Occurred());
}
- if (PyComplex_Check(obj))
+ if (PyComplex_Check(obj)) {
+ PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
- else if (PyBytes_Check(obj) || PyUnicode_Check(obj))
+ }
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
- else if (PySequence_Check(obj))
- tmp = PySequence_GetItem(obj,0);
- if (tmp) {
+ }
+ else if (PySequence_Check(obj)) {
PyErr_Clear();
+ tmp = PySequence_GetItem(obj, 0);
+ }
+
+ if (tmp) {
if (long_long_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
@@ -1001,14 +1016,20 @@ double_from_pyobj(double* v, PyObject *obj, const char *errmess)
Py_DECREF(tmp);
return !(*v == -1.0 && PyErr_Occurred());
}
- if (PyComplex_Check(obj))
+
+ if (PyComplex_Check(obj)) {
+ PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
- else if (PyBytes_Check(obj) || PyUnicode_Check(obj))
+ }
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
- else if (PySequence_Check(obj))
- tmp = PySequence_GetItem(obj,0);
- if (tmp) {
+ }
+ else if (PySequence_Check(obj)) {
PyErr_Clear();
+ tmp = PySequence_GetItem(obj, 0);
+ }
+
+ if (tmp) {
if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
Py_DECREF(tmp);
}
diff --git a/numpy/f2py/tests/test_abstract_interface.py b/numpy/f2py/tests/test_abstract_interface.py
index 936c1f7bc..7aecf57fc 100644
--- a/numpy/f2py/tests/test_abstract_interface.py
+++ b/numpy/f2py/tests/test_abstract_interface.py
@@ -1,12 +1,13 @@
+from pathlib import Path
import textwrap
from . import util
from numpy.f2py import crackfortran
class TestAbstractInterface(util.F2PyTest):
- suffix = '.f90'
+ suffix = ".f90"
- skip = ['add1', 'add2']
+ skip = ["add1", "add2"]
code = textwrap.dedent("""
module ops_module
@@ -50,17 +51,17 @@ class TestAbstractInterface(util.F2PyTest):
def test_parse_abstract_interface(self, tmp_path):
# Test gh18403
- f_path = tmp_path / "gh18403_mod.f90"
- with f_path.open('w') as ff:
- ff.write(textwrap.dedent("""\
- module test
- abstract interface
- subroutine foo()
- end subroutine
- end interface
- end module test
- """))
+ f_path = Path(tmp_path / "gh18403_mod.f90")
+ f_path.write_text(
+ textwrap.dedent("""\
+ module test
+ abstract interface
+ subroutine foo()
+ end subroutine
+ end interface
+ end module test
+ """))
mod = crackfortran.crackfortran([str(f_path)])
assert len(mod) == 1
- assert len(mod[0]['body']) == 1
- assert mod[0]['body'][0]['block'] == 'abstract interface'
+ assert len(mod[0]["body"]) == 1
+ assert mod[0]["body"][0]["block"] == "abstract interface"
diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py
index 649fd1c48..78569a8d6 100644
--- a/numpy/f2py/tests/test_array_from_pyobj.py
+++ b/numpy/f2py/tests/test_array_from_pyobj.py
@@ -31,11 +31,13 @@ def setup_module():
define_macros=[])
"""
d = os.path.dirname(__file__)
- src = [os.path.join(d, 'src', 'array_from_pyobj', 'wrapmodule.c'),
- os.path.join(d, '..', 'src', 'fortranobject.c'),
- os.path.join(d, '..', 'src', 'fortranobject.h')]
+ src = [
+ util.getpath("tests", "src", "array_from_pyobj", "wrapmodule.c"),
+ util.getpath("src", "fortranobject.c"),
+ util.getpath("src", "fortranobject.h"),
+ ]
wrap = util.build_module_distutils(src, config_code,
- 'test_array_from_pyobj_ext')
+ "test_array_from_pyobj_ext")
def flags_info(arr):
@@ -45,39 +47,49 @@ def flags_info(arr):
def flags2names(flags):
info = []
- for flagname in ['CONTIGUOUS', 'FORTRAN', 'OWNDATA', 'ENSURECOPY',
- 'ENSUREARRAY', 'ALIGNED', 'NOTSWAPPED', 'WRITEABLE',
- 'WRITEBACKIFCOPY', 'UPDATEIFCOPY', 'BEHAVED', 'BEHAVED_RO',
- 'CARRAY', 'FARRAY'
- ]:
+ for flagname in [
+ "CONTIGUOUS",
+ "FORTRAN",
+ "OWNDATA",
+ "ENSURECOPY",
+ "ENSUREARRAY",
+ "ALIGNED",
+ "NOTSWAPPED",
+ "WRITEABLE",
+ "WRITEBACKIFCOPY",
+ "UPDATEIFCOPY",
+ "BEHAVED",
+ "BEHAVED_RO",
+ "CARRAY",
+ "FARRAY",
+ ]:
if abs(flags) & getattr(wrap, flagname, 0):
info.append(flagname)
return info
class Intent:
-
def __init__(self, intent_list=[]):
self.intent_list = intent_list[:]
flags = 0
for i in intent_list:
- if i == 'optional':
+ if i == "optional":
flags |= wrap.F2PY_OPTIONAL
else:
- flags |= getattr(wrap, 'F2PY_INTENT_' + i.upper())
+ flags |= getattr(wrap, "F2PY_INTENT_" + i.upper())
self.flags = flags
def __getattr__(self, name):
name = name.lower()
- if name == 'in_':
- name = 'in'
+ if name == "in_":
+ name = "in"
return self.__class__(self.intent_list + [name])
def __str__(self):
- return 'intent(%s)' % (','.join(self.intent_list))
+ return "intent(%s)" % (",".join(self.intent_list))
def __repr__(self):
- return 'Intent(%r)' % (self.intent_list)
+ return "Intent(%r)" % (self.intent_list)
def is_intent(self, *names):
for name in names:
@@ -88,32 +100,46 @@ class Intent:
def is_intent_exact(self, *names):
return len(self.intent_list) == len(names) and self.is_intent(*names)
-intent = Intent()
-
-_type_names = ['BOOL', 'BYTE', 'UBYTE', 'SHORT', 'USHORT', 'INT', 'UINT',
- 'LONG', 'ULONG', 'LONGLONG', 'ULONGLONG',
- 'FLOAT', 'DOUBLE', 'CFLOAT']
-
-_cast_dict = {'BOOL': ['BOOL']}
-_cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE']
-_cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE']
-_cast_dict['BYTE'] = ['BYTE']
-_cast_dict['UBYTE'] = ['UBYTE']
-_cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE', 'SHORT']
-_cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE', 'USHORT']
-_cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT', 'INT']
-_cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT', 'UINT']
-_cast_dict['LONG'] = _cast_dict['INT'] + ['LONG']
-_cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG']
-
-_cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG']
-_cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + ['ULONGLONG']
-
-_cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT', 'FLOAT']
-_cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT', 'FLOAT', 'DOUBLE']
+intent = Intent()
-_cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT']
+_type_names = [
+ "BOOL",
+ "BYTE",
+ "UBYTE",
+ "SHORT",
+ "USHORT",
+ "INT",
+ "UINT",
+ "LONG",
+ "ULONG",
+ "LONGLONG",
+ "ULONGLONG",
+ "FLOAT",
+ "DOUBLE",
+ "CFLOAT",
+]
+
+_cast_dict = {"BOOL": ["BOOL"]}
+_cast_dict["BYTE"] = _cast_dict["BOOL"] + ["BYTE"]
+_cast_dict["UBYTE"] = _cast_dict["BOOL"] + ["UBYTE"]
+_cast_dict["BYTE"] = ["BYTE"]
+_cast_dict["UBYTE"] = ["UBYTE"]
+_cast_dict["SHORT"] = _cast_dict["BYTE"] + ["UBYTE", "SHORT"]
+_cast_dict["USHORT"] = _cast_dict["UBYTE"] + ["BYTE", "USHORT"]
+_cast_dict["INT"] = _cast_dict["SHORT"] + ["USHORT", "INT"]
+_cast_dict["UINT"] = _cast_dict["USHORT"] + ["SHORT", "UINT"]
+
+_cast_dict["LONG"] = _cast_dict["INT"] + ["LONG"]
+_cast_dict["ULONG"] = _cast_dict["UINT"] + ["ULONG"]
+
+_cast_dict["LONGLONG"] = _cast_dict["LONG"] + ["LONGLONG"]
+_cast_dict["ULONGLONG"] = _cast_dict["ULONG"] + ["ULONGLONG"]
+
+_cast_dict["FLOAT"] = _cast_dict["SHORT"] + ["USHORT", "FLOAT"]
+_cast_dict["DOUBLE"] = _cast_dict["INT"] + ["UINT", "FLOAT", "DOUBLE"]
+
+_cast_dict["CFLOAT"] = _cast_dict["FLOAT"] + ["CFLOAT"]
# 32 bit system malloc typically does not provide the alignment required by
# 16 byte long double types this means the inout intent cannot be satisfied
@@ -121,15 +147,22 @@ _cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT']
# when numpy gains an aligned allocator the tests could be enabled again
#
# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE.
-if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) and
- sys.platform != 'win32' and
- (platform.system(), platform.processor()) != ('Darwin', 'arm')):
- _type_names.extend(['LONGDOUBLE', 'CDOUBLE', 'CLONGDOUBLE'])
- _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + \
- ['ULONG', 'FLOAT', 'DOUBLE', 'LONGDOUBLE']
- _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + \
- ['CFLOAT', 'CDOUBLE', 'CLONGDOUBLE']
- _cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT', 'CDOUBLE']
+if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8)
+ and sys.platform != "win32"
+ and (platform.system(), platform.processor()) != ("Darwin", "arm")):
+ _type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"])
+ _cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [
+ "ULONG",
+ "FLOAT",
+ "DOUBLE",
+ "LONGDOUBLE",
+ ]
+ _cast_dict["CLONGDOUBLE"] = _cast_dict["LONGDOUBLE"] + [
+ "CFLOAT",
+ "CDOUBLE",
+ "CLONGDOUBLE",
+ ]
+ _cast_dict["CDOUBLE"] = _cast_dict["DOUBLE"] + ["CFLOAT", "CDOUBLE"]
class Type:
@@ -154,7 +187,7 @@ class Type:
def _init(self, name):
self.NAME = name.upper()
info = typeinfo[self.NAME]
- self.type_num = getattr(wrap, 'NPY_' + self.NAME)
+ self.type_num = getattr(wrap, "NPY_" + self.NAME)
assert_equal(self.type_num, info.num)
self.dtype = np.dtype(info.type)
self.type = info.type
@@ -195,7 +228,6 @@ class Type:
class Array:
-
def __init__(self, typ, dims, intent, obj):
self.type = typ
self.dims = dims
@@ -211,16 +243,18 @@ class Array:
self.arr_attr = wrap.array_attrs(self.arr)
if len(dims) > 1:
- if self.intent.is_intent('c'):
+ if self.intent.is_intent("c"):
assert_(intent.flags & wrap.F2PY_INTENT_C)
- assert_(not self.arr.flags['FORTRAN'],
- repr((self.arr.flags, getattr(obj, 'flags', None))))
- assert_(self.arr.flags['CONTIGUOUS'])
+ assert_(
+ not self.arr.flags["FORTRAN"],
+ repr((self.arr.flags, getattr(obj, "flags", None))),
+ )
+ assert_(self.arr.flags["CONTIGUOUS"])
assert_(not self.arr_attr[6] & wrap.FORTRAN)
else:
assert_(not intent.flags & wrap.F2PY_INTENT_C)
- assert_(self.arr.flags['FORTRAN'])
- assert_(not self.arr.flags['CONTIGUOUS'])
+ assert_(self.arr.flags["FORTRAN"])
+ assert_(not self.arr.flags["CONTIGUOUS"])
assert_(self.arr_attr[6] & wrap.FORTRAN)
if obj is None:
@@ -228,53 +262,71 @@ class Array:
self.pyarr_attr = None
return
- if intent.is_intent('cache'):
+ if intent.is_intent("cache"):
assert_(isinstance(obj, np.ndarray), repr(type(obj)))
self.pyarr = np.array(obj).reshape(*dims).copy()
else:
self.pyarr = np.array(
- np.array(obj, dtype=typ.dtypechar).reshape(*dims),
- order=self.intent.is_intent('c') and 'C' or 'F')
- assert_(self.pyarr.dtype == typ,
- repr((self.pyarr.dtype, typ)))
- self.pyarr.setflags(write=self.arr.flags['WRITEABLE'])
- assert_(self.pyarr.flags['OWNDATA'], (obj, intent))
+ np.array(obj, dtype=typ.dtypechar).reshape(*dims),
+ order=self.intent.is_intent("c") and "C" or "F",
+ )
+ assert_(self.pyarr.dtype == typ, repr((self.pyarr.dtype, typ)))
+ self.pyarr.setflags(write=self.arr.flags["WRITEABLE"])
+ assert_(self.pyarr.flags["OWNDATA"], (obj, intent))
self.pyarr_attr = wrap.array_attrs(self.pyarr)
if len(dims) > 1:
- if self.intent.is_intent('c'):
- assert_(not self.pyarr.flags['FORTRAN'])
- assert_(self.pyarr.flags['CONTIGUOUS'])
+ if self.intent.is_intent("c"):
+ assert_(not self.pyarr.flags["FORTRAN"])
+ assert_(self.pyarr.flags["CONTIGUOUS"])
assert_(not self.pyarr_attr[6] & wrap.FORTRAN)
else:
- assert_(self.pyarr.flags['FORTRAN'])
- assert_(not self.pyarr.flags['CONTIGUOUS'])
+ assert_(self.pyarr.flags["FORTRAN"])
+ assert_(not self.pyarr.flags["CONTIGUOUS"])
assert_(self.pyarr_attr[6] & wrap.FORTRAN)
assert_(self.arr_attr[1] == self.pyarr_attr[1]) # nd
assert_(self.arr_attr[2] == self.pyarr_attr[2]) # dimensions
if self.arr_attr[1] <= 1:
- assert_(self.arr_attr[3] == self.pyarr_attr[3],
- repr((self.arr_attr[3], self.pyarr_attr[3],
- self.arr.tobytes(), self.pyarr.tobytes()))) # strides
- assert_(self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:],
- repr((self.arr_attr[5], self.pyarr_attr[5]))) # descr
- assert_(self.arr_attr[6] == self.pyarr_attr[6],
- repr((self.arr_attr[6], self.pyarr_attr[6],
- flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]),
- flags2names(self.arr_attr[6]), intent))) # flags
-
- if intent.is_intent('cache'):
- assert_(self.arr_attr[5][3] >= self.type.elsize,
- repr((self.arr_attr[5][3], self.type.elsize)))
+ assert_(
+ self.arr_attr[3] == self.pyarr_attr[3],
+ repr((
+ self.arr_attr[3],
+ self.pyarr_attr[3],
+ self.arr.tobytes(),
+ self.pyarr.tobytes(),
+ )),
+ ) # strides
+ assert_(
+ self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:],
+ repr((self.arr_attr[5], self.pyarr_attr[5])),
+ ) # descr
+ assert_(
+ self.arr_attr[6] == self.pyarr_attr[6],
+ repr((
+ self.arr_attr[6],
+ self.pyarr_attr[6],
+ flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]),
+ flags2names(self.arr_attr[6]),
+ intent,
+ )),
+ ) # flags
+
+ if intent.is_intent("cache"):
+ assert_(
+ self.arr_attr[5][3] >= self.type.elsize,
+ repr((self.arr_attr[5][3], self.type.elsize)),
+ )
else:
- assert_(self.arr_attr[5][3] == self.type.elsize,
- repr((self.arr_attr[5][3], self.type.elsize)))
+ assert_(
+ self.arr_attr[5][3] == self.type.elsize,
+ repr((self.arr_attr[5][3], self.type.elsize)),
+ )
assert_(self.arr_equal(self.pyarr, self.arr))
if isinstance(self.obj, np.ndarray):
if typ.elsize == Type(obj.dtype).elsize:
- if not intent.is_intent('copy') and self.arr_attr[1] <= 1:
+ if not intent.is_intent("copy") and self.arr_attr[1] <= 1:
assert_(self.has_shared_memory())
def arr_equal(self, arr1, arr2):
@@ -286,8 +338,7 @@ class Array:
return str(self.arr)
def has_shared_memory(self):
- """Check that created array shares data with input array.
- """
+ """Check that created array shares data with input array."""
if self.obj is self.arr:
return True
if not isinstance(self.obj, np.ndarray):
@@ -297,25 +348,24 @@ class Array:
class TestIntent:
-
def test_in_out(self):
- assert_equal(str(intent.in_.out), 'intent(in,out)')
- assert_(intent.in_.c.is_intent('c'))
- assert_(not intent.in_.c.is_intent_exact('c'))
- assert_(intent.in_.c.is_intent_exact('c', 'in'))
- assert_(intent.in_.c.is_intent_exact('in', 'c'))
- assert_(not intent.in_.is_intent('c'))
+ assert_equal(str(intent.in_.out), "intent(in,out)")
+ assert_(intent.in_.c.is_intent("c"))
+ assert_(not intent.in_.c.is_intent_exact("c"))
+ assert_(intent.in_.c.is_intent_exact("c", "in"))
+ assert_(intent.in_.c.is_intent_exact("in", "c"))
+ assert_(not intent.in_.is_intent("c"))
class TestSharedMemory:
num2seq = [1, 2]
num23seq = [[1, 2, 3], [4, 5, 6]]
- @pytest.fixture(autouse=True, scope='class', params=_type_names)
+ @pytest.fixture(autouse=True, scope="class", params=_type_names)
def setup_type(self, request):
request.cls.type = Type(request.param)
- request.cls.array = lambda self, dims, intent, obj: \
- Array(Type(request.param), dims, intent, obj)
+ request.cls.array = lambda self, dims, intent, obj: Array(
+ Type(request.param), dims, intent, obj)
def test_in_from_2seq(self):
a = self.array([2], intent.in_, self.num2seq)
@@ -326,21 +376,21 @@ class TestSharedMemory:
obj = np.array(self.num2seq, dtype=t.dtype)
a = self.array([len(self.num2seq)], intent.in_, obj)
if t.elsize == self.type.elsize:
- assert_(
- a.has_shared_memory(), repr((self.type.dtype, t.dtype)))
+ assert_(a.has_shared_memory(), repr(
+ (self.type.dtype, t.dtype)))
else:
assert_(not a.has_shared_memory(), repr(t.dtype))
- @pytest.mark.parametrize('write', ['w', 'ro'])
- @pytest.mark.parametrize('order', ['C', 'F'])
- @pytest.mark.parametrize('inp', ['2seq', '23seq'])
+ @pytest.mark.parametrize("write", ["w", "ro"])
+ @pytest.mark.parametrize("order", ["C", "F"])
+ @pytest.mark.parametrize("inp", ["2seq", "23seq"])
def test_in_nocopy(self, write, order, inp):
- """Test if intent(in) array can be passed without copies
- """
- seq = getattr(self, 'num' + inp)
+ """Test if intent(in) array can be passed without copies"""
+ seq = getattr(self, "num" + inp)
obj = np.array(seq, dtype=self.type.dtype, order=order)
- obj.setflags(write=(write == 'w'))
- a = self.array(obj.shape, ((order=='C' and intent.in_.c) or intent.in_), obj)
+ obj.setflags(write=(write == "w"))
+ a = self.array(obj.shape,
+ ((order == "C" and intent.in_.c) or intent.in_), obj)
assert a.has_shared_memory()
def test_inout_2seq(self):
@@ -351,29 +401,29 @@ class TestSharedMemory:
try:
a = self.array([2], intent.in_.inout, self.num2seq)
except TypeError as msg:
- if not str(msg).startswith('failed to initialize intent'
- '(inout|inplace|cache) array'):
+ if not str(msg).startswith(
+ "failed to initialize intent(inout|inplace|cache) array"):
raise
else:
- raise SystemError('intent(inout) should have failed on sequence')
+ raise SystemError("intent(inout) should have failed on sequence")
def test_f_inout_23seq(self):
- obj = np.array(self.num23seq, dtype=self.type.dtype, order='F')
+ obj = np.array(self.num23seq, dtype=self.type.dtype, order="F")
shape = (len(self.num23seq), len(self.num23seq[0]))
a = self.array(shape, intent.in_.inout, obj)
assert_(a.has_shared_memory())
- obj = np.array(self.num23seq, dtype=self.type.dtype, order='C')
+ obj = np.array(self.num23seq, dtype=self.type.dtype, order="C")
shape = (len(self.num23seq), len(self.num23seq[0]))
try:
a = self.array(shape, intent.in_.inout, obj)
except ValueError as msg:
- if not str(msg).startswith('failed to initialize intent'
- '(inout) array'):
+ if not str(msg).startswith(
+ "failed to initialize intent(inout) array"):
raise
else:
raise SystemError(
- 'intent(inout) should have failed on improper array')
+ "intent(inout) should have failed on improper array")
def test_c_inout_23seq(self):
obj = np.array(self.num23seq, dtype=self.type.dtype)
@@ -388,22 +438,23 @@ class TestSharedMemory:
assert_(not a.has_shared_memory(), repr(t.dtype))
def test_c_in_from_23seq(self):
- a = self.array([len(self.num23seq), len(self.num23seq[0])],
- intent.in_, self.num23seq)
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_,
+ self.num23seq)
assert_(not a.has_shared_memory())
def test_in_from_23casttype(self):
for t in self.type.cast_types():
obj = np.array(self.num23seq, dtype=t.dtype)
- a = self.array([len(self.num23seq), len(self.num23seq[0])],
- intent.in_, obj)
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj)
assert_(not a.has_shared_memory(), repr(t.dtype))
def test_f_in_from_23casttype(self):
for t in self.type.cast_types():
- obj = np.array(self.num23seq, dtype=t.dtype, order='F')
- a = self.array([len(self.num23seq), len(self.num23seq[0])],
- intent.in_, obj)
+ obj = np.array(self.num23seq, dtype=t.dtype, order="F")
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj)
if t.elsize == self.type.elsize:
assert_(a.has_shared_memory(), repr(t.dtype))
else:
@@ -412,8 +463,8 @@ class TestSharedMemory:
def test_c_in_from_23casttype(self):
for t in self.type.cast_types():
obj = np.array(self.num23seq, dtype=t.dtype)
- a = self.array([len(self.num23seq), len(self.num23seq[0])],
- intent.in_.c, obj)
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj)
if t.elsize == self.type.elsize:
assert_(a.has_shared_memory(), repr(t.dtype))
else:
@@ -421,16 +472,18 @@ class TestSharedMemory:
def test_f_copy_in_from_23casttype(self):
for t in self.type.cast_types():
- obj = np.array(self.num23seq, dtype=t.dtype, order='F')
- a = self.array([len(self.num23seq), len(self.num23seq[0])],
- intent.in_.copy, obj)
+ obj = np.array(self.num23seq, dtype=t.dtype, order="F")
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_.copy,
+ obj)
assert_(not a.has_shared_memory(), repr(t.dtype))
def test_c_copy_in_from_23casttype(self):
for t in self.type.cast_types():
obj = np.array(self.num23seq, dtype=t.dtype)
- a = self.array([len(self.num23seq), len(self.num23seq[0])],
- intent.in_.c.copy, obj)
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy,
+ obj)
assert_(not a.has_shared_memory(), repr(t.dtype))
def test_in_cache_from_2casttype(self):
@@ -438,14 +491,14 @@ class TestSharedMemory:
if t.elsize != self.type.elsize:
continue
obj = np.array(self.num2seq, dtype=t.dtype)
- shape = (len(self.num2seq),)
+ shape = (len(self.num2seq), )
a = self.array(shape, intent.in_.c.cache, obj)
assert_(a.has_shared_memory(), repr(t.dtype))
a = self.array(shape, intent.in_.cache, obj)
assert_(a.has_shared_memory(), repr(t.dtype))
- obj = np.array(self.num2seq, dtype=t.dtype, order='F')
+ obj = np.array(self.num2seq, dtype=t.dtype, order="F")
a = self.array(shape, intent.in_.c.cache, obj)
assert_(a.has_shared_memory(), repr(t.dtype))
@@ -455,31 +508,31 @@ class TestSharedMemory:
try:
a = self.array(shape, intent.in_.cache, obj[::-1])
except ValueError as msg:
- if not str(msg).startswith('failed to initialize'
- ' intent(cache) array'):
+ if not str(msg).startswith(
+ "failed to initialize intent(cache) array"):
raise
else:
raise SystemError(
- 'intent(cache) should have failed on multisegmented array')
+ "intent(cache) should have failed on multisegmented array")
def test_in_cache_from_2casttype_failure(self):
for t in self.type.all_types():
if t.elsize >= self.type.elsize:
continue
obj = np.array(self.num2seq, dtype=t.dtype)
- shape = (len(self.num2seq),)
+ shape = (len(self.num2seq), )
try:
self.array(shape, intent.in_.cache, obj) # Should succeed
except ValueError as msg:
- if not str(msg).startswith('failed to initialize'
- ' intent(cache) array'):
+ if not str(msg).startswith(
+ "failed to initialize intent(cache) array"):
raise
else:
raise SystemError(
- 'intent(cache) should have failed on smaller array')
+ "intent(cache) should have failed on smaller array")
def test_cache_hidden(self):
- shape = (2,)
+ shape = (2, )
a = self.array(shape, intent.cache.hide, None)
assert_(a.arr.shape == shape)
@@ -491,15 +544,15 @@ class TestSharedMemory:
try:
a = self.array(shape, intent.cache.hide, None)
except ValueError as msg:
- if not str(msg).startswith('failed to create intent'
- '(cache|hide)|optional array'):
+ if not str(msg).startswith(
+ "failed to create intent(cache|hide)|optional array"):
raise
else:
raise SystemError(
- 'intent(cache) should have failed on undefined dimensions')
+ "intent(cache) should have failed on undefined dimensions")
def test_hidden(self):
- shape = (2,)
+ shape = (2, )
a = self.array(shape, intent.hide, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)))
@@ -508,27 +561,27 @@ class TestSharedMemory:
a = self.array(shape, intent.hide, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)))
- assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS'])
+ assert_(a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"])
shape = (2, 3)
a = self.array(shape, intent.c.hide, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)))
- assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS'])
+ assert_(not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"])
shape = (-1, 3)
try:
a = self.array(shape, intent.hide, None)
except ValueError as msg:
- if not str(msg).startswith('failed to create intent'
- '(cache|hide)|optional array'):
+ if not str(msg).startswith(
+ "failed to create intent(cache|hide)|optional array"):
raise
else:
- raise SystemError('intent(hide) should have failed'
- ' on undefined dimensions')
+ raise SystemError(
+ "intent(hide) should have failed on undefined dimensions")
def test_optional_none(self):
- shape = (2,)
+ shape = (2, )
a = self.array(shape, intent.optional, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)))
@@ -537,17 +590,17 @@ class TestSharedMemory:
a = self.array(shape, intent.optional, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)))
- assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS'])
+ assert_(a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"])
shape = (2, 3)
a = self.array(shape, intent.c.optional, None)
assert_(a.arr.shape == shape)
assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)))
- assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS'])
+ assert_(not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"])
def test_optional_from_2seq(self):
obj = self.num2seq
- shape = (len(obj),)
+ shape = (len(obj), )
a = self.array(shape, intent.optional, obj)
assert_(a.arr.shape == shape)
assert_(not a.has_shared_memory())
@@ -565,16 +618,18 @@ class TestSharedMemory:
def test_inplace(self):
obj = np.array(self.num23seq, dtype=self.type.dtype)
- assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS'])
+ assert_(not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"])
shape = obj.shape
a = self.array(shape, intent.inplace, obj)
assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr)))
a.arr[1][2] = 54
- assert_(obj[1][2] == a.arr[1][2] ==
- np.array(54, dtype=self.type.dtype), repr((obj, a.arr)))
+ assert_(
+ obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype),
+ repr((obj, a.arr)),
+ )
assert_(a.arr is obj)
- assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace!
- assert_(not obj.flags['CONTIGUOUS'])
+ assert_(obj.flags["FORTRAN"]) # obj attributes are changed inplace!
+ assert_(not obj.flags["CONTIGUOUS"])
def test_inplace_from_casttype(self):
for t in self.type.cast_types():
@@ -583,14 +638,17 @@ class TestSharedMemory:
obj = np.array(self.num23seq, dtype=t.dtype)
assert_(obj.dtype.type == t.type)
assert_(obj.dtype.type is not self.type.type)
- assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS'])
+ assert_(not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"])
shape = obj.shape
a = self.array(shape, intent.inplace, obj)
assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr)))
a.arr[1][2] = 54
- assert_(obj[1][2] == a.arr[1][2] ==
- np.array(54, dtype=self.type.dtype), repr((obj, a.arr)))
+ assert_(
+ obj[1][2] == a.arr[1][2] == np.array(54,
+ dtype=self.type.dtype),
+ repr((obj, a.arr)),
+ )
assert_(a.arr is obj)
- assert_(obj.flags['FORTRAN']) # obj attributes changed inplace!
- assert_(not obj.flags['CONTIGUOUS'])
+ assert_(obj.flags["FORTRAN"]) # obj attributes changed inplace!
+ assert_(not obj.flags["CONTIGUOUS"])
assert_(obj.dtype.type is self.type.type) # obj changed inplace!
diff --git a/numpy/f2py/tests/test_assumed_shape.py b/numpy/f2py/tests/test_assumed_shape.py
index 79e3ad138..0d226cb44 100644
--- a/numpy/f2py/tests/test_assumed_shape.py
+++ b/numpy/f2py/tests/test_assumed_shape.py
@@ -6,17 +6,14 @@ from numpy.testing import assert_
from . import util
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestAssumedShapeSumExample(util.F2PyTest):
- sources = [_path('src', 'assumed_shape', 'foo_free.f90'),
- _path('src', 'assumed_shape', 'foo_use.f90'),
- _path('src', 'assumed_shape', 'precision.f90'),
- _path('src', 'assumed_shape', 'foo_mod.f90'),
- _path('src', 'assumed_shape', '.f2py_f2cmap'),
- ]
+ sources = [
+ util.getpath("tests", "src", "assumed_shape", "foo_free.f90"),
+ util.getpath("tests", "src", "assumed_shape", "foo_use.f90"),
+ util.getpath("tests", "src", "assumed_shape", "precision.f90"),
+ util.getpath("tests", "src", "assumed_shape", "foo_mod.f90"),
+ util.getpath("tests", "src", "assumed_shape", ".f2py_f2cmap"),
+ ]
@pytest.mark.slow
def test_all(self):
@@ -40,7 +37,7 @@ class TestF2cmapOption(TestAssumedShapeSumExample):
f2cmap_src = self.sources.pop(-1)
self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False)
- with open(f2cmap_src, 'rb') as f:
+ with open(f2cmap_src, "rb") as f:
self.f2cmap_file.write(f.read())
self.f2cmap_file.close()
diff --git a/numpy/f2py/tests/test_block_docstring.py b/numpy/f2py/tests/test_block_docstring.py
index 7d725165b..36446fe64 100644
--- a/numpy/f2py/tests/test_block_docstring.py
+++ b/numpy/f2py/tests/test_block_docstring.py
@@ -4,6 +4,7 @@ from . import util
from numpy.testing import assert_equal, IS_PYPY
+
class TestBlockDocString(util.F2PyTest):
code = """
SUBROUTINE FOO()
@@ -14,8 +15,8 @@ class TestBlockDocString(util.F2PyTest):
END
"""
- @pytest.mark.skipif(sys.platform=='win32',
- reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_block_docstring(self):
diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py
index 5d2aab94d..8682afe05 100644
--- a/numpy/f2py/tests/test_callback.py
+++ b/numpy/f2py/tests/test_callback.py
@@ -77,7 +77,7 @@ cf2py intent(out) r
end
"""
- @pytest.mark.parametrize('name', 't,t2'.split(','))
+ @pytest.mark.parametrize("name", "t,t2".split(","))
def test_all(self, name):
self.check_function(name)
@@ -116,18 +116,18 @@ cf2py intent(out) r
t = getattr(self.module, name)
r = t(lambda: 4)
assert_(r == 4, repr(r))
- r = t(lambda a: 5, fun_extra_args=(6,))
+ r = t(lambda a: 5, fun_extra_args=(6, ))
assert_(r == 5, repr(r))
- r = t(lambda a: a, fun_extra_args=(6,))
+ r = t(lambda a: a, fun_extra_args=(6, ))
assert_(r == 6, repr(r))
- r = t(lambda a: 5 + a, fun_extra_args=(7,))
+ r = t(lambda a: 5 + a, fun_extra_args=(7, ))
assert_(r == 12, repr(r))
- r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi,))
+ r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi, ))
assert_(r == 180, repr(r))
- r = t(math.degrees, fun_extra_args=(math.pi,))
+ r = t(math.degrees, fun_extra_args=(math.pi, ))
assert_(r == 180, repr(r))
- r = t(self.module.func, fun_extra_args=(6,))
+ r = t(self.module.func, fun_extra_args=(6, ))
assert_(r == 17, repr(r))
r = t(self.module.func0)
assert_(r == 11, repr(r))
@@ -135,48 +135,47 @@ cf2py intent(out) r
assert_(r == 11, repr(r))
class A:
-
def __call__(self):
return 7
def mth(self):
return 9
+
a = A()
r = t(a)
assert_(r == 7, repr(r))
r = t(a.mth)
assert_(r == 9, repr(r))
- @pytest.mark.skipif(sys.platform=='win32',
- reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
def test_string_callback(self):
-
def callback(code):
- if code == 'r':
+ if code == "r":
return 0
else:
return 1
- f = getattr(self.module, 'string_callback')
+ f = getattr(self.module, "string_callback")
r = f(callback)
assert_(r == 0, repr(r))
- @pytest.mark.skipif(sys.platform=='win32',
- reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
def test_string_callback_array(self):
# See gh-10027
- cu = np.zeros((1, 8), 'S1')
+ cu = np.zeros((1, 8), "S1")
def callback(cu, lencu):
if cu.shape != (lencu, 8):
return 1
- if cu.dtype != 'S1':
+ if cu.dtype != "S1":
return 2
- if not np.all(cu == b''):
+ if not np.all(cu == b""):
return 3
return 0
- f = getattr(self.module, 'string_callback_array')
+ f = getattr(self.module, "string_callback_array")
res = f(callback, cu, len(cu))
assert_(res == 0, repr(res))
@@ -205,8 +204,10 @@ cf2py intent(out) r
except Exception:
errors.append(traceback.format_exc())
- threads = [threading.Thread(target=runner, args=(arg,))
- for arg in ("t", "t2") for n in range(20)]
+ threads = [
+ threading.Thread(target=runner, args=(arg, ))
+ for arg in ("t", "t2") for n in range(20)
+ ]
for t in threads:
t.start()
@@ -222,12 +223,12 @@ cf2py intent(out) r
try:
self.module.hidden_callback(2)
except Exception as msg:
- assert_(str(msg).startswith('Callback global_f not defined'))
+ assert_(str(msg).startswith("Callback global_f not defined"))
try:
self.module.hidden_callback2(2)
except Exception as msg:
- assert_(str(msg).startswith('cb: Callback global_f not defined'))
+ assert_(str(msg).startswith("cb: Callback global_f not defined"))
self.module.global_f = lambda x: x + 1
r = self.module.hidden_callback(2)
@@ -241,7 +242,7 @@ cf2py intent(out) r
try:
self.module.hidden_callback(2)
except Exception as msg:
- assert_(str(msg).startswith('Callback global_f not defined'))
+ assert_(str(msg).startswith("Callback global_f not defined"))
self.module.global_f = lambda x=0: x + 3
r = self.module.hidden_callback(2)
@@ -257,15 +258,15 @@ class TestF77CallbackPythonTLS(TestF77Callback):
Callback tests using Python thread-local storage instead of
compiler-provided
"""
+
options = ["-DF2PY_USE_PYTHON_TLS"]
class TestF90Callback(util.F2PyTest):
- suffix = '.f90'
+ suffix = ".f90"
- code = textwrap.dedent(
- """
+ code = textwrap.dedent("""
function gh17797(f, y) result(r)
external f
integer(8) :: r, f
@@ -276,7 +277,6 @@ class TestF90Callback(util.F2PyTest):
""")
def test_gh17797(self):
-
def incr(x):
return x + 123
@@ -292,10 +292,9 @@ class TestGH18335(util.F2PyTest):
other tests!
"""
- suffix = '.f90'
+ suffix = ".f90"
- code = textwrap.dedent(
- """
+ code = textwrap.dedent("""
! When gh18335_workaround is defined as an extension,
! the issue cannot be reproduced.
!subroutine gh18335_workaround(f, y)
@@ -316,7 +315,6 @@ class TestGH18335(util.F2PyTest):
""")
def test_gh18335(self):
-
def foo(x):
x[0] += 1
diff --git a/numpy/f2py/tests/test_common.py b/numpy/f2py/tests/test_common.py
index e4bf35504..056ae5ee8 100644
--- a/numpy/f2py/tests/test_common.py
+++ b/numpy/f2py/tests/test_common.py
@@ -7,19 +7,16 @@ from . import util
from numpy.testing import assert_array_equal
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
class TestCommonBlock(util.F2PyTest):
- sources = [_path('src', 'common', 'block.f')]
+ sources = [util.getpath("tests", "src", "common", "block.f")]
- @pytest.mark.skipif(sys.platform=='win32',
- reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
def test_common_block(self):
self.module.initcb()
assert_array_equal(self.module.block.long_bn,
np.array(1.0, dtype=np.float64))
assert_array_equal(self.module.block.string_bn,
- np.array('2', dtype='|S1'))
- assert_array_equal(self.module.block.ok,
- np.array(3, dtype=np.int32))
+ np.array("2", dtype="|S1"))
+ assert_array_equal(self.module.block.ok, np.array(3, dtype=np.int32))
diff --git a/numpy/f2py/tests/test_compile_function.py b/numpy/f2py/tests/test_compile_function.py
index f76fd6448..e92362d82 100644
--- a/numpy/f2py/tests/test_compile_function.py
+++ b/numpy/f2py/tests/test_compile_function.py
@@ -17,14 +17,13 @@ def setup_module():
if not util.has_c_compiler():
pytest.skip("Needs C compiler")
if not util.has_f77_compiler():
- pytest.skip('Needs FORTRAN 77 compiler')
+ pytest.skip("Needs FORTRAN 77 compiler")
# extra_args can be a list (since gh-11937) or string.
# also test absence of extra_args
-@pytest.mark.parametrize(
- "extra_args", [['--noopt', '--debug'], '--noopt --debug', '']
- )
+@pytest.mark.parametrize("extra_args",
+ [["--noopt", "--debug"], "--noopt --debug", ""])
@pytest.mark.leaks_references(reason="Imported module seems never deleted.")
def test_f2py_init_compile(extra_args):
# flush through the f2py __init__ compile() function code path as a
@@ -33,7 +32,7 @@ def test_f2py_init_compile(extra_args):
# the Fortran 77 syntax requires 6 spaces before any commands, but
# more space may be added/
- fsource = """
+ fsource = """
integer function foo()
foo = 10 + 5
return
@@ -45,7 +44,7 @@ def test_f2py_init_compile(extra_args):
modname = util.get_temp_module_name()
cwd = os.getcwd()
- target = os.path.join(moddir, str(uuid.uuid4()) + '.f')
+ target = os.path.join(moddir, str(uuid.uuid4()) + ".f")
# try running compile() with and without a source_fn provided so
# that the code path where a temporary file for writing Fortran
# source is created is also explored
@@ -54,33 +53,28 @@ def test_f2py_init_compile(extra_args):
# util.py, but don't actually use build_module() because it has
# its own invocation of subprocess that circumvents the
# f2py.compile code block under test
- try:
- os.chdir(moddir)
- ret_val = numpy.f2py.compile(
- fsource,
- modulename=modname,
- extra_args=extra_args,
- source_fn=source_fn
- )
- finally:
- os.chdir(cwd)
-
- # check for compile success return value
- assert_equal(ret_val, 0)
-
- # we are not currently able to import the Python-Fortran
- # interface module on Windows / Appveyor, even though we do get
- # successful compilation on that platform with Python 3.x
- if sys.platform != 'win32':
- # check for sensible result of Fortran function; that means
- # we can import the module name in Python and retrieve the
- # result of the sum operation
- return_check = import_module(modname)
- calc_result = return_check.foo()
- assert_equal(calc_result, 15)
- # Removal from sys.modules, is not as such necessary. Even with
- # removal, the module (dict) stays alive.
- del sys.modules[modname]
+ with util.switchdir(moddir):
+ ret_val = numpy.f2py.compile(fsource,
+ modulename=modname,
+ extra_args=extra_args,
+ source_fn=source_fn)
+
+ # check for compile success return value
+ assert_equal(ret_val, 0)
+
+ # we are not currently able to import the Python-Fortran
+ # interface module on Windows / Appveyor, even though we do get
+ # successful compilation on that platform with Python 3.x
+ if sys.platform != "win32":
+ # check for sensible result of Fortran function; that means
+ # we can import the module name in Python and retrieve the
+ # result of the sum operation
+ return_check = import_module(modname)
+ calc_result = return_check.foo()
+ assert_equal(calc_result, 15)
+ # Removal from sys.modules, is not as such necessary. Even with
+ # removal, the module (dict) stays alive.
+ del sys.modules[modname]
def test_f2py_init_compile_failure():
@@ -99,7 +93,7 @@ def test_f2py_init_compile_bad_cmd():
# downstream NOTE: how bad of an idea is this patching?
try:
temp = sys.executable
- sys.executable = 'does not exist'
+ sys.executable = "does not exist"
# the OSError should take precedence over invalid Fortran
ret_val = numpy.f2py.compile(b"invalid")
@@ -108,18 +102,17 @@ def test_f2py_init_compile_bad_cmd():
sys.executable = temp
-@pytest.mark.parametrize('fsource',
- ['program test_f2py\nend program test_f2py',
- b'program test_f2py\nend program test_f2py',])
+@pytest.mark.parametrize(
+ "fsource",
+ [
+ "program test_f2py\nend program test_f2py",
+ b"program test_f2py\nend program test_f2py",
+ ],
+)
def test_compile_from_strings(tmpdir, fsource):
# Make sure we can compile str and bytes gh-12796
- cwd = os.getcwd()
- try:
- os.chdir(str(tmpdir))
- ret_val = numpy.f2py.compile(
- fsource,
- modulename='test_compile_from_strings',
- extension='.f90')
+ with util.switchdir(tmpdir):
+ ret_val = numpy.f2py.compile(fsource,
+ modulename="test_compile_from_strings",
+ extension=".f90")
assert_equal(ret_val, 0)
- finally:
- os.chdir(cwd)
diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py
index 039e085b4..41d9840ed 100644
--- a/numpy/f2py/tests/test_crackfortran.py
+++ b/numpy/f2py/tests/test_crackfortran.py
@@ -36,15 +36,14 @@ class TestNoSpace(util.F2PyTest):
assert_array_equal(k, w + 1)
self.module.subc([w, k])
assert_array_equal(k, w + 1)
- assert self.module.t0(23) == b'2'
+ assert self.module.t0(23) == b"2"
-class TestPublicPrivate():
-
+class TestPublicPrivate:
def test_defaultPrivate(self, tmp_path):
f_path = tmp_path / "mod.f90"
- with f_path.open('w') as ff:
- ff.write(textwrap.dedent("""\
+ f_path.write_text(
+ textwrap.dedent("""\
module foo
private
integer :: a
@@ -60,17 +59,18 @@ class TestPublicPrivate():
mod = crackfortran.crackfortran([str(f_path)])
assert len(mod) == 1
mod = mod[0]
- assert 'private' in mod['vars']['a']['attrspec']
- assert 'public' not in mod['vars']['a']['attrspec']
- assert 'private' in mod['vars']['b']['attrspec']
- assert 'public' not in mod['vars']['b']['attrspec']
- assert 'private' not in mod['vars']['seta']['attrspec']
- assert 'public' in mod['vars']['seta']['attrspec']
+ assert "private" in mod["vars"]["a"]["attrspec"]
+ assert "public" not in mod["vars"]["a"]["attrspec"]
+ assert "private" in mod["vars"]["b"]["attrspec"]
+ assert "public" not in mod["vars"]["b"]["attrspec"]
+ assert "private" not in mod["vars"]["seta"]["attrspec"]
+ assert "public" in mod["vars"]["seta"]["attrspec"]
def test_defaultPublic(self, tmp_path):
f_path = tmp_path / "mod.f90"
- with f_path.open('w') as ff:
- ff.write(textwrap.dedent("""\
+ with f_path.open("w") as ff:
+ ff.write(
+ textwrap.dedent("""\
module foo
public
integer, private :: a
@@ -85,10 +85,10 @@ class TestPublicPrivate():
mod = crackfortran.crackfortran([str(f_path)])
assert len(mod) == 1
mod = mod[0]
- assert 'private' in mod['vars']['a']['attrspec']
- assert 'public' not in mod['vars']['a']['attrspec']
- assert 'private' not in mod['vars']['seta']['attrspec']
- assert 'public' in mod['vars']['seta']['attrspec']
+ assert "private" in mod["vars"]["a"]["attrspec"]
+ assert "public" not in mod["vars"]["a"]["attrspec"]
+ assert "private" not in mod["vars"]["seta"]["attrspec"]
+ assert "public" in mod["vars"]["seta"]["attrspec"]
class TestExternal(util.F2PyTest):
@@ -111,19 +111,21 @@ class TestExternal(util.F2PyTest):
def test_external_as_statement(self):
def incr(x):
return x + 123
+
r = self.module.external_as_statement(incr)
assert r == 123
def test_external_as_attribute(self):
def incr(x):
return x + 123
+
r = self.module.external_as_attribute(incr)
assert r == 123
class TestCrackFortran(util.F2PyTest):
- suffix = '.f90'
+ suffix = ".f90"
code = textwrap.dedent("""
subroutine gh2848( &
@@ -146,7 +148,7 @@ class TestCrackFortran(util.F2PyTest):
assert r == (1, 2)
-class TestMarkinnerspaces():
+class TestMarkinnerspaces:
# issue #14118: markinnerspaces does not handle multiple quotations
def test_do_not_touch_normal_spaces(self):
@@ -155,13 +157,13 @@ class TestMarkinnerspaces():
assert_equal(markinnerspaces(i), i)
def test_one_relevant_space(self):
- assert_equal(markinnerspaces("a 'b c' \\\' \\\'"), "a 'b@_@c' \\' \\'")
+ assert_equal(markinnerspaces("a 'b c' \\' \\'"), "a 'b@_@c' \\' \\'")
assert_equal(markinnerspaces(r'a "b c" \" \"'), r'a "b@_@c" \" \"')
def test_ignore_inner_quotes(self):
- assert_equal(markinnerspaces('a \'b c" " d\' e'),
+ assert_equal(markinnerspaces("a 'b c\" \" d' e"),
"a 'b@_@c\"@_@\"@_@d' e")
- assert_equal(markinnerspaces('a "b c\' \' d" e'),
+ assert_equal(markinnerspaces("a \"b c' ' d\" e"),
"a \"b@_@c'@_@'@_@d\" e")
def test_multiple_relevant_spaces(self):
@@ -200,7 +202,7 @@ class TestDimSpec(util.F2PyTest):
"""
- suffix = '.f90'
+ suffix = ".f90"
code_template = textwrap.dedent("""
function get_arr_size_{count}(a, n) result (length)
@@ -221,33 +223,36 @@ class TestDimSpec(util.F2PyTest):
end subroutine
""")
- linear_dimspecs = ['n', '2*n', '2:n', 'n/2', '5 - n/2', '3*n:20',
- 'n*(n+1):n*(n+5)']
- nonlinear_dimspecs = ['2*n:3*n*n+2*n']
+ linear_dimspecs = [
+ "n", "2*n", "2:n", "n/2", "5 - n/2", "3*n:20", "n*(n+1):n*(n+5)"
+ ]
+ nonlinear_dimspecs = ["2*n:3*n*n+2*n"]
all_dimspecs = linear_dimspecs + nonlinear_dimspecs
- code = ''
+ code = ""
for count, dimspec in enumerate(all_dimspecs):
code += code_template.format(
- count=count, dimspec=dimspec,
- first=dimspec.split(':')[0] if ':' in dimspec else '1')
+ count=count,
+ dimspec=dimspec,
+ first=dimspec.split(":")[0] if ":" in dimspec else "1",
+ )
- @pytest.mark.parametrize('dimspec', all_dimspecs)
+ @pytest.mark.parametrize("dimspec", all_dimspecs)
def test_array_size(self, dimspec):
count = self.all_dimspecs.index(dimspec)
- get_arr_size = getattr(self.module, f'get_arr_size_{count}')
+ get_arr_size = getattr(self.module, f"get_arr_size_{count}")
for n in [1, 2, 3, 4, 5]:
sz, a = get_arr_size(n)
assert len(a) == sz
- @pytest.mark.parametrize('dimspec', all_dimspecs)
+ @pytest.mark.parametrize("dimspec", all_dimspecs)
def test_inv_array_size(self, dimspec):
count = self.all_dimspecs.index(dimspec)
- get_arr_size = getattr(self.module, f'get_arr_size_{count}')
- get_inv_arr_size = getattr(self.module, f'get_inv_arr_size_{count}')
+ get_arr_size = getattr(self.module, f"get_arr_size_{count}")
+ get_inv_arr_size = getattr(self.module, f"get_inv_arr_size_{count}")
for n in [1, 2, 3, 4, 5]:
sz, a = get_arr_size(n)
@@ -266,11 +271,12 @@ class TestDimSpec(util.F2PyTest):
assert sz == sz1, (n, n1, sz, sz1)
-class TestModuleDeclaration():
+class TestModuleDeclaration:
def test_dependencies(self, tmp_path):
f_path = tmp_path / "mod.f90"
- with f_path.open('w') as ff:
- ff.write(textwrap.dedent("""\
+ with f_path.open("w") as ff:
+ ff.write(
+ textwrap.dedent("""\
module foo
type bar
character(len = 4) :: text
@@ -280,4 +286,4 @@ class TestModuleDeclaration():
"""))
mod = crackfortran.crackfortran([str(f_path)])
assert len(mod) == 1
- assert mod[0]['vars']['abar']['='] == "bar('abar')"
+ assert mod[0]["vars"]["abar"]["="] == "bar('abar')"
diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py
index a7e2b28ed..78a11fc6c 100644
--- a/numpy/f2py/tests/test_kind.py
+++ b/numpy/f2py/tests/test_kind.py
@@ -4,17 +4,13 @@ import pytest
from numpy.testing import assert_
from numpy.f2py.crackfortran import (
_selected_int_kind_func as selected_int_kind,
- _selected_real_kind_func as selected_real_kind
- )
+ _selected_real_kind_func as selected_real_kind,
+)
from . import util
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestKind(util.F2PyTest):
- sources = [_path('src', 'kind', 'foo.f90')]
+ sources = [util.getpath("tests", "src", "kind", "foo.f90")]
@pytest.mark.slow
def test_all(self):
@@ -22,11 +18,15 @@ class TestKind(util.F2PyTest):
selectedintkind = self.module.selectedintkind
for i in range(40):
- assert_(selectedintkind(i) in [selected_int_kind(i), -1],
- 'selectedintkind(%s): expected %r but got %r' %
- (i, selected_int_kind(i), selectedintkind(i)))
+ assert_(
+ selectedintkind(i) in [selected_int_kind(i), -1],
+ "selectedintkind(%s): expected %r but got %r" %
+ (i, selected_int_kind(i), selectedintkind(i)),
+ )
for i in range(20):
- assert_(selectedrealkind(i) in [selected_real_kind(i), -1],
- 'selectedrealkind(%s): expected %r but got %r' %
- (i, selected_real_kind(i), selectedrealkind(i)))
+ assert_(
+ selectedrealkind(i) in [selected_real_kind(i), -1],
+ "selectedrealkind(%s): expected %r but got %r" %
+ (i, selected_real_kind(i), selectedrealkind(i)),
+ )
diff --git a/numpy/f2py/tests/test_mixed.py b/numpy/f2py/tests/test_mixed.py
index 04266ca5b..95444bea5 100644
--- a/numpy/f2py/tests/test_mixed.py
+++ b/numpy/f2py/tests/test_mixed.py
@@ -6,14 +6,12 @@ from numpy.testing import assert_, assert_equal, IS_PYPY
from . import util
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestMixed(util.F2PyTest):
- sources = [_path('src', 'mixed', 'foo.f'),
- _path('src', 'mixed', 'foo_fixed.f90'),
- _path('src', 'mixed', 'foo_free.f90')]
+ sources = [
+ util.getpath("tests", "src", "mixed", "foo.f"),
+ util.getpath("tests", "src", "mixed", "foo_fixed.f90"),
+ util.getpath("tests", "src", "mixed", "foo_free.f90"),
+ ]
def test_all(self):
assert_(self.module.bar11() == 11)
diff --git a/numpy/f2py/tests/test_module_doc.py b/numpy/f2py/tests/test_module_doc.py
index 4b9555cee..b66cff000 100644
--- a/numpy/f2py/tests/test_module_doc.py
+++ b/numpy/f2py/tests/test_module_doc.py
@@ -7,24 +7,24 @@ from . import util
from numpy.testing import assert_equal, IS_PYPY
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestModuleDocString(util.F2PyTest):
- sources = [_path('src', 'module_data', 'module_data_docstring.f90')]
+ sources = [
+ util.getpath("tests", "src", "module_data",
+ "module_data_docstring.f90")
+ ]
- @pytest.mark.skipif(sys.platform=='win32',
- reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_module_docstring(self):
- assert_equal(self.module.mod.__doc__,
- textwrap.dedent('''\
+ assert_equal(
+ self.module.mod.__doc__,
+ textwrap.dedent("""\
i : 'i'-scalar
x : 'i'-array(4)
a : 'f'-array(2,3)
b : 'f'-array(-1,-1), not allocated\x00
foo()\n
- Wrapper for ``foo``.\n\n''')
- )
+ Wrapper for ``foo``.\n\n"""),
+ )
diff --git a/numpy/f2py/tests/test_parameter.py b/numpy/f2py/tests/test_parameter.py
index b61827169..4ea102e84 100644
--- a/numpy/f2py/tests/test_parameter.py
+++ b/numpy/f2py/tests/test_parameter.py
@@ -7,17 +7,14 @@ from numpy.testing import assert_raises, assert_equal
from . import util
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestParameters(util.F2PyTest):
# Check that intent(in out) translates as intent(inout)
- sources = [_path('src', 'parameter', 'constant_real.f90'),
- _path('src', 'parameter', 'constant_integer.f90'),
- _path('src', 'parameter', 'constant_both.f90'),
- _path('src', 'parameter', 'constant_compound.f90'),
- _path('src', 'parameter', 'constant_non_compound.f90'),
+ sources = [
+ util.getpath("tests", "src", "parameter", "constant_real.f90"),
+ util.getpath("tests", "src", "parameter", "constant_integer.f90"),
+ util.getpath("tests", "src", "parameter", "constant_both.f90"),
+ util.getpath("tests", "src", "parameter", "constant_compound.f90"),
+ util.getpath("tests", "src", "parameter", "constant_non_compound.f90"),
]
@pytest.mark.slow
@@ -29,7 +26,7 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.float32)
self.module.foo_single(x)
- assert_equal(x, [0 + 1 + 2*3, 1, 2])
+ assert_equal(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_real_double(self):
@@ -40,7 +37,7 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo_double(x)
- assert_equal(x, [0 + 1 + 2*3, 1, 2])
+ assert_equal(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_compound_int(self):
@@ -51,14 +48,14 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.int32)
self.module.foo_compound_int(x)
- assert_equal(x, [0 + 1 + 2*6, 1, 2])
+ assert_equal(x, [0 + 1 + 2 * 6, 1, 2])
@pytest.mark.slow
def test_constant_non_compound_int(self):
# check values
x = np.arange(4, dtype=np.int32)
self.module.foo_non_compound_int(x)
- assert_equal(x, [0 + 1 + 2 + 3*4, 1, 2, 3])
+ assert_equal(x, [0 + 1 + 2 + 3 * 4, 1, 2, 3])
@pytest.mark.slow
def test_constant_integer_int(self):
@@ -69,7 +66,7 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.int32)
self.module.foo_int(x)
- assert_equal(x, [0 + 1 + 2*3, 1, 2])
+ assert_equal(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_integer_long(self):
@@ -80,7 +77,7 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.int64)
self.module.foo_long(x)
- assert_equal(x, [0 + 1 + 2*3, 1, 2])
+ assert_equal(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_both(self):
@@ -91,7 +88,7 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo(x)
- assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
+ assert_equal(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
@pytest.mark.slow
def test_constant_no(self):
@@ -102,7 +99,7 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo_no(x)
- assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
+ assert_equal(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
@pytest.mark.slow
def test_constant_sum(self):
@@ -113,4 +110,4 @@ class TestParameters(util.F2PyTest):
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo_sum(x)
- assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
+ assert_equal(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
diff --git a/numpy/f2py/tests/test_quoted_character.py b/numpy/f2py/tests/test_quoted_character.py
index 20c77666c..efb9ad08b 100644
--- a/numpy/f2py/tests/test_quoted_character.py
+++ b/numpy/f2py/tests/test_quoted_character.py
@@ -26,7 +26,7 @@ Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6
END
"""
- @pytest.mark.skipif(sys.platform=='win32',
- reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
def test_quoted_character(self):
- assert_equal(self.module.foo(), (b"'", b'"', b';', b'!', b'(', b')'))
+ assert_equal(self.module.foo(), (b"'", b'"', b";", b"!", b"(", b")"))
diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py
index b91499e4a..682b9e98c 100644
--- a/numpy/f2py/tests/test_regression.py
+++ b/numpy/f2py/tests/test_regression.py
@@ -7,13 +7,9 @@ from numpy.testing import assert_, assert_raises, assert_equal, assert_string_eq
from . import util
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestIntentInOut(util.F2PyTest):
# Check that intent(in out) translates as intent(inout)
- sources = [_path('src', 'regression', 'inout.f90')]
+ sources = [util.getpath("tests", "src", "regression", "inout.f90")]
@pytest.mark.slow
def test_inout(self):
@@ -30,18 +26,22 @@ class TestIntentInOut(util.F2PyTest):
class TestNumpyVersionAttribute(util.F2PyTest):
# Check that th attribute __f2py_numpy_version__ is present
# in the compiled module and that has the value np.__version__.
- sources = [_path('src', 'regression', 'inout.f90')]
+ sources = [util.getpath("tests", "src", "regression", "inout.f90")]
@pytest.mark.slow
def test_numpy_version_attribute(self):
# Check that self.module has an attribute named "__f2py_numpy_version__"
- assert_(hasattr(self.module, "__f2py_numpy_version__"),
- msg="Fortran module does not have __f2py_numpy_version__")
+ assert_(
+ hasattr(self.module, "__f2py_numpy_version__"),
+ msg="Fortran module does not have __f2py_numpy_version__",
+ )
# Check that the attribute __f2py_numpy_version__ is a string
- assert_(isinstance(self.module.__f2py_numpy_version__, str),
- msg="__f2py_numpy_version__ is not a string")
+ assert_(
+ isinstance(self.module.__f2py_numpy_version__, str),
+ msg="__f2py_numpy_version__ is not a string",
+ )
# Check that __f2py_numpy_version__ has the value numpy.__version__
assert_string_equal(np.__version__, self.module.__f2py_numpy_version__)
@@ -50,6 +50,5 @@ class TestNumpyVersionAttribute(util.F2PyTest):
def test_include_path():
incdir = np.f2py.get_include()
fnames_in_dir = os.listdir(incdir)
- for fname in ('fortranobject.c', 'fortranobject.h'):
+ for fname in ("fortranobject.c", "fortranobject.h"):
assert fname in fnames_in_dir
-
diff --git a/numpy/f2py/tests/test_return_character.py b/numpy/f2py/tests/test_return_character.py
index 2c999ed0b..3c3a43e1b 100644
--- a/numpy/f2py/tests/test_return_character.py
+++ b/numpy/f2py/tests/test_return_character.py
@@ -4,29 +4,29 @@ from numpy import array
from numpy.testing import assert_
from . import util
import platform
-IS_S390X = platform.machine() == 's390x'
+IS_S390X = platform.machine() == "s390x"
-class TestReturnCharacter(util.F2PyTest):
+class TestReturnCharacter(util.F2PyTest):
def check_function(self, t, tname):
- if tname in ['t0', 't1', 's0', 's1']:
- assert_(t(23) == b'2')
- r = t('ab')
- assert_(r == b'a', repr(r))
- r = t(array('ab'))
- assert_(r == b'a', repr(r))
- r = t(array(77, 'u1'))
- assert_(r == b'M', repr(r))
- #assert_(_raises(ValueError, t, array([77,87])))
- #assert_(_raises(ValueError, t, array(77)))
- elif tname in ['ts', 'ss']:
- assert_(t(23) == b'23', repr(t(23)))
- assert_(t('123456789abcdef') == b'123456789a')
- elif tname in ['t5', 's5']:
- assert_(t(23) == b'23', repr(t(23)))
- assert_(t('ab') == b'ab', repr(t('ab')))
- assert_(t('123456789abcdef') == b'12345')
+ if tname in ["t0", "t1", "s0", "s1"]:
+ assert_(t(23) == b"2")
+ r = t("ab")
+ assert_(r == b"a", repr(r))
+ r = t(array("ab"))
+ assert_(r == b"a", repr(r))
+ r = t(array(77, "u1"))
+ assert_(r == b"M", repr(r))
+ # assert_(_raises(ValueError, t, array([77,87])))
+ # assert_(_raises(ValueError, t, array(77)))
+ elif tname in ["ts", "ss"]:
+ assert_(t(23) == b"23", repr(t(23)))
+ assert_(t("123456789abcdef") == b"123456789a")
+ elif tname in ["t5", "s5"]:
+ assert_(t(23) == b"23", repr(t(23)))
+ assert_(t("ab") == b"ab", repr(t("ab")))
+ assert_(t("123456789abcdef") == b"12345")
else:
raise NotImplementedError
@@ -81,7 +81,7 @@ cf2py intent(out) ts
"""
@pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
- @pytest.mark.parametrize('name', 't0,t1,t5,s0,s1,s5,ss'.split(','))
+ @pytest.mark.parametrize("name", "t0,t1,t5,s0,s1,s5,ss".split(","))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
@@ -140,6 +140,6 @@ end module f90_return_char
"""
@pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
- @pytest.mark.parametrize('name', 't0,t1,t5,ts,s0,s1,s5,ss'.split(','))
+ @pytest.mark.parametrize("name", "t0,t1,t5,ts,s0,s1,s5,ss".split(","))
def test_all(self, name):
self.check_function(getattr(self.module.f90_return_char, name), name)
diff --git a/numpy/f2py/tests/test_return_complex.py b/numpy/f2py/tests/test_return_complex.py
index 3d2e2b94f..ae0e3ab25 100644
--- a/numpy/f2py/tests/test_return_complex.py
+++ b/numpy/f2py/tests/test_return_complex.py
@@ -6,9 +6,8 @@ from . import util
class TestReturnComplex(util.F2PyTest):
-
def check_function(self, t, tname):
- if tname in ['t0', 't8', 's0', 's8']:
+ if tname in ["t0", "t8", "s0", "s8"]:
err = 1e-5
else:
err = 0.0
@@ -16,27 +15,27 @@ class TestReturnComplex(util.F2PyTest):
assert_(abs(t(234.6) - 234.6) <= err)
assert_(abs(t(234) - 234.0) <= err)
assert_(abs(t(234.6 + 3j) - (234.6 + 3j)) <= err)
- #assert_( abs(t('234')-234.)<=err)
- #assert_( abs(t('234.6')-234.6)<=err)
- assert_(abs(t(-234) + 234.) <= err)
- assert_(abs(t([234]) - 234.) <= err)
- assert_(abs(t((234,)) - 234.) <= err)
- assert_(abs(t(array(234)) - 234.) <= err)
- assert_(abs(t(array(23 + 4j, 'F')) - (23 + 4j)) <= err)
- assert_(abs(t(array([234])) - 234.) <= err)
- assert_(abs(t(array([[234]])) - 234.) <= err)
- assert_(abs(t(array([234], 'b')) + 22.) <= err)
- assert_(abs(t(array([234], 'h')) - 234.) <= err)
- assert_(abs(t(array([234], 'i')) - 234.) <= err)
- assert_(abs(t(array([234], 'l')) - 234.) <= err)
- assert_(abs(t(array([234], 'q')) - 234.) <= err)
- assert_(abs(t(array([234], 'f')) - 234.) <= err)
- assert_(abs(t(array([234], 'd')) - 234.) <= err)
- assert_(abs(t(array([234 + 3j], 'F')) - (234 + 3j)) <= err)
- assert_(abs(t(array([234], 'D')) - 234.) <= err)
-
- #assert_raises(TypeError, t, array([234], 'a1'))
- assert_raises(TypeError, t, 'abc')
+ # assert_( abs(t('234')-234.)<=err)
+ # assert_( abs(t('234.6')-234.6)<=err)
+ assert_(abs(t(-234) + 234.0) <= err)
+ assert_(abs(t([234]) - 234.0) <= err)
+ assert_(abs(t((234, )) - 234.0) <= err)
+ assert_(abs(t(array(234)) - 234.0) <= err)
+ assert_(abs(t(array(23 + 4j, "F")) - (23 + 4j)) <= err)
+ assert_(abs(t(array([234])) - 234.0) <= err)
+ assert_(abs(t(array([[234]])) - 234.0) <= err)
+ assert_(abs(t(array([234], "b")) + 22.0) <= err)
+ assert_(abs(t(array([234], "h")) - 234.0) <= err)
+ assert_(abs(t(array([234], "i")) - 234.0) <= err)
+ assert_(abs(t(array([234], "l")) - 234.0) <= err)
+ assert_(abs(t(array([234], "q")) - 234.0) <= err)
+ assert_(abs(t(array([234], "f")) - 234.0) <= err)
+ assert_(abs(t(array([234], "d")) - 234.0) <= err)
+ assert_(abs(t(array([234 + 3j], "F")) - (234 + 3j)) <= err)
+ assert_(abs(t(array([234], "D")) - 234.0) <= err)
+
+ # assert_raises(TypeError, t, array([234], 'a1'))
+ assert_raises(TypeError, t, "abc")
assert_raises(IndexError, t, [])
assert_raises(IndexError, t, ())
@@ -45,8 +44,8 @@ class TestReturnComplex(util.F2PyTest):
assert_raises(TypeError, t, {})
try:
- r = t(10 ** 400)
- assert_(repr(r) in ['(inf+0j)', '(Infinity+0j)'], repr(r))
+ r = t(10**400)
+ assert_(repr(r) in ["(inf+0j)", "(Infinity+0j)"], repr(r))
except OverflowError:
pass
@@ -100,7 +99,7 @@ cf2py intent(out) td
end
"""
- @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(','))
+ @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(","))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
@@ -158,6 +157,7 @@ module f90_return_complex
end module f90_return_complex
"""
- @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(','))
+ @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(","))
def test_all(self, name):
- self.check_function(getattr(self.module.f90_return_complex, name), name)
+ self.check_function(getattr(self.module.f90_return_complex, name),
+ name)
diff --git a/numpy/f2py/tests/test_return_integer.py b/numpy/f2py/tests/test_return_integer.py
index 0a8121dc1..9c2bdbce2 100644
--- a/numpy/f2py/tests/test_return_integer.py
+++ b/numpy/f2py/tests/test_return_integer.py
@@ -6,27 +6,26 @@ from . import util
class TestReturnInteger(util.F2PyTest):
-
def check_function(self, t, tname):
assert_(t(123) == 123, repr(t(123)))
assert_(t(123.6) == 123)
- assert_(t('123') == 123)
+ assert_(t("123") == 123)
assert_(t(-123) == -123)
assert_(t([123]) == 123)
- assert_(t((123,)) == 123)
+ assert_(t((123, )) == 123)
assert_(t(array(123)) == 123)
assert_(t(array([123])) == 123)
assert_(t(array([[123]])) == 123)
- assert_(t(array([123], 'b')) == 123)
- assert_(t(array([123], 'h')) == 123)
- assert_(t(array([123], 'i')) == 123)
- assert_(t(array([123], 'l')) == 123)
- assert_(t(array([123], 'B')) == 123)
- assert_(t(array([123], 'f')) == 123)
- assert_(t(array([123], 'd')) == 123)
+ assert_(t(array([123], "b")) == 123)
+ assert_(t(array([123], "h")) == 123)
+ assert_(t(array([123], "i")) == 123)
+ assert_(t(array([123], "l")) == 123)
+ assert_(t(array([123], "B")) == 123)
+ assert_(t(array([123], "f")) == 123)
+ assert_(t(array([123], "d")) == 123)
- #assert_raises(ValueError, t, array([123],'S3'))
- assert_raises(ValueError, t, 'abc')
+ # assert_raises(ValueError, t, array([123],'S3'))
+ assert_raises(ValueError, t, "abc")
assert_raises(IndexError, t, [])
assert_raises(IndexError, t, ())
@@ -34,7 +33,7 @@ class TestReturnInteger(util.F2PyTest):
assert_raises(Exception, t, t)
assert_raises(Exception, t, {})
- if tname in ['t8', 's8']:
+ if tname in ["t8", "s8"]:
assert_raises(OverflowError, t, 100000000000000000000000)
assert_raises(OverflowError, t, 10000000011111111111111.23)
@@ -99,8 +98,8 @@ cf2py intent(out) t8
end
"""
- @pytest.mark.parametrize('name',
- 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(','))
+ @pytest.mark.parametrize("name",
+ "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
@@ -169,7 +168,8 @@ module f90_return_integer
end module f90_return_integer
"""
- @pytest.mark.parametrize('name',
- 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(','))
+ @pytest.mark.parametrize("name",
+ "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","))
def test_all(self, name):
- self.check_function(getattr(self.module.f90_return_integer, name), name)
+ self.check_function(getattr(self.module.f90_return_integer, name),
+ name)
diff --git a/numpy/f2py/tests/test_return_logical.py b/numpy/f2py/tests/test_return_logical.py
index 9db939c7e..c1a365c7a 100644
--- a/numpy/f2py/tests/test_return_logical.py
+++ b/numpy/f2py/tests/test_return_logical.py
@@ -6,7 +6,6 @@ from . import util
class TestReturnLogical(util.F2PyTest):
-
def check_function(self, t):
assert_(t(True) == 1, repr(t(True)))
assert_(t(False) == 0, repr(t(False)))
@@ -18,28 +17,28 @@ class TestReturnLogical(util.F2PyTest):
assert_(t(234) == 1)
assert_(t(234.6) == 1)
assert_(t(234.6 + 3j) == 1)
- assert_(t('234') == 1)
- assert_(t('aaa') == 1)
- assert_(t('') == 0)
+ assert_(t("234") == 1)
+ assert_(t("aaa") == 1)
+ assert_(t("") == 0)
assert_(t([]) == 0)
assert_(t(()) == 0)
assert_(t({}) == 0)
assert_(t(t) == 1)
assert_(t(-234) == 1)
- assert_(t(10 ** 100) == 1)
+ assert_(t(10**100) == 1)
assert_(t([234]) == 1)
- assert_(t((234,)) == 1)
+ assert_(t((234, )) == 1)
assert_(t(array(234)) == 1)
assert_(t(array([234])) == 1)
assert_(t(array([[234]])) == 1)
- assert_(t(array([234], 'b')) == 1)
- assert_(t(array([234], 'h')) == 1)
- assert_(t(array([234], 'i')) == 1)
- assert_(t(array([234], 'l')) == 1)
- assert_(t(array([234], 'f')) == 1)
- assert_(t(array([234], 'd')) == 1)
- assert_(t(array([234 + 3j], 'F')) == 1)
- assert_(t(array([234], 'D')) == 1)
+ assert_(t(array([234], "b")) == 1)
+ assert_(t(array([234], "h")) == 1)
+ assert_(t(array([234], "i")) == 1)
+ assert_(t(array([234], "l")) == 1)
+ assert_(t(array([234], "f")) == 1)
+ assert_(t(array([234], "d")) == 1)
+ assert_(t(array([234 + 3j], "F")) == 1)
+ assert_(t(array([234], "D")) == 1)
assert_(t(array(0)) == 0)
assert_(t(array([0])) == 0)
assert_(t(array([[0]])) == 0)
@@ -109,7 +108,7 @@ c end
"""
@pytest.mark.slow
- @pytest.mark.parametrize('name', 't0,t1,t2,t4,s0,s1,s2,s4'.split(','))
+ @pytest.mark.parametrize("name", "t0,t1,t2,t4,s0,s1,s2,s4".split(","))
def test_all(self, name):
self.check_function(getattr(self.module, name))
@@ -179,7 +178,7 @@ end module f90_return_logical
"""
@pytest.mark.slow
- @pytest.mark.parametrize('name',
- 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(','))
+ @pytest.mark.parametrize("name",
+ "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","))
def test_all(self, name):
self.check_function(getattr(self.module.f90_return_logical, name))
diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py
index 8e5022a8e..d5e5ee482 100644
--- a/numpy/f2py/tests/test_return_real.py
+++ b/numpy/f2py/tests/test_return_real.py
@@ -7,34 +7,33 @@ from . import util
class TestReturnReal(util.F2PyTest):
-
def check_function(self, t, tname):
- if tname in ['t0', 't4', 's0', 's4']:
+ if tname in ["t0", "t4", "s0", "s4"]:
err = 1e-5
else:
err = 0.0
assert_(abs(t(234) - 234.0) <= err)
assert_(abs(t(234.6) - 234.6) <= err)
- assert_(abs(t('234') - 234) <= err)
- assert_(abs(t('234.6') - 234.6) <= err)
+ assert_(abs(t("234") - 234) <= err)
+ assert_(abs(t("234.6") - 234.6) <= err)
assert_(abs(t(-234) + 234) <= err)
assert_(abs(t([234]) - 234) <= err)
- assert_(abs(t((234,)) - 234.) <= err)
- assert_(abs(t(array(234)) - 234.) <= err)
- assert_(abs(t(array([234])) - 234.) <= err)
- assert_(abs(t(array([[234]])) - 234.) <= err)
- assert_(abs(t(array([234], 'b')) + 22) <= err)
- assert_(abs(t(array([234], 'h')) - 234.) <= err)
- assert_(abs(t(array([234], 'i')) - 234.) <= err)
- assert_(abs(t(array([234], 'l')) - 234.) <= err)
- assert_(abs(t(array([234], 'B')) - 234.) <= err)
- assert_(abs(t(array([234], 'f')) - 234.) <= err)
- assert_(abs(t(array([234], 'd')) - 234.) <= err)
- if tname in ['t0', 't4', 's0', 's4']:
+ assert_(abs(t((234, )) - 234.0) <= err)
+ assert_(abs(t(array(234)) - 234.0) <= err)
+ assert_(abs(t(array([234])) - 234.0) <= err)
+ assert_(abs(t(array([[234]])) - 234.0) <= err)
+ assert_(abs(t(array([234], "b")) + 22) <= err)
+ assert_(abs(t(array([234], "h")) - 234.0) <= err)
+ assert_(abs(t(array([234], "i")) - 234.0) <= err)
+ assert_(abs(t(array([234], "l")) - 234.0) <= err)
+ assert_(abs(t(array([234], "B")) - 234.0) <= err)
+ assert_(abs(t(array([234], "f")) - 234.0) <= err)
+ assert_(abs(t(array([234], "d")) - 234.0) <= err)
+ if tname in ["t0", "t4", "s0", "s4"]:
assert_(t(1e200) == t(1e300)) # inf
- #assert_raises(ValueError, t, array([234], 'S1'))
- assert_raises(ValueError, t, 'abc')
+ # assert_raises(ValueError, t, array([234], 'S1'))
+ assert_raises(ValueError, t, "abc")
assert_raises(IndexError, t, [])
assert_raises(IndexError, t, ())
@@ -43,17 +42,17 @@ class TestReturnReal(util.F2PyTest):
assert_raises(Exception, t, {})
try:
- r = t(10 ** 400)
- assert_(repr(r) in ['inf', 'Infinity'], repr(r))
+ r = t(10**400)
+ assert_(repr(r) in ["inf", "Infinity"], repr(r))
except OverflowError:
pass
-
@pytest.mark.skipif(
- platform.system() == 'Darwin',
+ platform.system() == "Darwin",
reason="Prone to error when run with numpy/f2py/tests on mac os, "
- "but not when run in isolation")
+ "but not when run in isolation",
+)
class TestCReturnReal(TestReturnReal):
suffix = ".pyf"
module_name = "c_ext_return_real"
@@ -86,7 +85,7 @@ end interface
end python module c_ext_return_real
"""
- @pytest.mark.parametrize('name', 't4,t8,s4,s8'.split(','))
+ @pytest.mark.parametrize("name", "t4,t8,s4,s8".split(","))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
@@ -140,7 +139,7 @@ cf2py intent(out) td
end
"""
- @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(','))
+ @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(","))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
@@ -198,6 +197,6 @@ module f90_return_real
end module f90_return_real
"""
- @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(','))
+ @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(","))
def test_all(self, name):
self.check_function(getattr(self.module.f90_return_real, name), name)
diff --git a/numpy/f2py/tests/test_semicolon_split.py b/numpy/f2py/tests/test_semicolon_split.py
index d8b4bf222..745c472f8 100644
--- a/numpy/f2py/tests/test_semicolon_split.py
+++ b/numpy/f2py/tests/test_semicolon_split.py
@@ -4,15 +4,17 @@ import pytest
from . import util
from numpy.testing import assert_equal
+
@pytest.mark.skipif(
- platform.system() == 'Darwin',
+ platform.system() == "Darwin",
reason="Prone to error when run with numpy/f2py/tests on mac os, "
- "but not when run in isolation")
+ "but not when run in isolation",
+)
class TestMultiline(util.F2PyTest):
suffix = ".pyf"
module_name = "multiline"
- code = """
-python module {module}
+ code = f"""
+python module {module_name}
usercode '''
void foo(int* x) {{
char dummy = ';';
@@ -25,22 +27,23 @@ void foo(int* x) {{
integer intent(out) :: x
end subroutine foo
end interface
-end python module {module}
- """.format(module=module_name)
+end python module {module_name}
+ """
def test_multiline(self):
assert_equal(self.module.foo(), 42)
@pytest.mark.skipif(
- platform.system() == 'Darwin',
+ platform.system() == "Darwin",
reason="Prone to error when run with numpy/f2py/tests on mac os, "
- "but not when run in isolation")
+ "but not when run in isolation",
+)
class TestCallstatement(util.F2PyTest):
suffix = ".pyf"
module_name = "callstatement"
- code = """
-python module {module}
+ code = f"""
+python module {module_name}
usercode '''
void foo(int* x) {{
}}
@@ -56,8 +59,8 @@ void foo(int* x) {{
}}
end subroutine foo
end interface
-end python module {module}
- """.format(module=module_name)
+end python module {module_name}
+ """
def test_callstatement(self):
assert_equal(self.module.foo(), 42)
diff --git a/numpy/f2py/tests/test_size.py b/numpy/f2py/tests/test_size.py
index b609fa77f..3360e2a3d 100644
--- a/numpy/f2py/tests/test_size.py
+++ b/numpy/f2py/tests/test_size.py
@@ -5,12 +5,8 @@ from numpy.testing import assert_equal
from . import util
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestSizeSumExample(util.F2PyTest):
- sources = [_path('src', 'size', 'foo.f90')]
+ sources = [util.getpath("tests", "src", "size", "foo.f90")]
@pytest.mark.slow
def test_all(self):
diff --git a/numpy/f2py/tests/test_string.py b/numpy/f2py/tests/test_string.py
index 7b27f8786..1a6d59610 100644
--- a/numpy/f2py/tests/test_string.py
+++ b/numpy/f2py/tests/test_string.py
@@ -6,26 +6,22 @@ import numpy as np
from . import util
-def _path(*a):
- return os.path.join(*((os.path.dirname(__file__),) + a))
-
-
class TestString(util.F2PyTest):
- sources = [_path('src', 'string', 'char.f90')]
+ sources = [util.getpath("tests", "src", "string", "char.f90")]
@pytest.mark.slow
def test_char(self):
- strings = np.array(['ab', 'cd', 'ef'], dtype='c').T
- inp, out = self.module.char_test.change_strings(strings,
- strings.shape[1])
+ strings = np.array(["ab", "cd", "ef"], dtype="c").T
+ inp, out = self.module.char_test.change_strings(
+ strings, strings.shape[1])
assert_array_equal(inp, strings)
expected = strings.copy()
- expected[1, :] = 'AAA'
+ expected[1, :] = "AAA"
assert_array_equal(out, expected)
class TestDocStringArguments(util.F2PyTest):
- suffix = '.f'
+ suffix = ".f"
code = """
C FILE: STRING.F
@@ -34,39 +30,30 @@ C FILE: STRING.F
CHARACTER*(*) C,D
Cf2py intent(in) a,c
Cf2py intent(inout) b,d
- PRINT*, "A=",A
- PRINT*, "B=",B
- PRINT*, "C=",C
- PRINT*, "D=",D
- PRINT*, "CHANGE A,B,C,D"
A(1:1) = 'A'
B(1:1) = 'B'
C(1:1) = 'C'
D(1:1) = 'D'
- PRINT*, "A=",A
- PRINT*, "B=",B
- PRINT*, "C=",C
- PRINT*, "D=",D
END
C END OF FILE STRING.F
"""
def test_example(self):
- a = np.array(b'123\0\0')
- b = np.array(b'123\0\0')
- c = np.array(b'123')
- d = np.array(b'123')
+ a = np.array(b"123\0\0")
+ b = np.array(b"123\0\0")
+ c = np.array(b"123")
+ d = np.array(b"123")
self.module.foo(a, b, c, d)
- assert a.tobytes() == b'123\0\0'
- assert b.tobytes() == b'B23\0\0', (b.tobytes(),)
- assert c.tobytes() == b'123'
- assert d.tobytes() == b'D23'
+ assert a.tobytes() == b"123\0\0"
+ assert b.tobytes() == b"B23\0\0"
+ assert c.tobytes() == b"123"
+ assert d.tobytes() == b"D23"
class TestFixedString(util.F2PyTest):
- suffix = '.f90'
+ suffix = ".f90"
code = textwrap.dedent("""
function sint(s) result(i)
@@ -122,41 +109,41 @@ class TestFixedString(util.F2PyTest):
end = len(s)
i = 0
for j in range(start, min(end, len(s))):
- i += s[j] * 10 ** j
+ i += s[j] * 10**j
return i
- def _get_input(self, intent='in'):
- if intent in ['in']:
- yield ''
- yield '1'
- yield '1234'
- yield '12345'
- yield b''
- yield b'\0'
- yield b'1'
- yield b'\01'
- yield b'1\0'
- yield b'1234'
- yield b'12345'
- yield np.ndarray((), np.bytes_, buffer=b'') # array(b'', dtype='|S0')
- yield np.array(b'') # array(b'', dtype='|S1')
- yield np.array(b'\0')
- yield np.array(b'1')
- yield np.array(b'1\0')
- yield np.array(b'\01')
- yield np.array(b'1234')
- yield np.array(b'123\0')
- yield np.array(b'12345')
+ def _get_input(self, intent="in"):
+ if intent in ["in"]:
+ yield ""
+ yield "1"
+ yield "1234"
+ yield "12345"
+ yield b""
+ yield b"\0"
+ yield b"1"
+ yield b"\01"
+ yield b"1\0"
+ yield b"1234"
+ yield b"12345"
+ yield np.ndarray((), np.bytes_, buffer=b"") # array(b'', dtype='|S0')
+ yield np.array(b"") # array(b'', dtype='|S1')
+ yield np.array(b"\0")
+ yield np.array(b"1")
+ yield np.array(b"1\0")
+ yield np.array(b"\01")
+ yield np.array(b"1234")
+ yield np.array(b"123\0")
+ yield np.array(b"12345")
def test_intent_in(self):
for s in self._get_input():
r = self.module.test_in_bytes4(s)
# also checks that s is not changed inplace
expected = self._sint(s, end=4)
- assert r == expected, (s)
+ assert r == expected, s
def test_intent_inout(self):
- for s in self._get_input(intent='inout'):
+ for s in self._get_input(intent="inout"):
rest = self._sint(s, start=4)
r = self.module.test_inout_bytes4(s)
expected = self._sint(s, end=4)
diff --git a/numpy/f2py/tests/test_symbolic.py b/numpy/f2py/tests/test_symbolic.py
index 52cabac53..4b8993886 100644
--- a/numpy/f2py/tests/test_symbolic.py
+++ b/numpy/f2py/tests/test_symbolic.py
@@ -1,35 +1,55 @@
from numpy.testing import assert_raises
from numpy.f2py.symbolic import (
- Expr, Op, ArithOp, Language,
- as_symbol, as_number, as_string, as_array, as_complex,
- as_terms, as_factors, eliminate_quotes, insert_quotes,
- fromstring, as_expr, as_apply,
- as_numer_denom, as_ternary, as_ref, as_deref,
- normalize, as_eq, as_ne, as_lt, as_gt, as_le, as_ge
- )
+ Expr,
+ Op,
+ ArithOp,
+ Language,
+ as_symbol,
+ as_number,
+ as_string,
+ as_array,
+ as_complex,
+ as_terms,
+ as_factors,
+ eliminate_quotes,
+ insert_quotes,
+ fromstring,
+ as_expr,
+ as_apply,
+ as_numer_denom,
+ as_ternary,
+ as_ref,
+ as_deref,
+ normalize,
+ as_eq,
+ as_ne,
+ as_lt,
+ as_gt,
+ as_le,
+ as_ge,
+)
from . import util
class TestSymbolic(util.F2PyTest):
-
def test_eliminate_quotes(self):
def worker(s):
r, d = eliminate_quotes(s)
s1 = insert_quotes(r, d)
assert s1 == s
- for kind in ['', 'mykind_']:
+ for kind in ["", "mykind_"]:
worker(kind + '"1234" // "ABCD"')
worker(kind + '"1234" // ' + kind + '"ABCD"')
- worker(kind + '"1234" // \'ABCD\'')
- worker(kind + '"1234" // ' + kind + '\'ABCD\'')
+ worker(kind + "\"1234\" // 'ABCD'")
+ worker(kind + '"1234" // ' + kind + "'ABCD'")
worker(kind + '"1\\"2\'AB\'34"')
- worker('a = ' + kind + "'1\\'2\"AB\"34'")
+ worker("a = " + kind + "'1\\'2\"AB\"34'")
def test_sanity(self):
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
assert x.op == Op.SYMBOL
assert repr(x) == "Expr(Op.SYMBOL, 'x')"
@@ -70,7 +90,7 @@ class TestSymbolic(util.F2PyTest):
assert s != s2
a = as_array((n, m))
- b = as_array((n,))
+ b = as_array((n, ))
assert a.op == Op.ARRAY
assert repr(a) == ("Expr(Op.ARRAY, (Expr(Op.INTEGER, (123, 4)),"
" Expr(Op.INTEGER, (456, 4))))")
@@ -108,88 +128,90 @@ class TestSymbolic(util.F2PyTest):
assert hash(e) is not None
def test_tostring_fortran(self):
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
n = as_number(123)
m = as_number(456)
a = as_array((n, m))
c = as_complex(n, m)
- assert str(x) == 'x'
- assert str(n) == '123'
- assert str(a) == '[123, 456]'
- assert str(c) == '(123, 456)'
-
- assert str(Expr(Op.TERMS, {x: 1})) == 'x'
- assert str(Expr(Op.TERMS, {x: 2})) == '2 * x'
- assert str(Expr(Op.TERMS, {x: -1})) == '-x'
- assert str(Expr(Op.TERMS, {x: -2})) == '-2 * x'
- assert str(Expr(Op.TERMS, {x: 1, y: 1})) == 'x + y'
- assert str(Expr(Op.TERMS, {x: -1, y: -1})) == '-x - y'
- assert str(Expr(Op.TERMS, {x: 2, y: 3})) == '2 * x + 3 * y'
- assert str(Expr(Op.TERMS, {x: -2, y: 3})) == '-2 * x + 3 * y'
- assert str(Expr(Op.TERMS, {x: 2, y: -3})) == '2 * x - 3 * y'
-
- assert str(Expr(Op.FACTORS, {x: 1})) == 'x'
- assert str(Expr(Op.FACTORS, {x: 2})) == 'x ** 2'
- assert str(Expr(Op.FACTORS, {x: -1})) == 'x ** -1'
- assert str(Expr(Op.FACTORS, {x: -2})) == 'x ** -2'
- assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == 'x * y'
- assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == 'x ** 2 * y ** 3'
+ assert str(x) == "x"
+ assert str(n) == "123"
+ assert str(a) == "[123, 456]"
+ assert str(c) == "(123, 456)"
+
+ assert str(Expr(Op.TERMS, {x: 1})) == "x"
+ assert str(Expr(Op.TERMS, {x: 2})) == "2 * x"
+ assert str(Expr(Op.TERMS, {x: -1})) == "-x"
+ assert str(Expr(Op.TERMS, {x: -2})) == "-2 * x"
+ assert str(Expr(Op.TERMS, {x: 1, y: 1})) == "x + y"
+ assert str(Expr(Op.TERMS, {x: -1, y: -1})) == "-x - y"
+ assert str(Expr(Op.TERMS, {x: 2, y: 3})) == "2 * x + 3 * y"
+ assert str(Expr(Op.TERMS, {x: -2, y: 3})) == "-2 * x + 3 * y"
+ assert str(Expr(Op.TERMS, {x: 2, y: -3})) == "2 * x - 3 * y"
+
+ assert str(Expr(Op.FACTORS, {x: 1})) == "x"
+ assert str(Expr(Op.FACTORS, {x: 2})) == "x ** 2"
+ assert str(Expr(Op.FACTORS, {x: -1})) == "x ** -1"
+ assert str(Expr(Op.FACTORS, {x: -2})) == "x ** -2"
+ assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == "x * y"
+ assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == "x ** 2 * y ** 3"
v = Expr(Op.FACTORS, {x: 2, Expr(Op.TERMS, {x: 1, y: 1}): 3})
- assert str(v) == 'x ** 2 * (x + y) ** 3', str(v)
+ assert str(v) == "x ** 2 * (x + y) ** 3", str(v)
v = Expr(Op.FACTORS, {x: 2, Expr(Op.FACTORS, {x: 1, y: 1}): 3})
- assert str(v) == 'x ** 2 * (x * y) ** 3', str(v)
+ assert str(v) == "x ** 2 * (x * y) ** 3", str(v)
- assert str(Expr(Op.APPLY, ('f', (), {}))) == 'f()'
- assert str(Expr(Op.APPLY, ('f', (x,), {}))) == 'f(x)'
- assert str(Expr(Op.APPLY, ('f', (x, y), {}))) == 'f(x, y)'
- assert str(Expr(Op.INDEXING, ('f', x))) == 'f[x]'
+ assert str(Expr(Op.APPLY, ("f", (), {}))) == "f()"
+ assert str(Expr(Op.APPLY, ("f", (x, ), {}))) == "f(x)"
+ assert str(Expr(Op.APPLY, ("f", (x, y), {}))) == "f(x, y)"
+ assert str(Expr(Op.INDEXING, ("f", x))) == "f[x]"
- assert str(as_ternary(x, y, z)) == 'merge(y, z, x)'
- assert str(as_eq(x, y)) == 'x .eq. y'
- assert str(as_ne(x, y)) == 'x .ne. y'
- assert str(as_lt(x, y)) == 'x .lt. y'
- assert str(as_le(x, y)) == 'x .le. y'
- assert str(as_gt(x, y)) == 'x .gt. y'
- assert str(as_ge(x, y)) == 'x .ge. y'
+ assert str(as_ternary(x, y, z)) == "merge(y, z, x)"
+ assert str(as_eq(x, y)) == "x .eq. y"
+ assert str(as_ne(x, y)) == "x .ne. y"
+ assert str(as_lt(x, y)) == "x .lt. y"
+ assert str(as_le(x, y)) == "x .le. y"
+ assert str(as_gt(x, y)) == "x .gt. y"
+ assert str(as_ge(x, y)) == "x .ge. y"
def test_tostring_c(self):
language = Language.C
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
n = as_number(123)
- assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == 'x * x'
- assert Expr(Op.FACTORS, {x + y: 2}).tostring(
- language=language) == '(x + y) * (x + y)'
- assert Expr(Op.FACTORS, {x: 12}).tostring(
- language=language) == 'pow(x, 12)'
-
- assert as_apply(ArithOp.DIV, x, y).tostring(
- language=language) == 'x / y'
- assert as_apply(ArithOp.DIV, x, x + y).tostring(
- language=language) == 'x / (x + y)'
- assert as_apply(ArithOp.DIV, x - y, x + y).tostring(
- language=language) == '(x - y) / (x + y)'
- assert (x + (x - y) / (x + y) + n).tostring(
- language=language) == '123 + x + (x - y) / (x + y)'
-
- assert as_ternary(x, y, z).tostring(language=language) == '(x ? y : z)'
- assert as_eq(x, y).tostring(language=language) == 'x == y'
- assert as_ne(x, y).tostring(language=language) == 'x != y'
- assert as_lt(x, y).tostring(language=language) == 'x < y'
- assert as_le(x, y).tostring(language=language) == 'x <= y'
- assert as_gt(x, y).tostring(language=language) == 'x > y'
- assert as_ge(x, y).tostring(language=language) == 'x >= y'
+ assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == "x * x"
+ assert (Expr(Op.FACTORS, {
+ x + y: 2
+ }).tostring(language=language) == "(x + y) * (x + y)")
+ assert Expr(Op.FACTORS, {
+ x: 12
+ }).tostring(language=language) == "pow(x, 12)"
+
+ assert as_apply(ArithOp.DIV, x,
+ y).tostring(language=language) == "x / y"
+ assert (as_apply(ArithOp.DIV, x,
+ x + y).tostring(language=language) == "x / (x + y)")
+ assert (as_apply(ArithOp.DIV, x - y, x +
+ y).tostring(language=language) == "(x - y) / (x + y)")
+ assert (x + (x - y) / (x + y) +
+ n).tostring(language=language) == "123 + x + (x - y) / (x + y)"
+
+ assert as_ternary(x, y, z).tostring(language=language) == "(x ? y : z)"
+ assert as_eq(x, y).tostring(language=language) == "x == y"
+ assert as_ne(x, y).tostring(language=language) == "x != y"
+ assert as_lt(x, y).tostring(language=language) == "x < y"
+ assert as_le(x, y).tostring(language=language) == "x <= y"
+ assert as_gt(x, y).tostring(language=language) == "x > y"
+ assert as_ge(x, y).tostring(language=language) == "x >= y"
def test_operations(self):
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
assert x + x == Expr(Op.TERMS, {x: 2})
assert x - x == Expr(Op.INTEGER, (0, 4))
@@ -205,28 +227,35 @@ class TestSymbolic(util.F2PyTest):
assert 2 * x + 3 * y == Expr(Op.TERMS, {x: 2, y: 3})
assert (x + y) * 2 == Expr(Op.TERMS, {x: 2, y: 2})
- assert x ** 2 == Expr(Op.FACTORS, {x: 2})
- assert (x + y) ** 2 == Expr(Op.TERMS,
- {Expr(Op.FACTORS, {x: 2}): 1,
- Expr(Op.FACTORS, {y: 2}): 1,
- Expr(Op.FACTORS, {x: 1, y: 1}): 2})
- assert (x + y) * x == x ** 2 + x * y
- assert (x + y) ** 2 == x ** 2 + 2 * x * y + y ** 2
- assert (x + y) ** 2 + (x - y) ** 2 == 2 * x ** 2 + 2 * y ** 2
+ assert x**2 == Expr(Op.FACTORS, {x: 2})
+ assert (x + y)**2 == Expr(
+ Op.TERMS,
+ {
+ Expr(Op.FACTORS, {x: 2}): 1,
+ Expr(Op.FACTORS, {y: 2}): 1,
+ Expr(Op.FACTORS, {
+ x: 1,
+ y: 1
+ }): 2,
+ },
+ )
+ assert (x + y) * x == x**2 + x * y
+ assert (x + y)**2 == x**2 + 2 * x * y + y**2
+ assert (x + y)**2 + (x - y)**2 == 2 * x**2 + 2 * y**2
assert (x + y) * z == x * z + y * z
assert z * (x + y) == x * z + y * z
assert (x / 2) == as_apply(ArithOp.DIV, x, as_number(2))
assert (2 * x / 2) == x
- assert (3 * x / 2) == as_apply(ArithOp.DIV, 3*x, as_number(2))
+ assert (3 * x / 2) == as_apply(ArithOp.DIV, 3 * x, as_number(2))
assert (4 * x / 2) == 2 * x
- assert (5 * x / 2) == as_apply(ArithOp.DIV, 5*x, as_number(2))
+ assert (5 * x / 2) == as_apply(ArithOp.DIV, 5 * x, as_number(2))
assert (6 * x / 2) == 3 * x
- assert ((3*5) * x / 6) == as_apply(ArithOp.DIV, 5*x, as_number(2))
- assert (30*x**2*y**4 / (24*x**3*y**3)) == as_apply(ArithOp.DIV,
- 5*y, 4*x)
- assert ((15 * x / 6) / 5) == as_apply(
- ArithOp.DIV, x, as_number(2)), ((15 * x / 6) / 5)
+ assert ((3 * 5) * x / 6) == as_apply(ArithOp.DIV, 5 * x, as_number(2))
+ assert (30 * x**2 * y**4 / (24 * x**3 * y**3)) == as_apply(
+ ArithOp.DIV, 5 * y, 4 * x)
+ assert ((15 * x / 6) / 5) == as_apply(ArithOp.DIV, x,
+ as_number(2)), (15 * x / 6) / 5
assert (x / (5 / x)) == as_apply(ArithOp.DIV, x**2, as_number(5))
assert (x / 2.0) == Expr(Op.TERMS, {x: 0.5})
@@ -238,127 +267,128 @@ class TestSymbolic(util.F2PyTest):
assert s // x == Expr(Op.CONCAT, (s, x))
assert x // s == Expr(Op.CONCAT, (x, s))
- c = as_complex(1., 2.)
- assert -c == as_complex(-1., -2.)
- assert c + c == as_expr((1+2j)*2)
- assert c * c == as_expr((1+2j)**2)
+ c = as_complex(1.0, 2.0)
+ assert -c == as_complex(-1.0, -2.0)
+ assert c + c == as_expr((1 + 2j) * 2)
+ assert c * c == as_expr((1 + 2j)**2)
def test_substitute(self):
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
a = as_array((x, y))
assert x.substitute({x: y}) == y
assert (x + y).substitute({x: z}) == y + z
assert (x * y).substitute({x: z}) == y * z
- assert (x ** 4).substitute({x: z}) == z ** 4
+ assert (x**4).substitute({x: z}) == z**4
assert (x / y).substitute({x: z}) == z / y
assert x.substitute({x: y + z}) == y + z
assert a.substitute({x: y + z}) == as_array((y + z, y))
- assert as_ternary(x, y, z).substitute(
- {x: y + z}) == as_ternary(y + z, y, z)
- assert as_eq(x, y).substitute(
- {x: y + z}) == as_eq(y + z, y)
+ assert as_ternary(x, y,
+ z).substitute({x: y + z}) == as_ternary(y + z, y, z)
+ assert as_eq(x, y).substitute({x: y + z}) == as_eq(y + z, y)
def test_fromstring(self):
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
- f = as_symbol('f')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
+ f = as_symbol("f")
s = as_string('"ABC"')
t = as_string('"123"')
a = as_array((x, y))
- assert fromstring('x') == x
- assert fromstring('+ x') == x
- assert fromstring('- x') == -x
- assert fromstring('x + y') == x + y
- assert fromstring('x + 1') == x + 1
- assert fromstring('x * y') == x * y
- assert fromstring('x * 2') == x * 2
- assert fromstring('x / y') == x / y
- assert fromstring('x ** 2',
- language=Language.Python) == x ** 2
- assert fromstring('x ** 2 ** 3',
- language=Language.Python) == x ** 2 ** 3
- assert fromstring('(x + y) * z') == (x + y) * z
-
- assert fromstring('f(x)') == f(x)
- assert fromstring('f(x,y)') == f(x, y)
- assert fromstring('f[x]') == f[x]
- assert fromstring('f[x][y]') == f[x][y]
+ assert fromstring("x") == x
+ assert fromstring("+ x") == x
+ assert fromstring("- x") == -x
+ assert fromstring("x + y") == x + y
+ assert fromstring("x + 1") == x + 1
+ assert fromstring("x * y") == x * y
+ assert fromstring("x * 2") == x * 2
+ assert fromstring("x / y") == x / y
+ assert fromstring("x ** 2", language=Language.Python) == x**2
+ assert fromstring("x ** 2 ** 3", language=Language.Python) == x**2**3
+ assert fromstring("(x + y) * z") == (x + y) * z
+
+ assert fromstring("f(x)") == f(x)
+ assert fromstring("f(x,y)") == f(x, y)
+ assert fromstring("f[x]") == f[x]
+ assert fromstring("f[x][y]") == f[x][y]
assert fromstring('"ABC"') == s
- assert normalize(fromstring('"ABC" // "123" ',
- language=Language.Fortran)) == s // t
+ assert (normalize(
+ fromstring('"ABC" // "123" ',
+ language=Language.Fortran)) == s // t)
assert fromstring('f("ABC")') == f(s)
- assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', 'MYSTRKIND')
-
- assert fromstring('(/x, y/)') == a, fromstring('(/x, y/)')
- assert fromstring('f((/x, y/))') == f(a)
- assert fromstring('(/(x+y)*z/)') == as_array(((x+y)*z,))
-
- assert fromstring('123') == as_number(123)
- assert fromstring('123_2') == as_number(123, 2)
- assert fromstring('123_myintkind') == as_number(123, 'myintkind')
-
- assert fromstring('123.0') == as_number(123.0, 4)
- assert fromstring('123.0_4') == as_number(123.0, 4)
- assert fromstring('123.0_8') == as_number(123.0, 8)
- assert fromstring('123.0e0') == as_number(123.0, 4)
- assert fromstring('123.0d0') == as_number(123.0, 8)
- assert fromstring('123d0') == as_number(123.0, 8)
- assert fromstring('123e-0') == as_number(123.0, 4)
- assert fromstring('123d+0') == as_number(123.0, 8)
- assert fromstring('123.0_myrealkind') == as_number(123.0, 'myrealkind')
- assert fromstring('3E4') == as_number(30000.0, 4)
-
- assert fromstring('(1, 2)') == as_complex(1, 2)
- assert fromstring('(1e2, PI)') == as_complex(
- as_number(100.0), as_symbol('PI'))
-
- assert fromstring('[1, 2]') == as_array((as_number(1), as_number(2)))
-
- assert fromstring('POINT(x, y=1)') == as_apply(
- as_symbol('POINT'), x, y=as_number(1))
- assert (fromstring('PERSON(name="John", age=50, shape=(/34, 23/))')
- == as_apply(as_symbol('PERSON'),
- name=as_string('"John"'),
- age=as_number(50),
- shape=as_array((as_number(34), as_number(23)))))
-
- assert fromstring('x?y:z') == as_ternary(x, y, z)
-
- assert fromstring('*x') == as_deref(x)
- assert fromstring('**x') == as_deref(as_deref(x))
- assert fromstring('&x') == as_ref(x)
- assert fromstring('(*x) * (*y)') == as_deref(x) * as_deref(y)
- assert fromstring('(*x) * *y') == as_deref(x) * as_deref(y)
- assert fromstring('*x * *y') == as_deref(x) * as_deref(y)
- assert fromstring('*x**y') == as_deref(x) * as_deref(y)
-
- assert fromstring('x == y') == as_eq(x, y)
- assert fromstring('x != y') == as_ne(x, y)
- assert fromstring('x < y') == as_lt(x, y)
- assert fromstring('x > y') == as_gt(x, y)
- assert fromstring('x <= y') == as_le(x, y)
- assert fromstring('x >= y') == as_ge(x, y)
-
- assert fromstring('x .eq. y', language=Language.Fortran) == as_eq(x, y)
- assert fromstring('x .ne. y', language=Language.Fortran) == as_ne(x, y)
- assert fromstring('x .lt. y', language=Language.Fortran) == as_lt(x, y)
- assert fromstring('x .gt. y', language=Language.Fortran) == as_gt(x, y)
- assert fromstring('x .le. y', language=Language.Fortran) == as_le(x, y)
- assert fromstring('x .ge. y', language=Language.Fortran) == as_ge(x, y)
+ assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', "MYSTRKIND")
+
+ assert fromstring("(/x, y/)") == a, fromstring("(/x, y/)")
+ assert fromstring("f((/x, y/))") == f(a)
+ assert fromstring("(/(x+y)*z/)") == as_array(((x + y) * z, ))
+
+ assert fromstring("123") == as_number(123)
+ assert fromstring("123_2") == as_number(123, 2)
+ assert fromstring("123_myintkind") == as_number(123, "myintkind")
+
+ assert fromstring("123.0") == as_number(123.0, 4)
+ assert fromstring("123.0_4") == as_number(123.0, 4)
+ assert fromstring("123.0_8") == as_number(123.0, 8)
+ assert fromstring("123.0e0") == as_number(123.0, 4)
+ assert fromstring("123.0d0") == as_number(123.0, 8)
+ assert fromstring("123d0") == as_number(123.0, 8)
+ assert fromstring("123e-0") == as_number(123.0, 4)
+ assert fromstring("123d+0") == as_number(123.0, 8)
+ assert fromstring("123.0_myrealkind") == as_number(123.0, "myrealkind")
+ assert fromstring("3E4") == as_number(30000.0, 4)
+
+ assert fromstring("(1, 2)") == as_complex(1, 2)
+ assert fromstring("(1e2, PI)") == as_complex(as_number(100.0),
+ as_symbol("PI"))
+
+ assert fromstring("[1, 2]") == as_array((as_number(1), as_number(2)))
+
+ assert fromstring("POINT(x, y=1)") == as_apply(as_symbol("POINT"),
+ x,
+ y=as_number(1))
+ assert fromstring(
+ 'PERSON(name="John", age=50, shape=(/34, 23/))') == as_apply(
+ as_symbol("PERSON"),
+ name=as_string('"John"'),
+ age=as_number(50),
+ shape=as_array((as_number(34), as_number(23))),
+ )
+
+ assert fromstring("x?y:z") == as_ternary(x, y, z)
+
+ assert fromstring("*x") == as_deref(x)
+ assert fromstring("**x") == as_deref(as_deref(x))
+ assert fromstring("&x") == as_ref(x)
+ assert fromstring("(*x) * (*y)") == as_deref(x) * as_deref(y)
+ assert fromstring("(*x) * *y") == as_deref(x) * as_deref(y)
+ assert fromstring("*x * *y") == as_deref(x) * as_deref(y)
+ assert fromstring("*x**y") == as_deref(x) * as_deref(y)
+
+ assert fromstring("x == y") == as_eq(x, y)
+ assert fromstring("x != y") == as_ne(x, y)
+ assert fromstring("x < y") == as_lt(x, y)
+ assert fromstring("x > y") == as_gt(x, y)
+ assert fromstring("x <= y") == as_le(x, y)
+ assert fromstring("x >= y") == as_ge(x, y)
+
+ assert fromstring("x .eq. y", language=Language.Fortran) == as_eq(x, y)
+ assert fromstring("x .ne. y", language=Language.Fortran) == as_ne(x, y)
+ assert fromstring("x .lt. y", language=Language.Fortran) == as_lt(x, y)
+ assert fromstring("x .gt. y", language=Language.Fortran) == as_gt(x, y)
+ assert fromstring("x .le. y", language=Language.Fortran) == as_le(x, y)
+ assert fromstring("x .ge. y", language=Language.Fortran) == as_ge(x, y)
def test_traverse(self):
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
- f = as_symbol('f')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
+ f = as_symbol("f")
# Use traverse to substitute a symbol
def replace_visit(s, r=z):
@@ -373,8 +403,9 @@ class TestSymbolic(util.F2PyTest):
assert (f[y]).traverse(replace_visit) == f[y]
assert (f[z]).traverse(replace_visit) == f[z]
assert (x + y + z).traverse(replace_visit) == (2 * z + y)
- assert (x + f(y, x - z)).traverse(
- replace_visit) == (z + f(y, as_number(0)))
+ assert (x +
+ f(y, x - z)).traverse(replace_visit) == (z +
+ f(y, as_number(0)))
assert as_eq(x, y).traverse(replace_visit) == as_eq(z, y)
# Use traverse to collect symbols, method 1
@@ -416,28 +447,28 @@ class TestSymbolic(util.F2PyTest):
assert symbols == {x}
def test_linear_solve(self):
- x = as_symbol('x')
- y = as_symbol('y')
- z = as_symbol('z')
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
assert x.linear_solve(x) == (as_number(1), as_number(0))
- assert (x+1).linear_solve(x) == (as_number(1), as_number(1))
- assert (2*x).linear_solve(x) == (as_number(2), as_number(0))
- assert (2*x+3).linear_solve(x) == (as_number(2), as_number(3))
+ assert (x + 1).linear_solve(x) == (as_number(1), as_number(1))
+ assert (2 * x).linear_solve(x) == (as_number(2), as_number(0))
+ assert (2 * x + 3).linear_solve(x) == (as_number(2), as_number(3))
assert as_number(3).linear_solve(x) == (as_number(0), as_number(3))
assert y.linear_solve(x) == (as_number(0), y)
- assert (y*z).linear_solve(x) == (as_number(0), y * z)
+ assert (y * z).linear_solve(x) == (as_number(0), y * z)
- assert (x+y).linear_solve(x) == (as_number(1), y)
- assert (z*x+y).linear_solve(x) == (z, y)
- assert ((z+y)*x+y).linear_solve(x) == (z + y, y)
- assert (z*y*x+y).linear_solve(x) == (z * y, y)
+ assert (x + y).linear_solve(x) == (as_number(1), y)
+ assert (z * x + y).linear_solve(x) == (z, y)
+ assert ((z + y) * x + y).linear_solve(x) == (z + y, y)
+ assert (z * y * x + y).linear_solve(x) == (z * y, y)
- assert_raises(RuntimeError, lambda: (x*x).linear_solve(x))
+ assert_raises(RuntimeError, lambda: (x * x).linear_solve(x))
def test_as_numer_denom(self):
- x = as_symbol('x')
- y = as_symbol('y')
+ x = as_symbol("x")
+ y = as_symbol("y")
n = as_number(123)
assert as_numer_denom(x) == (x, as_number(1))
@@ -446,11 +477,11 @@ class TestSymbolic(util.F2PyTest):
assert as_numer_denom(x / y) == (x, y)
assert as_numer_denom(x * y) == (x * y, as_number(1))
assert as_numer_denom(n + x / y) == (x + n * y, y)
- assert as_numer_denom(n + x / (y - x / n)) == (y * n ** 2, y * n - x)
+ assert as_numer_denom(n + x / (y - x / n)) == (y * n**2, y * n - x)
def test_polynomial_atoms(self):
- x = as_symbol('x')
- y = as_symbol('y')
+ x = as_symbol("x")
+ y = as_symbol("y")
n = as_number(123)
assert x.polynomial_atoms() == {x}
@@ -459,4 +490,4 @@ class TestSymbolic(util.F2PyTest):
assert (y(x)).polynomial_atoms() == {y(x)}
assert (y(x) + x).polynomial_atoms() == {y(x), x}
assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]}
- assert (y(x) ** x).polynomial_atoms() == {y(x)}
+ assert (y(x)**x).polynomial_atoms() == {y(x)}
diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index 1a6805e75..c115970f4 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -3,6 +3,7 @@ Utility functions for
- building and importing modules on test time, using a temporary location
- detecting if compilers are present
+- determining paths to tests
"""
import os
@@ -14,7 +15,10 @@ import atexit
import textwrap
import re
import pytest
+import contextlib
+import numpy
+from pathlib import Path
from numpy.compat import asbytes, asstr
from numpy.testing import temppath
from importlib import import_module
@@ -78,9 +82,11 @@ def _memoize(func):
if isinstance(ret, Exception):
raise ret
return ret
+
wrapper.__name__ = func.__name__
return wrapper
+
#
# Building modules
#
@@ -93,8 +99,7 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None):
"""
- code = ("import sys; sys.path = %s; import numpy.f2py as f2py2e; "
- "f2py2e.main()" % repr(sys.path))
+ code = f"import sys; sys.path = {sys.path!r}; import numpy.f2py; numpy.f2py.main()"
d = get_module_dir()
@@ -109,29 +114,30 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None):
dst_sources.append(dst)
base, ext = os.path.splitext(dst)
- if ext in ('.f90', '.f', '.c', '.pyf'):
+ if ext in (".f90", ".f", ".c", ".pyf"):
f2py_sources.append(dst)
# Prepare options
if module_name is None:
module_name = get_temp_module_name()
- f2py_opts = ['-c', '-m', module_name] + options + f2py_sources
+ f2py_opts = ["-c", "-m", module_name] + options + f2py_sources
if skip:
- f2py_opts += ['skip:'] + skip
+ f2py_opts += ["skip:"] + skip
if only:
- f2py_opts += ['only:'] + only
+ f2py_opts += ["only:"] + only
# Build
cwd = os.getcwd()
try:
os.chdir(d)
- cmd = [sys.executable, '-c', code] + f2py_opts
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ cmd = [sys.executable, "-c", code] + f2py_opts
+ p = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = p.communicate()
if p.returncode != 0:
- raise RuntimeError("Running f2py failed: %s\n%s"
- % (cmd[4:], asstr(out)))
+ raise RuntimeError("Running f2py failed: %s\n%s" %
+ (cmd[4:], asstr(out)))
finally:
os.chdir(cwd)
@@ -144,20 +150,28 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None):
@_memoize
-def build_code(source_code, options=[], skip=[], only=[], suffix=None,
+def build_code(source_code,
+ options=[],
+ skip=[],
+ only=[],
+ suffix=None,
module_name=None):
"""
Compile and import Fortran code using f2py.
"""
if suffix is None:
- suffix = '.f'
+ suffix = ".f"
with temppath(suffix=suffix) as path:
- with open(path, 'w') as f:
+ with open(path, "w") as f:
f.write(source_code)
- return build_module([path], options=options, skip=skip, only=only,
+ return build_module([path],
+ options=options,
+ skip=skip,
+ only=only,
module_name=module_name)
+
#
# Check if compilers are available at all...
#
@@ -174,10 +188,10 @@ def _get_compiler_status():
# XXX: this is really ugly. But I don't know how to invoke Distutils
# in a safer way...
- code = textwrap.dedent("""\
+ code = textwrap.dedent(f"""\
import os
import sys
- sys.path = %(syspath)s
+ sys.path = {repr(sys.path)}
def configuration(parent_name='',top_path=None):
global config
@@ -189,7 +203,7 @@ def _get_compiler_status():
setup(configuration=configuration)
config_cmd = config.get_config_cmd()
- have_c = config_cmd.try_compile('void foo() {}')
+ have_c = config_cmd.try_compile('void foo() {{}}')
print('COMPILERS:%%d,%%d,%%d' %% (have_c,
config.have_f77c(),
config.have_f90c()))
@@ -199,23 +213,27 @@ def _get_compiler_status():
tmpdir = tempfile.mkdtemp()
try:
- script = os.path.join(tmpdir, 'setup.py')
+ script = os.path.join(tmpdir, "setup.py")
- with open(script, 'w') as f:
+ with open(script, "w") as f:
f.write(code)
- cmd = [sys.executable, 'setup.py', 'config']
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ cmd = [sys.executable, "setup.py", "config"]
+ p = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmpdir)
out, err = p.communicate()
finally:
shutil.rmtree(tmpdir)
- m = re.search(br'COMPILERS:(\d+),(\d+),(\d+)', out)
+ m = re.search(br"COMPILERS:(\d+),(\d+),(\d+)", out)
if m:
- _compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))),
- bool(int(m.group(3))))
+ _compiler_status = (
+ bool(int(m.group(1))),
+ bool(int(m.group(2))),
+ bool(int(m.group(3))),
+ )
# Finished
return _compiler_status
@@ -231,6 +249,7 @@ def has_f77_compiler():
def has_f90_compiler():
return _get_compiler_status()[2]
+
#
# Building with distutils
#
@@ -256,38 +275,38 @@ def build_module_distutils(source_files, config_code, module_name, **kw):
# Build script
config_code = textwrap.dedent(config_code).replace("\n", "\n ")
- code = textwrap.dedent("""\
- import os
- import sys
- sys.path = %(syspath)s
-
- def configuration(parent_name='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('', parent_name, top_path)
- %(config_code)s
- return config
+ code = fr"""
+import os
+import sys
+sys.path = {repr(sys.path)}
- if __name__ == "__main__":
- from numpy.distutils.core import setup
- setup(configuration=configuration)
- """) % dict(config_code=config_code, syspath=repr(sys.path))
+def configuration(parent_name='',top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('', parent_name, top_path)
+ {config_code}
+ return config
- script = os.path.join(d, get_temp_module_name() + '.py')
+if __name__ == "__main__":
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
+ """
+ script = os.path.join(d, get_temp_module_name() + ".py")
dst_sources.append(script)
- with open(script, 'wb') as f:
+ with open(script, "wb") as f:
f.write(asbytes(code))
# Build
cwd = os.getcwd()
try:
os.chdir(d)
- cmd = [sys.executable, script, 'build_ext', '-i']
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ cmd = [sys.executable, script, "build_ext", "-i"]
+ p = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = p.communicate()
if p.returncode != 0:
- raise RuntimeError("Running distutils build failed: %s\n%s"
- % (cmd[4:], asstr(out)))
+ raise RuntimeError("Running distutils build failed: %s\n%s" %
+ (cmd[4:], asstr(out)))
finally:
os.chdir(cwd)
@@ -299,6 +318,7 @@ def build_module_distutils(source_files, config_code, module_name, **kw):
__import__(module_name)
return sys.modules[module_name]
+
#
# Unittest convenience
#
@@ -310,13 +330,13 @@ class F2PyTest:
options = []
skip = []
only = []
- suffix = '.f'
+ suffix = ".f"
module = None
module_name = None
def setup(self):
- if sys.platform == 'win32':
- pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)')
+ if sys.platform == "win32":
+ pytest.skip("Fails with MinGW64 Gfortran (Issue #9673)")
if self.module is not None:
return
@@ -334,9 +354,9 @@ class F2PyTest:
needs_f77 = False
needs_f90 = False
for fn in codes:
- if fn.endswith('.f'):
+ if str(fn).endswith(".f"):
needs_f77 = True
- elif fn.endswith('.f90'):
+ elif str(fn).endswith(".f90"):
needs_f90 = True
if needs_f77 and not has_f77_compiler():
pytest.skip("No Fortran 77 compiler available")
@@ -345,12 +365,41 @@ class F2PyTest:
# Build the module
if self.code is not None:
- self.module = build_code(self.code, options=self.options,
- skip=self.skip, only=self.only,
- suffix=self.suffix,
- module_name=self.module_name)
+ self.module = build_code(
+ self.code,
+ options=self.options,
+ skip=self.skip,
+ only=self.only,
+ suffix=self.suffix,
+ module_name=self.module_name,
+ )
if self.sources is not None:
- self.module = build_module(self.sources, options=self.options,
- skip=self.skip, only=self.only,
- module_name=self.module_name)
+ self.module = build_module(
+ self.sources,
+ options=self.options,
+ skip=self.skip,
+ only=self.only,
+ module_name=self.module_name,
+ )
+
+
+#
+# Helper functions
+#
+
+
+def getpath(*a):
+ # Package root
+ d = Path(numpy.f2py.__file__).parent.resolve()
+ return d.joinpath(*a)
+
+
+@contextlib.contextmanager
+def switchdir(path):
+ curpath = Path.cwd()
+ os.chdir(path)
+ try:
+ yield
+ finally:
+ os.chdir(curpath)
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 2a4402c89..b69226d48 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -227,13 +227,13 @@ class MGridClass(nd_grid):
See Also
--------
- numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
+ lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
ogrid : like mgrid but returns open (not fleshed out) mesh grids
r_ : array concatenator
Examples
--------
- >>> np.mgrid[0:5,0:5]
+ >>> np.mgrid[0:5, 0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 6c34e95fe..a839b892a 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -285,7 +285,8 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
----------
file : file-like object, string, or pathlib.Path
The file to read. File-like objects must support the
- ``seek()`` and ``read()`` methods. Pickled files require that the
+ ``seek()`` and ``read()`` methods and must always
+ be opened in binary mode. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
@@ -1806,22 +1807,21 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
byte_converters = False
# Initialize the filehandle, the LineSplitter and the NameValidator
+ if isinstance(fname, os_PathLike):
+ fname = os_fspath(fname)
+ if isinstance(fname, str):
+ fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
+ fid_ctx = contextlib.closing(fid)
+ else:
+ fid = fname
+ fid_ctx = contextlib.nullcontext(fid)
try:
- if isinstance(fname, os_PathLike):
- fname = os_fspath(fname)
- if isinstance(fname, str):
- fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
- fid_ctx = contextlib.closing(fid)
- else:
- fid = fname
- fid_ctx = contextlib.nullcontext(fid)
fhd = iter(fid)
except TypeError as e:
raise TypeError(
- f"fname must be a string, filehandle, list of strings,\n"
- f"or generator. Got {type(fname)} instead."
+ "fname must be a string, a filehandle, a sequence of strings,\n"
+ f"or an iterator of strings. Got {type(fname)} instead."
) from e
-
with fid_ctx:
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip, encoding=encoding)
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index a491f612e..ee4fbcd74 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -784,7 +784,8 @@ def repack_fields(a, align=False, recurse=False):
This method removes any overlaps and reorders the fields in memory so they
have increasing byte offsets, and adds or removes padding bytes depending
- on the `align` option, which behaves like the `align` option to `np.dtype`.
+ on the `align` option, which behaves like the `align` option to
+ `numpy.dtype`.
If `align=False`, this method produces a "packed" memory layout in which
each field starts at the byte the previous field ended, and any padding
@@ -917,11 +918,12 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
dtype : dtype, optional
The dtype of the output unstructured array.
copy : bool, optional
- See copy argument to `ndarray.astype`. If true, always return a copy.
- If false, and `dtype` requirements are satisfied, a view is returned.
+ See copy argument to `numpy.ndarray.astype`. If true, always return a
+ copy. If false, and `dtype` requirements are satisfied, a view is
+ returned.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
- See casting argument of `ndarray.astype`. Controls what kind of data
- casting may occur.
+ See casting argument of `numpy.ndarray.astype`. Controls what kind of
+ data casting may occur.
Returns
-------
@@ -1020,11 +1022,12 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False,
align : boolean, optional
Whether to create an aligned memory layout.
copy : bool, optional
- See copy argument to `ndarray.astype`. If true, always return a copy.
- If false, and `dtype` requirements are satisfied, a view is returned.
+ See copy argument to `numpy.ndarray.astype`. If true, always return a
+ copy. If false, and `dtype` requirements are satisfied, a view is
+ returned.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
- See casting argument of `ndarray.astype`. Controls what kind of data
- casting may occur.
+ See casting argument of `numpy.ndarray.astype`. Controls what kind of
+ data casting may occur.
Returns
-------
diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py
index 308f1328b..b7ef0d710 100644
--- a/numpy/lib/scimath.py
+++ b/numpy/lib/scimath.py
@@ -234,6 +234,15 @@ def sqrt(x):
>>> np.emath.sqrt([-1,4])
array([0.+1.j, 2.+0.j])
+ Different results are expected because:
+ floating point 0.0 and -0.0 are distinct.
+
+ For more control, explicitly use complex() as follows:
+
+ >>> np.emath.sqrt(complex(-4.0, 0.0))
+ 2j
+ >>> np.emath.sqrt(complex(-4.0, -0.0))
+ -2j
"""
x = _fix_real_lt_zero(x)
return nx.sqrt(x)
diff --git a/numpy/lib/scimath.pyi b/numpy/lib/scimath.pyi
index d0d4af41e..6f196497d 100644
--- a/numpy/lib/scimath.pyi
+++ b/numpy/lib/scimath.pyi
@@ -1,13 +1,94 @@
-from typing import List
+from typing import List, overload, Any
+
+from numpy import complexfloating
+
+from numpy.typing import (
+ NDArray,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ComplexLike_co,
+ _FloatLike_co,
+)
__all__: List[str]
-def sqrt(x): ...
-def log(x): ...
-def log10(x): ...
-def logn(n, x): ...
-def log2(x): ...
-def power(x, p): ...
-def arccos(x): ...
-def arcsin(x): ...
-def arctanh(x): ...
+@overload
+def sqrt(x: _FloatLike_co) -> Any: ...
+@overload
+def sqrt(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def log(x: _FloatLike_co) -> Any: ...
+@overload
+def log(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def log10(x: _FloatLike_co) -> Any: ...
+@overload
+def log10(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def log2(x: _FloatLike_co) -> Any: ...
+@overload
+def log2(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ...
+@overload
+def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ...
+@overload
+def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def arccos(x: _FloatLike_co) -> Any: ...
+@overload
+def arccos(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def arcsin(x: _FloatLike_co) -> Any: ...
+@overload
+def arcsin(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def arctanh(x: _FloatLike_co) -> Any: ...
+@overload
+def arctanh(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
diff --git a/numpy/lib/shape_base.pyi b/numpy/lib/shape_base.pyi
index 8aa283d02..17016c999 100644
--- a/numpy/lib/shape_base.pyi
+++ b/numpy/lib/shape_base.pyi
@@ -18,7 +18,7 @@ from numpy.typing import (
NDArray,
_ShapeLike,
_FiniteNestedSequence,
- _SupportsDType,
+ _SupportsArray,
_ArrayLikeBool_co,
_ArrayLikeUInt_co,
_ArrayLikeInt_co,
@@ -31,7 +31,7 @@ from numpy.core.shape_base import vstack
_SCT = TypeVar("_SCT", bound=generic)
-_ArrayLike = _FiniteNestedSequence[_SupportsDType[dtype[_SCT]]]
+_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]]
# The signatures of `__array_wrap__` and `__array_prepare__` are the same;
# give them unique names for the sake of clarity
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 5201b8e6e..c19660cf0 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -1425,6 +1425,10 @@ class TestFromTxt(LoadTxtBase):
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
+ def test_bad_fname(self):
+ with pytest.raises(TypeError, match='fname must be a string,'):
+ np.genfromtxt(123)
+
def test_commented_header(self):
# Check that names can be retrieved even if the line is commented out.
data = TextIO("""
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 56afd83ce..94d525f51 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -6,7 +6,7 @@ import warnings
__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
'isreal', 'nan_to_num', 'real', 'real_if_close',
- 'typename', 'asfarray', 'mintypecode', 'asscalar',
+ 'typename', 'asfarray', 'mintypecode',
'common_type']
import numpy.core.numeric as _nx
@@ -276,22 +276,22 @@ def isreal(x):
>>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex)
>>> np.isreal(a)
array([False, True, True, True, True, False])
-
+
The function does not work on string arrays.
>>> a = np.array([2j, "a"], dtype="U")
>>> np.isreal(a) # Warns about non-elementwise comparison
False
-
+
Returns True for all elements in input array of ``dtype=object`` even if
any of the elements is complex.
>>> a = np.array([1, "2", 3+4j], dtype=object)
>>> np.isreal(a)
array([ True, True, True])
-
+
isreal should not be used with object arrays
-
+
>>> a = np.array([1+2j, 2+1j], dtype=object)
>>> np.isreal(a)
array([ True, True])
@@ -405,14 +405,14 @@ def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None):
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
"""
Replace NaN with zero and infinity with large finite numbers (default
- behaviour) or with the numbers defined by the user using the `nan`,
+ behaviour) or with the numbers defined by the user using the `nan`,
`posinf` and/or `neginf` keywords.
If `x` is inexact, NaN is replaced by zero or by the user defined value in
- `nan` keyword, infinity is replaced by the largest finite floating point
- values representable by ``x.dtype`` or by the user defined value in
- `posinf` keyword and -infinity is replaced by the most negative finite
- floating point values representable by ``x.dtype`` or by the user defined
+ `nan` keyword, infinity is replaced by the largest finite floating point
+ values representable by ``x.dtype`` or by the user defined value in
+ `posinf` keyword and -infinity is replaced by the most negative finite
+ floating point values representable by ``x.dtype`` or by the user defined
value in `neginf` keyword.
For complex dtypes, the above is applied to each of the real and
@@ -429,27 +429,27 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
in-place (False). The in-place operation only occurs if
casting to an array does not require a copy.
Default is True.
-
+
.. versionadded:: 1.13
nan : int, float, optional
- Value to be used to fill NaN values. If no value is passed
+ Value to be used to fill NaN values. If no value is passed
then NaN values will be replaced with 0.0.
-
+
.. versionadded:: 1.17
posinf : int, float, optional
- Value to be used to fill positive infinity values. If no value is
+ Value to be used to fill positive infinity values. If no value is
passed then positive infinity values will be replaced with a very
large number.
-
+
.. versionadded:: 1.17
neginf : int, float, optional
- Value to be used to fill negative infinity values. If no value is
+ Value to be used to fill negative infinity values. If no value is
passed then negative infinity values will be replaced with a very
small (or negative) number.
-
+
.. versionadded:: 1.17
-
+
Returns
-------
@@ -483,7 +483,7 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary
-1.28000000e+002, 1.28000000e+002])
>>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
- array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03,
+ array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03,
-1.2800000e+02, 1.2800000e+02])
>>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary
@@ -529,7 +529,7 @@ def _real_if_close_dispatcher(a, tol=None):
@array_function_dispatch(_real_if_close_dispatcher)
def real_if_close(a, tol=100):
"""
- If input is complex with all imaginary parts close to zero, return
+ If input is complex with all imaginary parts close to zero, return
real parts.
"Close to zero" is defined as `tol` * (machine epsilon of the type for
@@ -583,40 +583,6 @@ def real_if_close(a, tol=100):
return a
-def _asscalar_dispatcher(a):
- # 2018-10-10, 1.16
- warnings.warn('np.asscalar(a) is deprecated since NumPy v1.16, use '
- 'a.item() instead', DeprecationWarning, stacklevel=3)
- return (a,)
-
-
-@array_function_dispatch(_asscalar_dispatcher)
-def asscalar(a):
- """
- Convert an array of size 1 to its scalar equivalent.
-
- .. deprecated:: 1.16
-
- Deprecated, use `numpy.ndarray.item()` instead.
-
- Parameters
- ----------
- a : ndarray
- Input array of size 1.
-
- Returns
- -------
- out : scalar
- Scalar representation of `a`. The output data type is the same type
- returned by the input's `item` method.
-
- Examples
- --------
- >>> np.asscalar(np.array([24]))
- 24
- """
- return a.item()
-
#-----------------------------------------------------------------------------
_namefromtype = {'S1': 'character',
diff --git a/numpy/lib/type_check.pyi b/numpy/lib/type_check.pyi
index 0a55dbf21..510f36cd7 100644
--- a/numpy/lib/type_check.pyi
+++ b/numpy/lib/type_check.pyi
@@ -151,9 +151,6 @@ def real_if_close(
tol: float = ...,
) -> NDArray[Any]: ...
-# NOTE: deprecated
-# def asscalar(a): ...
-
@overload
def typename(char: L['S1']) -> L['character']: ...
@overload
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index 1df2ab09b..c74ee127d 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -429,7 +429,7 @@ def _makenamedict(module='numpy'):
return thedict, dictlist
-def _info(obj, output=sys.stdout):
+def _info(obj, output=None):
"""Provide information about ndarray obj.
Parameters
@@ -455,6 +455,9 @@ def _info(obj, output=sys.stdout):
strides = obj.strides
endian = obj.dtype.byteorder
+ if output is None:
+ output = sys.stdout
+
print("class: ", nm, file=output)
print("shape: ", obj.shape, file=output)
print("strides: ", strides, file=output)
@@ -481,7 +484,7 @@ def _info(obj, output=sys.stdout):
@set_module('numpy')
-def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
+def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
"""
Get help information for a function, class, or module.
@@ -496,7 +499,8 @@ def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
Printing width.
output : file like object, optional
File like object that the output is written to, default is
- ``stdout``. The object has to be opened in 'w' or 'a' mode.
+ ``None``, in which case ``sys.stdout`` will be used.
+ The object has to be opened in 'w' or 'a' mode.
toplevel : str, optional
Start search at this level.
@@ -541,6 +545,9 @@ def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
elif hasattr(object, '_ppimport_attr'):
object = object._ppimport_attr
+ if output is None:
+ output = sys.stdout
+
if object is None:
info(info)
elif isinstance(object, ndarray):
diff --git a/numpy/linalg/tests/test_build.py b/numpy/linalg/tests/test_build.py
deleted file mode 100644
index 868341ff2..000000000
--- a/numpy/linalg/tests/test_build.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from subprocess import PIPE, Popen
-import sys
-import re
-import pytest
-
-from numpy.linalg import lapack_lite
-from numpy.testing import assert_
-
-
-class FindDependenciesLdd:
-
- def __init__(self):
- self.cmd = ['ldd']
-
- try:
- p = Popen(self.cmd, stdout=PIPE, stderr=PIPE)
- stdout, stderr = p.communicate()
- except OSError as e:
- raise RuntimeError(f'command {self.cmd} cannot be run') from e
-
- def get_dependencies(self, lfile):
- p = Popen(self.cmd + [lfile], stdout=PIPE, stderr=PIPE)
- stdout, stderr = p.communicate()
- if not (p.returncode == 0):
- raise RuntimeError(f'failed dependencies check for {lfile}')
-
- return stdout
-
- def grep_dependencies(self, lfile, deps):
- stdout = self.get_dependencies(lfile)
-
- rdeps = dict([(dep, re.compile(dep)) for dep in deps])
- founds = []
- for l in stdout.splitlines():
- for k, v in rdeps.items():
- if v.search(l):
- founds.append(k)
-
- return founds
-
-
-class TestF77Mismatch:
-
- @pytest.mark.skipif(not(sys.platform[:5] == 'linux'),
- reason="no fortran compiler on non-Linux platform")
- def test_lapack(self):
- f = FindDependenciesLdd()
- deps = f.grep_dependencies(lapack_lite.__file__,
- [b'libg2c', b'libgfortran'])
- assert_(len(deps) <= 1,
- """Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to
-cause random crashes and wrong results. See numpy INSTALL.txt for more
-information.""")
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 491c2c605..12836967c 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -5666,9 +5666,12 @@ class MaskedArray(ndarray):
Parameters
----------
- axis : {None, int}, optional
+ axis : None or int or tuple of ints, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
+ .. versionadded:: 1.7.0
+ If this is a tuple of ints, the minimum is selected over multiple
+ axes, instead of a single axis or all the axes as before.
out : array_like, optional
Alternative output array in which to place the result. Must be of
the same shape and buffer length as the expected output.
@@ -5800,9 +5803,12 @@ class MaskedArray(ndarray):
Parameters
----------
- axis : {None, int}, optional
+ axis : None or int or tuple of ints, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
+ .. versionadded:: 1.7.0
+ If this is a tuple of ints, the maximum is selected over multiple
+ axes, instead of a single axis or all the axes as before.
out : array_like, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
diff --git a/numpy/random/_examples/cython/setup.py b/numpy/random/_examples/cython/setup.py
index 7e0dd3e05..f41150fdb 100644
--- a/numpy/random/_examples/cython/setup.py
+++ b/numpy/random/_examples/cython/setup.py
@@ -4,6 +4,7 @@ Build the Cython demonstrations of low-level access to NumPy random
Usage: python setup.py build_ext -i
"""
+import setuptools # triggers monkeypatching distutils
from distutils.core import setup
from os.path import dirname, join, abspath
diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx
index 16a377cc6..e9a703e2f 100644
--- a/numpy/random/_mt19937.pyx
+++ b/numpy/random/_mt19937.pyx
@@ -109,7 +109,7 @@ cdef class MT19937(BitGenerator):
**Compatibility Guarantee**
- ``MT19937`` makes a guarantee that a fixed seed and will always produce
+ ``MT19937`` makes a guarantee that a fixed seed will always produce
the same random integer stream.
References
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index 3e13503d0..ce09a041c 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -4234,18 +4234,21 @@ cdef class RandomState:
ValueError: pvals < 0, pvals > 1 or pvals contains NaNs
"""
- cdef np.npy_intp d, i, sz, offset
+ cdef np.npy_intp d, i, sz, offset, niter
cdef np.ndarray parr, mnarr
cdef double *pix
cdef long *mnix
cdef long ni
- d = len(pvals)
parr = <np.ndarray>np.PyArray_FROMANY(
- pvals, np.NPY_DOUBLE, 1, 1, np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
+ pvals, np.NPY_DOUBLE, 0, 1, np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
+ if np.PyArray_NDIM(parr) == 0:
+ raise TypeError("pvals must be a 1-d sequence")
+ d = np.PyArray_SIZE(parr)
pix = <double*>np.PyArray_DATA(parr)
check_array_constraint(parr, 'pvals', CONS_BOUNDED_0_1)
- if kahan_sum(pix, d-1) > (1.0 + 1e-12):
+ # Only check if pvals is non-empty due to the lack of checks in kahan_sum
+ if d and kahan_sum(pix, d-1) > (1.0 + 1e-12):
# When floating, but not float dtype, and close, improve the error
# 1.0001 works for float16 and float32
if (isinstance(pvals, np.ndarray)
@@ -4260,7 +4263,6 @@ cdef class RandomState:
else:
msg = "sum(pvals[:-1]) > 1.0"
raise ValueError(msg)
-
if size is None:
shape = (d,)
else:
@@ -4268,7 +4270,6 @@ cdef class RandomState:
shape = (operator.index(size), d)
except:
shape = tuple(size) + (d,)
-
multin = np.zeros(shape, dtype=int)
mnarr = <np.ndarray>multin
mnix = <long*>np.PyArray_DATA(mnarr)
@@ -4276,8 +4277,10 @@ cdef class RandomState:
ni = n
check_constraint(ni, 'n', CONS_NON_NEGATIVE)
offset = 0
+ # gh-20483: Avoids divide by 0
+ niter = sz // d if d else 0
with self.lock, nogil:
- for i in range(sz // d):
+ for i in range(niter):
legacy_random_multinomial(&self._bitgen, ni, &mnix[offset], pix, d, &self._binomial)
offset += d
diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py
index 99a819efb..d362092b5 100644
--- a/numpy/random/tests/test_extending.py
+++ b/numpy/random/tests/test_extending.py
@@ -5,6 +5,7 @@ import subprocess
import sys
import warnings
import numpy as np
+from numpy.distutils.misc_util import exec_mod_from_location
try:
import cffi
@@ -75,10 +76,9 @@ def test_cython(tmp_path):
assert so1 is not None
assert so2 is not None
# import the so's without adding the directory to sys.path
- from importlib.machinery import ExtensionFileLoader
- extending = ExtensionFileLoader('extending', so1).load_module()
- extending_distributions = ExtensionFileLoader('extending_distributions', so2).load_module()
-
+ exec_mod_from_location('extending', so1)
+ extending_distributions = exec_mod_from_location(
+ 'extending_distributions', so2)
# actually test the cython c-extension
from numpy.random import PCG64
values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd')
diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py
index 595fb5fd3..7ad19ab55 100644
--- a/numpy/random/tests/test_randomstate_regression.py
+++ b/numpy/random/tests/test_randomstate_regression.py
@@ -201,3 +201,16 @@ class TestRegression:
[3, 4, 2, 3, 3, 1, 5, 3, 1, 3]])
assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)),
expected)
+
+
+def test_multinomial_empty():
+ # gh-20483
+ # Ensure that empty p-vals are correctly handled
+ assert random.multinomial(10, []).shape == (0,)
+ assert random.multinomial(3, [], size=(7, 5, 3)).shape == (7, 5, 3, 0)
+
+
+def test_multinomial_1d_pval():
+ # gh-20483
+ with pytest.raises(TypeError, match="pvals must be a 1-d"):
+ random.multinomial(10, 0.3)
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 4c6b64bc9..0eb945d15 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -1228,13 +1228,13 @@ def rundocs(filename=None, raise_on_error=True):
>>> np.lib.test(doctests=True) # doctest: +SKIP
"""
- from numpy.compat import npy_load_module
+ from numpy.distutils.misc_util import exec_mod_from_location
import doctest
if filename is None:
f = sys._getframe(1)
filename = f.f_globals['__file__']
name = os.path.splitext(os.path.basename(filename))[0]
- m = npy_load_module(name, filename)
+ m = exec_mod_from_location(name, filename)
tests = doctest.DocTestFinder().find(m)
runner = doctest.DocTestRunner(verbose=False)
diff --git a/numpy/typing/tests/data/fail/array_constructors.pyi b/numpy/typing/tests/data/fail/array_constructors.pyi
index 4f0a60b5b..065b7d8a0 100644
--- a/numpy/typing/tests/data/fail/array_constructors.pyi
+++ b/numpy/typing/tests/data/fail/array_constructors.pyi
@@ -21,10 +21,10 @@ np.linspace(0, 2, retstep=b'False') # E: No overload variant
np.linspace(0, 2, dtype=0) # E: No overload variant
np.linspace(0, 2, axis=None) # E: No overload variant
-np.logspace(None, 'bob') # E: Argument 1
-np.logspace(0, 2, base=None) # E: Argument "base"
+np.logspace(None, 'bob') # E: No overload variant
+np.logspace(0, 2, base=None) # E: No overload variant
-np.geomspace(None, 'bob') # E: Argument 1
+np.geomspace(None, 'bob') # E: No overload variant
np.stack(generator) # E: No overload variant
np.hstack({1, 2}) # E: No overload variant
diff --git a/numpy/typing/tests/data/fail/shape_base.pyi b/numpy/typing/tests/data/fail/shape_base.pyi
new file mode 100644
index 000000000..e709741b7
--- /dev/null
+++ b/numpy/typing/tests/data/fail/shape_base.pyi
@@ -0,0 +1,8 @@
+import numpy as np
+
+class DTypeLike:
+ dtype: np.dtype[np.int_]
+
+dtype_like: DTypeLike
+
+np.expand_dims(dtype_like, (5, 10)) # E: No overload variant
diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi
index 233988e63..ba5710e0f 100644
--- a/numpy/typing/tests/data/reveal/array_constructors.pyi
+++ b/numpy/typing/tests/data/reveal/array_constructors.pyi
@@ -114,10 +114,24 @@ reveal_type(np.require(B, requirements="W")) # E: SubClass[{float64}]
reveal_type(np.require(B, requirements="A")) # E: SubClass[{float64}]
reveal_type(np.require(C)) # E: ndarray[Any, Any]
-reveal_type(np.linspace(0, 10)) # E: ndarray[Any, Any]
-reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[ndarray[Any, Any], Any]
-reveal_type(np.logspace(0, 10)) # E: ndarray[Any, Any]
-reveal_type(np.geomspace(1, 10)) # E: ndarray[Any, Any]
+reveal_type(np.linspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.linspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.linspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.linspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], floating[Any]]
+reveal_type(np.linspace(0j, 10, retstep=True)) # E: Tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], complexfloating[Any, Any]]
+reveal_type(np.linspace(0, 10, retstep=True, dtype=np.int64)) # E: Tuple[ndarray[Any, dtype[{int64}]], {int64}]
+reveal_type(np.linspace(0j, 10, retstep=True, dtype=int)) # E: Tuple[ndarray[Any, dtype[Any]], Any]
+
+reveal_type(np.logspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.logspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.logspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.logspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.geomspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.geomspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.geomspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.geomspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]]
reveal_type(np.zeros_like(A)) # E: ndarray[Any, dtype[{float64}]]
reveal_type(np.zeros_like(C)) # E: ndarray[Any, dtype[Any]]
diff --git a/numpy/typing/tests/data/reveal/emath.pyi b/numpy/typing/tests/data/reveal/emath.pyi
new file mode 100644
index 000000000..9ab2d72d2
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/emath.pyi
@@ -0,0 +1,52 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+f8: np.float64
+c16: np.complex128
+
+reveal_type(np.emath.sqrt(f8)) # E: Any
+reveal_type(np.emath.sqrt(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.sqrt(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.sqrt(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.log(f8)) # E: Any
+reveal_type(np.emath.log(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.log(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.log(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.log10(f8)) # E: Any
+reveal_type(np.emath.log10(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.log10(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.log10(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.log2(f8)) # E: Any
+reveal_type(np.emath.log2(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.log2(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.log2(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.logn(f8, 2)) # E: Any
+reveal_type(np.emath.logn(AR_f8, 4)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.logn(f8, 1j)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.logn(AR_c16, 1.5)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.power(f8, 2)) # E: Any
+reveal_type(np.emath.power(AR_f8, 4)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.power(f8, 2j)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.power(AR_c16, 1.5)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.arccos(f8)) # E: Any
+reveal_type(np.emath.arccos(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.arccos(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.arccos(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.arcsin(f8)) # E: Any
+reveal_type(np.emath.arcsin(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.arcsin(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.arcsin(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.arctanh(f8)) # E: Any
+reveal_type(np.emath.arctanh(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.arctanh(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.arctanh(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi
index cd1c3136f..f91d6351b 100644
--- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi
+++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi
@@ -24,6 +24,9 @@ AR_V: NDArray[np.void]
ctypes_obj = AR_f8.ctypes
+reveal_type(AR_f8.__dlpack__()) # E: Any
+reveal_type(AR_f8.__dlpack_device__()) # E: Tuple[int, Literal[0]]
+
reveal_type(ctypes_obj.data) # E: int
reveal_type(ctypes_obj.shape) # E: ctypes.Array[{c_intp}]
reveal_type(ctypes_obj.strides) # E: ctypes.Array[{c_intp}]
diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py
index fe58a8f4c..bb3914434 100644
--- a/numpy/typing/tests/test_typing.py
+++ b/numpy/typing/tests/test_typing.py
@@ -136,7 +136,7 @@ def test_fail(path: str) -> None:
output_mypy = OUTPUT_MYPY
assert path in output_mypy
for error_line in output_mypy[path]:
- error_line = _strip_filename(error_line)
+ error_line = _strip_filename(error_line).split("\n", 1)[0]
match = re.match(
r"(?P<lineno>\d+): (error|note): .+$",
error_line,
@@ -368,6 +368,7 @@ Expression: {}
Expected reveal: {!r}
Observed reveal: {!r}
"""
+_STRIP_PATTERN = re.compile(r"(\w+\.)+(\w+)")
def _test_reveal(
@@ -378,9 +379,8 @@ def _test_reveal(
lineno: int,
) -> None:
"""Error-reporting helper function for `test_reveal`."""
- strip_pattern = re.compile(r"(\w+\.)+(\w+)")
- stripped_reveal = strip_pattern.sub(strip_func, reveal)
- stripped_expected_reveal = strip_pattern.sub(strip_func, expected_reveal)
+ stripped_reveal = _STRIP_PATTERN.sub(strip_func, reveal)
+ stripped_expected_reveal = _STRIP_PATTERN.sub(strip_func, expected_reveal)
if stripped_reveal not in stripped_expected_reveal:
raise AssertionError(
_REVEAL_MSG.format(lineno,