author    Qiyu8 <fangchunlin@huawei.com>  2020-09-10 19:27:29 +0800
committer Qiyu8 <fangchunlin@huawei.com>  2020-09-10 19:27:29 +0800
commit    a5a4fe74046de480bc8f0a233b3714fa30d910c1 (patch)
tree      b3ceb6aecd0fa5e982db91aa649744a64902e07e
parent    974603643e54a276e7485e9e1fd5080780713035 (diff)
parent    74712a53df240f1661fbced15ae984888fd9afa6 (diff)
download  numpy-a5a4fe74046de480bc8f0a233b3714fa30d910c1.tar.gz
Merge branch 'master' of github.com:numpy/numpy into usimd-compiled
-rw-r--r--  .circleci/config.yml | 12
-rw-r--r--  .gitmodules | 3
-rw-r--r--  .travis.yml | 54
-rw-r--r--  LICENSES_bundled.txt | 8
-rw-r--r--  doc/neps/conf.py | 78
-rw-r--r--  doc/neps/content.rst | 25
-rw-r--r--  doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst | 307
-rw-r--r--  doc/neps/nep-0041-improved-dtype-support.rst | 52
-rw-r--r--  doc/neps/nep-0042-new-dtypes.rst | 8
-rw-r--r--  doc/release/upcoming_changes/16134.compatibility.rst | 8
-rw-r--r--  doc/release/upcoming_changes/16134.improvement.rst | 6
-rw-r--r--  doc/release/upcoming_changes/16675.improvement.rst | 4
-rw-r--r--  doc/release/upcoming_changes/16841.change.rst | 19
-rw-r--r--  doc/release/upcoming_changes/16911.deprecation.rst | 7
-rw-r--r--  doc/release/upcoming_changes/17029.compatibility.rst | 14
-rw-r--r--  doc/release/upcoming_changes/17067.expired.rst | 8
-rw-r--r--  doc/release/upcoming_changes/17068.compatibility.rst | 4
-rw-r--r--  doc/release/upcoming_changes/17116.expired.rst | 2
-rw-r--r--  doc/release/upcoming_changes/17233.deprecation.rst | 4
-rw-r--r--  doc/release/upcoming_changes/17241.compatibility.rst | 6
m---------  doc/scipy-sphinx-theme | 0
-rw-r--r--  doc/source/_templates/defindex.html | 35
-rw-r--r--  doc/source/_templates/indexcontent.html | 13
-rw-r--r--  doc/source/_templates/layout.html | 56
-rw-r--r--  doc/source/conf.py | 37
-rw-r--r--  doc/source/contents.rst | 25
-rw-r--r--  doc/source/dev/index.rst | 16
-rw-r--r--  doc/source/glossary.rst | 388
-rw-r--r--  doc/source/reference/arrays.datetime.rst | 2
-rw-r--r--  doc/source/reference/arrays.dtypes.rst | 7
-rw-r--r--  doc/source/reference/arrays.interface.rst | 59
-rw-r--r--  doc/source/reference/c-api/array.rst | 2
-rw-r--r--  doc/source/reference/c-api/config.rst | 2
-rw-r--r--  doc/source/reference/c-api/dtype.rst | 14
-rw-r--r--  doc/source/reference/c-api/types-and-structures.rst | 4
-rw-r--r--  doc/source/reference/c-api/ufunc.rst | 2
-rw-r--r--  doc/source/reference/internals.rst | 158
-rw-r--r--  doc/source/reference/random/generator.rst | 94
-rw-r--r--  doc/source/reference/random/legacy.rst | 2
-rw-r--r--  doc/source/reference/routines.ctypeslib.rst | 1
-rw-r--r--  doc/source/reference/routines.financial.rst | 21
-rw-r--r--  doc/source/reference/routines.rst | 1
-rw-r--r--  doc/source/user/basics.broadcasting.rst | 176
-rw-r--r--  doc/source/user/basics.byteswapping.rst | 150
-rw-r--r--  doc/source/user/basics.creation.rst | 139
-rw-r--r--  doc/source/user/basics.dispatch.rst | 266
-rw-r--r--  doc/source/user/basics.indexing.rst | 452
-rw-r--r--  doc/source/user/basics.io.genfromtxt.rst | 26
-rw-r--r--  doc/source/user/basics.rec.rst | 643
-rw-r--r--  doc/source/user/basics.subclassing.rst | 749
-rw-r--r--  doc/source/user/basics.types.rst | 337
-rw-r--r--  doc/source/user/index.rst | 17
-rw-r--r--  doc/source/user/misc.rst | 222
-rw-r--r--  doc/source/user/numpy-for-matlab-users.rst | 68
-rw-r--r--  doc/source/user/tutorial-ma.rst | 30
-rw-r--r--  doc/source/user/tutorial-svd.rst | 33
-rw-r--r--  doc/source/user/tutorials_index.rst | 6
-rw-r--r--  doc/source/user/whatisnumpy.rst | 2
-rw-r--r--  doc_requirements.txt | 1
-rw-r--r--  numpy/__init__.cython-30.pxd | 4
-rw-r--r--  numpy/__init__.pxd | 4
-rw-r--r--  numpy/__init__.py | 26
-rw-r--r--  numpy/__init__.pyi | 605
-rw-r--r--  numpy/char.pyi | 53
-rw-r--r--  numpy/core/_add_newdocs.py | 172
-rw-r--r--  numpy/core/_asarray.py | 94
-rw-r--r--  numpy/core/_internal.py | 34
-rw-r--r--  numpy/core/arrayprint.py | 3
-rw-r--r--  numpy/core/code_generators/generate_umath.py | 2
-rw-r--r--  numpy/core/fromnumeric.py | 13
-rw-r--r--  numpy/core/function_base.py | 10
-rw-r--r--  numpy/core/function_base.pyi | 56
-rw-r--r--  numpy/core/include/numpy/arrayscalars.h | 3
-rw-r--r--  numpy/core/include/numpy/ndarraytypes.h | 4
-rw-r--r--  numpy/core/include/numpy/npy_cpu.h | 3
-rw-r--r--  numpy/core/multiarray.py | 14
-rw-r--r--  numpy/core/numeric.py | 76
-rw-r--r--  numpy/core/overrides.py | 21
-rw-r--r--  numpy/core/records.py | 21
-rw-r--r--  numpy/core/setup.py | 41
-rw-r--r--  numpy/core/shape_base.py | 3
-rw-r--r--  numpy/core/src/common/array_assign.c | 5
-rw-r--r--  numpy/core/src/common/lowlevel_strided_loops.h | 37
-rw-r--r--  numpy/core/src/common/npy_binsearch.h.src | 8
-rw-r--r--  numpy/core/src/common/npy_cblas.h | 35
-rw-r--r--  numpy/core/src/common/npy_cpu_dispatch.h | 33
-rw-r--r--  numpy/core/src/common/npy_cpu_features.c.src | 76
-rw-r--r--  numpy/core/src/common/npy_partition.h.src | 4
-rw-r--r--  numpy/core/src/common/npy_sort.h.src | 52
-rw-r--r--  numpy/core/src/common/simd/avx2/arithmetic.h | 44
-rw-r--r--  numpy/core/src/common/simd/avx512/arithmetic.h | 16
-rw-r--r--  numpy/core/src/common/simd/neon/arithmetic.h | 43
-rw-r--r--  numpy/core/src/common/simd/sse/arithmetic.h | 57
-rw-r--r--  numpy/core/src/common/simd/vsx/arithmetic.h | 16
-rw-r--r--  numpy/core/src/multiarray/_multiarray_tests.c.src | 4
-rw-r--r--  numpy/core/src/multiarray/alloc.c | 5
-rw-r--r--  numpy/core/src/multiarray/array_assign_array.c | 15
-rw-r--r--  numpy/core/src/multiarray/array_assign_scalar.c | 15
-rw-r--r--  numpy/core/src/multiarray/array_coercion.c | 8
-rw-r--r--  numpy/core/src/multiarray/arrayfunction_override.c | 170
-rw-r--r--  numpy/core/src/multiarray/arrayfunction_override.h | 4
-rw-r--r--  numpy/core/src/multiarray/arrayobject.c | 2
-rw-r--r--  numpy/core/src/multiarray/arraytypes.c.src | 22
-rw-r--r--  numpy/core/src/multiarray/buffer.c | 4
-rw-r--r--  numpy/core/src/multiarray/calculation.c | 4
-rw-r--r--  numpy/core/src/multiarray/common.c | 44
-rw-r--r--  numpy/core/src/multiarray/common.h | 38
-rw-r--r--  numpy/core/src/multiarray/compiled_base.c | 2
-rw-r--r--  numpy/core/src/multiarray/conversion_utils.c | 3
-rw-r--r--  numpy/core/src/multiarray/convert.c | 9
-rw-r--r--  numpy/core/src/multiarray/convert_datatype.c | 32
-rw-r--r--  numpy/core/src/multiarray/ctors.c | 156
-rw-r--r--  numpy/core/src/multiarray/datetime.c | 195
-rw-r--r--  numpy/core/src/multiarray/datetime_busdaycal.c | 2
-rw-r--r--  numpy/core/src/multiarray/descriptor.c | 58
-rw-r--r--  numpy/core/src/multiarray/dragon4.c | 4
-rw-r--r--  numpy/core/src/multiarray/dtype_transfer.c | 366
-rw-r--r--  numpy/core/src/multiarray/einsum.c.src | 1895
-rw-r--r--  numpy/core/src/multiarray/einsum_debug.h | 28
-rw-r--r--  numpy/core/src/multiarray/einsum_sumprod.c.src | 1897
-rw-r--r--  numpy/core/src/multiarray/einsum_sumprod.h | 12
-rw-r--r--  numpy/core/src/multiarray/flagsobject.c | 4
-rw-r--r--  numpy/core/src/multiarray/getset.c | 25
-rw-r--r--  numpy/core/src/multiarray/iterators.c | 8
-rw-r--r--  numpy/core/src/multiarray/lowlevel_strided_loops.c.src | 173
-rw-r--r--  numpy/core/src/multiarray/mapping.c | 57
-rw-r--r--  numpy/core/src/multiarray/methods.c | 55
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c | 315
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.h | 2
-rw-r--r--  numpy/core/src/multiarray/nditer_api.c | 182
-rw-r--r--  numpy/core/src/multiarray/nditer_constr.c | 59
-rw-r--r--  numpy/core/src/multiarray/nditer_impl.h | 7
-rw-r--r--  numpy/core/src/multiarray/nditer_pywrap.c | 48
-rw-r--r--  numpy/core/src/multiarray/nditer_templ.c.src | 20
-rw-r--r--  numpy/core/src/multiarray/number.c | 8
-rw-r--r--  numpy/core/src/multiarray/refcount.c | 16
-rw-r--r--  numpy/core/src/multiarray/scalarapi.c | 10
-rw-r--r--  numpy/core/src/multiarray/scalartypes.c.src | 76
-rw-r--r--  numpy/core/src/multiarray/shape.c | 16
-rw-r--r--  numpy/core/src/multiarray/strfuncs.c | 186
-rw-r--r--  numpy/core/src/multiarray/temp_elide.c | 4
-rw-r--r--  numpy/core/src/multiarray/usertypes.c | 4
-rw-r--r--  numpy/core/src/npymath/npy_math_private.h | 1
-rw-r--r--  numpy/core/src/npysort/binsearch.c.src | 8
-rw-r--r--  numpy/core/src/npysort/heapsort.c.src | 12
-rw-r--r--  numpy/core/src/npysort/mergesort.c.src | 12
-rw-r--r--  numpy/core/src/npysort/quicksort.c.src | 12
-rw-r--r--  numpy/core/src/npysort/radixsort.c.src | 8
-rw-r--r--  numpy/core/src/npysort/selection.c.src | 2
-rw-r--r--  numpy/core/src/npysort/timsort.c.src | 16
-rw-r--r--  numpy/core/src/umath/_rational_tests.c.src | 15
-rw-r--r--  numpy/core/src/umath/_umath_tests.c.src | 16
-rw-r--r--  numpy/core/src/umath/extobj.c | 8
-rw-r--r--  numpy/core/src/umath/override.c | 2
-rw-r--r--  numpy/core/src/umath/reduction.c | 8
-rw-r--r--  numpy/core/src/umath/scalarmath.c.src | 173
-rw-r--r--  numpy/core/src/umath/ufunc_object.c | 91
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.c | 45
-rw-r--r--  numpy/core/src/umath/umathmodule.c | 43
-rw-r--r--  numpy/core/tests/test_array_coercion.py | 22
-rw-r--r--  numpy/core/tests/test_datetime.py | 17
-rw-r--r--  numpy/core/tests/test_deprecations.py | 36
-rw-r--r--  numpy/core/tests/test_function_base.py | 5
-rw-r--r--  numpy/core/tests/test_multiarray.py | 22
-rw-r--r--  numpy/core/tests/test_nditer.py | 65
-rw-r--r--  numpy/core/tests/test_numeric.py | 129
-rw-r--r--  numpy/core/tests/test_overrides.py | 167
-rw-r--r--  numpy/core/tests/test_records.py | 7
-rw-r--r--  numpy/core/tests/test_regression.py | 38
-rw-r--r--  numpy/core/tests/test_shape_base.py | 39
-rw-r--r--  numpy/core/tests/test_ufunc.py | 60
-rw-r--r--  numpy/core/tests/test_umath.py | 2
-rw-r--r--  numpy/ctypeslib.py | 8
-rw-r--r--  numpy/ctypeslib.pyi | 7
-rw-r--r--  numpy/distutils/__init__.pyi | 4
-rw-r--r--  numpy/distutils/ccompiler_opt.py | 92
-rw-r--r--  numpy/distutils/checks/extra_avx512bw_mask.c | 18
-rw-r--r--  numpy/distutils/checks/extra_avx512f_reduce.c | 41
-rw-r--r--  numpy/distutils/command/build_ext.py | 2
-rw-r--r--  numpy/distutils/fcompiler/__init__.py | 59
-rw-r--r--  numpy/distutils/fcompiler/gnu.py | 15
-rw-r--r--  numpy/distutils/misc_util.py | 19
-rw-r--r--  numpy/distutils/system_info.py | 19
-rw-r--r--  numpy/distutils/tests/test_ccompiler_opt_conf.py | 51
-rw-r--r--  numpy/distutils/unixccompiler.py | 3
-rw-r--r--  numpy/doc/basics.py | 341
-rw-r--r--  numpy/doc/broadcasting.py | 180
-rw-r--r--  numpy/doc/byteswapping.py | 155
-rw-r--r--  numpy/doc/creation.py | 143
-rw-r--r--  numpy/doc/dispatch.py | 271
-rw-r--r--  numpy/doc/glossary.py | 475
-rw-r--r--  numpy/doc/indexing.py | 456
-rw-r--r--  numpy/doc/internals.py | 162
-rw-r--r--  numpy/doc/misc.py | 226
-rw-r--r--  numpy/doc/structured_arrays.py | 646
-rw-r--r--  numpy/doc/subclassing.py | 752
-rw-r--r--  numpy/emath.pyi | 11
-rw-r--r--  numpy/f2py/__init__.pyi | 5
-rw-r--r--  numpy/f2py/cfuncs.py | 4
-rwxr-xr-x  numpy/f2py/rules.py | 2
-rw-r--r--  numpy/f2py/src/test/foomodule.c | 2
-rw-r--r--  numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c | 29
-rw-r--r--  numpy/fft/__init__.pyi | 20
-rw-r--r--  numpy/fft/helper.py | 3
-rw-r--r--  numpy/fft/tests/test_helper.py | 5
-rw-r--r--  numpy/lib/__init__.py | 2
-rw-r--r--  numpy/lib/__init__.pyi | 177
-rw-r--r--  numpy/lib/_iotools.py | 2
-rw-r--r--  numpy/lib/arraysetops.py | 6
-rw-r--r--  numpy/lib/financial.py | 967
-rw-r--r--  numpy/lib/function_base.py | 93
-rw-r--r--  numpy/lib/index_tricks.py | 9
-rw-r--r--  numpy/lib/npyio.py | 115
-rw-r--r--  numpy/lib/polynomial.py | 2
-rw-r--r--  numpy/lib/shape_base.py | 2
-rw-r--r--  numpy/lib/tests/test_financial.py | 380
-rw-r--r--  numpy/lib/tests/test_financial_expired.py | 13
-rw-r--r--  numpy/lib/tests/test_function_base.py | 39
-rw-r--r--  numpy/lib/tests/test_io.py | 2
-rw-r--r--  numpy/lib/tests/test_polynomial.py | 9
-rw-r--r--  numpy/lib/twodim_base.py | 37
-rw-r--r--  numpy/linalg/__init__.pyi | 23
-rw-r--r--  numpy/linalg/umath_linalg.c.src | 2
-rw-r--r--  numpy/ma/__init__.pyi | 225
-rw-r--r--  numpy/ma/tests/test_core.py | 2
-rw-r--r--  numpy/ma/timer_comparison.py | 4
-rw-r--r--  numpy/matrixlib/__init__.pyi | 6
-rw-r--r--  numpy/polynomial/__init__.pyi | 9
-rw-r--r--  numpy/random/__init__.pyi | 61
-rw-r--r--  numpy/random/_generator.pyx | 306
-rw-r--r--  numpy/random/mtrand.pyx | 84
-rw-r--r--  numpy/random/tests/test_generator_mt19937.py | 50
-rw-r--r--  numpy/rec.pyi | 5
-rw-r--r--  numpy/testing/__init__.pyi | 44
-rw-r--r--  numpy/testing/tests/test_utils.py | 2
-rw-r--r--  numpy/tests/test_public_api.py | 24
-rw-r--r--  numpy/typing/__init__.py | 5
-rw-r--r--  numpy/typing/setup.py (renamed from numpy/tests/setup.py) | 5
-rw-r--r--  numpy/typing/tests/__init__.py | 0
-rw-r--r--  numpy/typing/tests/data/fail/array_like.py (renamed from numpy/tests/typing/fail/array_like.py) | 0
-rw-r--r--  numpy/typing/tests/data/fail/dtype.py (renamed from numpy/tests/typing/fail/dtype.py) | 0
-rw-r--r--  numpy/typing/tests/data/fail/flatiter.py | 25
-rw-r--r--  numpy/typing/tests/data/fail/fromnumeric.py (renamed from numpy/tests/typing/fail/fromnumeric.py) | 28
-rw-r--r--  numpy/typing/tests/data/fail/linspace.py | 13
-rw-r--r--  numpy/typing/tests/data/fail/modules.py | 3
-rw-r--r--  numpy/typing/tests/data/fail/ndarray.py (renamed from numpy/tests/typing/fail/ndarray.py) | 0
-rw-r--r--  numpy/typing/tests/data/fail/numerictypes.py (renamed from numpy/tests/typing/fail/numerictypes.py) | 0
-rw-r--r--  numpy/typing/tests/data/fail/scalars.py (renamed from numpy/tests/typing/fail/scalars.py) | 11
-rw-r--r--  numpy/typing/tests/data/fail/simple.py (renamed from numpy/tests/typing/fail/simple.py) | 0
-rw-r--r--  numpy/typing/tests/data/fail/ufuncs.py (renamed from numpy/tests/typing/fail/ufuncs.py) | 0
-rw-r--r--  numpy/typing/tests/data/fail/warnings_and_errors.py (renamed from numpy/tests/typing/fail/warnings_and_errors.py) | 0
-rw-r--r--  numpy/typing/tests/data/mypy.ini (renamed from numpy/tests/typing/mypy.ini) | 0
-rw-r--r--  numpy/typing/tests/data/pass/array_like.py (renamed from numpy/tests/typing/pass/array_like.py) | 0
-rw-r--r--  numpy/typing/tests/data/pass/dtype.py (renamed from numpy/tests/typing/pass/dtype.py) | 0
-rw-r--r--  numpy/typing/tests/data/pass/flatiter.py | 14
-rw-r--r--  numpy/typing/tests/data/pass/fromnumeric.py (renamed from numpy/tests/typing/pass/fromnumeric.py) | 75
-rw-r--r--  numpy/typing/tests/data/pass/linspace.py | 22
-rw-r--r--  numpy/typing/tests/data/pass/literal.py | 43
-rw-r--r--  numpy/typing/tests/data/pass/ndarray_conversion.py (renamed from numpy/tests/typing/pass/ndarray_conversion.py) | 0
-rw-r--r--  numpy/typing/tests/data/pass/ndarray_shape_manipulation.py (renamed from numpy/tests/typing/pass/ndarray_shape_manipulation.py) | 0
-rw-r--r--  numpy/typing/tests/data/pass/numerictypes.py (renamed from numpy/tests/typing/pass/numerictypes.py) | 0
-rw-r--r--  numpy/typing/tests/data/pass/scalars.py (renamed from numpy/tests/typing/pass/scalars.py) | 37
-rw-r--r--  numpy/typing/tests/data/pass/simple.py (renamed from numpy/tests/typing/pass/simple.py) | 0
-rw-r--r--  numpy/typing/tests/data/pass/simple_py3.py (renamed from numpy/tests/typing/pass/simple_py3.py) | 0
-rw-r--r--  numpy/typing/tests/data/pass/ufuncs.py (renamed from numpy/tests/typing/pass/ufuncs.py) | 0
-rw-r--r--  numpy/typing/tests/data/pass/warnings_and_errors.py (renamed from numpy/tests/typing/pass/warnings_and_errors.py) | 0
-rw-r--r--  numpy/typing/tests/data/reveal/constants.py (renamed from numpy/tests/typing/reveal/constants.py) | 0
-rw-r--r--  numpy/typing/tests/data/reveal/flatiter.py | 14
-rw-r--r--  numpy/typing/tests/data/reveal/fromnumeric.py (renamed from numpy/tests/typing/reveal/fromnumeric.py) | 73
-rw-r--r--  numpy/typing/tests/data/reveal/linspace.py | 6
-rw-r--r--  numpy/typing/tests/data/reveal/modules.py | 20
-rw-r--r--  numpy/typing/tests/data/reveal/ndarray_conversion.py (renamed from numpy/tests/typing/reveal/ndarray_conversion.py) | 2
-rw-r--r--  numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py (renamed from numpy/tests/typing/reveal/ndarray_shape_manipulation.py) | 0
-rw-r--r--  numpy/typing/tests/data/reveal/numerictypes.py (renamed from numpy/tests/typing/reveal/numerictypes.py) | 0
-rw-r--r--  numpy/typing/tests/data/reveal/scalars.py (renamed from numpy/tests/typing/reveal/scalars.py) | 3
-rw-r--r--  numpy/typing/tests/data/reveal/warnings_and_errors.py (renamed from numpy/tests/typing/reveal/warnings_and_errors.py) | 0
-rw-r--r--  numpy/typing/tests/test_typing.py (renamed from numpy/tests/test_typing.py) | 18
-rw-r--r--  numpy/version.pyi | 7
-rwxr-xr-x  runtests.py | 40
-rwxr-xr-x  setup.py | 30
-rw-r--r--  test_requirements.txt | 4
-rwxr-xr-x  tools/functions_missing_types.py | 11
-rw-r--r--  tools/refguide_check.py | 16
-rwxr-xr-x  tools/travis-sorter.py | 287
284 files changed, 11863 insertions, 9800 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 2a986cba6..f4ffb5223 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -16,12 +16,11 @@ jobs:
- checkout
- run:
- name: install dependencies
+ name: create virtual environment, install dependencies
command: |
python3 -m venv venv
ln -s $(which python3) venv/bin/python3.6
. venv/bin/activate
- pip install cython sphinx==2.3.1 matplotlib ipython
sudo apt-get update
sudo apt-get install -y graphviz texlive-fonts-recommended texlive-latex-recommended texlive-latex-extra texlive-generic-extra latexmk texlive-xetex
@@ -30,10 +29,9 @@ jobs:
command: |
. venv/bin/activate
pip install --upgrade pip 'setuptools<49.2.0'
- pip install cython
+ pip install -r test_requirements.txt
pip install .
- pip install scipy
- pip install pandas
+ pip install -r doc_requirements.txt
- run:
name: create release notes
@@ -69,8 +67,8 @@ jobs:
path: doc/build/html/
- # - store_artifacts:
- # path: doc/neps/_build/html/
+ - store_artifacts:
+ path: doc/neps/_build/html/
# destination: neps
- add_ssh_keys:
diff --git a/.gitmodules b/.gitmodules
index 1b0706f65..b1e13c3bc 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,6 +1,3 @@
-[submodule "doc/scipy-sphinx-theme"]
- path = doc/scipy-sphinx-theme
- url = https://github.com/scipy/scipy-sphinx-theme.git
[submodule "doc/sphinxext"]
path = doc/sphinxext
url = https://github.com/numpy/numpydoc.git
diff --git a/.travis.yml b/.travis.yml
index c0a0cfae3..91c65e1aa 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -33,7 +33,34 @@ jobs:
python: 3.8
- stage: Comprehensive tests
- python: 3.6
+ python: 3.7
+ os: linux
+ arch: ppc64le
+ env:
+ # use OpenBLAS build, not system ATLAS
+ - DOWNLOAD_OPENBLAS=1
+ - ATLAS=None
+
+ - python: 3.7
+ os: linux
+ arch: s390x
+ env:
+ # use OpenBLAS build, not system ATLAS
+ - DOWNLOAD_OPENBLAS=1
+ - NPY_USE_BLAS_ILP64=1
+ - ATLAS=None
+
+ - python: 3.7
+ os: linux
+ arch: arm64
+ env:
+ # use OpenBLAS build, not system ATLAS
+ - DOWNLOAD_OPENBLAS=1
+ - ATLAS=None
+
+
+
+ - python: 3.6
- python: 3.7
- python: 3.9-dev
@@ -92,31 +119,6 @@ jobs:
- LAPACK=None
- ATLAS=None
- - python: 3.7
- os: linux
- arch: ppc64le
- env:
- # use OpenBLAS build, not system ATLAS
- - DOWNLOAD_OPENBLAS=1
- - ATLAS=None
-
- - python: 3.7
- os: linux
- arch: s390x
- env:
- # use OpenBLAS build, not system ATLAS
- - DOWNLOAD_OPENBLAS=1
- - NPY_USE_BLAS_ILP64=1
- - ATLAS=None
-
- - python: 3.7
- os: linux
- arch: arm64
- env:
- # use OpenBLAS build, not system ATLAS
- - DOWNLOAD_OPENBLAS=1
- - ATLAS=None
-
before_install:
diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt
index ea349c7ee..e9c66d1dc 100644
--- a/LICENSES_bundled.txt
+++ b/LICENSES_bundled.txt
@@ -3,22 +3,22 @@ compatibly licensed. We list these here.
Name: Numpydoc
Files: doc/sphinxext/numpydoc/*
-License: 2-clause BSD
+License: BSD-2-Clause
For details, see doc/sphinxext/LICENSE.txt
Name: scipy-sphinx-theme
Files: doc/scipy-sphinx-theme/*
-License: 3-clause BSD, PSF and Apache 2.0
+License: BSD-3-Clause AND PSF-2.0 AND Apache-2.0
For details, see doc/scipy-sphinx-theme/LICENSE.txt
Name: lapack-lite
Files: numpy/linalg/lapack_lite/*
-License: 3-clause BSD
+License: BSD-3-Clause
For details, see numpy/linalg/lapack_lite/LICENSE.txt
Name: tempita
Files: tools/npy_tempita/*
-License: BSD derived
+License: MIT
For details, see tools/npy_tempita/license.txt
Name: dragon4
diff --git a/doc/neps/conf.py b/doc/neps/conf.py
index 6837b12bd..f01ee8a51 100644
--- a/doc/neps/conf.py
+++ b/doc/neps/conf.py
@@ -45,7 +45,7 @@ templates_path = ['../source/_templates/']
source_suffix = '.rst'
# The master toctree document.
-master_doc = 'index'
+master_doc = 'content'
# General information about the project.
project = u'NumPy Enhancement Proposals'
@@ -82,69 +82,21 @@ todo_include_todos = False
## -- Options for HTML output ----------------------------------------------
#
-## The theme to use for HTML and HTML Help pages. See the documentation for
-## a list of builtin themes.
-##
-#html_theme = 'alabaster'
-#
-## Theme options are theme-specific and customize the look and feel of a theme
-## further. For a list of options available for each theme, see the
-## documentation.
-##
-## html_theme_options = {}
-#
-## Add any paths that contain custom static files (such as style sheets) here,
-## relative to this directory. They are copied after the builtin static files,
-## so a file named "default.css" will overwrite the builtin "default.css".
-#html_static_path = ['_static']
-#
-## Custom sidebar templates, must be a dictionary that maps document names
-## to template names.
-##
-## This is required for the alabaster theme
-## refs: https://alabaster.readthedocs.io/en/latest/installation.html#sidebars
-#html_sidebars = {
-# '**': [
-# 'relations.html', # needs 'show_related': True theme option to display
-# 'searchbox.html',
-# ]
-#}
-
-## -----------------------------------------------------------------------------
-# HTML output
-# -----------------------------------------------------------------------------
-themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
-if not os.path.isdir(themedir):
- raise RuntimeError("Get the scipy-sphinx-theme first, "
- "via git submodule init && git submodule update")
-
-html_theme = 'scipy'
-html_theme_path = [themedir]
-
-#if 'scipyorg' in tags:
-if True:
- # Build for the scipy.org website
- html_theme_options = {
- "edit_link": True,
- "sidebar": "right",
- "scipy_org_logo": True,
- "rootlinks": [("https://scipy.org/", "Scipy.org"),
- ("https://docs.scipy.org/", "Docs")]
- }
-else:
- # Default build
- html_theme_options = {
- "edit_link": False,
- "sidebar": "left",
- "scipy_org_logo": False,
- "rootlinks": []
- }
- html_sidebars = {'index': 'indexsidebar.html'}
-
-#html_additional_pages = {
-# 'index': 'indexcontent.html',
-#}
+html_theme = 'pydata_sphinx_theme'
+
+html_logo = '../source/_static/numpylogo.svg'
+
+html_theme_options = {
+ "github_url": "https://github.com/numpy/numpy",
+ "twitter_url": "https://twitter.com/numpy_team",
+ "external_links": [
+ {"name": "Wishlist",
+ "url": "https://github.com/numpy/numpy/issues?q=is%3Aopen+is%3Aissue+label%3A%2223+-+Wish+List%22",
+ },
+ ],
+ "show_prev_next": False,
+}
html_title = "%s" % (project)
html_static_path = ['../source/_static']
diff --git a/doc/neps/content.rst b/doc/neps/content.rst
new file mode 100644
index 000000000..f5d8347c4
--- /dev/null
+++ b/doc/neps/content.rst
@@ -0,0 +1,25 @@
+=====================================
+Roadmap & NumPy Enhancement Proposals
+=====================================
+
+This page provides an overview of development priorities for NumPy.
+Specifically, it contains a roadmap with a higher-level overview, as
+well as NumPy Enhancement Proposals (NEPs)—suggested changes
+to the library—in various stages of discussion or completion (see `NEP
+0 <nep-0000>`__).
+
+Roadmap
+-------
+.. toctree::
+ :maxdepth: 1
+
+ Index <index>
+ The Scope of NumPy <scope>
+ Current roadmap <roadmap>
+ Wishlist (opens new window) |wishlist_link|
+
+.. |wishlist_link| raw:: html
+
   <a href="https://github.com/numpy/numpy/issues?q=is%3Aopen+is%3Aissue+label%3A%2223+-+Wish+List%22" target="_blank">Wishlist</a>
+
+
diff --git a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst
index 18a00ae6a..dca8b2418 100644
--- a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst
+++ b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst
@@ -8,16 +8,196 @@ NEP 35 — Array Creation Dispatching With __array_function__
:Status: Draft
:Type: Standards Track
:Created: 2019-10-15
-:Updated: 2020-08-06
+:Updated: 2020-08-17
:Resolution:
Abstract
--------
We propose the introduction of a new keyword argument ``like=`` to all array
-creation functions to permit dispatching of such functions by the
-``__array_function__`` protocol, addressing one of the protocol shortcomings,
-as described by NEP-18 [1]_.
+creation functions to address one of the shortcomings of ``__array_function__``,
+as described by NEP 18 [1]_. The ``like=`` keyword argument will create an
+instance of the argument's type, enabling direct creation of non-NumPy arrays.
+The target array type must implement the ``__array_function__`` protocol.
+
+Motivation and Scope
+--------------------
+
+Many libraries implement the NumPy API, such as Dask for graph
+computing, CuPy for GPGPU computing, and xarray for N-D labeled arrays.
+Underneath, they have adopted the ``__array_function__`` protocol, which allows
+NumPy to understand and treat downstream objects as if they were native
+``numpy.ndarray`` objects. Hence the community, while using various libraries,
+still benefits from a unified NumPy API. This not only brings great convenience
+for standardization but also removes the burden of learning a new API and
+rewriting code for every new object. In more technical terms, this mechanism of
+the protocol is called a "dispatcher", which is the terminology we use from
+here onwards.
+
+
+.. code:: python
+
+ x = dask.array.arange(5) # Creates dask.array
+ np.diff(x) # Returns dask.array
+
+Note above how we called Dask's implementation of ``diff`` via the NumPy
+namespace by calling ``np.diff``, and the same would apply if we had a CuPy
+array or any other array from a library that adopts ``__array_function__``.
+This allows writing code that is agnostic to the implementation library, thus
+users can write their code once and still be able to use different array
+implementations according to their needs.
+
+Obviously, having a protocol in place is useful if the arrays are created
+elsewhere and NumPy merely handles them. But these arrays still have to be
+created in their native library first and brought back. If it were instead
+possible to create these objects through the NumPy API, the experience would
+be almost complete, all using NumPy syntax. For example, say we have some CuPy
+array ``cp_arr``, and want a similar CuPy identity matrix. We could still
+write the following:
+
+.. code:: python
+
+ x = cupy.identity(3)
+
+Instead, the better way would be to use only the NumPy API, which could now
+be achieved with:
+
+.. code:: python
+
+ x = np.identity(3, like=cp_arr)
+
+As if by magic, ``x`` will also be a CuPy array, as NumPy was capable of
+inferring that from the type of ``cp_arr``. Note that this last step would not
+be possible without ``like=``, as it would be impossible for NumPy to know the
+user expects a CuPy array based only on the integer input.
+
+The new ``like=`` keyword proposed is solely intended to identify the downstream
+library where to dispatch and the object is used only as reference, meaning that
+no modifications, copies or processing will be performed on that object.
+
+We expect that this functionality will be mostly useful to library developers,
+allowing them to create new arrays for internal usage based on arrays passed
+by the user, preventing unnecessary creation of NumPy arrays that will
+ultimately lead to an additional conversion into a downstream array type.
+
+Support for Python 2.7 has been dropped since NumPy 1.17; therefore we make use
+of the keyword-only argument standard described in PEP 3102 [2]_ to implement
+``like=``, thus preventing it from being passed by position.
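As an illustrative sketch (the function name and other parameters here are placeholders, not this NEP's implementation), the PEP 3102 keyword-only form looks like:

.. code:: python

    # ``like`` appears after the bare ``*``, so it can only be passed by
    # keyword: creation_func([0, 1], some_array) would not bind it.
    def creation_func(a, dtype=None, *, like=None):
        ...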
+
+.. _neps.like-kwarg.usage-and-impact:
+
+Usage and Impact
+----------------
+
+NumPy users who don't use other arrays from downstream libraries can continue
+to use array creation routines without a ``like=`` argument. Using
+``like=np.ndarray`` will work as if no array was passed via that argument.
+However, this will incur additional checks that will negatively impact
+performance.
+
+To understand the intended use for ``like=``, and before we move to more complex
+cases, consider the following illustrative example consisting only of NumPy and
+CuPy arrays:
+
+.. code:: python
+
+ import numpy as np
+ import cupy
+
+ def my_pad(arr, padding):
+ padding = np.array(padding, like=arr)
+ return np.concatenate((padding, arr, padding))
+
+ my_pad(np.arange(5), [-1, -1]) # Returns np.ndarray
+ my_pad(cupy.arange(5), [-1, -1]) # Returns cupy.core.core.ndarray
+
+Note in the ``my_pad`` function above how ``arr`` is used as a reference to
+dictate what array type the padding should have, before concatenating the arrays to
+produce the result. On the other hand, if ``like=`` wasn't used, the NumPy case
+would still work, but CuPy wouldn't allow this kind of automatic
+conversion, ultimately raising a
+``TypeError: Only cupy arrays can be concatenated`` exception.
+
+Now we should look at how a library like Dask could benefit from ``like=``.
+Before that, it's important to understand a bit about Dask basics and how Dask
+ensures correctness with ``__array_function__``. Note that Dask can perform
+computations on different sorts of objects, like dataframes, bags, and arrays;
+here we will focus strictly on arrays, which are the objects we can use
+``__array_function__`` with.
+
+Dask uses a graph computing model, meaning it breaks down a large problem into
+many smaller problems and merges their results to reach the final result. To
+break the problem down into smaller ones, Dask also breaks arrays into smaller
+arrays that it calls "chunks". A Dask array can thus consist of one or more
+chunks and they may be of different types. However, in the context of
+``__array_function__``, Dask only allows chunks of the same type; for example,
+a Dask array can be formed of several NumPy arrays or several CuPy arrays, but
+not a mix of both.
+
+To avoid mismatched types during computation, Dask keeps an attribute ``_meta``
+as part of its arrays: this attribute is used to both predict
+the output type at graph creation time, and to create any intermediary arrays
+that are necessary within some function's computation. Going back to our
+previous example, we can use ``_meta`` information to identify what kind of
+array we would use for padding, as seen below:
+
+.. code:: python
+
+ import numpy as np
+ import cupy
+ import dask.array as da
+ from dask.array.utils import meta_from_array
+
+ def my_dask_pad(arr, padding):
+ padding = np.array(padding, like=meta_from_array(arr))
+ return np.concatenate((padding, arr, padding))
+
+ # Returns dask.array<concatenate, shape=(9,), dtype=int64, chunksize=(5,), chunktype=numpy.ndarray>
+ my_dask_pad(da.arange(5), [-1, -1])
+
+ # Returns dask.array<concatenate, shape=(9,), dtype=int64, chunksize=(5,), chunktype=cupy.ndarray>
+ my_dask_pad(da.from_array(cupy.arange(5)), [-1, -1])
+
+Note how ``chunktype`` in the return value above changes from
+``numpy.ndarray`` in the first ``my_dask_pad`` call to ``cupy.ndarray`` in the
+second. We have also renamed the function to ``my_dask_pad`` in this example
+with the intent to make it clear that this is how Dask would implement such
+functionality, should it need to do so, as it requires Dask's internal tools
+that are not of much use elsewhere.
+
+To enable proper identification of the array type we use Dask's utility function
+``meta_from_array``, which was introduced as part of the work to support
+``__array_function__``, allowing Dask to handle ``_meta`` appropriately. Readers
+can think of ``meta_from_array`` as a special function that just returns the
+type of the underlying Dask array, for example:
+
+.. code:: python
+
+ np_arr = da.arange(5)
+ cp_arr = da.from_array(cupy.arange(5))
+
+ meta_from_array(np_arr) # Returns a numpy.ndarray
+ meta_from_array(cp_arr) # Returns a cupy.ndarray
+
+Since the value returned by ``meta_from_array`` is a NumPy-like array, we can
+just pass that directly into the ``like=`` argument.
+
+The ``meta_from_array`` function is primarily targeted at the library's internal
+usage to ensure chunks are created with correct types. Without the ``like=``
+argument, it would be impossible to ensure ``my_pad`` creates a padding array
+with a type matching that of the input array, which would cause a ``TypeError``
+exception to be raised by CuPy, as discussed above for the CuPy case.
+Combining Dask's internal handling of meta arrays and the proposed
+``like=`` argument, it now becomes possible to handle cases involving creation
+of non-NumPy arrays, which is likely the heaviest limitation Dask currently
+faces from the ``__array_function__`` protocol.
+
+Backward Compatibility
+----------------------
+
+This proposal does not raise any backward compatibility issues within NumPy,
+given that it only introduces a new keyword argument to existing array creation
+functions with a default ``None`` value, thus not changing current behavior.
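A minimal sketch of this backward-compatible default, assuming a NumPy version in which the proposed ``like=`` has been implemented:

.. code:: python

    import numpy as np

    a = np.asarray([1, 2, 3])          # ``like=`` omitted: behavior unchanged
    b = np.asarray([1, 2, 3], like=a)  # reference is a NumPy array: plain ndarray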
Detailed description
--------------------
@@ -28,10 +208,6 @@ did not -- and did not intend to -- address the creation of arrays by downstream
libraries, preventing those libraries from using such important functionality in
that context.
-Other NEPs have been written to address parts of that limitation, such as the
-introduction of the ``__duckarray__`` protocol in NEP-30 [2]_, and the
-introduction of an overriding mechanism called ``uarray`` by NEP-31 [3]_.
-
The purpose of this NEP is to address that shortcoming in a simple and
straightforward way: introduce a new ``like=`` keyword argument, similar to how
the ``empty_like`` family of functions work. When array creation functions
@@ -39,25 +215,25 @@ receive such an argument, they will trigger the ``__array_function__`` protocol,
and call the downstream library's own array creation function implementation.
The ``like=`` argument, as its own name suggests, shall be used solely for the
purpose of identifying where to dispatch. In contrast to the way
-``__array_function__`` has been used so far (the first argument identifies where
-to dispatch), and to avoid breaking NumPy's API with regards to array creation,
-the new ``like=`` keyword shall be used for the purpose of dispatching.
-
-Usage Guidance
-~~~~~~~~~~~~~~
-
-The new ``like=`` keyword is solely intended to identify the downstream library
-where to dispatch and the object is used only as reference, meaning that no
-modifications, copies or processing will be performed on that object.
-
-We expect that this functionality will be mostly useful to library developers,
-allowing them to create new arrays for internal usage based on arrays passed
-by the user, preventing unnecessary creation of NumPy arrays that will
-ultimately lead to an additional conversion into a downstream array type.
-
-Support for Python 2.7 has been dropped since NumPy 1.17, therefore we should
-make use of the keyword-only argument standard described in PEP-3102 [4]_ to
-implement the ``like=``, thus preventing it from being passed by position.
+``__array_function__`` has been used so far (the first argument identifies the
+target downstream library), and to avoid breaking NumPy's API with regards to
+array creation, the new ``like=`` keyword shall be used for the purpose of
+dispatching.
+
+Downstream libraries will benefit from the ``like=`` argument without any
+changes to their API, given the argument is implemented exclusively in
+NumPy. It will still be required that downstream libraries implement the
+``__array_function__`` protocol, as described by NEP 18 [1]_, and appropriately
+introduce the argument to their calls to NumPy array creation functions, as
+exemplified in :ref:`neps.like-kwarg.usage-and-impact`.
+
+Related work
+------------
+
+Other NEPs have been written to address parts of ``__array_function__``
+protocol's limitation, such as the introduction of the ``__duckarray__``
+protocol in NEP 30 [3]_, and the introduction of an overriding mechanism called
+``uarray`` by NEP 31 [4]_.
Implementation
--------------
@@ -66,10 +242,10 @@ The implementation requires introducing a new ``like=`` keyword to all existing
array creation functions of NumPy. As examples of functions that would add this
new argument (but not limited to) we can cite those taking array-like objects
such as ``array`` and ``asarray``, functions that create arrays based on
-numerical ranges such as ``range`` and ``linspace``, as well as the ``empty``
-family of functions, even though that may be redundant, since there exists
-already specializations for those with the naming format ``empty_like``. As of
-the writing of this NEP, a complete list of array creation functions can be
+numerical inputs such as ``range`` and ``identity``, as well as the ``empty``
+family of functions, even though that may be redundant, since specializations
+for those already exist with the naming format ``empty_like``. As of the
+writing of this NEP, a complete list of array creation functions can be
found in [5]_.
This newly proposed keyword shall be removed by the ``__array_function__``
@@ -135,60 +311,45 @@ There are two downsides to the implementation above for C functions:
2. To follow current implementation standards, documentation should be attached
directly to the Python source code.
-Alternatively for C functions, the implementation of ``like=`` could be moved
-into the C implementation itself. This is not the primary suggestion here due
-to its inherent complexity which would be difficult too long to describe in its
-entirety here, and too tedious for the reader. However, we leave that as an
-option open for discussion.
+The first version of this proposal suggested the implementation above as one
+viable solution for NumPy functions implemented in C. However, due to the
+downsides pointed out above, we have decided to discard any changes on the Python
+side and resolve those issues with a pure-C implementation. Please refer to
+[implementation]_ for details.
-Usage
------
+Alternatives
+------------
-The purpose of this NEP is to keep things simple. Similarly, we can exemplify
-the usage of ``like=`` in a simple way. Imagine you have an array of ones
-created by a downstream library, such as CuPy. What you need now is a new array
-that can be created using the NumPy API, but that will in fact be created by
-the downstream library, a simple way to achieve that is shown below.
+Recently a new protocol to replace ``__array_function__`` entirely was proposed
+by NEP 37 [6]_, which would require considerable rework by downstream libraries
+that already adopt ``__array_function__``; because of that, we still believe the
+``like=`` argument is beneficial for NumPy and downstream libraries. However,
+that proposal wouldn't necessarily be considered a direct alternative to the
+present NEP, as it would replace NEP 18 entirely, upon which this builds.
+Discussion on details about this new proposal and why that would require rework
+by downstream libraries is beyond the scope of the present proposal.
-.. code:: python
+Discussion
+----------
- x = cupy.ones(2)
- np.array([1, 3, 5], like=x) # Returns cupy.ndarray
+.. [implementation] `Implementation's pull request on GitHub <https://github.com/numpy/numpy/pull/16935>`_
+.. [discussion] `Further discussion on implementation and the NEP's content <https://mail.python.org/pipermail/numpy-discussion/2020-August/080919.html>`_
-As a second example, we could also create an array of evenly spaced numbers
-using a Dask identity matrix as reference:
+References
+----------
-.. code:: python
+.. [1] `NEP 18 - A dispatch mechanism for NumPy's high level array functions <https://numpy.org/neps/nep-0018-array-function-protocol.html>`_.
- x = dask.array.eye(3)
- np.linspace(0, 2, like=x) # Returns dask.array
+.. [2] `PEP 3102 — Keyword-Only Arguments <https://www.python.org/dev/peps/pep-3102/>`_.
+.. [3] `NEP 30 — Duck Typing for NumPy Arrays - Implementation <https://numpy.org/neps/nep-0030-duck-array-protocol.html>`_.
-Compatibility
--------------
-
-This proposal does not raise any backward compatibility issues within NumPy,
-given that it only introduces a new keyword argument to existing array creation
-functions.
-
-Downstream libraries will benefit from the ``like=`` argument automatically,
-that is, without any explicit changes in their codebase. The only requirement
-is that they already implement the ``__array_function__`` protocol, as
-described by NEP-18 [2]_.
-
-References and Footnotes
-------------------------
-
-.. [1] `NEP-18 - A dispatch mechanism for NumPy's high level array functions <https://numpy.org/neps/nep-0018-array-function-protocol.html>`_.
-
-.. [2] `NEP 30 — Duck Typing for NumPy Arrays - Implementation <https://numpy.org/neps/nep-0030-duck-array-protocol.html>`_.
-
-.. [3] `NEP 31 — Context-local and global overrides of the NumPy API <https://github.com/numpy/numpy/pull/14389>`_.
-
-.. [4] `PEP 3102 — Keyword-Only Arguments <https://www.python.org/dev/peps/pep-3102/>`_.
+.. [4] `NEP 31 — Context-local and global overrides of the NumPy API <https://github.com/numpy/numpy/pull/14389>`_.
.. [5] `Array creation routines <https://docs.scipy.org/doc/numpy-1.17.0/reference/routines.array-creation.html>`_.
+.. [6] `NEP 37 — A dispatch protocol for NumPy-like modules <https://numpy.org/neps/nep-0037-array-module.html>`_.
+
Copyright
---------
diff --git a/doc/neps/nep-0041-improved-dtype-support.rst b/doc/neps/nep-0041-improved-dtype-support.rst
index 56ff5eac6..6dc4ea50c 100644
--- a/doc/neps/nep-0041-improved-dtype-support.rst
+++ b/doc/neps/nep-0041-improved-dtype-support.rst
@@ -514,22 +514,22 @@ are not yet fully clear, we anticipate, and accept the following changes:
* **C-API**:
- * In old versions of NumPy ``PyArray_DescrCheck`` is a macro which uses
- ``type(dtype) is np.dtype``. When compiling against an old NumPy version,
- the macro may have to be replaced with the corresponding
- ``PyObject_IsInstance`` call. (If this is a problem, we could backport
- fixing the macro)
-
- * The UFunc machinery changes will break *limited* parts of the current
- implementation. Replacing e.g. the default ``TypeResolver`` is expected
- to remain supported for a time, although optimized masked inner loop iteration
- (which is not even used *within* NumPy) will no longer be supported.
-
- * All functions currently defined on the dtypes, such as
- ``PyArray_Descr->f->nonzero``, will be defined and accessed differently.
- This means that in the long run lowlevel access code will
- have to be changed to use the new API. Such changes are expected to be
- necessary in very few project.
+ * In old versions of NumPy ``PyArray_DescrCheck`` is a macro which uses
+ ``type(dtype) is np.dtype``. When compiling against an old NumPy version,
+ the macro may have to be replaced with the corresponding
+ ``PyObject_IsInstance`` call. (If this is a problem, we could backport
+ fixing the macro)
+
+ * The UFunc machinery changes will break *limited* parts of the current
+ implementation. Replacing e.g. the default ``TypeResolver`` is expected
+ to remain supported for a time, although optimized masked inner loop iteration
+ (which is not even used *within* NumPy) will no longer be supported.
+
+ * All functions currently defined on the dtypes, such as
+ ``PyArray_Descr->f->nonzero``, will be defined and accessed differently.
+ This means that in the long run lowlevel access code will
+ have to be changed to use the new API. Such changes are expected to be
+      necessary in very few projects.
* **dtype implementors (C-API)**:
@@ -541,16 +541,16 @@ are not yet fully clear, we anticipate, and accept the following changes:
At least in some code paths, a similar mechanism is already used.
* The ``scalarkind`` slot and registration of scalar casting will be
- removed/ignored without replacement.
- It currently allows partial value-based casting.
- The ``PyArray_ScalarKind`` function will continue to work for builtin types,
- but will not be used internally and be deprecated.
-
- * Currently user dtypes are defined as instances of ``np.dtype``.
- The creation works by the user providing a prototype instance.
- NumPy will need to modify at least the type during registration.
- This has no effect for either ``rational`` or ``quaternion`` and mutation
- of the structure seems unlikely after registration.
+ removed/ignored without replacement.
+ It currently allows partial value-based casting.
+ The ``PyArray_ScalarKind`` function will continue to work for builtin types,
+ but will not be used internally and be deprecated.
+
+ * Currently user dtypes are defined as instances of ``np.dtype``.
+ The creation works by the user providing a prototype instance.
+ NumPy will need to modify at least the type during registration.
+ This has no effect for either ``rational`` or ``quaternion`` and mutation
+ of the structure seems unlikely after registration.
Since there is a fairly large API surface concerning datatypes, further changes
or the limitation certain function to currently existing datatypes is
diff --git a/doc/neps/nep-0042-new-dtypes.rst b/doc/neps/nep-0042-new-dtypes.rst
index 1f476114f..b37555892 100644
--- a/doc/neps/nep-0042-new-dtypes.rst
+++ b/doc/neps/nep-0042-new-dtypes.rst
@@ -267,7 +267,7 @@ information is currently provided and will be defined on the class:
deprecated. This may be relaxed if a use-case arises.
Additionally, existing methods (and C-side fields) will be provided.
-However, the fields ``kind`` and and ``char`` will be set to ``\0``
+However, the fields ``kind`` and ``char`` will be set to ``\0``
(NULL character) on the C-side.
While discouraged, except for NumPy builtin types, ``kind`` and ``char`` both will return
the ``__qualname__`` of the object to ensure uniqueness for all DTypes.
@@ -307,7 +307,7 @@ is the ``np.datetime64`` scalar.
A potential DType such as ``Categorical`` will not be required to have a clear type
associated with it. Instead, the ``type`` may be ``object`` and the
-categoircal's values are arbitrary objects.
+categorical's values are arbitrary objects.
Unlike with well-defined scalars, this ``type`` cannot
be used for the dtype discovery necessary for coercion
(compare section `DType Discovery during Array Coercion`_).
@@ -659,7 +659,7 @@ should be "minutes".
Common DType Operations
^^^^^^^^^^^^^^^^^^^^^^^
-Numpy currently provides functions like ``np.result_type`` and
+NumPy currently provides functions like ``np.result_type`` and
``np.promote_types`` for determining common types.
These differ in that ``np.result_type`` can take arrays and scalars as input
and implements value based promotion [1]_.
@@ -972,7 +972,7 @@ In general we could implement certain casts, such as ``int8`` to ``int24``
even if the user only provides an ``int16 -> int24`` cast.
This proposal currently does not provide this functionality. However,
it could be extended in the future to either find such casts dynamically,
-or at least allow ``adjust_descriptors`` to return arbitray ``dtypes``.
+or at least allow ``adjust_descriptors`` to return arbitrary ``dtypes``.
If ``CastingImpl[Int8, Int24].adjust_descriptors((int8, int24))`` returns
``(int16, int24)``, the actual casting process could be extended to include
the ``int8 -> int16`` cast. Unlike the above example, which is limited
diff --git a/doc/release/upcoming_changes/16134.compatibility.rst b/doc/release/upcoming_changes/16134.compatibility.rst
new file mode 100644
index 000000000..373cecec0
--- /dev/null
+++ b/doc/release/upcoming_changes/16134.compatibility.rst
@@ -0,0 +1,8 @@
+Same kind casting in concatenate with ``axis=None``
+---------------------------------------------------
+When `~numpy.concatenate` is called with ``axis=None``,
+the flattened arrays were cast with ``unsafe`` casting. Any other axis
+choice uses "same kind". That inconsistent default
+has been deprecated and "same kind" casting will be used
+instead. The new ``casting`` keyword argument
+can be used to retain the old behaviour.
diff --git a/doc/release/upcoming_changes/16134.improvement.rst b/doc/release/upcoming_changes/16134.improvement.rst
new file mode 100644
index 000000000..0699f44bd
--- /dev/null
+++ b/doc/release/upcoming_changes/16134.improvement.rst
@@ -0,0 +1,6 @@
+Concatenate supports providing an output dtype
+----------------------------------------------
+Support was added to `~numpy.concatenate` to provide
+an output ``dtype`` and ``casting`` using keyword
+arguments. The ``dtype`` argument cannot be provided
+in conjunction with the ``out`` one.
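A minimal sketch of the new keywords, assuming the signature that ships with this release:

.. code:: python

    import numpy as np

    a = np.array([1.0, 2.0])  # float64
    b = np.array([3.0, 4.0])  # float64
    # Request the output dtype directly; ``casting`` selects the rule
    # checked (float64 -> float32 is a "same kind" cast).
    out = np.concatenate((a, b), dtype=np.float32, casting="same_kind")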
diff --git a/doc/release/upcoming_changes/16675.improvement.rst b/doc/release/upcoming_changes/16675.improvement.rst
new file mode 100644
index 000000000..bc70d7e0f
--- /dev/null
+++ b/doc/release/upcoming_changes/16675.improvement.rst
@@ -0,0 +1,4 @@
+`numpy.core.records.fromfile` now supports file-like objects
+------------------------------------------------------------
+`numpy.rec.fromfile` can now use file-like objects, for instance
+:py:class:`io.BytesIO`
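A minimal sketch, assuming a NumPy version containing this change (the record dtype is illustrative):

.. code:: python

    import io
    import numpy as np

    dt = np.dtype([("x", "<i4")])
    buf = io.BytesIO(np.zeros(3, dtype=dt).tobytes())  # in-memory "file"
    rec = np.rec.fromfile(buf, dtype=dt, shape=3)      # file-like object accepted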
diff --git a/doc/release/upcoming_changes/16841.change.rst b/doc/release/upcoming_changes/16841.change.rst
new file mode 100644
index 000000000..d9499b6f4
--- /dev/null
+++ b/doc/release/upcoming_changes/16841.change.rst
@@ -0,0 +1,19 @@
+`np.linspace` on integers now uses floor
+-----------------------------------------
+When using an `int` dtype in `numpy.linspace`, previously float values would
+be rounded towards zero. Now `numpy.floor` is used instead, which rounds toward
+``-inf``. This changes the results for negative values. For example, the
+following would previously give::
+
+ >>> np.linspace(-3, 1, 8, dtype=int)
+ array([-3, -2, -1, -1, 0, 0, 0, 1])
+
+and now results in::
+
+ >>> np.linspace(-3, 1, 8, dtype=int)
+ array([-3, -3, -2, -2, -1, -1, 0, 1])
+
+The former result can still be obtained with::
+
+ >>> np.linspace(-3, 1, 8).astype(int)
+ array([-3, -2, -1, -1, 0, 0, 0, 1])
diff --git a/doc/release/upcoming_changes/16911.deprecation.rst b/doc/release/upcoming_changes/16911.deprecation.rst
deleted file mode 100644
index d4dcb629c..000000000
--- a/doc/release/upcoming_changes/16911.deprecation.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-``trim_zeros`` now requires a 1D array compatible with ``ndarray.astype(bool)``
--------------------------------------------------------------------------------
-The ``trim_zeros`` function will, in the future, require an array with the
-following two properties:
-
-* It must be 1D.
-* It must be convertable into a boolean array.
diff --git a/doc/release/upcoming_changes/17029.compatibility.rst b/doc/release/upcoming_changes/17029.compatibility.rst
new file mode 100644
index 000000000..69069ce18
--- /dev/null
+++ b/doc/release/upcoming_changes/17029.compatibility.rst
@@ -0,0 +1,14 @@
+Casting errors interrupt iteration
+----------------------------------
+When iterating while casting values, an error may stop the iteration
+earlier than before. In any case, a failed casting operation always
+returned undefined, partial results. Those may now be even more
+undefined and partial.
+For users of the ``NpyIter`` C-API, such cast errors will now
+cause the `iternext()` function to return 0 and thus abort
+iteration.
+Currently, there is no API to detect such an error directly.
+It is necessary to check ``PyErr_Occurred()``, which
+may be problematic in combination with ``NpyIter_Reset``.
+These issues always existed, but new API could be added
+if required by users.
diff --git a/doc/release/upcoming_changes/17067.expired.rst b/doc/release/upcoming_changes/17067.expired.rst
new file mode 100644
index 000000000..a1065d2c3
--- /dev/null
+++ b/doc/release/upcoming_changes/17067.expired.rst
@@ -0,0 +1,8 @@
+Financial functions removed
+---------------------------
+In accordance with NEP 32, the financial functions are removed
+from NumPy 1.20. The functions that have been removed are ``fv``,
+``ipmt``, ``irr``, ``mirr``, ``nper``, ``npv``, ``pmt``, ``ppmt``,
+``pv``, and ``rate``. These functions are available in the
+`numpy_financial <https://pypi.org/project/numpy-financial>`_
+library.
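A minimal sketch of the migration path, assuming the ``numpy-financial`` package is installed:

.. code:: python

    import numpy_financial as npf  # pip install numpy-financial

    # Future value of monthly 100 deposits for 10 years at 5%/year:
    npf.fv(0.05 / 12, 10 * 12, -100, -100)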
diff --git a/doc/release/upcoming_changes/17068.compatibility.rst b/doc/release/upcoming_changes/17068.compatibility.rst
new file mode 100644
index 000000000..7aa4e58ae
--- /dev/null
+++ b/doc/release/upcoming_changes/17068.compatibility.rst
@@ -0,0 +1,4 @@
+f2py-generated code may return unicode instead of byte strings
+--------------------------------------------------------------
+Some byte strings previously returned by f2py-generated code may now be unicode
+strings. This results from the ongoing Python 2 -> Python 3 cleanup.
diff --git a/doc/release/upcoming_changes/17116.expired.rst b/doc/release/upcoming_changes/17116.expired.rst
new file mode 100644
index 000000000..d8a3a43d5
--- /dev/null
+++ b/doc/release/upcoming_changes/17116.expired.rst
@@ -0,0 +1,2 @@
+* The 14-year deprecation of ``np.ctypeslib.ctypes_load_library`` is expired.
+ Use :func:`~numpy.ctypeslib.load_library` instead, which is identical.
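A minimal usage sketch; the library name and search path below are hypothetical placeholders:

.. code:: python

    import numpy.ctypeslib as npc

    # Loads the shared library "mylib" from the given directory.
    lib = npc.load_library("mylib", "/usr/local/lib")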
diff --git a/doc/release/upcoming_changes/17233.deprecation.rst b/doc/release/upcoming_changes/17233.deprecation.rst
new file mode 100644
index 000000000..7615b85c4
--- /dev/null
+++ b/doc/release/upcoming_changes/17233.deprecation.rst
@@ -0,0 +1,4 @@
+The ``ndincr`` method of ``ndindex`` is deprecated
+--------------------------------------------------
+The documentation has warned against using this function since NumPy 1.8.
+Use ``next(it)`` instead of ``it.ndincr()``.
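A minimal sketch of the replacement:

.. code:: python

    import numpy as np

    it = np.ndindex(2, 3)
    next(it)  # replaces the deprecated it.ndincr()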
diff --git a/doc/release/upcoming_changes/17241.compatibility.rst b/doc/release/upcoming_changes/17241.compatibility.rst
new file mode 100644
index 000000000..671f73d1e
--- /dev/null
+++ b/doc/release/upcoming_changes/17241.compatibility.rst
@@ -0,0 +1,6 @@
+The first element of the ``__array_interface__["data"]`` tuple must be an integer
+----------------------------------------------------------------------------------
+This has been the documented interface for many years, but there was still
+code that would accept a byte string representation of the pointer address.
+That code has been removed; passing the address as a byte string will now
+raise an error.
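A minimal sketch of the required form; the first element of the tuple is a plain Python integer:

.. code:: python

    import numpy as np

    a = np.arange(4)
    ptr, readonly = a.__array_interface__["data"]
    assert isinstance(ptr, int)  # an address given as bytes now raises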
diff --git a/doc/scipy-sphinx-theme b/doc/scipy-sphinx-theme
deleted file mode 160000
-Subproject f0d96ae2bf3b010ce53adadde1e38997497a513
diff --git a/doc/source/_templates/defindex.html b/doc/source/_templates/defindex.html
deleted file mode 100644
index 8eaadecb9..000000000
--- a/doc/source/_templates/defindex.html
+++ /dev/null
@@ -1,35 +0,0 @@
-{#
- basic/defindex.html
- ~~~~~~~~~~~~~~~~~~~
-
- Default template for the "index" page.
-
- :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-#}
-{%- extends "layout.html" %}
-{% set title = _('Overview') %}
-{% block body %}
- <h1>{{ docstitle|e }}</h1>
- <p>
- {{ _('Welcome! This is') }}
- {% block description %}{{ _('the documentation for') }} {{ project|e }}
- {{ release|e }}{% if last_updated %}, {{ _('last updated') }} {{ last_updated|e }}{% endif %}{% endblock %}.
- </p>
- {% block tables %}
- <p><strong>{{ _('Indices and tables:') }}</strong></p>
- <table class="contentstable"><tr>
- <td style="width: 50%">
- <p class="biglink"><a class="biglink" href="{{ pathto("contents") }}">{{ _('Complete Table of Contents') }}</a><br>
- <span class="linkdescr">{{ _('lists all sections and subsections') }}</span></p>
- <p class="biglink"><a class="biglink" href="{{ pathto("search") }}">{{ _('Search Page') }}</a><br>
- <span class="linkdescr">{{ _('search this documentation') }}</span></p>
- </td><td style="width: 50%">
- <p class="biglink"><a class="biglink" href="{{ pathto("modindex") }}">{{ _('Global Module Index') }}</a><br>
- <span class="linkdescr">{{ _('quick access to all modules') }}</span></p>
- <p class="biglink"><a class="biglink" href="{{ pathto("genindex") }}">{{ _('General Index') }}</a><br>
- <span class="linkdescr">{{ _('all functions, classes, terms') }}</span></p>
- </td></tr>
- </table>
- {% endblock %}
-{% endblock %}
diff --git a/doc/source/_templates/indexcontent.html b/doc/source/_templates/indexcontent.html
index d77c5a85e..5929e755d 100644
--- a/doc/source/_templates/indexcontent.html
+++ b/doc/source/_templates/indexcontent.html
@@ -1,5 +1,14 @@
-{% extends "defindex.html" %}
-{% block tables %}
+{#
+ Loosely inspired by the deprecated sphinx/themes/basic/defindex.html
+#}
+{%- extends "layout.html" %}
+{% set title = _('Overview') %}
+{% block body %}
+<h1>{{ docstitle|e }}</h1>
+<p>
+ Welcome! This is the documentation for NumPy {{ release|e }}
+ {% if last_updated %}, last updated {{ last_updated|e }}{% endif %}.
+</p>
<p><strong>For users:</strong></p>
<table class="contentstable" align="center"><tr>
<td width="50%">
diff --git a/doc/source/_templates/layout.html b/doc/source/_templates/layout.html
index 1f8ec518f..0b0ba6271 100644
--- a/doc/source/_templates/layout.html
+++ b/doc/source/_templates/layout.html
@@ -1,54 +1,16 @@
{% extends "!layout.html" %}
{%- block extrahead %}
+ <!-- this is added via javascript in versionwarning.js -->
+ <!-- link rel="canonical" href="http://numpy.org/doc/stable/{{ pagename }}{{ file_suffix }}" / -->
+
<style>
- .main {
- -moz-box-shadow: none;
- -webkit-box-shadow: none;
- box-shadow: none;
- }
- div.top-scipy-org-logo-header {
- background-color: #fafafa;
- border-bottom: 2px solid #013243; /* Warm Black */
- margin-top: 0;
- box-shadow: none;
- }
- div.top-scipy-org-logo-header img {
- height: 100px;
- padding-left: 27px;
- }
- div.spc-navbar .nav-pills > li > a {
- background-color: #4d77cf; /* Han Blue */
- }
+.navbar-brand img {
+ height: 75px;
+}
+.navbar-brand {
+ height: 75px;
+}
</style>
{{ super() }}
{% endblock %}
-
-{%- block header %}
-<div class="container">
- <div class="top-scipy-org-logo-header">
- <a href="{{ pathto('index') }}">
- <img border=0 alt="NumPy" src="{{ pathto('_static/numpylogo.svg', 1) }}">
- </a>
- </div>
-</div>
-
-{% endblock %}
-{% block rootrellink %}
- {% if pagename != 'index' %}
- <li class="active"><a href="{{ pathto('index') }}">{{ shorttitle|e }}</a></li>
- {% endif %}
-{% endblock %}
-
-{% block sidebarsearch %}
-{%- if sourcename %}
-<ul class="this-page-menu">
-{%- if 'reference/generated' in sourcename %}
- <li><a href="/numpy/docs/{{ sourcename.replace('reference/generated/', '').replace('.txt', '') |e }}">{{_('Edit page')}}</a></li>
-{%- else %}
- <li><a href="/numpy/docs/numpy-docs/{{ sourcename.replace('.txt', '.rst') |e }}">{{_('Edit page')}}</a></li>
-{%- endif %}
-</ul>
-{%- endif %}
-{{ super() }}
-{% endblock %}
diff --git a/doc/source/conf.py b/doc/source/conf.py
index b908a5a28..e34be7f5c 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -94,34 +94,15 @@ def setup(app):
# HTML output
# -----------------------------------------------------------------------------
-themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
-if not os.path.isdir(themedir):
- raise RuntimeError("Get the scipy-sphinx-theme first, "
- "via git submodule init && git submodule update")
-
-html_theme = 'scipy'
-html_theme_path = [themedir]
-
-if 'scipyorg' in tags:
- # Build for the scipy.org website
- html_theme_options = {
- "edit_link": True,
- "sidebar": "right",
- "scipy_org_logo": True,
- "rootlinks": [("https://scipy.org/", "Scipy.org"),
- ("https://docs.scipy.org/", "Docs")]
- }
-else:
- # Default build
- html_theme_options = {
- "edit_link": False,
- "sidebar": "left",
- "scipy_org_logo": False,
- "rootlinks": [("https://numpy.org/", "NumPy.org"),
- ("https://numpy.org/doc", "Docs"),
- ]
- }
- html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']}
+html_theme = 'pydata_sphinx_theme'
+
+html_logo = '_static/numpylogo.svg'
+
+html_theme_options = {
+ "github_url": "https://github.com/numpy/numpy",
+ "twitter_url": "https://twitter.com/numpy_team",
+}
+
html_additional_pages = {
'index': 'indexcontent.html',
diff --git a/doc/source/contents.rst b/doc/source/contents.rst
index baea7784c..5d4e12097 100644
--- a/doc/source/contents.rst
+++ b/doc/source/contents.rst
@@ -5,23 +5,12 @@ NumPy Documentation
###################
.. toctree::
+ :maxdepth: 1
- user/setting-up
- user/quickstart
- user/absolute_beginners
- user/tutorials_index
- user/howtos_index
- reference/index
- user/explanations_index
- f2py/index
- glossary
- dev/index
- dev/underthehood
- docs/index
- docs/howto_document
- benchmarking
- bugs
- release
- about
- license
+ User Guide <user/index>
+ API reference <reference/index>
+ Development <dev/index>
+.. This is not really the index page, that is found in
+ _templates/indexcontent.html The toctree content here will be added to the
+ top of the template header
diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst
index aeb277a87..c4f35b68f 100644
--- a/doc/source/dev/index.rst
+++ b/doc/source/dev/index.rst
@@ -4,6 +4,22 @@
Contributing to NumPy
#####################
+.. TODO: this is hidden because there's a bug in the pydata theme that won't render TOC items under headers
+
+.. toctree::
+ :hidden:
+
+ conduct/code_of_conduct
+ Git Basics <gitwash/index>
+ development_environment
+ development_workflow
+ ../benchmarking
+ style_guide
+ releasing
+ governance/index
+ howto-docs
+
+
Not a coder? Not a problem! NumPy is multi-faceted, and we can use a lot of help.
These are all activities we'd like to get help with (they're all important, so
we list them in alphabetical order):
diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst
index b6ea42909..d37534960 100644
--- a/doc/source/glossary.rst
+++ b/doc/source/glossary.rst
@@ -4,4 +4,390 @@ Glossary
.. toctree::
-.. automodule:: numpy.doc.glossary
+.. glossary::
+
+ along an axis
+ Axes are defined for arrays with more than one dimension. A
+ 2-dimensional array has two corresponding axes: the first running
+ vertically downwards across rows (axis 0), and the second running
+ horizontally across columns (axis 1).
+
+ Many operations can take place along one of these axes. For example,
+ we can sum each row of an array, in which case we operate along
+ columns, or axis 1::
+
+ >>> x = np.arange(12).reshape((3,4))
+
+ >>> x
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+
+ >>> x.sum(axis=1)
+ array([ 6, 22, 38])
+
+ array
+ A homogeneous container of numerical elements. Each element in the
+ array occupies a fixed amount of memory (hence homogeneous), and
+ can be a numerical element of a single type (such as float, int
+ or complex) or a combination (such as ``(float, int, float)``). Each
+ array has an associated data-type (or ``dtype``), which describes
+ the numerical type of its elements::
+
+ >>> x = np.array([1, 2, 3], float)
+
+ >>> x
+ array([ 1., 2., 3.])
+
+ >>> x.dtype # floating point number, 64 bits of memory per element
+ dtype('float64')
+
+
+ # More complicated data type: each array element is a combination of
+      # an integer and a floating point number
+ >>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', np.int64), ('y', float)])
+ array([(1, 2.), (3, 4.)], dtype=[('x', '<i8'), ('y', '<f8')])
+
+      Fast element-wise operations, called :term:`ufuncs <ufunc>`, operate on arrays.
+
+ array_like
+ Any sequence that can be interpreted as an ndarray. This includes
+ nested lists, tuples, scalars and existing arrays.
+
+ big-endian
+ When storing a multi-byte value in memory as a sequence of bytes, the
+ sequence addresses/sends/stores the most significant byte first (lowest
+ address) and the least significant byte last (highest address). Common in
+ micro-processors and used for transmission of data over network protocols.
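+
+      For example, the 16-bit integers 1 and 770 written in big-endian
+      order give these bytes, most significant byte of each value first::
+
+         >>> np.array([1, 770], dtype='>i2').tobytes()
+         b'\x00\x01\x03\x02'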
+
+ BLAS
+ `Basic Linear Algebra Subprograms <https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms>`_
+
+ broadcast
+ NumPy can do operations on arrays whose shapes are mismatched::
+
+ >>> x = np.array([1, 2])
+ >>> y = np.array([[3], [4]])
+
+ >>> x
+ array([1, 2])
+
+ >>> y
+ array([[3],
+ [4]])
+
+ >>> x + y
+ array([[4, 5],
+ [5, 6]])
+
+ See `basics.broadcasting` for more information.
+
+ C order
+ See `row-major`
+
+ column-major
+      A way to represent items in an N-dimensional array in the 1-dimensional
+ computer memory. In column-major order, the leftmost index "varies the
+ fastest": for example the array::
+
+ [[1, 2, 3],
+ [4, 5, 6]]
+
+ is represented in the column-major order as::
+
+ [1, 4, 2, 5, 3, 6]
+
+ Column-major order is also known as the Fortran order, as the Fortran
+ programming language uses it.
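+
+      One way to see this ordering from NumPy itself is to flatten an
+      array in Fortran (column-major) order::
+
+         >>> a = np.array([[1, 2, 3], [4, 5, 6]])
+         >>> a.flatten(order='F')
+         array([1, 4, 2, 5, 3, 6])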
+
+ decorator
+ An operator that transforms a function. For example, a ``log``
+ decorator may be defined to print debugging information upon
+ function execution::
+
+ >>> def log(f):
+ ... def new_logging_func(*args, **kwargs):
+ ... print("Logging call with parameters:", args, kwargs)
+ ... return f(*args, **kwargs)
+ ...
+ ... return new_logging_func
+
+ Now, when we define a function, we can "decorate" it using ``log``::
+
+ >>> @log
+ ... def add(a, b):
+ ... return a + b
+
+ Calling ``add`` then yields:
+
+ >>> add(1, 2)
+ Logging call with parameters: (1, 2) {}
+ 3
+
+ dictionary
+ Resembling a language dictionary, which provides a mapping between
+ words and descriptions thereof, a Python dictionary is a mapping
+ between two objects::
+
+ >>> x = {1: 'one', 'two': [1, 2]}
+
+ Here, `x` is a dictionary mapping keys to values, in this case
+ the integer 1 to the string "one", and the string "two" to
+ the list ``[1, 2]``. The values may be accessed using their
+ corresponding keys::
+
+ >>> x[1]
+ 'one'
+
+ >>> x['two']
+ [1, 2]
+
+      Note that dictionaries preserve insertion order as of Python 3.7.
+      Also, most mutable objects (see *immutable* below), such as lists,
+      may not be used as keys.
+
+ For more information on dictionaries, read the
+ `Python tutorial <https://docs.python.org/tutorial/>`_.
+
+ field
+ In a :term:`structured data type`, each sub-type is called a `field`.
+ The `field` has a name (a string), a type (any valid dtype), and
+      an optional `title`. See :ref:`arrays.dtypes`.
+
+ Fortran order
+ See `column-major`
+
+ flattened
+ Collapsed to a one-dimensional array. See `numpy.ndarray.flatten`
+ for details.
+
+ homogeneous
+      Describes a block of memory comprised of blocks, each block made up
+      of items of the same size, with all blocks interpreted in exactly
+      the same way. In the simplest case each block contains a single
+      item, for instance int32 or float64.
+
+ immutable
+      An object that cannot be modified after creation is called
+ immutable. Two common examples are strings and tuples.
+
+ iterable
+ A sequence that allows "walking" (iterating) over items, typically
+ using a loop such as::
+
+ >>> x = [1, 2, 3]
+ >>> [item**2 for item in x]
+ [1, 4, 9]
+
+      It is often used in combination with ``enumerate``::
+
+         >>> keys = ['a','b','c']
+         >>> for n, k in enumerate(keys):
+         ...     print("Key %d: %s" % (n, k))
+         ...
+         Key 0: a
+         Key 1: b
+         Key 2: c
+
+ itemsize
+ The size of the dtype element in bytes.
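+
+      For example::
+
+         >>> np.dtype(np.float64).itemsize
+         8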
+
+ list
+ A Python container that can hold any number of objects or items.
+ The items do not have to be of the same type, and can even be
+ lists themselves::
+
+ >>> x = [2, 2.0, "two", [2, 2.0]]
+
+      The list `x` contains 4 items, each of which can be accessed individually::
+
+ >>> x[2] # the string 'two'
+ 'two'
+
+ >>> x[3] # a list, containing an integer 2 and a float 2.0
+ [2, 2.0]
+
+ It is also possible to select more than one item at a time,
+ using *slicing*::
+
+ >>> x[0:2] # or, equivalently, x[:2]
+ [2, 2.0]
+
+ In code, arrays are often conveniently expressed as nested lists::
+
+
+ >>> np.array([[1, 2], [3, 4]])
+ array([[1, 2],
+ [3, 4]])
+
+ For more information, read the section on lists in the `Python
+ tutorial <https://docs.python.org/tutorial/>`_. For a mapping
+ type (key-value), see *dictionary*.
+
+ little-endian
+ When storing a multi-byte value in memory as a sequence of bytes, the
+ sequence addresses/sends/stores the least significant byte first (lowest
+ address) and the most significant byte last (highest address). Common in
+ x86 processors.
+
+ mask
+ A boolean array, used to select only certain elements for an operation::
+
+ >>> x = np.arange(5)
+ >>> x
+ array([0, 1, 2, 3, 4])
+
+ >>> mask = (x > 2)
+ >>> mask
+ array([False, False, False, True, True])
+
+ >>> x[mask] = -1
+ >>> x
+ array([ 0, 1, 2, -1, -1])
+
+ masked array
+      An array in which values indicated by a mask are suppressed::
+
+ >>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True])
+ >>> x
+ masked_array(data=[--, 2.0, --],
+ mask=[ True, False, True],
+ fill_value=1e+20)
+
+ >>> x + [1, 2, 3]
+ masked_array(data=[--, 4.0, --],
+ mask=[ True, False, True],
+ fill_value=1e+20)
+
+
+ Masked arrays are often used when operating on arrays containing
+ missing or invalid entries.
+
+ matrix
+ A 2-dimensional ndarray that preserves its two-dimensional nature
+ throughout operations. It has certain special operations, such as ``*``
+ (matrix multiplication) and ``**`` (matrix power), defined::
+
+ >>> x = np.mat([[1, 2], [3, 4]])
+ >>> x
+ matrix([[1, 2],
+ [3, 4]])
+
+ >>> x**2
+ matrix([[ 7, 10],
+ [15, 22]])
+
+ ndarray
+ See *array*.
+
+ record array
+ An :term:`ndarray` with :term:`structured data type` which has been
+ subclassed as ``np.recarray`` and whose dtype is of type ``np.record``,
+      making the fields of its data type accessible by attribute.
+
+ reference
+ If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore,
+ ``a`` and ``b`` are different names for the same Python object.
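+
+      For example::
+
+         >>> b = [1, 2, 3]
+         >>> a = b         # a now refers to the same list object as b
+         >>> a is b
+         True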
+
+ row-major
+      A way to represent items in an N-dimensional array in the 1-dimensional
+ computer memory. In row-major order, the rightmost index "varies
+ the fastest": for example the array::
+
+ [[1, 2, 3],
+ [4, 5, 6]]
+
+ is represented in the row-major order as::
+
+ [1, 2, 3, 4, 5, 6]
+
+ Row-major order is also known as the C order, as the C programming
+ language uses it. New NumPy arrays are by default in row-major order.
+
+ slice
+ Used to select only certain elements from a sequence:
+
+      >>> x = list(range(5))
+      >>> x
+      [0, 1, 2, 3, 4]
+
+ >>> x[1:3] # slice from 1 to 3 (excluding 3 itself)
+ [1, 2]
+
+ >>> x[1:5:2] # slice from 1 to 5, but skipping every second element
+ [1, 3]
+
+ >>> x[::-1] # slice a sequence in reverse
+ [4, 3, 2, 1, 0]
+
+      Arrays may have more than one dimension, each of which can be sliced
+ individually:
+
+ >>> x = np.array([[1, 2], [3, 4]])
+ >>> x
+ array([[1, 2],
+ [3, 4]])
+
+ >>> x[:, 1]
+ array([2, 4])
+
+ structured data type
+      A data type composed of other data types.
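+
+      For example (the ``<`` characters in the output reflect a
+      little-endian machine)::
+
+         >>> np.dtype([('x', np.int32), ('y', np.float64)])
+         dtype([('x', '<i4'), ('y', '<f8')])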
+
+ subarray data type
+      A :term:`structured data type` may contain an :term:`ndarray` with its
+ own dtype and shape:
+
+ >>> dt = np.dtype([('a', np.int32), ('b', np.float32, (3,))])
+ >>> np.zeros(3, dtype=dt)
+ array([(0, [0., 0., 0.]), (0, [0., 0., 0.]), (0, [0., 0., 0.])],
+ dtype=[('a', '<i4'), ('b', '<f4', (3,))])
+
+ title
+ In addition to field names, structured array fields may have an
+ associated :ref:`title <titles>` which is an alias to the name and is
+ commonly used for plotting.
+
+ ufunc
+ Universal function. A fast element-wise, :term:`vectorized
+ <vectorization>` array operation. Examples include ``add``, ``sin`` and
+ ``logical_or``.
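+
+      For example::
+
+         >>> np.add(np.arange(3), 10)
+         array([10, 11, 12])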
+
+ vectorization
+ Optimizing a looping block by specialized code. In a traditional sense,
+ vectorization performs the same operation on multiple elements with
+ fixed strides between them via specialized hardware. Compilers know how
+ to take advantage of well-constructed loops to implement such
+ optimizations. NumPy uses :ref:`vectorization <whatis-vectorization>`
+ to mean any optimization via specialized code performing the same
+ operations on multiple elements, typically achieving speedups by
+ avoiding some of the overhead in looking up and converting the elements.
+
+ view
+ An array that does not own its data, but refers to another array's
+ data instead. For example, we may create a view that only shows
+ every second element of another array::
+
+ >>> x = np.arange(5)
+ >>> x
+ array([0, 1, 2, 3, 4])
+
+ >>> y = x[::2]
+ >>> y
+ array([0, 2, 4])
+
+ >>> x[0] = 3 # changing x changes y as well, since y is a view on x
+ >>> y
+ array([3, 2, 4])
+
+ wrapper
+ Python is a high-level (highly abstracted, or English-like) language.
+ This abstraction comes at a price in execution speed, and sometimes
+ it becomes necessary to use lower level languages to do fast
+ computations. A wrapper is code that provides a bridge between
+ high and the low level languages, allowing, e.g., Python to execute
+ code written in C or Fortran.
+
+ Examples include ctypes, SWIG and Cython (which wraps C and C++)
+ and f2py (which wraps Fortran).
+
+
diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst
index 9ce77424a..c5947620e 100644
--- a/doc/source/reference/arrays.datetime.rst
+++ b/doc/source/reference/arrays.datetime.rst
@@ -218,7 +218,7 @@ And here are the time units:
m minute +/- 1.7e13 years [1.7e13 BC, 1.7e13 AD]
s second +/- 2.9e11 years [2.9e11 BC, 2.9e11 AD]
ms millisecond +/- 2.9e8 years [ 2.9e8 BC, 2.9e8 AD]
- us microsecond +/- 2.9e5 years [290301 BC, 294241 AD]
+us / μs microsecond +/- 2.9e5 years [290301 BC, 294241 AD]
ns nanosecond +/- 292 years [ 1678 AD, 2262 AD]
ps picosecond +/- 106 days [ 1969 AD, 1970 AD]
fs femtosecond +/- 2.6 hours [ 1969 AD, 1970 AD]
diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst
index c7703764f..575984707 100644
--- a/doc/source/reference/arrays.dtypes.rst
+++ b/doc/source/reference/arrays.dtypes.rst
@@ -537,6 +537,13 @@ Attributes providing additional information:
dtype.alignment
dtype.base
+Metadata attached by the user:
+
+.. autosummary::
+ :toctree: generated/
+
+ dtype.metadata
+
Methods
-------
diff --git a/doc/source/reference/arrays.interface.rst b/doc/source/reference/arrays.interface.rst
index 4e95535c0..73e4aef0c 100644
--- a/doc/source/reference/arrays.interface.rst
+++ b/doc/source/reference/arrays.interface.rst
@@ -199,9 +199,9 @@ array using only one attribute lookup and a well-defined C-structure.
.. c:var:: __array_struct__
- A :c:type: `PyCObject` whose :c:data:`voidptr` member contains a
+ A :c:type:`PyCapsule` whose ``pointer`` member contains a
pointer to a filled :c:type:`PyArrayInterface` structure. Memory
- for the structure is dynamically created and the :c:type:`PyCObject`
+ for the structure is dynamically created and the :c:type:`PyCapsule`
is also created with an appropriate destructor so the retriever of
this attribute simply has to apply :c:func:`Py_DECREF()` to the
object returned by this attribute when it is finished. Also,
@@ -211,7 +211,7 @@ array using only one attribute lookup and a well-defined C-structure.
must also not reallocate their memory if other objects are
referencing them.
-The PyArrayInterface structure is defined in ``numpy/ndarrayobject.h``
+The :c:type:`PyArrayInterface` structure is defined in ``numpy/ndarrayobject.h``
as::
typedef struct {
@@ -240,13 +240,14 @@ flag is present.
.. admonition:: New since June 16, 2006:
- In the past most implementations used the "desc" member of the
- :c:type:`PyCObject` itself (do not confuse this with the "descr" member of
+ In the past most implementations used the ``desc`` member of the ``PyCObject``
+ (now :c:type:`PyCapsule`) itself (do not confuse this with the "descr" member of
the :c:type:`PyArrayInterface` structure above --- they are two separate
things) to hold the pointer to the object exposing the interface.
- This is now an explicit part of the interface. Be sure to own a
- reference to the object when the :c:type:`PyCObject` is created using
- :c:type:`PyCObject_FromVoidPtrAndDesc`.
+ This is now an explicit part of the interface. Be sure to take a
+ reference to the object and call :c:func:`PyCapsule_SetContext` before
+ returning the :c:type:`PyCapsule`, and configure a destructor to decref this
+ reference.
Type description examples
@@ -315,25 +316,39 @@ largely aesthetic. In particular:
1. The PyArrayInterface structure had no descr member at the end
(and therefore no flag ARR_HAS_DESCR)
-2. The desc member of the PyCObject returned from __array_struct__ was
+2. The ``context`` member of the :c:type:`PyCapsule` (formerly the ``desc``
+ member of the ``PyCObject``) returned from ``__array_struct__`` was
not specified. Usually, it was the object exposing the array (so
that a reference to it could be kept and destroyed when the
- C-object was destroyed). Now it must be a tuple whose first
- element is a string with "PyArrayInterface Version #" and whose
- second element is the object exposing the array.
+ C-object was destroyed). It is now an explicit requirement that this field
+ be used in some way to hold a reference to the owning object.
-3. The tuple returned from __array_interface__['data'] used to be a
+ .. note::
+
+ Until August 2020, this said:
+
+ Now it must be a tuple whose first element is a string with
+ "PyArrayInterface Version #" and whose second element is the object
+ exposing the array.
+
+ This design was retracted almost immediately after it was proposed, in
+ <https://mail.python.org/pipermail/numpy-discussion/2006-June/020995.html>.
+ Despite 14 years of documentation to the contrary, at no point was it
+      valid to assume that ``__array_struct__`` capsules held this tuple
+ content.
+
+3. The tuple returned from ``__array_interface__['data']`` used to be a
hex-string (now it is an integer or a long integer).
-4. There was no __array_interface__ attribute instead all of the keys
- (except for version) in the __array_interface__ dictionary were
+4. There was no ``__array_interface__`` attribute; instead all of the keys
+ (except for version) in the ``__array_interface__`` dictionary were
    their own attribute. Thus, to obtain the Python-side information you
had to access separately the attributes:
- * __array_data__
- * __array_shape__
- * __array_strides__
- * __array_typestr__
- * __array_descr__
- * __array_offset__
- * __array_mask__
+ * ``__array_data__``
+ * ``__array_shape__``
+ * ``__array_strides__``
+ * ``__array_typestr__``
+ * ``__array_descr__``
+ * ``__array_offset__``
+ * ``__array_mask__``
diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index b635c4df2..cfe4d2d51 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -826,7 +826,7 @@ General check of Python Type
.. c:function:: PyArray_IsScalar(op, cls)
- Evaluates true if *op* is an instance of :c:data:`Py{cls}ArrType_Type`.
+ Evaluates true if *op* is an instance of ``Py{cls}ArrType_Type``.
.. c:function:: PyArray_CheckScalar(op)
diff --git a/doc/source/reference/c-api/config.rst b/doc/source/reference/c-api/config.rst
index 4592228b5..c3e2c98af 100644
--- a/doc/source/reference/c-api/config.rst
+++ b/doc/source/reference/c-api/config.rst
@@ -19,7 +19,7 @@ avoid namespace pollution.
Data type sizes
---------------
-The :c:data:`NPY_SIZEOF_{CTYPE}` constants are defined so that sizeof
+The ``NPY_SIZEOF_{CTYPE}`` constants are defined so that sizeof
information is available to the pre-processor.
.. c:macro:: NPY_SIZEOF_SHORT
diff --git a/doc/source/reference/c-api/dtype.rst b/doc/source/reference/c-api/dtype.rst
index 082ecfe97..a04d85212 100644
--- a/doc/source/reference/c-api/dtype.rst
+++ b/doc/source/reference/c-api/dtype.rst
@@ -30,7 +30,7 @@ Enumerated Types
There is a list of enumerated types defined providing the basic 24
data types plus some useful generic names. Whenever the code requires
a type number, one of these enumerated types is requested. The types
-are all called :c:data:`NPY_{NAME}`:
+are all called ``NPY_{NAME}``:
.. c:var:: NPY_BOOL
@@ -199,7 +199,7 @@ Other useful related constants are
The various character codes indicating certain types are also part of
an enumerated list. References to type characters (should they be
needed at all) should always use these enumerations. The form of them
-is :c:data:`NPY_{NAME}LTR` where ``{NAME}`` can be
+is ``NPY_{NAME}LTR`` where ``{NAME}`` can be
**BOOL**, **BYTE**, **UBYTE**, **SHORT**, **USHORT**, **INT**,
**UINT**, **LONG**, **ULONG**, **LONGLONG**, **ULONGLONG**,
@@ -247,8 +247,8 @@ Max and min values for integers
Number of bits in data types
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-All :c:data:`NPY_SIZEOF_{CTYPE}` constants have corresponding
-:c:data:`NPY_BITSOF_{CTYPE}` constants defined. The :c:data:`NPY_BITSOF_{CTYPE}`
+All ``NPY_SIZEOF_{CTYPE}`` constants have corresponding
+``NPY_BITSOF_{CTYPE}`` constants defined. The ``NPY_BITSOF_{CTYPE}``
constants provide the number of bits in the data type. Specifically,
the available ``{CTYPE}s`` are
@@ -263,7 +263,7 @@ All of the numeric data types (integer, floating point, and complex)
have constants that are defined to be a specific enumerated type
number. Exactly which enumerated type a bit-width type refers to is
platform dependent. In particular, the constants available are
-:c:data:`PyArray_{NAME}{BITS}` where ``{NAME}`` is **INT**, **UINT**,
+``PyArray_{NAME}{BITS}`` where ``{NAME}`` is **INT**, **UINT**,
**FLOAT**, **COMPLEX** and ``{BITS}`` can be 8, 16, 32, 64, 80, 96, 128,
160, 192, 256, and 512. Obviously not all bit-widths are available on
all platforms for all the kinds of numeric types. Commonly 8-, 16-,
@@ -397,8 +397,8 @@ There are also typedefs for signed integers, unsigned integers,
floating point, and complex floating point types of specific bit-
widths. The available type names are
- :c:type:`npy_int{bits}`, :c:type:`npy_uint{bits}`, :c:type:`npy_float{bits}`,
- and :c:type:`npy_complex{bits}`
+ ``npy_int{bits}``, ``npy_uint{bits}``, ``npy_float{bits}``,
+ and ``npy_complex{bits}``
where ``{bits}`` is the number of bits in the type and can be **8**,
**16**, **32**, **64**, 128, and 256 for integer types; 16, **32**
diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst
index 5f6fd7d4a..ee57d4680 100644
--- a/doc/source/reference/c-api/types-and-structures.rst
+++ b/doc/source/reference/c-api/types-and-structures.rst
@@ -26,7 +26,7 @@ By constructing a new Python type you make available a new object for
Python. The ndarray object is an example of a new type defined in C.
New types are defined in C by two basic steps:
-1. creating a C-structure (usually named :c:type:`Py{Name}Object`) that is
+1. creating a C-structure (usually named ``Py{Name}Object``) that is
binary- compatible with the :c:type:`PyObject` structure itself but holds
the additional information needed for that particular object;
@@ -1204,7 +1204,7 @@ ScalarArrayTypes
There is a Python type for each of the different built-in data types
that can be present in the array Most of these are simple wrappers
around the corresponding data type in C. The C-names for these types
-are :c:data:`Py{TYPE}ArrType_Type` where ``{TYPE}`` can be
+are ``Py{TYPE}ArrType_Type`` where ``{TYPE}`` can be
**Bool**, **Byte**, **Short**, **Int**, **Long**, **LongLong**,
**UByte**, **UShort**, **UInt**, **ULong**, **ULongLong**,
diff --git a/doc/source/reference/c-api/ufunc.rst b/doc/source/reference/c-api/ufunc.rst
index abe8935ae..50963c81f 100644
--- a/doc/source/reference/c-api/ufunc.rst
+++ b/doc/source/reference/c-api/ufunc.rst
@@ -269,7 +269,7 @@ Functions
.. c:function:: int PyUFunc_checkfperr(int errmask, PyObject* errobj)
A simple interface to the IEEE error-flag checking support. The
- *errmask* argument is a mask of :c:data:`UFUNC_MASK_{ERR}` bitmasks
+ *errmask* argument is a mask of ``UFUNC_MASK_{ERR}`` bitmasks
indicating which errors to check for (and how to check for
them). The *errobj* must be a Python tuple with two elements: a
string containing the name which will be used in any communication
diff --git a/doc/source/reference/internals.rst b/doc/source/reference/internals.rst
index aacfabcd3..ed8042c08 100644
--- a/doc/source/reference/internals.rst
+++ b/doc/source/reference/internals.rst
@@ -9,4 +9,160 @@ NumPy internals
internals.code-explanations
alignment
-.. automodule:: numpy.doc.internals
+Internal organization of numpy arrays
+=====================================
+
+It helps to understand a bit about how NumPy arrays are handled under the
+covers. This section will not go into great detail; those wishing to
+understand the full details are referred to Travis Oliphant's book "Guide
+to NumPy".
+
+NumPy arrays consist of two major components, the raw array data (from now on,
+referred to as the data buffer), and the information about the raw array data.
+The data buffer is typically what people think of as arrays in C or Fortran,
+a contiguous (and fixed) block of memory containing fixed sized data items.
+NumPy also contains a significant set of data that describes how to interpret
+the data in the data buffer. This extra information contains (among other things):
+
+ 1) The basic data element's size in bytes
+ 2) The start of the data within the data buffer (an offset relative to the
+ beginning of the data buffer).
+ 3) The number of dimensions and the size of each dimension
+ 4) The separation between elements for each dimension (the 'stride'). This
+ does not have to be a multiple of the element size
+ 5) The byte order of the data (which may not be the native byte order)
+ 6) Whether the buffer is read-only
+ 7) Information (via the dtype object) about the interpretation of the basic
+ data element. The basic data element may be as simple as a int or a float,
+ or it may be a compound object (e.g., struct-like), a fixed character field,
+ or Python object pointers.
+ 8) Whether the array is to be interpreted as C-order or Fortran-order.
+
+This arrangement allows for very flexible use of arrays. One thing that it
+allows is simple changes of the metadata to change the interpretation of the
+array buffer. Changing the byteorder of the array is a simple change involving
+no rearrangement of the data. The shape of the array can be changed very
+easily, without changing anything in the data buffer and without any data
+copying at all.
+
+Among other things this makes possible, one can create a new array metadata
+object that uses the same data buffer to create a new view of that data
+buffer, a view that has a different interpretation of the buffer (e.g.,
+different shape, offset, byte order, strides, etc.) but shares the same data
+bytes. Many operations in numpy do just this, such as slicing. Other
+operations, such as transpose, don't move data elements around in the array,
+but rather change the information about the shape and strides so that the
+indexing of the array changes, but the data in the buffer doesn't move.
+
+Typically these new arrays, with their own metadata but the same data buffer,
+are new 'views' into the data buffer. There is a different ndarray object, but it
+uses the same data buffer. This is why it is necessary to force copies through
+use of the .copy() method if one really wants to make a new and independent
+copy of the data buffer.
+
+New views into arrays mean the object reference counts for the data buffer
+increase. Simply doing away with the original array object will not remove the
+data buffer if other views of it still exist.
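+
+A small sketch of the view relationship (the ``base`` attribute of a view
+refers to the array that owns the data)::
+
+    >>> x = np.arange(10)
+    >>> y = x[::2]              # a view: no data is copied
+    >>> y.base is x
+    True
+    >>> y.copy().base is None   # a true copy owns its own buffer
+    True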
+
+Multidimensional Array Indexing Order Issues
+============================================
+
+What is the right way to index
+multi-dimensional arrays? Before you jump to conclusions about the one
+true way to index multi-dimensional arrays, it pays to understand why this is
+a confusing issue. This section will try to explain in detail how numpy
+indexing works and why we adopt the convention we do for images, and when it
+may be appropriate to adopt other conventions.
+
+The first thing to understand is
+that there are two conflicting conventions for indexing 2-dimensional arrays.
+Matrix notation uses the first index to indicate which row is being selected and
+the second index to indicate which column is selected. This is opposite the
+geometrically-oriented convention for images, where people generally think the
+first index represents x position (i.e., column) and the second represents y
+position (i.e., row). This alone is the source of much confusion;
+matrix-oriented users and image-oriented users expect two different things with
+regard to indexing.
+
+The second issue to understand is how indices correspond
+to the order the array is stored in memory. In Fortran the first index is the
+most rapidly varying index when moving through the elements of a two
+dimensional array as it is stored in memory. If you adopt the matrix
+convention for indexing, then this means the matrix is stored one column at a
+time (since the first index moves to the next row as it changes). Thus Fortran
+is considered a Column-major language. C has just the opposite convention. In
+C, the last index changes most rapidly as one moves through the array as
+stored in memory. Thus C is a Row-major language. The matrix is stored by
+rows. Note that in both cases it presumes that the matrix convention for
+indexing is being used, i.e., for both Fortran and C, the first index is the
+row. Note this convention implies that the indexing convention is invariant
+and that the data order changes to keep that so.
+
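+NumPy makes this concrete through strides, the number of bytes to step in
+memory to advance each index. A quick sketch (the byte counts shown assume
+a platform whose default integer is 8 bytes)::
+
+    >>> a = np.arange(6).reshape(2, 3)   # C (row-major) order by default
+    >>> a.strides
+    (24, 8)
+    >>> np.asfortranarray(a).strides     # column-major: first index varies fastest
+    (8, 24)
+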
+But that's not the only way
+to look at it. Suppose one has large two-dimensional arrays (images or
+matrices) stored in data files. Suppose the data are stored by rows rather than
+by columns. If we are to preserve our index convention (whether matrix or
+image) that means that depending on the language we use, we may be forced to
+reorder the data if it is read into memory to preserve our indexing
+convention. For example if we read row-ordered data into memory without
+reordering, it will match the matrix indexing convention for C, but not for
+Fortran. Conversely, it will match the image indexing convention for Fortran,
+but not for C. For C, if one is using data stored in row order, and one wants
+to preserve the image index convention, the data must be reordered when
+reading into memory.
+
+In the end, what you do for Fortran or C depends on
+which is more important, not reordering data or preserving the indexing
+convention. For large images, reordering data is potentially expensive, and
+often the indexing convention is inverted to avoid that.
+
+The situation with
+numpy makes this issue yet more complicated. The internal machinery of numpy
+arrays is flexible enough to accept any ordering of indices. One can simply
+reorder indices by manipulating the internal stride information for arrays
+without reordering the data at all. NumPy will know how to map the new index
+order to the data without moving the data.
+
+So if this is true, why not choose
+the index order that matches what you most expect? In particular, why not define
+row-ordered images to use the image convention? (This is sometimes referred
+to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN'
+order options for array ordering in numpy.) The drawback of doing this is
+potential performance penalties. It's common to access the data sequentially,
+either implicitly in array operations or explicitly by looping over rows of an
+image. When that is done, the data will be accessed in non-optimal order.
+As the first index is incremented, what is actually happening is that elements
+spaced far apart in memory are being sequentially accessed, with usually poor
+memory access speeds. For example, consider a two dimensional image 'im'
+defined so that im[0, 10] represents the value at x=0, y=10. To be
+consistent with usual Python behavior, im[0] would then represent a column
+at x=0. Yet that data would be spread over the whole array since the data
+are stored in row order. Despite the flexibility of numpy's indexing, it
+can't really paper over the fact that basic operations are rendered
+inefficient because of data order, or that getting contiguous subarrays is
+still awkward (e.g., im[:, 0] for the first row, vs im[0]). Thus one can't
+use an idiom such as "for row in im"; "for col in im" does work, but
+doesn't yield contiguous column data.
+
+As it turns out, numpy is
+smart enough when dealing with ufuncs to determine which index is the most
+rapidly varying one in memory and uses that for the innermost loop. Thus for
+ufuncs there is no large intrinsic advantage to either approach in most cases.
+On the other hand, use of .flat with a Fortran-ordered array will lead to
+non-optimal memory access as adjacent elements in the flattened array (iterator,
+actually) are not contiguous in memory.
+
+Indeed, the fact is that Python
+indexing on lists and other sequences naturally leads to an outside-to-inside
+ordering (the first index gets the largest grouping, the next the next largest,
+and the last gets the smallest element). Since image data are normally stored
+by rows, this corresponds to position within rows being the last item indexed.
+
+If you do want to use Fortran ordering, realize that
+there are two approaches to consider: 1) accept that the first index is just not
+the most rapidly changing in memory and have all your I/O routines reorder
+your data when going from memory to disk or vice versa, or 2) use numpy's
+mechanism for mapping the first index to the most rapidly varying data. We
+recommend the former if possible. The disadvantage of the latter is that many
+of numpy's functions will yield arrays without Fortran ordering unless you are
+careful to use the 'order' keyword. Doing this would be highly inconvenient.
+
+Otherwise we recommend simply learning to reverse the usual order of indices
+when accessing elements of an array. Granted, it goes against the grain, but
+it is more in line with Python semantics and the natural order of the data.
+
+
diff --git a/doc/source/reference/random/generator.rst b/doc/source/reference/random/generator.rst
index a2cbb493a..8706e1de2 100644
--- a/doc/source/reference/random/generator.rst
+++ b/doc/source/reference/random/generator.rst
@@ -36,11 +36,105 @@ Simple random data
Permutations
============
+The methods for randomly permuting a sequence are
+
.. autosummary::
:toctree: generated/
~numpy.random.Generator.shuffle
~numpy.random.Generator.permutation
+ ~numpy.random.Generator.permuted
+
+The following table summarizes the behaviors of the methods.
+
++--------------+-------------------+------------------+
+| method | copy/in-place | axis handling |
++==============+===================+==================+
+| shuffle | in-place | as if 1d |
++--------------+-------------------+------------------+
+| permutation | copy | as if 1d |
++--------------+-------------------+------------------+
+| permuted | either (use 'out' | axis independent |
+| | for in-place) | |
++--------------+-------------------+------------------+
+
+The following subsections provide more details about the differences.
+
+In-place vs. copy
+~~~~~~~~~~~~~~~~~
+The main difference between `Generator.shuffle` and `Generator.permutation`
+is that `Generator.shuffle` operates in-place, while `Generator.permutation`
+returns a copy.
+
+By default, `Generator.permuted` returns a copy. To operate in-place with
+`Generator.permuted`, pass the same array as the first argument *and* as
+the value of the ``out`` parameter. For example,
+
+ >>> rg = np.random.default_rng()
+ >>> x = np.arange(0, 15).reshape(3, 5)
+ >>> x
+ array([[ 0, 1, 2, 3, 4],
+ [ 5, 6, 7, 8, 9],
+ [10, 11, 12, 13, 14]])
+ >>> y = rg.permuted(x, axis=1, out=x)
+ >>> x
+ array([[ 1, 0, 2, 4, 3], # random
+ [ 6, 7, 8, 9, 5],
+ [10, 14, 11, 13, 12]])
+
+Note that when ``out`` is given, the return value is ``out``:
+
+ >>> y is x
+ True
+
+Handling the ``axis`` parameter
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+An important distinction for these methods is how they handle the ``axis``
+parameter. Both `Generator.shuffle` and `Generator.permutation` treat the
+input as a one-dimensional sequence, and the ``axis`` parameter determines
+which dimension of the input array to use as the sequence. In the case of a
+two-dimensional array, ``axis=0`` will, in effect, rearrange the rows of the
+array, and ``axis=1`` will rearrange the columns. For example
+
+ >>> rg = np.random.default_rng()
+ >>> x = np.arange(0, 15).reshape(3, 5)
+ >>> x
+ array([[ 0, 1, 2, 3, 4],
+ [ 5, 6, 7, 8, 9],
+ [10, 11, 12, 13, 14]])
+ >>> rg.permutation(x, axis=1)
+ array([[ 1, 3, 2, 0, 4], # random
+ [ 6, 8, 7, 5, 9],
+ [11, 13, 12, 10, 14]])
+
+Note that the columns have been rearranged "in bulk": the values within
+each column have not changed.
+
+The method `Generator.permuted` treats the ``axis`` parameter similarly to
+how `numpy.sort` treats it. Each slice along the given axis is shuffled
+independently of the others. Compare the following example of the use of
+`Generator.permuted` to the above example of `Generator.permutation`:
+
+ >>> rg.permuted(x, axis=1)
+ array([[ 1, 0, 2, 4, 3], # random
+ [ 5, 7, 6, 9, 8],
+ [10, 14, 12, 13, 11]])
+
+In this example, the values within each row (i.e. the values along
+``axis=1``) have been shuffled independently. This is not a "bulk"
+shuffle of the columns.
+
+Shuffling non-NumPy sequences
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+`Generator.shuffle` works on non-NumPy sequences. That is, if it is given
+a sequence that is not a NumPy array, it shuffles that sequence in-place.
+For example,
+
+ >>> rg = np.random.default_rng()
+ >>> a = ['A', 'B', 'C', 'D', 'E']
+ >>> rg.shuffle(a) # shuffle the list in-place
+ >>> a
+ ['B', 'D', 'A', 'E', 'C'] # random
Distributions
=============
diff --git a/doc/source/reference/random/legacy.rst b/doc/source/reference/random/legacy.rst
index 91b91dac8..6cf4775b8 100644
--- a/doc/source/reference/random/legacy.rst
+++ b/doc/source/reference/random/legacy.rst
@@ -133,7 +133,7 @@ Many of the RandomState methods above are exported as functions in
- It uses a `RandomState` rather than the more modern `Generator`.
For backward compatible legacy reasons, we cannot change this. See
-`random-quick-start`.
+:ref:`random-quick-start`.
.. autosummary::
:toctree: generated/
diff --git a/doc/source/reference/routines.ctypeslib.rst b/doc/source/reference/routines.ctypeslib.rst
index 562638e9c..3a059f5d9 100644
--- a/doc/source/reference/routines.ctypeslib.rst
+++ b/doc/source/reference/routines.ctypeslib.rst
@@ -9,6 +9,5 @@ C-Types Foreign Function Interface (:mod:`numpy.ctypeslib`)
.. autofunction:: as_array
.. autofunction:: as_ctypes
.. autofunction:: as_ctypes_type
-.. autofunction:: ctypes_load_library
.. autofunction:: load_library
.. autofunction:: ndpointer
diff --git a/doc/source/reference/routines.financial.rst b/doc/source/reference/routines.financial.rst
deleted file mode 100644
index 5f426d7ab..000000000
--- a/doc/source/reference/routines.financial.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-Financial functions
-*******************
-
-.. currentmodule:: numpy
-
-Simple financial functions
---------------------------
-
-.. autosummary::
- :toctree: generated/
-
- fv
- pv
- npv
- pmt
- ppmt
- ipmt
- irr
- mirr
- nper
- rate
diff --git a/doc/source/reference/routines.rst b/doc/source/reference/routines.rst
index 7a9b97d77..5d6a823b7 100644
--- a/doc/source/reference/routines.rst
+++ b/doc/source/reference/routines.rst
@@ -28,7 +28,6 @@ indentation.
routines.emath
routines.err
routines.fft
- routines.financial
routines.functional
routines.help
routines.indexing
diff --git a/doc/source/user/basics.broadcasting.rst b/doc/source/user/basics.broadcasting.rst
index 00bf17a41..5eae3eb32 100644
--- a/doc/source/user/basics.broadcasting.rst
+++ b/doc/source/user/basics.broadcasting.rst
@@ -10,4 +10,178 @@ Broadcasting
:ref:`array-broadcasting-in-numpy`
An introduction to the concepts discussed here
-.. automodule:: numpy.doc.broadcasting
+.. note::
+ See `this article
+ <https://numpy.org/devdocs/user/theory.broadcasting.html>`_
+ for illustrations of broadcasting concepts.
+
+
+The term broadcasting describes how numpy treats arrays with different
+shapes during arithmetic operations. Subject to certain constraints,
+the smaller array is "broadcast" across the larger array so that they
+have compatible shapes. Broadcasting provides a means of vectorizing
+array operations so that looping occurs in C instead of Python. It does
+this without making needless copies of data and usually leads to
+efficient algorithm implementations. There are, however, cases where
+broadcasting is a bad idea because it leads to inefficient use of memory
+that slows computation.
+
+NumPy operations are usually done on pairs of arrays on an
+element-by-element basis. In the simplest case, the two arrays must
+have exactly the same shape, as in the following example:
+
+ >>> a = np.array([1.0, 2.0, 3.0])
+ >>> b = np.array([2.0, 2.0, 2.0])
+ >>> a * b
+ array([ 2., 4., 6.])
+
+NumPy's broadcasting rule relaxes this constraint when the arrays'
+shapes meet certain conditions. The simplest broadcasting example occurs
+when an array and a scalar value are combined in an operation:
+
+>>> a = np.array([1.0, 2.0, 3.0])
+>>> b = 2.0
+>>> a * b
+array([ 2., 4., 6.])
+
+The result is equivalent to the previous example where ``b`` was an array.
+We can think of the scalar ``b`` being *stretched* during the arithmetic
+operation into an array with the same shape as ``a``. The new elements in
+``b`` are simply copies of the original scalar. The stretching analogy is
+only conceptual. NumPy is smart enough to use the original scalar value
+without actually making copies so that broadcasting operations are as
+memory and computationally efficient as possible.
+
+The code in the second example is more efficient than that in the first
+because broadcasting moves less memory around during the multiplication
+(``b`` is a scalar rather than an array).
+
+General Broadcasting Rules
+==========================
+When operating on two arrays, NumPy compares their shapes element-wise.
+It starts with the trailing (i.e. rightmost) dimensions and works its
+way left. Two dimensions are compatible when
+
+1) they are equal, or
+2) one of them is 1
+
+If these conditions are not met, a
+``ValueError: operands could not be broadcast together`` exception is
+thrown, indicating that the arrays have incompatible shapes. Along each
+axis, the resulting array has the size that is not 1 among the inputs
+(i.e., the larger of the two sizes).
+
+Arrays do not need to have the same *number* of dimensions. For example,
+if you have a ``256x256x3`` array of RGB values, and you want to scale
+each color in the image by a different value, you can multiply the image
+by a one-dimensional array with 3 values. Lining up the sizes of the
+trailing axes of these arrays according to the broadcast rules shows that
+they are compatible::
+
+ Image (3d array): 256 x 256 x 3
+ Scale (1d array): 3
+ Result (3d array): 256 x 256 x 3
+
+When either of the dimensions compared is one, the other is
+used. In other words, dimensions with size 1 are stretched or "copied"
+to match the other.
+
+In the following example, both the ``A`` and ``B`` arrays have axes with
+length one that are expanded to a larger size during the broadcast
+operation::
+
+ A (4d array): 8 x 1 x 6 x 1
+ B (3d array): 7 x 1 x 5
+ Result (4d array): 8 x 7 x 6 x 5
+
+Here are some more examples::
+
+ A (2d array): 5 x 4
+ B (1d array): 1
+ Result (2d array): 5 x 4
+
+ A (2d array): 5 x 4
+ B (1d array): 4
+ Result (2d array): 5 x 4
+
+ A (3d array): 15 x 3 x 5
+ B (3d array): 15 x 1 x 5
+ Result (3d array): 15 x 3 x 5
+
+ A (3d array): 15 x 3 x 5
+ B (2d array): 3 x 5
+ Result (3d array): 15 x 3 x 5
+
+ A (3d array): 15 x 3 x 5
+ B (2d array): 3 x 1
+ Result (3d array): 15 x 3 x 5
+
+Here are examples of shapes that do not broadcast::
+
+ A (1d array): 3
+ B (1d array): 4 # trailing dimensions do not match
+
+ A (2d array): 2 x 1
+ B (3d array): 8 x 4 x 3 # second from last dimensions mismatched
+
+An example of broadcasting in practice::
+
+ >>> x = np.arange(4)
+ >>> xx = x.reshape(4,1)
+ >>> y = np.ones(5)
+ >>> z = np.ones((3,4))
+
+ >>> x.shape
+ (4,)
+
+ >>> y.shape
+ (5,)
+
+ >>> x + y
+ ValueError: operands could not be broadcast together with shapes (4,) (5,)
+
+ >>> xx.shape
+ (4, 1)
+
+ >>> y.shape
+ (5,)
+
+ >>> (xx + y).shape
+ (4, 5)
+
+ >>> xx + y
+ array([[ 1., 1., 1., 1., 1.],
+ [ 2., 2., 2., 2., 2.],
+ [ 3., 3., 3., 3., 3.],
+ [ 4., 4., 4., 4., 4.]])
+
+ >>> x.shape
+ (4,)
+
+ >>> z.shape
+ (3, 4)
+
+ >>> (x + z).shape
+ (3, 4)
+
+ >>> x + z
+ array([[ 1., 2., 3., 4.],
+ [ 1., 2., 3., 4.],
+ [ 1., 2., 3., 4.]])
+
+Broadcasting provides a convenient way of taking the outer product (or
+any other outer operation) of two arrays. The following example shows an
+outer addition operation of two 1-d arrays::
+
+ >>> a = np.array([0.0, 10.0, 20.0, 30.0])
+ >>> b = np.array([1.0, 2.0, 3.0])
+ >>> a[:, np.newaxis] + b
+ array([[ 1., 2., 3.],
+ [ 11., 12., 13.],
+ [ 21., 22., 23.],
+ [ 31., 32., 33.]])
+
+Here the ``newaxis`` index operator inserts a new axis into ``a``,
+making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array
+with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
+
+
diff --git a/doc/source/user/basics.byteswapping.rst b/doc/source/user/basics.byteswapping.rst
index 4b1008df3..fecdb9ee8 100644
--- a/doc/source/user/basics.byteswapping.rst
+++ b/doc/source/user/basics.byteswapping.rst
@@ -2,4 +2,152 @@
Byte-swapping
*************
-.. automodule:: numpy.doc.byteswapping
+Introduction to byte ordering and ndarrays
+==========================================
+
+The ``ndarray`` is an object that provides a Python array interface to data
+in memory.
+
+It often happens that the memory that you want to view with an array is
+not of the same byte ordering as the computer on which you are running
+Python.
+
+For example, I might be working on a computer with a little-endian CPU -
+such as an Intel Pentium - but I have loaded some data from a file
+written by a computer that is big-endian. Let's say I have loaded 4
+bytes from a file written by a Sun (big-endian) computer. I know that
+these 4 bytes represent two 16-bit integers. On a big-endian machine, a
+two-byte integer is stored with the Most Significant Byte (MSB) first,
+and then the Least Significant Byte (LSB). Thus the bytes are, in memory order:
+
+#. MSB integer 1
+#. LSB integer 1
+#. MSB integer 2
+#. LSB integer 2
+
+Let's say the two integers were in fact 1 and 770. Because 770 = 256 *
+3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2.
+The bytes I have loaded from the file would have these contents:
+
+>>> big_end_buffer = bytearray([0,1,3,2])
+>>> big_end_buffer
+bytearray(b'\x00\x01\x03\x02')
+
+We might want to use an ``ndarray`` to access these integers. In that
+case, we can create an array around this memory, and tell numpy that
+there are two integers, and that they are 16 bit and big-endian:
+
+>>> import numpy as np
+>>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_buffer)
+>>> big_end_arr[0]
+1
+>>> big_end_arr[1]
+770
+
+Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian'
+(``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For
+example, if our data represented a single unsigned 4-byte little-endian
+integer, the dtype string would be ``<u4``.
+
+In fact, why don't we try that?
+
+>>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_buffer)
+>>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3
+True
+
+Returning to our ``big_end_arr`` - in this case our underlying data is
+big-endian (data endianness) and we've set the dtype to match (the dtype
+is also big-endian). However, sometimes you need to flip these around.
+
+.. warning::
+
+ Scalars currently do not include byte order information, so extracting
+ a scalar from an array will return an integer in native byte order.
+ Hence:
+
+ >>> big_end_arr[0].dtype.byteorder == little_end_u4[0].dtype.byteorder
+ True
+
+Changing byte ordering
+======================
+
+As you can imagine from the introduction, there are two ways you can
+affect the relationship between the byte ordering of the array and the
+underlying memory it is looking at:
+
+* Change the byte-ordering information in the array dtype so that it
+ interprets the underlying data as being in a different byte order.
+  This is the role of ``arr.newbyteorder()``.
+* Change the byte-ordering of the underlying data, leaving the dtype
+ interpretation as it was. This is what ``arr.byteswap()`` does.
+
+The common situations in which you need to change byte ordering are:
+
+#. Your data and dtype endianness don't match, and you want to change
+ the dtype so that it matches the data.
+#. Your data and dtype endianness don't match, and you want to swap the
+   data so that they match the dtype.
+#. Your data and dtype endianness match, but you want the data swapped
+   and the dtype to reflect this.
+
+Data and dtype endianness don't match, change dtype to match data
+-----------------------------------------------------------------
+
+We make something where they don't match:
+
+>>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_buffer)
+>>> wrong_end_dtype_arr[0]
+256
+
+The obvious fix for this situation is to change the dtype so it gives
+the correct endianness:
+
+>>> fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder()
+>>> fixed_end_dtype_arr[0]
+1
+
+Note the array has not changed in memory:
+
+>>> fixed_end_dtype_arr.tobytes() == big_end_buffer
+True
+
+Data and dtype endianness don't match, change data to match dtype
+-----------------------------------------------------------------
+
+You might want to do this if you need the data in memory to be a certain
+ordering. For example you might be writing the memory out to a file
+that needs a certain byte ordering.
+
+>>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap()
+>>> fixed_end_mem_arr[0]
+1
+
+Now the array *has* changed in memory:
+
+>>> fixed_end_mem_arr.tobytes() == big_end_buffer
+False
+
+Data and dtype endianness match, swap data and dtype
+----------------------------------------------------
+
+You may have a correctly specified array dtype, but you need the array
+to have the opposite byte order in memory, and you want the dtype to
+match so the array values make sense. In this case you just do both of
+the previous operations:
+
+>>> swapped_end_arr = big_end_arr.byteswap().newbyteorder()
+>>> swapped_end_arr[0]
+1
+>>> swapped_end_arr.tobytes() == big_end_buffer
+False
+
+Casting the data to a specific dtype and byte ordering can be achieved
+more easily with the ndarray astype method:
+
+>>> swapped_end_arr = big_end_arr.astype('<i2')
+>>> swapped_end_arr[0]
+1
+>>> swapped_end_arr.tobytes() == big_end_buffer
+False
+
+
diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst
index b3fa81017..671a8ec59 100644
--- a/doc/source/user/basics.creation.rst
+++ b/doc/source/user/basics.creation.rst
@@ -6,4 +6,141 @@ Array creation
.. seealso:: :ref:`Array creation routines <routines.array-creation>`
-.. automodule:: numpy.doc.creation
+Introduction
+============
+
+There are 5 general mechanisms for creating arrays:
+
+1) Conversion from other Python structures (e.g., lists, tuples)
+2) Intrinsic numpy array creation functions (e.g., arange, ones, zeros,
+ etc.)
+3) Reading arrays from disk, either from standard or custom formats
+4) Creating arrays from raw bytes through the use of strings or buffers
+5) Use of special library functions (e.g., random)
+
+This section will not cover means of replicating, joining, or otherwise
+expanding or mutating existing arrays. Nor will it cover creating object
+arrays or structured arrays. Both of those are covered in their own sections.
+
+Converting Python array_like Objects to NumPy Arrays
+====================================================
+
+In general, numerical data arranged in an array-like structure in Python can
+be converted to arrays through the use of the array() function. The most
+obvious examples are lists and tuples. See the documentation for array() for
+details on its use. Some objects may support the array-protocol and allow
+conversion to arrays this way. A simple way to find out if an object can be
+converted to a numpy array using array() is to try it interactively and
+see if it works! (The Python Way).
+
+Examples: ::
+
+ >>> x = np.array([2, 3, 1, 0])
+ >>> x = np.array([[1, 2.0], [0, 0], (1+1j, 3.)]) # note mix of tuple and lists, and types
+ >>> x = np.array([[1.+0.j, 2.+0.j], [0.+0.j, 0.+0.j], [1.+1.j, 3.+0.j]])
+
+Intrinsic NumPy Array Creation
+==============================
+
+NumPy has built-in functions for creating arrays from scratch:
+
+zeros(shape) will create an array filled with 0 values with the specified
+shape. The default dtype is float64. ::
+
+  >>> np.zeros((2, 3))
+  array([[ 0., 0., 0.],
+         [ 0., 0., 0.]])
+
+ones(shape) will create an array filled with 1 values. It is identical to
+zeros in all other respects.
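+
+For example: ::
+
+  >>> np.ones((2, 3))
+  array([[ 1., 1., 1.],
+         [ 1., 1., 1.]])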
+
+arange() will create arrays with regularly incrementing values. Check the
+docstring for complete information on the various ways it can be used. A few
+examples will be given here: ::
+
+ >>> np.arange(10)
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.arange(2, 10, dtype=float)
+ array([ 2., 3., 4., 5., 6., 7., 8., 9.])
+ >>> np.arange(2, 3, 0.1)
+ array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
+
+Note that there are some subtleties regarding the last usage that the user
+should be aware of, described in the arange docstring: with a non-integer
+step, floating point rounding can affect the length of the result.
+
+linspace() will create arrays with a specified number of elements, and
+spaced equally between the specified beginning and end values. For
+example: ::
+
+ >>> np.linspace(1., 4., 6)
+ array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
+
+The advantage of this creation function is that one can guarantee the
+number of elements and the starting and end point, which arange()
+generally will not do for arbitrary start, stop, and step values.
+
+indices() will create a set of arrays (stacked as a one-higher dimensioned
+array), one per dimension with each representing variation in that dimension.
+An example illustrates much better than a verbal description: ::
+
+  >>> np.indices((3,3))
+  array([[[0, 0, 0],
+          [1, 1, 1],
+          [2, 2, 2]],
+
+         [[0, 1, 2],
+          [0, 1, 2],
+          [0, 1, 2]]])
+
+This is particularly useful for evaluating functions of multiple dimensions on
+a regular grid.
+
+Reading Arrays From Disk
+========================
+
+This is presumably the most common case of large array creation. The details,
+of course, depend greatly on the format of data on disk and so this section
+can only give general pointers on how to handle various formats.
+
+Standard Binary Formats
+-----------------------
+
+Various fields have standard formats for array data. The following lists the
+ones with known python libraries that read them and return numpy arrays (there
+may be others for which it is possible to read and convert to numpy arrays, so
+check the last section as well)
+::
+
+ HDF5: h5py
+ FITS: Astropy
+
+Examples of formats that cannot be read directly but are not hard to convert
+are those supported by libraries like PIL (able to read and write many image
+formats such as jpg, png, etc.).
+
+Common ASCII Formats
+--------------------
+
+Comma Separated Value files (CSV) are widely used (and an export and import
+option for programs like Excel). There are a number of ways of reading these
+files in Python. There are CSV functions in Python and functions in pylab
+(part of matplotlib).
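+
+As one simple approach, numpy's own genfromtxt() (covered in more detail
+elsewhere in this guide) can read such data; a minimal sketch using an
+in-memory file: ::
+
+ >>> from io import StringIO
+ >>> np.genfromtxt(StringIO("1, 2, 3\n4, 5, 6"), delimiter=",")
+ array([[1., 2., 3.],
+        [4., 5., 6.]])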
+
+More generic ascii files can be read using the io package in scipy.
+
+Custom Binary Formats
+---------------------
+
+There are a variety of approaches one can use. If the file has a relatively
+simple format then one can write a simple I/O library and use the numpy
+fromfile() function and .tofile() method to read and write numpy arrays
+directly (mind your byteorder though!). If a good C or C++ library exists that
+reads the data, one can wrap that library with a variety of techniques, though
+that certainly is much more work and requires significantly more advanced
+knowledge to interface with C or C++.
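+
+A minimal sketch of such a round-trip, writing to a file name of our choosing
+(note that the file holds raw bytes with no metadata, so the dtype must be
+supplied again when reading back): ::
+
+ >>> a = np.arange(4, dtype=np.int32)
+ >>> a.tofile('data.bin')
+ >>> np.fromfile('data.bin', dtype=np.int32)
+ array([0, 1, 2, 3], dtype=int32)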
+
+Use of Special Libraries
+========================
+
+There are libraries that can be used to generate arrays for special purposes
+and it isn't possible to enumerate all of them. The most common examples are
+the many array generation functions in random that can generate arrays of
+random values, and some utility functions to generate special matrices (e.g.,
+diagonal).
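+
+For example: ::
+
+ >>> rng = np.random.default_rng()
+ >>> rng.random((2, 2)).shape  # random values, so the output varies run to run
+ (2, 2)
+ >>> np.diag([1, 2, 3])
+ array([[1, 0, 0],
+        [0, 2, 0],
+        [0, 0, 3]])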
+
+
diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst
index f7b8da262..c0e1cf9ba 100644
--- a/doc/source/user/basics.dispatch.rst
+++ b/doc/source/user/basics.dispatch.rst
@@ -4,5 +4,269 @@
Writing custom array containers
*******************************
-.. automodule:: numpy.doc.dispatch
+Numpy's dispatch mechanism, introduced in numpy version 1.16, is the
+recommended approach for writing custom N-dimensional array containers that are
+compatible with the numpy API and provide custom implementations of numpy
+functionality. Applications include `dask <http://dask.pydata.org>`_ arrays, an
+N-dimensional array distributed across multiple nodes, and `cupy
+<https://docs-cupy.chainer.org/en/stable/>`_ arrays, an N-dimensional array on
+a GPU.
+
+To get a feel for writing custom array containers, we'll begin with a simple
+example that has rather narrow utility but illustrates the concepts involved.
+
+>>> import numpy as np
+>>> class DiagonalArray:
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+
+Our custom array can be instantiated like:
+
+>>> arr = DiagonalArray(5, 1)
+>>> arr
+DiagonalArray(N=5, value=1)
+
+We can convert to a numpy array using :func:`numpy.array` or
+:func:`numpy.asarray`, which will call its ``__array__`` method to obtain a
+standard ``numpy.ndarray``.
+
+>>> np.asarray(arr)
+array([[1., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 0.],
+ [0., 0., 0., 1., 0.],
+ [0., 0., 0., 0., 1.]])
+
+If we operate on ``arr`` with a numpy function, numpy will again use the
+``__array__`` interface to convert it to an array and then apply the function
+in the usual way.
+
+>>> np.multiply(arr, 2)
+array([[2., 0., 0., 0., 0.],
+ [0., 2., 0., 0., 0.],
+ [0., 0., 2., 0., 0.],
+ [0., 0., 0., 2., 0.],
+ [0., 0., 0., 0., 2.]])
+
+
+Notice that the return type is a standard ``numpy.ndarray``.
+
+>>> type(np.multiply(arr, 2))
+<class 'numpy.ndarray'>
+
+How can we pass our custom array type through this function? Numpy allows a
+class to indicate that it would like to handle computations in a custom-defined
+way through the interfaces ``__array_ufunc__`` and ``__array_function__``. Let's
+take them one at a time, starting with ``__array_ufunc__``. This method covers
+:ref:`ufuncs`, a class of functions that includes, for example,
+:func:`numpy.multiply` and :func:`numpy.sin`.
+
+The ``__array_ufunc__`` method receives:
+
+- ``ufunc``, a function like ``numpy.multiply``
+- ``method``, a string, differentiating between ``numpy.multiply(...)`` and
+ variants like ``numpy.multiply.outer``, ``numpy.multiply.accumulate``, and so
+ on. For the common case, ``numpy.multiply(...)``, ``method == '__call__'``.
+- ``inputs``, which could be a mixture of different types
+- ``kwargs``, keyword arguments passed to the function
+
+For this example we will only handle the method ``__call__``:
+
+>>> from numbers import Number
+>>> class DiagonalArray:
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+... if method == '__call__':
+... N = None
+... scalars = []
+... for input in inputs:
+... if isinstance(input, Number):
+... scalars.append(input)
+... elif isinstance(input, self.__class__):
+... scalars.append(input._i)
+... if N is not None:
+... if N != self._N:
+... raise TypeError("inconsistent sizes")
+... else:
+... N = self._N
+... else:
+... return NotImplemented
+... return self.__class__(N, ufunc(*scalars, **kwargs))
+... else:
+... return NotImplemented
+
+Now our custom array type passes through numpy functions.
+
+>>> arr = DiagonalArray(5, 1)
+>>> np.multiply(arr, 3)
+DiagonalArray(N=5, value=3)
+>>> np.add(arr, 3)
+DiagonalArray(N=5, value=4)
+>>> np.sin(arr)
+DiagonalArray(N=5, value=0.8414709848078965)
+
+At this point ``arr + 3`` does not work.
+
+>>> arr + 3
+TypeError: unsupported operand type(s) for +: 'DiagonalArray' and 'int'
+
+To support it, we need to define the Python interfaces ``__add__``, ``__lt__``,
+and so on to dispatch to the corresponding ufunc. We can achieve this
+conveniently by inheriting from the mixin
+:class:`~numpy.lib.mixins.NDArrayOperatorsMixin`.
+
+>>> import numpy.lib.mixins
+>>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+... if method == '__call__':
+... N = None
+... scalars = []
+... for input in inputs:
+... if isinstance(input, Number):
+... scalars.append(input)
+... elif isinstance(input, self.__class__):
+... scalars.append(input._i)
+... if N is not None:
+... if N != self._N:
+... raise TypeError("inconsistent sizes")
+... else:
+... N = self._N
+... else:
+... return NotImplemented
+... return self.__class__(N, ufunc(*scalars, **kwargs))
+... else:
+... return NotImplemented
+
+>>> arr = DiagonalArray(5, 1)
+>>> arr + 3
+DiagonalArray(N=5, value=4)
+>>> arr > 0
+DiagonalArray(N=5, value=True)
+
+Now let's tackle ``__array_function__``. We'll create a dict that maps numpy
+functions to our custom variants.
+
+>>> HANDLED_FUNCTIONS = {}
+>>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+... if method == '__call__':
+... N = None
+... scalars = []
+... for input in inputs:
+... # In this case we accept only scalar numbers or DiagonalArrays.
+... if isinstance(input, Number):
+... scalars.append(input)
+... elif isinstance(input, self.__class__):
+... scalars.append(input._i)
+... if N is not None:
+... if N != self._N:
+... raise TypeError("inconsistent sizes")
+... else:
+... N = self._N
+... else:
+... return NotImplemented
+... return self.__class__(N, ufunc(*scalars, **kwargs))
+... else:
+... return NotImplemented
+... def __array_function__(self, func, types, args, kwargs):
+... if func not in HANDLED_FUNCTIONS:
+... return NotImplemented
+... # Note: this allows subclasses that don't override
+... # __array_function__ to handle DiagonalArray objects.
+... if not all(issubclass(t, self.__class__) for t in types):
+... return NotImplemented
+... return HANDLED_FUNCTIONS[func](*args, **kwargs)
+...
+
+A convenient pattern is to define a decorator ``implements`` that can be used
+to add functions to ``HANDLED_FUNCTIONS``.
+
+>>> def implements(np_function):
+... "Register an __array_function__ implementation for DiagonalArray objects."
+... def decorator(func):
+... HANDLED_FUNCTIONS[np_function] = func
+... return func
+... return decorator
+...
+
+Now we write implementations of numpy functions for ``DiagonalArray``.
+For completeness, to support the usage ``arr.sum()`` add a method ``sum`` that
+calls ``numpy.sum(self)``, and the same for ``mean``; a sketch of these
+forwarding methods follows the examples below.
+
+>>> @implements(np.sum)
+... def sum(arr):
+... "Implementation of np.sum for DiagonalArray objects"
+... return arr._i * arr._N
+...
+>>> @implements(np.mean)
+... def mean(arr):
+... "Implementation of np.mean for DiagonalArray objects"
+... return arr._i / arr._N
+...
+>>> arr = DiagonalArray(5, 1)
+>>> np.sum(arr)
+5
+>>> np.mean(arr)
+0.2
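+
+A minimal sketch of the forwarding methods mentioned above (attached from
+outside the class here only for brevity):
+
+>>> DiagonalArray.sum = lambda self: np.sum(self)
+>>> DiagonalArray.mean = lambda self: np.mean(self)
+>>> arr.sum()
+5
+>>> arr.mean()
+0.2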
+
+If the user tries to use any numpy functions not included in
+``HANDLED_FUNCTIONS``, a ``TypeError`` will be raised by numpy, indicating that
+this operation is not supported. For example, concatenating two
+``DiagonalArrays`` does not produce another diagonal array, so it is not
+supported.
+
+>>> np.concatenate([arr, arr])
+TypeError: no implementation found for 'numpy.concatenate' on types that implement __array_function__: [<class '__main__.DiagonalArray'>]
+
+Additionally, our implementations of ``sum`` and ``mean`` do not accept the
+optional arguments that numpy's implementation does.
+
+>>> np.sum(arr, axis=0)
+TypeError: sum() got an unexpected keyword argument 'axis'
+
+The user always has the option of converting to a normal ``numpy.ndarray`` with
+:func:`numpy.asarray` and using standard numpy from there.
+
+>>> np.concatenate([np.asarray(arr), np.asarray(arr)])
+array([[1., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 0.],
+ [0., 0., 0., 1., 0.],
+ [0., 0., 0., 0., 1.],
+ [1., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 0.],
+ [0., 0., 0., 1., 0.],
+ [0., 0., 0., 0., 1.]])
+
+Refer to the `dask source code <https://github.com/dask/dask>`_ and
+`cupy source code <https://github.com/cupy/cupy>`_ for more fully-worked
+examples of custom array containers.
+
+See also :doc:`NEP 18<neps:nep-0018-array-function-protocol>`.
diff --git a/doc/source/user/basics.indexing.rst b/doc/source/user/basics.indexing.rst
index 0dca4b884..9545bb78c 100644
--- a/doc/source/user/basics.indexing.rst
+++ b/doc/source/user/basics.indexing.rst
@@ -10,4 +10,454 @@ Indexing
:ref:`Indexing routines <routines.indexing>`
-.. automodule:: numpy.doc.indexing
+Array indexing refers to any use of the square brackets ([]) to index
+array values. There are many options to indexing, which give numpy
+indexing great power, but with power comes some complexity and the
+potential for confusion. This section is just an overview of the
+various options and issues related to indexing. Aside from single
+element indexing, the details on most of these options are to be
+found in related sections.
+
+Assignment vs referencing
+=========================
+
+Most of the following examples show the use of indexing when
+referencing data in an array. The examples work just as well
+when assigning to an array. See the section at the end for
+specific examples and explanations on how assignments work.
+
+Single element indexing
+=======================
+
+Single element indexing for a 1-D array is what one expects. It works
+exactly like that for other standard Python sequences. It is 0-based,
+and accepts negative indices for indexing from the end of the array. ::
+
+ >>> x = np.arange(10)
+ >>> x[2]
+ 2
+ >>> x[-2]
+ 8
+
+Unlike lists and tuples, numpy arrays support multidimensional indexing
+for multidimensional arrays. That means that it is not necessary to
+separate each dimension's index into its own set of square brackets. ::
+
+ >>> x.shape = (2,5) # now x is 2-dimensional
+ >>> x[1,3]
+ 8
+ >>> x[1,-1]
+ 9
+
+Note that if one indexes a multidimensional array with fewer indices
+than dimensions, one gets a subdimensional array. For example: ::
+
+ >>> x[0]
+ array([0, 1, 2, 3, 4])
+
+That is, each index specified selects the array corresponding to the
+rest of the dimensions selected. In the above example, choosing 0
+means that the remaining dimension of length 5 is being left unspecified,
+and that what is returned is an array of that dimensionality and size.
+It must be noted that the returned array is not a copy of the original,
+but points to the same values in memory as does the original array.
+In this case, the 1-D array at the first position (0) is returned.
+So using a single index on the returned array results in a single
+element being returned. That is: ::
+
+ >>> x[0][2]
+ 2
+
+So note that ``x[0, 2] == x[0][2]``, though the second case is less
+efficient, as a new temporary array is created after the first index
+that is subsequently indexed by 2.
+
+A note to those used to IDL or Fortran memory order as it relates to
+indexing: NumPy uses C-order indexing. That means that the last
+index usually represents the most rapidly changing memory location,
+unlike Fortran or IDL, where the first index represents the most
+rapidly changing location in memory. This difference represents a
+great potential for confusion.
+
+Other indexing options
+======================
+
+It is possible to slice and stride arrays to extract arrays of the
+same number of dimensions, but of different sizes than the original.
+The slicing and striding works exactly the same way it does for lists
+and tuples except that they can be applied to multiple dimensions as
+well. A few examples illustrate this best: ::
+
+ >>> x = np.arange(10)
+ >>> x[2:5]
+ array([2, 3, 4])
+ >>> x[:-7]
+ array([0, 1, 2])
+ >>> x[1:7:2]
+ array([1, 3, 5])
+ >>> y = np.arange(35).reshape(5,7)
+ >>> y[1:5:2,::3]
+ array([[ 7, 10, 13],
+ [21, 24, 27]])
+
+Note that slices of arrays do not copy the internal array data but
+only produce new views of the original data. This is different from
+list or tuple slicing and an explicit ``copy()`` is recommended if
+the original data is not required anymore.
+
+It is possible to index arrays with other arrays for the purposes of
+selecting lists of values out of arrays into new arrays. There are
+two different ways of accomplishing this. One uses one or more arrays
+of index values. The other involves giving a boolean array of the proper
+shape to indicate the values to be selected. Index arrays are a very
+powerful tool that allow one to avoid looping over individual elements in
+arrays and thus greatly improve performance.
+
+It is possible to use special features to effectively increase the
+number of dimensions in an array through indexing so the resulting
+array acquires the shape needed for use in an expression or with a
+specific function.
+
+Index arrays
+============
+
+NumPy arrays may be indexed with other arrays (or any other sequence-
+like object that can be converted to an array, such as lists, with the
+exception of tuples; see the end of this document for why this is). The
+use of index arrays ranges from simple, straightforward cases to
+complex, hard-to-understand cases. For all cases of index arrays, what
+is returned is a copy of the original data, not a view as one gets for
+slices.
+
+Index arrays must be of integer type. Each value in the index array indicates
+which element of the indexed array to use in place of the index. To
+illustrate: ::
+
+ >>> x = np.arange(10,1,-1)
+ >>> x
+ array([10, 9, 8, 7, 6, 5, 4, 3, 2])
+ >>> x[np.array([3, 3, 1, 8])]
+ array([7, 7, 9, 2])
+
+
+The index array consisting of the values 3, 3, 1 and 8 produces an
+array of length 4 (the same as the index array) where each index is
+replaced by the element of the indexed array at that position.
+
+Negative values are permitted and work as they do with single indices
+or slices: ::
+
+ >>> x[np.array([3,3,-3,8])]
+ array([7, 7, 4, 2])
+
+It is an error to have index values out of bounds: ::
+
+ >>> x[np.array([3, 3, 20, 8])]
+ IndexError: index 20 is out of bounds for axis 0 with size 9
+
+Generally speaking, what is returned when index arrays are used is
+an array with the same shape as the index array, but with the type
+and values of the array being indexed. As an example, we can use a
+multidimensional index array instead: ::
+
+ >>> x[np.array([[1,1],[2,3]])]
+ array([[9, 9],
+ [8, 7]])
+
+Indexing Multi-dimensional arrays
+=================================
+
+Things become more complex when multidimensional arrays are indexed,
+particularly with multidimensional index arrays. These tend to be
+more unusual uses, but they are permitted, and they are useful for some
+problems. We'll start with the simplest multidimensional case (using
+the array y from the previous examples): ::
+
+ >>> y[np.array([0,2,4]), np.array([0,1,2])]
+ array([ 0, 15, 30])
+
+In this case, if the index arrays have a matching shape, and there is
+an index array for each dimension of the array being indexed, the
+resultant array has the same shape as the index arrays, and the values
+correspond to the index set for each position in the index arrays. In
+this example, the first index value is 0 for both index arrays, and
+thus the first value of the resultant array is y[0,0]. The next value
+is y[2,1], and the last is y[4,2].
+
+If the index arrays do not have the same shape, there is an attempt to
+broadcast them to the same shape. If they cannot be broadcast to the
+same shape, an exception is raised: ::
+
+ >>> y[np.array([0,2,4]), np.array([0,1])]
+ IndexError: shape mismatch: indexing arrays could not be broadcast
+ together with shapes (3,) (2,)
+
+The broadcasting mechanism permits index arrays to be combined with
+scalars for other indices. The effect is that the scalar value is used
+for all the corresponding values of the index arrays: ::
+
+ >>> y[np.array([0,2,4]), 1]
+ array([ 1, 15, 29])
+
+Jumping to the next level of complexity, it is possible to only
+partially index an array with index arrays. It takes a bit of thought
+to understand what happens in such cases. For example if we just use
+one index array with y: ::
+
+ >>> y[np.array([0,2,4])]
+ array([[ 0, 1, 2, 3, 4, 5, 6],
+ [14, 15, 16, 17, 18, 19, 20],
+ [28, 29, 30, 31, 32, 33, 34]])
+
+What results is the construction of a new array where each value of
+the index array selects one row from the array being indexed and the
+resultant array has the resulting shape (number of index elements,
+size of row).
+
+An example of where this may be useful is for a color lookup table
+where we want to map the values of an image into RGB triples for
+display. The lookup table could have a shape (nlookup, 3). Indexing
+such an array with an image with shape (ny, nx) with dtype=np.uint8
+(or any integer type so long as values are within the bounds of the
+lookup table) will result in an array of shape (ny, nx, 3) where a
+triple of RGB values is associated with each pixel location.
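+
+A brief sketch of this pattern, using a hypothetical 4-entry lookup table: ::
+
+ >>> lut = np.array([[0, 0, 0], [255, 0, 0],
+ ...                 [0, 255, 0], [0, 0, 255]], dtype=np.uint8)  # (nlookup, 3)
+ >>> image = np.array([[0, 1], [2, 3]], dtype=np.uint8)          # (ny, nx)
+ >>> lut[image].shape  # one RGB triple per pixel
+ (2, 2, 3)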
+
+In general, the shape of the resultant array will be the concatenation
+of the shape of the index array (or the shape that all the index arrays
+were broadcast to) with the shape of any unused dimensions (those not
+indexed) in the array being indexed.
+
+Boolean or "mask" index arrays
+==============================
+
+Boolean arrays used as indices are treated in a different manner
+entirely than index arrays. Boolean arrays must be of the same shape
+as the initial dimensions of the array being indexed. In the
+most straightforward case, the boolean array has the same shape: ::
+
+ >>> b = y>20
+ >>> y[b]
+ array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
+
+Unlike in the case of integer index arrays, in the boolean case, the
+result is a 1-D array containing all the elements in the indexed array
+corresponding to all the true elements in the boolean array. The
+elements in the indexed array are always iterated and returned in
+:term:`row-major` (C-style) order. The result is also identical to
+``y[np.nonzero(b)]``. As with index arrays, what is returned is a copy
+of the data, not a view as one gets with slices.
+
+The result will be multidimensional if y has more dimensions than b.
+For example: ::
+
+ >>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y
+ array([False, False, False, True, True])
+ >>> y[b[:,5]]
+ array([[21, 22, 23, 24, 25, 26, 27],
+ [28, 29, 30, 31, 32, 33, 34]])
+
+Here the 4th and 5th rows are selected from the indexed array and
+combined to make a 2-D array.
+
+In general, when the boolean array has fewer dimensions than the array
+being indexed, this is equivalent to y[b, ...], which means
+y is indexed by b followed by as many : as are needed to fill
+out the rank of y.
+Thus the shape of the result is one dimension containing the number
+of True elements of the boolean array, followed by the remaining
+dimensions of the array being indexed.
+
+For example, using a 2-D boolean array of shape (2,3)
+with four True elements to select rows from a 3-D array of shape
+(2,3,5) results in a 2-D result of shape (4,5): ::
+
+ >>> x = np.arange(30).reshape(2,3,5)
+ >>> x
+ array([[[ 0, 1, 2, 3, 4],
+ [ 5, 6, 7, 8, 9],
+ [10, 11, 12, 13, 14]],
+ [[15, 16, 17, 18, 19],
+ [20, 21, 22, 23, 24],
+ [25, 26, 27, 28, 29]]])
+ >>> b = np.array([[True, True, False], [False, True, True]])
+ >>> x[b]
+ array([[ 0, 1, 2, 3, 4],
+ [ 5, 6, 7, 8, 9],
+ [20, 21, 22, 23, 24],
+ [25, 26, 27, 28, 29]])
+
+For further details, consult the numpy reference documentation on array indexing.
+
+Combining index arrays with slices
+==================================
+
+Index arrays may be combined with slices. For example: ::
+
+ >>> y[np.array([0, 2, 4]), 1:3]
+ array([[ 1, 2],
+ [15, 16],
+ [29, 30]])
+
+In effect, the slice and index array operations are independent.
+The slice operation extracts columns with index 1 and 2,
+(i.e. the 2nd and 3rd columns),
+followed by the index array operation which extracts rows with
+index 0, 2 and 4 (i.e the first, third and fifth rows).
+
+This is equivalent to::
+
+ >>> y[:, 1:3][np.array([0, 2, 4]), :]
+ array([[ 1, 2],
+ [15, 16],
+ [29, 30]])
+
+Likewise, slicing can be combined with broadcasted boolean indices: ::
+
+ >>> b = y > 20
+ >>> b
+ array([[False, False, False, False, False, False, False],
+ [False, False, False, False, False, False, False],
+ [False, False, False, False, False, False, False],
+ [ True, True, True, True, True, True, True],
+ [ True, True, True, True, True, True, True]])
+ >>> y[b[:,5],1:3]
+ array([[22, 23],
+ [29, 30]])
+
+Structural indexing tools
+=========================
+
+To facilitate easy matching of array shapes with expressions and in
+assignments, the np.newaxis object can be used within array indices
+to add new dimensions with a size of 1. For example: ::
+
+ >>> y.shape
+ (5, 7)
+ >>> y[:,np.newaxis,:].shape
+ (5, 1, 7)
+
+Note that there are no new elements in the array, just that the
+dimensionality is increased. This can be handy to combine two
+arrays in a way that otherwise would require explicit reshaping
+operations. For example: ::
+
+ >>> x = np.arange(5)
+ >>> x[:,np.newaxis] + x[np.newaxis,:]
+ array([[0, 1, 2, 3, 4],
+ [1, 2, 3, 4, 5],
+ [2, 3, 4, 5, 6],
+ [3, 4, 5, 6, 7],
+ [4, 5, 6, 7, 8]])
+
+The ellipsis syntax may be used to indicate selecting in full any
+remaining unspecified dimensions. For example: ::
+
+ >>> z = np.arange(81).reshape(3,3,3,3)
+ >>> z[1,...,2]
+ array([[29, 32, 35],
+ [38, 41, 44],
+ [47, 50, 53]])
+
+This is equivalent to: ::
+
+ >>> z[1,:,:,2]
+ array([[29, 32, 35],
+ [38, 41, 44],
+ [47, 50, 53]])
+
+Assigning values to indexed arrays
+==================================
+
+As mentioned, one can select a subset of an array to assign to using
+a single index, slices, and index and mask arrays. The value being
+assigned to the indexed array must be shape consistent (the same shape
+or broadcastable to the shape the index produces). For example, it is
+permitted to assign a constant to a slice: ::
+
+ >>> x = np.arange(10)
+ >>> x[2:7] = 1
+
+or an array of the right size: ::
+
+ >>> x[2:7] = np.arange(5)
+
+Note that assignments may result in loss of information when assigning
+higher precision types to lower ones (like floats to ints), or even in
+exceptions (assigning complex values to floats or ints): ::
+
+ >>> x[1] = 1.2
+ >>> x[1]
+ 1
+ >>> x[1] = 1.2j
+ TypeError: can't convert complex to int
+
+
+Unlike some of the references (such as array and mask indices)
+assignments are always made to the original data in the array
+(indeed, nothing else would make sense!). Note though, that some
+actions may not work as one may naively expect. This particular
+example is often surprising to people: ::
+
+ >>> x = np.arange(0, 50, 10)
+ >>> x
+ array([ 0, 10, 20, 30, 40])
+ >>> x[np.array([1, 1, 3, 1])] += 1
+ >>> x
+ array([ 0, 11, 20, 31, 40])
+
+People often expect that the 1st location will be incremented by 3.
+In fact, it will only be incremented by 1. The reason is that
+a new array is extracted from the original (as a temporary) containing
+the values at 1, 1, 3, 1, then the value 1 is added to the temporary,
+and then the temporary is assigned back to the original array. Thus
+the value ``x[1] + 1`` is assigned to ``x[1]`` three times,
+rather than ``x[1]`` being incremented 3 times.
+
+Dealing with variable numbers of indices within programs
+========================================================
+
+The index syntax is very powerful but limiting when dealing with
+a variable number of indices. For example, if you want to write
+a function that can handle arguments with various numbers of
+dimensions without having to write special case code for each
+number of possible dimensions, how can that be done? If one
+supplies to the index a tuple, the tuple will be interpreted
+as a list of indices. For example (using the previous definition
+for the array z): ::
+
+ >>> indices = (1,1,1,1)
+ >>> z[indices]
+ 40
+
+So one can use code to construct tuples of any number of indices
+and then use these within an index.
+
+Slices can be specified within programs by using the slice() function
+in Python. For example: ::
+
+ >>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2]
+ >>> z[indices]
+ array([39, 40])
+
+Likewise, ellipsis can be specified by code by using the Ellipsis
+object: ::
+
+ >>> indices = (1, Ellipsis, 1) # same as [1,...,1]
+ >>> z[indices]
+ array([[28, 31, 34],
+ [37, 40, 43],
+ [46, 49, 52]])
+
+For this reason it is possible to use the output from the np.nonzero()
+function directly as an index since it always returns a tuple of index
+arrays.
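+
+For example: ::
+
+ >>> q = np.array([[1, 0], [0, 2]])
+ >>> q[np.nonzero(q)]  # selects the nonzero elements of q
+ array([1, 2])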
+
+Because of the special treatment of tuples, they are not automatically
+converted to an array as a list would be. As an example: ::
+
+ >>> z[[1,1,1,1]] # produces a large array
+ array([[[[27, 28, 29],
+ [30, 31, 32], ...
+ >>> z[(1,1,1,1)] # returns a single value
+ 40
+
+
diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst
index 3fce6a8aa..5364acbe9 100644
--- a/doc/source/user/basics.io.genfromtxt.rst
+++ b/doc/source/user/basics.io.genfromtxt.rst
@@ -28,7 +28,7 @@ Defining the input
The only mandatory argument of :func:`~numpy.genfromtxt` is the source of
the data. It can be a string, a list of strings, a generator or an open
-file-like object with a :meth:`read` method, for example, a file or
+file-like object with a ``read`` method, for example, a file or
:class:`io.StringIO` object. If a single string is provided, it is assumed
to be the name of a local or remote file. If a list of strings or a generator
returning strings is provided, each string is treated as one line in a file.
@@ -36,10 +36,10 @@ When the URL of a remote file is passed, the file is automatically downloaded
to the current directory and opened.
Recognized file types are text files and archives. Currently, the function
-recognizes :class:`gzip` and :class:`bz2` (`bzip2`) archives. The type of
+recognizes ``gzip`` and ``bz2`` (``bzip2``) archives. The type of
the archive is determined from the extension of the file: if the filename
-ends with ``'.gz'``, a :class:`gzip` archive is expected; if it ends with
-``'bz2'``, a :class:`bzip2` archive is assumed.
+ends with ``'.gz'``, a ``gzip`` archive is expected; if it ends with
+``'bz2'``, a ``bzip2`` archive is assumed.
@@ -360,9 +360,9 @@ The ``converters`` argument
Usually, defining a dtype is sufficient to define how the sequence of
strings must be converted. However, some additional control may sometimes
be required. For example, we may want to make sure that a date in a format
-``YYYY/MM/DD`` is converted to a :class:`datetime` object, or that a string
-like ``xx%`` is properly converted to a float between 0 and 1. In such
-cases, we should define conversion functions with the ``converters``
+``YYYY/MM/DD`` is converted to a :class:`~datetime.datetime` object, or that
+a string like ``xx%`` is properly converted to a float between 0 and 1. In
+such cases, we should define conversion functions with the ``converters``
arguments.
The value of this argument is typically a dictionary with column indices or
@@ -427,7 +427,7 @@ previous example, we used a converter to transform an empty string into a
float. However, user-defined converters may rapidly become cumbersome to
manage.
-The :func:`~nummpy.genfromtxt` function provides two other complementary
+The :func:`~numpy.genfromtxt` function provides two other complementary
mechanisms: the ``missing_values`` argument is used to recognize
missing data and a second argument, ``filling_values``, is used to
process these missing data.
@@ -514,15 +514,15 @@ output array will then be a :class:`~numpy.ma.MaskedArray`.
Shortcut functions
==================
-In addition to :func:`~numpy.genfromtxt`, the :mod:`numpy.lib.io` module
+In addition to :func:`~numpy.genfromtxt`, the :mod:`numpy.lib.npyio` module
provides several convenience functions derived from
:func:`~numpy.genfromtxt`. These functions work the same way as the
original, but they have different default values.
-:func:`~numpy.recfromtxt`
+:func:`~numpy.npyio.recfromtxt`
Returns a standard :class:`numpy.recarray` (if ``usemask=False``) or a
- :class:`~numpy.ma.MaskedRecords` array (if ``usemaske=True``). The
+ :class:`~numpy.ma.mrecords.MaskedRecords` array (if ``usemask=True``). The
default dtype is ``dtype=None``, meaning that the types of each column
will be automatically determined.
-:func:`~numpy.recfromcsv`
- Like :func:`~numpy.recfromtxt`, but with a default ``delimiter=","``.
+:func:`~numpy.npyio.recfromcsv`
+ Like :func:`~numpy.npyio.recfromtxt`, but with a default ``delimiter=","``.
diff --git a/doc/source/user/basics.rec.rst b/doc/source/user/basics.rec.rst
index b885c9e77..f579b0d85 100644
--- a/doc/source/user/basics.rec.rst
+++ b/doc/source/user/basics.rec.rst
@@ -4,10 +4,649 @@
Structured arrays
*****************
-.. automodule:: numpy.doc.structured_arrays
+Introduction
+============
+
+Structured arrays are ndarrays whose datatype is a composition of simpler
+datatypes organized as a sequence of named :term:`fields <field>`. For example,
+::
+
+ >>> x = np.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
+ ... dtype=[('name', 'U10'), ('age', 'i4'), ('weight', 'f4')])
+ >>> x
+ array([('Rex', 9, 81.), ('Fido', 3, 27.)],
+ dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
+
+Here ``x`` is a one-dimensional array of length two whose datatype is a
+structure with three fields: 1. A string of length 10 or less named 'name', 2.
+a 32-bit integer named 'age', and 3. a 32-bit float named 'weight'.
+
+If you index ``x`` at position 1 you get a structure::
+
+ >>> x[1]
+ ('Fido', 3, 27.0)
+
+You can access and modify individual fields of a structured array by indexing
+with the field name::
+
+ >>> x['age']
+ array([9, 3], dtype=int32)
+ >>> x['age'] = 5
+ >>> x
+ array([('Rex', 5, 81.), ('Fido', 5, 27.)],
+ dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
+
+Structured datatypes are designed to be able to mimic 'structs' in the C
+language, and share a similar memory layout. They are meant for interfacing with
+C code and for low-level manipulation of structured buffers, for example for
+interpreting binary blobs. For these purposes they support specialized features
+such as subarrays, nested datatypes, and unions, and allow control over the
+memory layout of the structure.
+
+Users looking to manipulate tabular data, such as that stored in csv files,
+may find other pydata projects more suitable, such as xarray or pandas.
+These provide a high-level interface for tabular data analysis and are better
+optimized for that use. For instance, the C-struct-like memory layout of
+structured arrays in numpy can lead to poor cache behavior in comparison.
+
+.. _defining-structured-types:
+
+Structured Datatypes
+====================
+
+A structured datatype can be thought of as a sequence of bytes of a certain
+length (the structure's :term:`itemsize`) which is interpreted as a collection
+of fields. Each field has a name, a datatype, and a byte offset within the
+structure. The datatype of a field may be any numpy datatype including other
+structured datatypes, and it may also be a :term:`subarray data type` which
+behaves like an ndarray of a specified shape. The offsets of the fields are
+arbitrary, and fields may even overlap. These offsets are usually determined
+automatically by numpy, but can also be specified.
+
+Structured Datatype Creation
+----------------------------
+
+Structured datatypes may be created using the function :func:`numpy.dtype`.
+There are 4 alternative forms of specification which vary in flexibility and
+conciseness. These are further documented in the
+:ref:`Data Type Objects <arrays.dtypes.constructing>` reference page, and in
+summary they are:
+
+1. A list of tuples, one tuple per field
+
+ Each tuple has the form ``(fieldname, datatype, shape)`` where shape is
+ optional. ``fieldname`` is a string (or tuple if titles are used, see
+ :ref:`Field Titles <titles>` below), ``datatype`` may be any object
+ convertible to a datatype, and ``shape`` is a tuple of integers specifying
+ subarray shape.
+
+ >>> np.dtype([('x', 'f4'), ('y', np.float32), ('z', 'f4', (2, 2))])
+ dtype([('x', '<f4'), ('y', '<f4'), ('z', '<f4', (2, 2))])
+
+ If ``fieldname`` is the empty string ``''``, the field will be given a
+ default name of the form ``f#``, where ``#`` is the integer index of the
+ field, counting from 0 from the left::
+
+ >>> np.dtype([('x', 'f4'), ('', 'i4'), ('z', 'i8')])
+ dtype([('x', '<f4'), ('f1', '<i4'), ('z', '<i8')])
+
+ The byte offsets of the fields within the structure and the total
+ structure itemsize are determined automatically.
+
+2. A string of comma-separated dtype specifications
+
+ In this shorthand notation any of the :ref:`string dtype specifications
+ <arrays.dtypes.constructing>` may be used in a string and separated by
+ commas. The itemsize and byte offsets of the fields are determined
+ automatically, and the field names are given the default names ``f0``,
+ ``f1``, etc. ::
+
+ >>> np.dtype('i8, f4, S3')
+ dtype([('f0', '<i8'), ('f1', '<f4'), ('f2', 'S3')])
+ >>> np.dtype('3int8, float32, (2, 3)float64')
+ dtype([('f0', 'i1', (3,)), ('f1', '<f4'), ('f2', '<f8', (2, 3))])
+
+3. A dictionary of field parameter arrays
+
+ This is the most flexible form of specification since it allows control
+ over the byte-offsets of the fields and the itemsize of the structure.
+
+ The dictionary has two required keys, 'names' and 'formats', and four
+ optional keys, 'offsets', 'itemsize', 'aligned' and 'titles'. The values
+ for 'names' and 'formats' should respectively be a list of field names and
+ a list of dtype specifications, of the same length. The optional 'offsets'
+ value should be a list of integer byte-offsets, one for each field within
+ the structure. If 'offsets' is not given the offsets are determined
+ automatically. The optional 'itemsize' value should be an integer
+ describing the total size in bytes of the dtype, which must be large
+ enough to contain all the fields.
+ ::
+
+ >>> np.dtype({'names': ['col1', 'col2'], 'formats': ['i4', 'f4']})
+ dtype([('col1', '<i4'), ('col2', '<f4')])
+ >>> np.dtype({'names': ['col1', 'col2'],
+ ... 'formats': ['i4', 'f4'],
+ ... 'offsets': [0, 4],
+ ... 'itemsize': 12})
+ dtype({'names':['col1','col2'], 'formats':['<i4','<f4'], 'offsets':[0,4], 'itemsize':12})
+
+ Offsets may be chosen such that the fields overlap, though this will mean
+ that assigning to one field may clobber any overlapping field's data. As
+ an exception, fields of :class:`numpy.object` type cannot overlap with
+ other fields, because of the risk of clobbering the internal object
+ pointer and then dereferencing it.
+
+ The optional 'aligned' value can be set to ``True`` to make the automatic
+ offset computation use aligned offsets (see :ref:`offsets-and-alignment`),
+ as if the 'align' keyword argument of :func:`numpy.dtype` had been set to
+ True.
+
+ The optional 'titles' value should be a list of titles of the same length
+ as 'names', see :ref:`Field Titles <titles>` below.
+
+4. A dictionary of field names
+
+ The use of this form of specification is discouraged, but documented here
+ because older numpy code may use it. The keys of the dictionary are the
+ field names and the values are tuples specifying type and offset::
+
+ >>> np.dtype({'col1': ('i1', 0), 'col2': ('f4', 1)})
+ dtype([('col1', 'i1'), ('col2', '<f4')])
+
+ This form is discouraged because Python dictionaries do not preserve order
+ in Python versions before Python 3.6, and the order of the fields in a
+ structured dtype has meaning. :ref:`Field Titles <titles>` may be
+ specified by using a 3-tuple, see below.
+
+Manipulating and Displaying Structured Datatypes
+------------------------------------------------
+
+The list of field names of a structured datatype can be found in the ``names``
+attribute of the dtype object::
+
+ >>> d = np.dtype([('x', 'i8'), ('y', 'f4')])
+ >>> d.names
+ ('x', 'y')
+
+The field names may be modified by assigning to the ``names`` attribute using a
+sequence of strings of the same length.
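+
+For example (using a fresh dtype so that ``d`` above is unchanged): ::
+
+ >>> d2 = np.dtype([('x', 'i8'), ('y', 'f4')])
+ >>> d2.names = ('a', 'b')
+ >>> d2.names
+ ('a', 'b')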
+
+The dtype object also has a dictionary-like attribute, ``fields``, whose keys
+are the field names (and :ref:`Field Titles <titles>`, see below) and whose
+values are tuples containing the dtype and byte offset of each field. ::
+
+ >>> d.fields
+ mappingproxy({'x': (dtype('int64'), 0), 'y': (dtype('float32'), 8)})
+
+Both the ``names`` and ``fields`` attributes will equal ``None`` for
+unstructured arrays. The recommended way to test if a dtype is structured is
+with ``if dt.names is not None`` rather than ``if dt.names``, to account for dtypes
+with 0 fields.
+
+The string representation of a structured datatype is shown in the "list of
+tuples" form if possible, otherwise numpy falls back to using the more general
+dictionary form.
+
+.. _offsets-and-alignment:
+
+Automatic Byte Offsets and Alignment
+------------------------------------
+
+Numpy uses one of two methods to automatically determine the field byte offsets
+and the overall itemsize of a structured datatype, depending on whether
+``align=True`` was specified as a keyword argument to :func:`numpy.dtype`.
+
+By default (``align=False``), numpy will pack the fields together such that
+each field starts at the byte offset where the previous field ended, and the
+fields are contiguous in memory. ::
+
+ >>> def print_offsets(d):
+ ... print("offsets:", [d.fields[name][1] for name in d.names])
+ ... print("itemsize:", d.itemsize)
+ >>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2'))
+ offsets: [0, 1, 2, 6, 7, 15]
+ itemsize: 17
+
+If ``align=True`` is set, numpy will pad the structure in the same way many C
+compilers would pad a C-struct. Aligned structures can give a performance
+improvement in some cases, at the cost of increased datatype size. Padding
+bytes are inserted between fields such that each field's byte offset will be a
+multiple of that field's alignment, which is usually equal to the field's size
+in bytes for simple datatypes, see :c:member:`PyArray_Descr.alignment`. The
+structure will also have trailing padding added so that its itemsize is a
+multiple of the largest field's alignment. ::
+
+ >>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2', align=True))
+ offsets: [0, 1, 4, 8, 16, 24]
+ itemsize: 32
+
+Note that although almost all modern C compilers pad in this way by default,
+padding in C structs is C-implementation-dependent so this memory layout is not
+guaranteed to exactly match that of a corresponding struct in a C program. Some
+work may be needed, either on the numpy side or the C side, to obtain exact
+correspondence.
+
+If offsets were specified using the optional ``offsets`` key in the
+dictionary-based dtype specification, setting ``align=True`` will check that
+each field's offset is a multiple of its size and that the itemsize is a
+multiple of the largest field size, and raise an exception if not.
+
+If the offsets of the fields and itemsize of a structured array satisfy the
+alignment conditions, the array will have the ``ALIGNED`` :attr:`flag
+<numpy.ndarray.flags>` set.
+
+A convenience function :func:`numpy.lib.recfunctions.repack_fields` converts an
+aligned dtype or array to a packed one and vice versa. It takes either a dtype
+or structured ndarray as an argument, and returns a copy with fields re-packed,
+with or without padding bytes.
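+
+For example, a brief sketch: ::
+
+ >>> from numpy.lib.recfunctions import repack_fields
+ >>> dt = np.dtype('u1, i4', align=True)
+ >>> dt.itemsize, repack_fields(dt).itemsize
+ (8, 5)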
+
+.. _titles:
+
+Field Titles
+------------
+
+In addition to field names, fields may also have an associated :term:`title`,
+an alternate name, which is sometimes used as an additional description or
+alias for the field. The title may be used to index an array, just like a
+field name.
+
+To add titles when using the list-of-tuples form of dtype specification, the
+field name may be specified as a tuple of two strings instead of a single
+string, which will be the field's title and field name respectively. For
+example::
+
+ >>> np.dtype([(('my title', 'name'), 'f4')])
+ dtype([(('my title', 'name'), '<f4')])
+
+When using the first form of dictionary-based specification, the titles may be
+supplied as an extra ``'titles'`` key as described above. When using the second
+(discouraged) dictionary-based specification, the title can be supplied by
+providing a 3-element tuple ``(datatype, offset, title)`` instead of the usual
+2-element tuple::
+
+ >>> np.dtype({'name': ('i4', 0, 'my title')})
+ dtype([(('my title', 'name'), '<i4')])
+
+The ``dtype.fields`` dictionary will contain titles as keys, if any
+titles are used. This means effectively that a field with a title will be
+represented twice in the fields dictionary. The tuple values for these fields
+will also have a third element, the field title. Because of this, and because
+the ``names`` attribute preserves the field order while the ``fields``
+attribute may not, it is recommended to iterate through the fields of a dtype
+using the ``names`` attribute of the dtype, which will not list titles, as
+in::
+
+ >>> for name in d.names:
+ ... print(d.fields[name][:2])
+ (dtype('int64'), 0)
+ (dtype('float32'), 8)
+
+Union types
+-----------
+
+Structured datatypes are implemented in numpy to have base type
+:class:`numpy.void` by default, but it is possible to interpret other numpy
+types as structured types using the ``(base_dtype, dtype)`` form of dtype
+specification described in
+:ref:`Data Type Objects <arrays.dtypes.constructing>`. Here, ``base_dtype`` is
+the desired underlying dtype, and fields and flags will be copied from
+``dtype``. This dtype is similar to a 'union' in C.
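+
+For example, a minimal sketch of a 32-bit integer whose two 16-bit halves are
+also accessible as fields laid over the same four bytes: ::
+
+ >>> dt = np.dtype(('i4', [('lo', 'i2'), ('hi', 'i2')]))
+ >>> dt.itemsize
+ 4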
+
+Indexing and Assignment to Structured arrays
+============================================
+
+Assigning data to a Structured Array
+------------------------------------
+
+There are a number of ways to assign values to a structured array: using python
+tuples, using scalar values, or using other structured arrays.
+
+Assignment from Python Native Types (Tuples)
+````````````````````````````````````````````
+
+The simplest way to assign values to a structured array is using python tuples.
+Each assigned value should be a tuple of length equal to the number of fields
+in the array, and not a list or array as these will trigger numpy's
+broadcasting rules. The tuple's elements are assigned to the successive fields
+of the array, from left to right::
+
+ >>> x = np.array([(1, 2, 3), (4, 5, 6)], dtype='i8, f4, f8')
+ >>> x[1] = (7, 8, 9)
+ >>> x
+ array([(1, 2., 3.), (7, 8., 9.)],
+ dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '<f8')])
+
+Assignment from Scalars
+```````````````````````
+
+A scalar assigned to a structured element will be assigned to all fields. This
+happens when a scalar is assigned to a structured array, or when an
+unstructured array is assigned to a structured array::
+
+ >>> x = np.zeros(2, dtype='i8, f4, ?, S1')
+ >>> x[:] = 3
+ >>> x
+ array([(3, 3., True, b'3'), (3, 3., True, b'3')],
+ dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
+ >>> x[:] = np.arange(2)
+ >>> x
+ array([(0, 0., False, b'0'), (1, 1., True, b'1')],
+ dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
+
+Structured arrays can also be assigned to unstructured arrays, but only if the
+structured datatype has just a single field::
+
+ >>> twofield = np.zeros(2, dtype=[('A', 'i4'), ('B', 'i4')])
+ >>> onefield = np.zeros(2, dtype=[('A', 'i4')])
+ >>> nostruct = np.zeros(2, dtype='i4')
+ >>> nostruct[:] = twofield
+ Traceback (most recent call last):
+ ...
+ TypeError: Cannot cast array data from dtype([('A', '<i4'), ('B', '<i4')]) to dtype('int32') according to the rule 'unsafe'
+
+Assignment from other Structured Arrays
+```````````````````````````````````````
+
+Assignment between two structured arrays occurs as if the source elements had
+been converted to tuples and then assigned to the destination elements. That
+is, the first field of the source array is assigned to the first field of the
+destination array, and the second field likewise, and so on, regardless of
+field names. Structured arrays with a different number of fields cannot be
+assigned to each other. Bytes of the destination structure which are not
+included in any of the fields are unaffected. ::
+
+ >>> a = np.zeros(3, dtype=[('a', 'i8'), ('b', 'f4'), ('c', 'S3')])
+ >>> b = np.ones(3, dtype=[('x', 'f4'), ('y', 'S3'), ('z', 'O')])
+ >>> b[:] = a
+ >>> b
+ array([(0., b'0.0', b''), (0., b'0.0', b''), (0., b'0.0', b'')],
+ dtype=[('x', '<f4'), ('y', 'S3'), ('z', 'O')])
+
+
+Assignment involving subarrays
+``````````````````````````````
+
+When assigning to fields which are subarrays, the assigned value will first be
+broadcast to the shape of the subarray.
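+
+A brief example, using a field ``a`` with subarray shape ``(2, 2)``: ::
+
+ >>> x = np.zeros(1, dtype=[('a', 'f4', (2, 2))])
+ >>> x[0]['a'] = 1.0  # the scalar is broadcast to the (2, 2) subarray
+ >>> x[0]['a']
+ array([[1., 1.],
+        [1., 1.]], dtype=float32)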
+
+Indexing Structured Arrays
+--------------------------
+
+Accessing Individual Fields
+```````````````````````````
+
+Individual fields of a structured array may be accessed and modified by indexing
+the array with the field name. ::
+
+ >>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
+ >>> x['foo']
+ array([1, 3])
+ >>> x['foo'] = 10
+ >>> x
+ array([(10, 2.), (10, 4.)],
+ dtype=[('foo', '<i8'), ('bar', '<f4')])
+
+The resulting array is a view into the original array. It shares the same
+memory locations and writing to the view will modify the original array. ::
+
+ >>> y = x['bar']
+ >>> y[:] = 11
+ >>> x
+ array([(10, 11.), (10, 11.)],
+ dtype=[('foo', '<i8'), ('bar', '<f4')])
+
+This view has the same dtype and itemsize as the indexed field, so it is
+typically a non-structured array, except in the case of nested structures.
+
+ >>> y.dtype, y.shape, y.strides
+ (dtype('float32'), (2,), (12,))
+
+If the accessed field is a subarray, the dimensions of the subarray
+are appended to the shape of the result::
+
+ >>> x = np.zeros((2, 2), dtype=[('a', np.int32), ('b', np.float64, (3, 3))])
+ >>> x['a'].shape
+ (2, 2)
+ >>> x['b'].shape
+ (2, 2, 3, 3)
+
+Accessing Multiple Fields
+```````````````````````````
+
+One can index and assign to a structured array with a multi-field index, where
+the index is a list of field names.
+
+.. warning::
+ The behavior of multi-field indexes changed from Numpy 1.15 to Numpy 1.16.
+
+The result of indexing with a multi-field index is a view into the original
+array, as follows::
+
+ >>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')])
+ >>> a[['a', 'c']]
+ array([(0, 0.), (0, 0.), (0, 0.)],
+ dtype={'names':['a','c'], 'formats':['<i4','<f4'], 'offsets':[0,8], 'itemsize':12})
+
+Assignment to the view modifies the original array. The view's fields will be
+in the order they were indexed. Note that unlike for single-field indexing, the
+dtype of the view has the same itemsize as the original array, and has fields
+at the same offsets as in the original array, and unindexed fields are merely
+missing.
+
+.. warning::
+ In Numpy 1.15, indexing an array with a multi-field index returned a copy of
+ the result above, but with fields packed together in memory as if
+ passed through :func:`numpy.lib.recfunctions.repack_fields`.
+
+ The new behavior as of Numpy 1.16 leads to extra "padding" bytes at the
+ location of unindexed fields compared to 1.15. You will need to update any
+ code which depends on the data having a "packed" layout. For instance code
+ such as::
+
+ >>> a[['a', 'c']].view('i8') # Fails in Numpy 1.16
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype
+
+ will need to be changed. This code has raised a ``FutureWarning`` since
+ Numpy 1.12, and similar code has raised ``FutureWarning`` since 1.7.
+
+ In 1.16 a number of functions have been introduced in the
+ :mod:`numpy.lib.recfunctions` module to help users account for this
+ change. These are
+ :func:`numpy.lib.recfunctions.repack_fields`.
+ :func:`numpy.lib.recfunctions.structured_to_unstructured`,
+ :func:`numpy.lib.recfunctions.unstructured_to_structured`,
+ :func:`numpy.lib.recfunctions.apply_along_fields`,
+ :func:`numpy.lib.recfunctions.assign_fields_by_name`, and
+ :func:`numpy.lib.recfunctions.require_fields`.
+
+ The function :func:`numpy.lib.recfunctions.repack_fields` can always be
+ used to reproduce the old behavior, as it will return a packed copy of the
+ structured array. The code above, for example, can be replaced with:
+
+ >>> from numpy.lib.recfunctions import repack_fields
+ >>> repack_fields(a[['a', 'c']]).view('i8') # supported in 1.16
+ array([0, 0, 0])
+
+ Furthermore, numpy now provides a new function
+ :func:`numpy.lib.recfunctions.structured_to_unstructured` which is a safer
+ and more efficient alternative for users who wish to convert structured
+ arrays to unstructured arrays, as the view above is often intended to do.
+ This function allows safe conversion to an unstructured type taking into
+ account padding, often avoids a copy, and also casts the datatypes
+ as needed, unlike the view. Code such as:
+
+ >>> b = np.zeros(3, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
+ >>> b[['x', 'z']].view('f4')
+ array([0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)
+
+ can be made safer by replacing with:
+
+ >>> from numpy.lib.recfunctions import structured_to_unstructured
+ >>> structured_to_unstructured(b[['x', 'z']])
+ array([[0., 0.],
+        [0., 0.],
+        [0., 0.]], dtype=float32)
+
+
+Assignment to an array with a multi-field index modifies the original array::
+
+ >>> a[['a', 'c']] = (2, 3)
+ >>> a
+ array([(2, 0, 3.), (2, 0, 3.), (2, 0, 3.)],
+ dtype=[('a', '<i4'), ('b', '<i4'), ('c', '<f4')])
+
+This obeys the structured array assignment rules described above. For example,
+this means that one can swap the values of two fields using appropriate
+multi-field indexes::
+
+ >>> a[['a', 'c']] = a[['c', 'a']]
+
+Indexing with an Integer to get a Structured Scalar
+```````````````````````````````````````````````````
+
+Indexing a single element of a structured array (with an integer index) returns
+a structured scalar::
+
+ >>> x = np.array([(1, 2., 3.)], dtype='i, f, f')
+ >>> scalar = x[0]
+ >>> scalar
+ (1, 2., 3.)
+ >>> type(scalar)
+ <class 'numpy.void'>
+
+Unlike other numpy scalars, structured scalars are mutable and act like views
+into the original array, such that modifying the scalar will modify the
+original array. Structured scalars also support access and assignment by field
+name::
+
+ >>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
+ >>> s = x[0]
+ >>> s['bar'] = 100
+ >>> x
+ array([(1, 100.), (3, 4.)],
+ dtype=[('foo', '<i8'), ('bar', '<f4')])
+
+Similarly to tuples, structured scalars can also be indexed with an integer::
+
+ >>> scalar = np.array([(1, 2., 3.)], dtype='i, f, f')[0]
+ >>> scalar[0]
+ 1
+ >>> scalar[1] = 4
+
+Thus, tuples might be thought of as the native Python equivalent to numpy's
+structured types, much like native python integers are the equivalent of
+numpy's integer types. Structured scalars may be converted to a tuple by
+calling `numpy.ndarray.item`::
+
+ >>> scalar.item(), type(scalar.item())
+ ((1, 4.0, 3.0), <class 'tuple'>)
+
+Viewing Structured Arrays Containing Objects
+--------------------------------------------
+
+In order to prevent clobbering object pointers in fields of
+:class:`numpy.object` type, numpy currently does not allow views of structured
+arrays containing objects.
+
+Structure Comparison
+--------------------
+
+If the dtypes of two void structured arrays are equal, testing the equality of
+the arrays will result in a boolean array with the dimensions of the original
+arrays, with elements set to ``True`` where all fields of the corresponding
+structures are equal. Structured dtypes are equal if the field names,
+dtypes and titles are the same, ignoring endianness, and the fields are in
+the same order::
+
+ >>> a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')])
+ >>> b = np.ones(2, dtype=[('a', 'i4'), ('b', 'i4')])
+ >>> a == b
+ array([False, False])
+
+Currently, if the dtypes of two void structured arrays are not equivalent the
+comparison fails, returning the scalar value ``False``. This behavior is
+deprecated as of numpy 1.10 and will raise an error or perform elementwise
+comparison in the future.
+
+The ``<`` and ``>`` operators always return ``False`` when comparing void
+structured arrays, and arithmetic and bitwise operations are not supported.
+
+Record Arrays
+=============
+
+As an optional convenience numpy provides an ndarray subclass,
+:class:`numpy.recarray`, and associated helper functions in the
+:mod:`numpy.rec` submodule, that allow access to fields of structured arrays
+by attribute instead of only by index.
+Record arrays also use a special datatype, :class:`numpy.record`, that allows
+field access by attribute on the structured scalars obtained from the array.
+
+The simplest way to create a record array is with ``numpy.rec.array``::
+
+ >>> recordarr = np.rec.array([(1, 2., 'Hello'), (2, 3., "World")],
+ ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
+ >>> recordarr.bar
+ array([ 2., 3.], dtype=float32)
+ >>> recordarr[1:2]
+ rec.array([(2, 3., b'World')],
+ dtype=[('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')])
+ >>> recordarr[1:2].foo
+ array([2], dtype=int32)
+ >>> recordarr.foo[1:2]
+ array([2], dtype=int32)
+ >>> recordarr[1].baz
+ b'World'
+
+:func:`numpy.rec.array` can convert a wide variety of arguments into record
+arrays, including structured arrays::
+
+ >>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")],
+ ... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])
+ >>> recordarr = np.rec.array(arr)
+
+The :mod:`numpy.rec` module provides a number of other convenience functions for
+creating record arrays, see :ref:`record array creation routines
+<routines.array-creation.rec>`.
+
+A record array representation of a structured array can be obtained using the
+appropriate :meth:`view <numpy.ndarray.view>`::
+
+ >>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")],
+ ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')])
+ >>> recordarr = arr.view(dtype=np.dtype((np.record, arr.dtype)),
+ ... type=np.recarray)
+
+For convenience, viewing an ndarray as type :class:`np.recarray` will
+automatically convert its datatype to :class:`np.record`, so the dtype can be left
+out of the view::
+
+ >>> recordarr = arr.view(np.recarray)
+ >>> recordarr.dtype
+ dtype((numpy.record, [('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')]))
+
+To get back to a plain ndarray both the dtype and type must be reset. The
+following view does so, taking into account the unusual case that the
+recordarr was not a structured type::
+
+ >>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray)
+
+Record array fields accessed by index or by attribute are returned as a record
+array if the field has a structured type but as a plain ndarray otherwise. ::
+
+ >>> recordarr = np.rec.array([('Hello', (1, 2)), ("World", (3, 4))],
+ ... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])])
+ >>> type(recordarr.foo)
+ <class 'numpy.ndarray'>
+ >>> type(recordarr.bar)
+ <class 'numpy.recarray'>
+
+Note that if a field has the same name as an ndarray attribute, the ndarray
+attribute takes precedence. Such fields will be inaccessible by attribute but
+will still be accessible by index.
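+
+For instance, with a field deliberately named ``shape`` (an illustrative
+sketch)::
+
+    >>> recarr = np.rec.array([(1., 2.)], dtype=[('shape', 'f8'), ('mean', 'f8')])
+    >>> recarr.shape          # the ndarray attribute wins
+    (1,)
+    >>> recarr['shape']       # the field is still reachable by index
+    array([1.])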
+
Recarray Helper Functions
-*************************
+-------------------------
.. automodule:: numpy.lib.recfunctions
:members:
diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst
index 43315521c..d8d104220 100644
--- a/doc/source/user/basics.subclassing.rst
+++ b/doc/source/user/basics.subclassing.rst
@@ -4,4 +4,751 @@
Subclassing ndarray
*******************
-.. automodule:: numpy.doc.subclassing
+Introduction
+------------
+
+Subclassing ndarray is relatively simple, but it has some complications
+compared to other Python objects. On this page we explain the machinery
+that allows you to subclass ndarray, and the implications for
+implementing a subclass.
+
+ndarrays and object creation
+============================
+
+Subclassing ndarray is complicated by the fact that new instances of
+ndarray classes can come about in three different ways. These are:
+
+#. Explicit constructor call - as in ``MySubClass(params)``. This is
+ the usual route to Python instance creation.
+#. View casting - casting an existing ndarray as a given subclass
+#. New from template - creating a new instance from a template
+ instance. Examples include returning slices from a subclassed array,
+ creating return types from ufuncs, and copying arrays. See
+ :ref:`new-from-template` for more details
+
+The last two are characteristics of ndarrays - in order to support
+things like array slicing. The complications of subclassing ndarray are
+due to the mechanisms numpy has to support these latter two routes of
+instance creation.
+
+.. _view-casting:
+
+View casting
+------------
+
+*View casting* is the standard ndarray mechanism by which you take an
+ndarray of any subclass, and return a view of the array as another
+(specified) subclass:
+
+>>> import numpy as np
+>>> # create a completely useless ndarray subclass
+>>> class C(np.ndarray): pass
+>>> # create a standard ndarray
+>>> arr = np.zeros((3,))
+>>> # take a view of it, as our useless subclass
+>>> c_arr = arr.view(C)
+>>> type(c_arr)
+<class 'C'>
+
+.. _new-from-template:
+
+Creating new from template
+--------------------------
+
+New instances of an ndarray subclass can also come about by a very
+similar mechanism to :ref:`view-casting`, when numpy finds it needs to
+create a new instance from a template instance. The most obvious place
+this has to happen is when you are taking slices of subclassed arrays.
+For example:
+
+>>> v = c_arr[1:]
+>>> type(v) # the view is of type 'C'
+<class 'C'>
+>>> v is c_arr # but it's a new instance
+False
+
+The slice is a *view* onto the original ``c_arr`` data. So, when we
+take a view from the ndarray, we return a new ndarray, of the same
+class, that points to the data in the original.
+
+There are other points in the use of ndarrays where we need such views,
+such as copying arrays (``c_arr.copy()``), creating ufunc output arrays
+(see also :ref:`array-wrap`), and reducing methods (like
+``c_arr.mean()``).
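+
+For example, copying preserves the subclass in the same way:
+
+>>> c_copy = c_arr.copy()
+>>> type(c_copy)
+<class 'C'>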
+
+Relationship of view casting and new-from-template
+--------------------------------------------------
+
+These paths both use the same machinery. We make the distinction here,
+because they result in different input to your methods. Specifically,
+:ref:`view-casting` means you have created a new instance of your array
+type from any potential subclass of ndarray. :ref:`new-from-template`
+means you have created a new instance of your class from a pre-existing
+instance, allowing you - for example - to copy across attributes that
+are particular to your subclass.
+
+Implications for subclassing
+----------------------------
+
+If we subclass ndarray, we need to deal not only with explicit
+construction of our array type, but also :ref:`view-casting` or
+:ref:`new-from-template`. NumPy has the machinery to do this, and it is
+this machinery that makes subclassing slightly non-standard.
+
+There are two aspects to the machinery that ndarray uses to support
+views and new-from-template in subclasses.
+
+The first is the use of the ``ndarray.__new__`` method for the main work
+of object initialization, rather than the more usual ``__init__``
+method. The second is the use of the ``__array_finalize__`` method to
+allow subclasses to clean up after the creation of views and new
+instances from templates.
+
+A brief Python primer on ``__new__`` and ``__init__``
+=====================================================
+
+``__new__`` is a standard Python method, and, if present, is called
+before ``__init__`` when we create a class instance. See the `python
+__new__ documentation
+<https://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail.
+
+For example, consider the following Python code:
+
+.. testcode::
+
+ class C:
+ def __new__(cls, *args):
+ print('Cls in __new__:', cls)
+ print('Args in __new__:', args)
+ # The `object` type __new__ method takes a single argument.
+ return object.__new__(cls)
+
+ def __init__(self, *args):
+ print('type(self) in __init__:', type(self))
+ print('Args in __init__:', args)
+
+meaning that we get:
+
+>>> c = C('hello')
+Cls in __new__: <class 'C'>
+Args in __new__: ('hello',)
+type(self) in __init__: <class 'C'>
+Args in __init__: ('hello',)
+
+When we call ``C('hello')``, the ``__new__`` method gets its own class
+as first argument, and the passed argument, which is the string
+``'hello'``. After python calls ``__new__``, it usually (see below)
+calls our ``__init__`` method, with the output of ``__new__`` as the
+first argument (now a class instance), and the passed arguments
+following.
+
+As you can see, the object can be initialized in the ``__new__``
+method or the ``__init__`` method, or both, and in fact ndarray does
+not have an ``__init__`` method, because all the initialization is
+done in the ``__new__`` method.
+
+Why use ``__new__`` rather than just the usual ``__init__``? Because
+in some cases, as for ndarray, we want to be able to return an object
+of some other class. Consider the following:
+
+.. testcode::
+
+ class D(C):
+ def __new__(cls, *args):
+ print('D cls is:', cls)
+ print('D args in __new__:', args)
+ return C.__new__(C, *args)
+
+ def __init__(self, *args):
+ # we never get here
+ print('In D __init__')
+
+meaning that:
+
+>>> obj = D('hello')
+D cls is: <class 'D'>
+D args in __new__: ('hello',)
+Cls in __new__: <class 'C'>
+Args in __new__: ('hello',)
+>>> type(obj)
+<class 'C'>
+
+The definition of ``C`` is the same as before, but for ``D``, the
+``__new__`` method returns an instance of class ``C`` rather than
+``D``. Note that the ``__init__`` method of ``D`` does not get
+called. In general, when the ``__new__`` method returns an object of
+class other than the class in which it is defined, the ``__init__``
+method of that class is not called.
+
+This is how subclasses of the ndarray class are able to return views
+that preserve the class type. When taking a view, the standard
+ndarray machinery creates the new ndarray object with something
+like::
+
+ obj = ndarray.__new__(subtype, shape, ...
+
+where ``subtype`` is the subclass. Thus the returned view is of the
+same class as the subclass, rather than being of class ``ndarray``.
+
+That solves the problem of returning views of the same type, but now
+we have a new problem. The machinery of ndarray can set the class
+this way, in its standard methods for taking views, but the ndarray
+``__new__`` method knows nothing of what we have done in our own
+``__new__`` method in order to set attributes, and so on. (Aside -
+why not call ``obj = subtype.__new__(...`` then? Because we may not
+have a ``__new__`` method with the same call signature).
+
+The role of ``__array_finalize__``
+==================================
+
+``__array_finalize__`` is the mechanism that numpy provides to allow
+subclasses to handle the various ways that new instances get created.
+
+Remember that subclass instances can come about in these three ways:
+
+#. explicit constructor call (``obj = MySubClass(params)``). This will
+ call the usual sequence of ``MySubClass.__new__`` then (if it exists)
+ ``MySubClass.__init__``.
+#. :ref:`view-casting`
+#. :ref:`new-from-template`
+
+Our ``MySubClass.__new__`` method only gets called in the case of the
+explicit constructor call, so we can't rely on ``MySubClass.__new__`` or
+``MySubClass.__init__`` to deal with the view casting and
+new-from-template. It turns out that ``MySubClass.__array_finalize__``
+*does* get called for all three methods of object creation, so this is
+where our object creation housekeeping usually goes.
+
+* For the explicit constructor call, our subclass will need to create a
+ new ndarray instance of its own class. In practice this means that
+ we, the authors of the code, will need to make a call to
+ ``ndarray.__new__(MySubClass,...)``, a class-hierarchy prepared call to
+ ``super(MySubClass, cls).__new__(cls, ...)``, or do view casting of an
+ existing array (see below)
+* For view casting and new-from-template, the equivalent of
+ ``ndarray.__new__(MySubClass,...`` is called, at the C level.
+
+The arguments that ``__array_finalize__`` receives differ for the three
+methods of instance creation above.
+
+The following code allows us to look at the call sequences and arguments:
+
+.. testcode::
+
+ import numpy as np
+
+ class C(np.ndarray):
+ def __new__(cls, *args, **kwargs):
+ print('In __new__ with class %s' % cls)
+ return super(C, cls).__new__(cls, *args, **kwargs)
+
+ def __init__(self, *args, **kwargs):
+ # in practice you probably will not need or want an __init__
+ # method for your subclass
+ print('In __init__ with class %s' % self.__class__)
+
+ def __array_finalize__(self, obj):
+ print('In array_finalize:')
+ print(' self type is %s' % type(self))
+ print(' obj type is %s' % type(obj))
+
+
+Now:
+
+>>> # Explicit constructor
+>>> c = C((10,))
+In __new__ with class <class 'C'>
+In array_finalize:
+ self type is <class 'C'>
+ obj type is <class 'NoneType'>
+In __init__ with class <class 'C'>
+>>> # View casting
+>>> a = np.arange(10)
+>>> cast_a = a.view(C)
+In array_finalize:
+ self type is <class 'C'>
+ obj type is <class 'numpy.ndarray'>
+>>> # Slicing (example of new-from-template)
+>>> cv = c[:1]
+In array_finalize:
+ self type is <class 'C'>
+ obj type is <class 'C'>
+
+The signature of ``__array_finalize__`` is::
+
+ def __array_finalize__(self, obj):
+
+One sees that the ``super`` call, which goes to
+``ndarray.__new__``, passes ``__array_finalize__`` the new object of our
+own class (``self``) as well as the object from which the view has been
+taken (``obj``). As you can see from the output above, the ``self`` is
+always a newly created instance of our subclass, and the type of ``obj``
+differs for the three instance creation methods:
+
+* When called from the explicit constructor, ``obj`` is ``None``
+* When called from view casting, ``obj`` can be an instance of any
+ subclass of ndarray, including our own.
+* When called in new-from-template, ``obj`` is another instance of our
+ own subclass, that we might use to update the new ``self`` instance.
+
+Because ``__array_finalize__`` is the only method that always sees new
+instances being created, it is the sensible place to fill in instance
+defaults for new object attributes, among other tasks.
+
+This may be clearer with an example.
+
+Simple example - adding an extra attribute to ndarray
+-----------------------------------------------------
+
+.. testcode::
+
+ import numpy as np
+
+ class InfoArray(np.ndarray):
+
+ def __new__(subtype, shape, dtype=float, buffer=None, offset=0,
+ strides=None, order=None, info=None):
+ # Create the ndarray instance of our type, given the usual
+ # ndarray input arguments. This will call the standard
+ # ndarray constructor, but return an object of our type.
+ # It also triggers a call to InfoArray.__array_finalize__
+ obj = super(InfoArray, subtype).__new__(subtype, shape, dtype,
+ buffer, offset, strides,
+ order)
+ # set the new 'info' attribute to the value passed
+ obj.info = info
+ # Finally, we must return the newly created object:
+ return obj
+
+ def __array_finalize__(self, obj):
+ # ``self`` is a new object resulting from
+ # ndarray.__new__(InfoArray, ...), therefore it only has
+ # attributes that the ndarray.__new__ constructor gave it -
+ # i.e. those of a standard ndarray.
+ #
+ # We could have got to the ndarray.__new__ call in 3 ways:
+ # From an explicit constructor - e.g. InfoArray():
+ # obj is None
+ # (we're in the middle of the InfoArray.__new__
+ # constructor, and self.info will be set when we return to
+ # InfoArray.__new__)
+ if obj is None: return
+ # From view casting - e.g arr.view(InfoArray):
+ # obj is arr
+ # (type(obj) can be InfoArray)
+ # From new-from-template - e.g infoarr[:3]
+ # type(obj) is InfoArray
+ #
+ # Note that it is here, rather than in the __new__ method,
+ # that we set the default value for 'info', because this
+ # method sees all creation of default objects - with the
+ # InfoArray.__new__ constructor, but also with
+ # arr.view(InfoArray).
+ self.info = getattr(obj, 'info', None)
+ # We do not need to return anything
+
+
+Using the object looks like this:
+
+ >>> obj = InfoArray(shape=(3,)) # explicit constructor
+ >>> type(obj)
+ <class 'InfoArray'>
+ >>> obj.info is None
+ True
+ >>> obj = InfoArray(shape=(3,), info='information')
+ >>> obj.info
+ 'information'
+ >>> v = obj[1:] # new-from-template - here - slicing
+ >>> type(v)
+ <class 'InfoArray'>
+ >>> v.info
+ 'information'
+ >>> arr = np.arange(10)
+ >>> cast_arr = arr.view(InfoArray) # view casting
+ >>> type(cast_arr)
+ <class 'InfoArray'>
+ >>> cast_arr.info is None
+ True
+
+This class isn't very useful, because it has the same constructor as the
+bare ndarray object, including passing in buffers and shapes and so on.
+We would probably prefer the constructor to be able to take an already
+formed ndarray from the usual numpy calls to ``np.array`` and return an
+object.
+
+Slightly more realistic example - attribute added to existing array
+-------------------------------------------------------------------
+
+Here is a class that takes a standard ndarray that already exists, casts
+as our type, and adds an extra attribute.
+
+.. testcode::
+
+ import numpy as np
+
+ class RealisticInfoArray(np.ndarray):
+
+ def __new__(cls, input_array, info=None):
+ # Input array is an already formed ndarray instance
+ # We first cast to be our class type
+ obj = np.asarray(input_array).view(cls)
+ # add the new attribute to the created instance
+ obj.info = info
+ # Finally, we must return the newly created object:
+ return obj
+
+ def __array_finalize__(self, obj):
+ # see InfoArray.__array_finalize__ for comments
+ if obj is None: return
+ self.info = getattr(obj, 'info', None)
+
+
+So:
+
+ >>> arr = np.arange(5)
+ >>> obj = RealisticInfoArray(arr, info='information')
+ >>> type(obj)
+ <class 'RealisticInfoArray'>
+ >>> obj.info
+ 'information'
+ >>> v = obj[1:]
+ >>> type(v)
+ <class 'RealisticInfoArray'>
+ >>> v.info
+ 'information'
+
+.. _array-ufunc:
+
+``__array_ufunc__`` for ufuncs
+------------------------------
+
+ .. versionadded:: 1.13
+
+A subclass can override what happens when executing numpy ufuncs on it by
+overriding the default ``ndarray.__array_ufunc__`` method. This method is
+executed *instead* of the ufunc and should return either the result of the
+operation, or :obj:`NotImplemented` if the operation requested is not
+implemented.
+
+The signature of ``__array_ufunc__`` is::
+
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+
+ - *ufunc* is the ufunc object that was called.
+ - *method* is a string indicating how the Ufunc was called, either
+ ``"__call__"`` to indicate it was called directly, or one of its
+ :ref:`methods<ufuncs.methods>`: ``"reduce"``, ``"accumulate"``,
+ ``"reduceat"``, ``"outer"``, or ``"at"``.
+ - *inputs* is a tuple of the input arguments to the ``ufunc``
+ - *kwargs* contains any optional or keyword arguments passed to the
+ function. This includes any ``out`` arguments, which are always
+ contained in a tuple.
+
+A typical implementation would convert any inputs or outputs that are
+instances of one's own class, pass everything on to a superclass using
+``super()``, and finally return the results after possible
+back-conversion. An example, taken from the test case
+``test_ufunc_override_with_super`` in ``core/tests/test_umath.py``, is the
+following.
+
+.. testcode::
+
+ import numpy as np
+
+ class A(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):
+ args = []
+ in_no = []
+ for i, input_ in enumerate(inputs):
+ if isinstance(input_, A):
+ in_no.append(i)
+ args.append(input_.view(np.ndarray))
+ else:
+ args.append(input_)
+
+ outputs = out
+ out_no = []
+ if outputs:
+ out_args = []
+ for j, output in enumerate(outputs):
+ if isinstance(output, A):
+ out_no.append(j)
+ out_args.append(output.view(np.ndarray))
+ else:
+ out_args.append(output)
+ kwargs['out'] = tuple(out_args)
+ else:
+ outputs = (None,) * ufunc.nout
+
+ info = {}
+ if in_no:
+ info['inputs'] = in_no
+ if out_no:
+ info['outputs'] = out_no
+
+ results = super(A, self).__array_ufunc__(ufunc, method,
+ *args, **kwargs)
+ if results is NotImplemented:
+ return NotImplemented
+
+ if method == 'at':
+ if isinstance(inputs[0], A):
+ inputs[0].info = info
+ return
+
+ if ufunc.nout == 1:
+ results = (results,)
+
+ results = tuple((np.asarray(result).view(A)
+ if output is None else output)
+ for result, output in zip(results, outputs))
+ if results and isinstance(results[0], A):
+ results[0].info = info
+
+ return results[0] if len(results) == 1 else results
+
+So, this class does not actually do anything interesting: it just
+converts any instances of its own class to regular ndarrays (otherwise, we'd
+get infinite recursion!), and adds an ``info`` dictionary that tells
+which inputs and outputs it converted. Hence, e.g.,
+
+>>> a = np.arange(5.).view(A)
+>>> b = np.sin(a)
+>>> b.info
+{'inputs': [0]}
+>>> b = np.sin(np.arange(5.), out=(a,))
+>>> b.info
+{'outputs': [0]}
+>>> a = np.arange(5.).view(A)
+>>> b = np.ones(1).view(A)
+>>> c = a + b
+>>> c.info
+{'inputs': [0, 1]}
+>>> a += b
+>>> a.info
+{'inputs': [0, 1], 'outputs': [0]}
+
+Note that another approach would be to use ``getattr(ufunc,
+method)(*inputs, **kwargs)`` instead of the ``super`` call. For this example,
+the result would be identical, but there is a difference if another operand
+also defines ``__array_ufunc__``. E.g., let's assume that we evaluate
+``np.add(a, b)``, where ``b`` is an instance of another class ``B`` that has
+an override. If you use ``super`` as in the example,
+``ndarray.__array_ufunc__`` will notice that ``b`` has an override, which
+means it cannot evaluate the result itself. Thus, it will return
+`NotImplemented` and so will our class ``A``. Then, control will be passed
+over to ``b``, which either knows how to deal with us and produces a result,
+or does not and returns `NotImplemented`, in which case a ``TypeError`` is raised.
+
+If instead, we replace our ``super`` call with ``getattr(ufunc, method)``, we
+effectively do ``np.add(a.view(np.ndarray), b)``. Again, ``B.__array_ufunc__``
+will be called, but now it sees an ``ndarray`` as the other argument. Likely,
+it will know how to handle this, and return a new instance of the ``B`` class
+to us. Our example class is not set up to handle this, but it might well be
+the best approach if, e.g., one were to re-implement ``MaskedArray`` using
+``__array_ufunc__``.
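+
+Concretely, only the dispatch line in the example above would change (a
+sketch, reusing the names from that example)::
+
+    # instead of the cooperative call
+    #     results = super(A, self).__array_ufunc__(ufunc, method, *args, **kwargs)
+    # one would dispatch directly on the ufunc:
+    results = getattr(ufunc, method)(*args, **kwargs)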
+
+As a final note: if the ``super`` route is suited to a given class, an
+advantage of using it is that it helps in constructing class hierarchies.
+E.g., suppose that our other class ``B`` also used the ``super`` in its
+``__array_ufunc__`` implementation, and we created a class ``C`` that depended
+on both, i.e., ``class C(A, B)`` (with, for simplicity, not another
+``__array_ufunc__`` override). Then any ufunc on an instance of ``C`` would
+pass on to ``A.__array_ufunc__``, the ``super`` call in ``A`` would go to
+``B.__array_ufunc__``, and the ``super`` call in ``B`` would go to
+``ndarray.__array_ufunc__``, thus allowing ``A`` and ``B`` to collaborate.
+
+.. _array-wrap:
+
+``__array_wrap__`` for ufuncs and other functions
+-------------------------------------------------
+
+Prior to numpy 1.13, the behaviour of ufuncs could only be tuned using
+``__array_wrap__`` and ``__array_prepare__``. These two allowed one to
+change the output type of a ufunc, but, in contrast to
+``__array_ufunc__``, did not allow one to make any changes to the inputs.
+It is hoped to eventually deprecate these, but ``__array_wrap__`` is also
+used by other numpy functions and methods, such as ``squeeze``, so at the
+present time is still needed for full functionality.
+
+Conceptually, ``__array_wrap__`` "wraps up the action" in the sense of
+allowing a subclass to set the type of the return value and update
+attributes and metadata. Let's show how this works with an example. First
+we return to the simpler example subclass, but with a different name and
+some print statements:
+
+.. testcode::
+
+ import numpy as np
+
+ class MySubClass(np.ndarray):
+
+ def __new__(cls, input_array, info=None):
+ obj = np.asarray(input_array).view(cls)
+ obj.info = info
+ return obj
+
+ def __array_finalize__(self, obj):
+ print('In __array_finalize__:')
+ print(' self is %s' % repr(self))
+ print(' obj is %s' % repr(obj))
+ if obj is None: return
+ self.info = getattr(obj, 'info', None)
+
+ def __array_wrap__(self, out_arr, context=None):
+ print('In __array_wrap__:')
+ print(' self is %s' % repr(self))
+ print(' arr is %s' % repr(out_arr))
+ # then just call the parent
+ return super(MySubClass, self).__array_wrap__(out_arr, context)
+
+We run a ufunc on an instance of our new array:
+
+>>> obj = MySubClass(np.arange(5), info='spam')
+In __array_finalize__:
+ self is MySubClass([0, 1, 2, 3, 4])
+ obj is array([0, 1, 2, 3, 4])
+>>> arr2 = np.arange(5)+1
+>>> ret = np.add(arr2, obj)
+In __array_wrap__:
+ self is MySubClass([0, 1, 2, 3, 4])
+ arr is array([1, 3, 5, 7, 9])
+In __array_finalize__:
+ self is MySubClass([1, 3, 5, 7, 9])
+ obj is MySubClass([0, 1, 2, 3, 4])
+>>> ret
+MySubClass([1, 3, 5, 7, 9])
+>>> ret.info
+'spam'
+
+Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method
+with arguments ``self`` as ``obj``, and ``out_arr`` as the (ndarray) result
+of the addition. In turn, the default ``__array_wrap__``
+(``ndarray.__array_wrap__``) has cast the result to class ``MySubClass``,
+and called ``__array_finalize__`` - hence the copying of the ``info``
+attribute. This has all happened at the C level.
+
+But, we could do anything we wanted:
+
+.. testcode::
+
+ class SillySubClass(np.ndarray):
+
+ def __array_wrap__(self, arr, context=None):
+ return 'I lost your data'
+
+>>> arr1 = np.arange(5)
+>>> obj = arr1.view(SillySubClass)
+>>> arr2 = np.arange(5)
+>>> ret = np.multiply(obj, arr2)
+>>> ret
+'I lost your data'
+
+So, by defining a specific ``__array_wrap__`` method for our subclass,
+we can tweak the output from ufuncs. The ``__array_wrap__`` method
+requires ``self``, then an argument - which is the result of the ufunc -
+and an optional parameter *context*. Ufuncs pass this parameter as a
+3-element tuple (the ufunc object, the arguments of the ufunc, and the
+index of the output), but it is not set by other numpy functions. Although,
+as seen above, it is possible to do otherwise, ``__array_wrap__`` should
+return an instance of its containing class. See the masked array
+subclass for an implementation.
+
+In addition to ``__array_wrap__``, which is called on the way out of the
+ufunc, there is also an ``__array_prepare__`` method which is called on
+the way into the ufunc, after the output arrays are created but before any
+computation has been performed. The default implementation does nothing
+but pass through the array. ``__array_prepare__`` should not attempt to
+access the array data or resize the array; it is intended for setting the
+output array type, updating attributes and metadata, and performing any
+checks based on the input that may be desired before computation begins.
+Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or
+subclass thereof or raise an error.
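+
+A minimal sketch of such an ``__array_prepare__`` override (a hypothetical
+``LoggedArray`` subclass; it only inspects the freshly allocated output
+before delegating):
+
+.. testcode::
+
+    import numpy as np
+
+    class LoggedArray(np.ndarray):
+
+        def __array_prepare__(self, out_arr, context=None):
+            # called on the way into the ufunc, before any computation;
+            # out_arr is the freshly allocated output array
+            print('preparing an output of shape', out_arr.shape)
+            # must return an ndarray or a subclass thereof
+            return super(LoggedArray, self).__array_prepare__(out_arr, context)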
+
+Extra gotchas - custom ``__del__`` methods and ndarray.base
+-----------------------------------------------------------
+
+One of the problems that ndarray solves is keeping track of memory
+ownership of ndarrays and their views. Consider the case where we have
+created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``.
+The two objects are looking at the same memory. NumPy keeps track of
+where the data came from for a particular array or view, with the
+``base`` attribute:
+
+>>> # A normal ndarray, that owns its own data
+>>> arr = np.zeros((4,))
+>>> # In this case, base is None
+>>> arr.base is None
+True
+>>> # We take a view
+>>> v1 = arr[1:]
+>>> # base now points to the array that it derived from
+>>> v1.base is arr
+True
+>>> # Take a view of a view
+>>> v2 = v1[1:]
+>>> # base points to the original array that it was derived from
+>>> v2.base is arr
+True
+
+In general, if the array owns its own memory, as for ``arr`` in this
+case, then ``arr.base`` will be None - there are some exceptions to this
+- see the numpy book for more details.
+
+The ``base`` attribute is useful in being able to tell whether we have
+a view or the original array. This in turn can be useful if we need
+to know whether or not to do some specific cleanup when the subclassed
+array is deleted. For example, we may only want to do the cleanup if
+the original array is deleted, but not the views. For an example of
+how this can work, have a look at the ``memmap`` class in
+``numpy.core``.
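+
+A minimal sketch of the idea (a hypothetical ``CleanupArray``; real code
+such as ``memmap`` is considerably more careful):
+
+.. testcode::
+
+    import numpy as np
+
+    class CleanupArray(np.ndarray):
+
+        def __del__(self):
+            # only the object that owns its memory performs the cleanup;
+            # views (base is not None) skip it
+            if self.base is None:
+                print('cleaning up the owning array')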
+
+Subclassing and Downstream Compatibility
+----------------------------------------
+
+When sub-classing ``ndarray`` or creating duck-types that mimic the ``ndarray``
+interface, it is your responsibility to decide how aligned your APIs will be
+with those of numpy. For convenience, many numpy functions that have a corresponding
+``ndarray`` method (e.g., ``sum``, ``mean``, ``take``, ``reshape``) work by checking
+if the first argument to a function has a method of the same name. If it exists, the
+method is called instead of coercing the arguments to a numpy array.
+
+For example, if you want your sub-class or duck-type to be compatible with
+numpy's ``sum`` function, the method signature for this object's ``sum`` method
+should be the following:
+
+.. testcode::
+
+ def sum(self, axis=None, dtype=None, out=None, keepdims=False):
+ ...
+
+This matches the method signature of ``np.sum`` exactly, so if a user calls
+``np.sum`` on this object, numpy will call the object's own ``sum`` method and
+pass in the arguments enumerated above, and no errors will be raised
+because the signatures are fully compatible with each other.
+
+If, however, you decide to deviate from this signature and do something like this:
+
+.. testcode::
+
+ def sum(self, axis=None, dtype=None):
+ ...
+
+then the object is no longer compatible with ``np.sum``: calling ``np.sum``
+will pass in the unexpected arguments ``out`` and ``keepdims``, causing a
+``TypeError`` to be raised.
+
+If you wish to maintain compatibility with numpy and its subsequent versions (which
+might add new keyword arguments) but do not want to surface all of numpy's arguments,
+your function's signature should accept ``**kwargs``. For example:
+
+.. testcode::
+
+ def sum(self, axis=None, dtype=None, **unused_kwargs):
+ ...
+
+This object is now compatible with ``np.sum`` again because any extraneous arguments
+(i.e. keywords that are not ``axis`` or ``dtype``) will be hidden away in the
+``**unused_kwargs`` parameter.
+
+
diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst
index 5ce5af15a..3c39b35d0 100644
--- a/doc/source/user/basics.types.rst
+++ b/doc/source/user/basics.types.rst
@@ -4,4 +4,339 @@ Data types
.. seealso:: :ref:`Data type objects <arrays.dtypes>`
-.. automodule:: numpy.doc.basics
+Array types and conversions between types
+=========================================
+
+NumPy supports a much greater variety of numerical types than Python does.
+This section shows which are available, and how to modify an array's data-type.
+
+The primitive types supported are tied closely to those in C:
+
+.. list-table::
+ :header-rows: 1
+
+ * - Numpy type
+ - C type
+ - Description
+
+ * - `np.bool_`
+ - ``bool``
+ - Boolean (True or False) stored as a byte
+
+ * - `np.byte`
+ - ``signed char``
+ - Platform-defined
+
+ * - `np.ubyte`
+ - ``unsigned char``
+ - Platform-defined
+
+ * - `np.short`
+ - ``short``
+ - Platform-defined
+
+ * - `np.ushort`
+ - ``unsigned short``
+ - Platform-defined
+
+ * - `np.intc`
+ - ``int``
+ - Platform-defined
+
+ * - `np.uintc`
+ - ``unsigned int``
+ - Platform-defined
+
+ * - `np.int_`
+ - ``long``
+ - Platform-defined
+
+ * - `np.uint`
+ - ``unsigned long``
+ - Platform-defined
+
+ * - `np.longlong`
+ - ``long long``
+ - Platform-defined
+
+ * - `np.ulonglong`
+ - ``unsigned long long``
+ - Platform-defined
+
+ * - `np.half` / `np.float16`
+ -
+ - Half precision float:
+ sign bit, 5 bits exponent, 10 bits mantissa
+
+ * - `np.single`
+ - ``float``
+ - Platform-defined single precision float:
+ typically sign bit, 8 bits exponent, 23 bits mantissa
+
+ * - `np.double`
+ - ``double``
+ - Platform-defined double precision float:
+ typically sign bit, 11 bits exponent, 52 bits mantissa.
+
+ * - `np.longdouble`
+ - ``long double``
+ - Platform-defined extended-precision float
+
+ * - `np.csingle`
+ - ``float complex``
+ - Complex number, represented by two single-precision floats (real and imaginary components)
+
+ * - `np.cdouble`
+ - ``double complex``
+ - Complex number, represented by two double-precision floats (real and imaginary components).
+
+ * - `np.clongdouble`
+ - ``long double complex``
+ - Complex number, represented by two extended-precision floats (real and imaginary components).
+
+
+Since many of these have platform-dependent definitions, a set of fixed-size
+aliases are provided:
+
+.. list-table::
+ :header-rows: 1
+
+ * - Numpy type
+ - C type
+ - Description
+
+ * - `np.int8`
+ - ``int8_t``
+ - Byte (-128 to 127)
+
+ * - `np.int16`
+ - ``int16_t``
+ - Integer (-32768 to 32767)
+
+ * - `np.int32`
+ - ``int32_t``
+ - Integer (-2147483648 to 2147483647)
+
+ * - `np.int64`
+ - ``int64_t``
+ - Integer (-9223372036854775808 to 9223372036854775807)
+
+ * - `np.uint8`
+ - ``uint8_t``
+ - Unsigned integer (0 to 255)
+
+ * - `np.uint16`
+ - ``uint16_t``
+ - Unsigned integer (0 to 65535)
+
+ * - `np.uint32`
+ - ``uint32_t``
+ - Unsigned integer (0 to 4294967295)
+
+ * - `np.uint64`
+ - ``uint64_t``
+ - Unsigned integer (0 to 18446744073709551615)
+
+ * - `np.intp`
+ - ``intptr_t``
+ - Integer used for indexing, typically the same as ``ssize_t``
+
+ * - `np.uintp`
+ - ``uintptr_t``
+ - Integer large enough to hold a pointer
+
+ * - `np.float32`
+ - ``float``
+ -
+
+ * - `np.float64` / `np.float_`
+ - ``double``
+ - Note that this matches the precision of the builtin python `float`.
+
+ * - `np.complex64`
+ - ``float complex``
+ - Complex number, represented by two 32-bit floats (real and imaginary components)
+
+ * - `np.complex128` / `np.complex_`
+ - ``double complex``
+ - Note that this matches the precision of the builtin python `complex`.
+
+
+NumPy numerical types are instances of ``dtype`` (data-type) objects, each
+having unique characteristics. Once you have imported NumPy using
+
+ ::
+
+ >>> import numpy as np
+
+the dtypes are available as ``np.bool_``, ``np.float32``, etc.
+
+Advanced types, not listed in the table above, are explored in
+section :ref:`structured_arrays`.
+
+There are 5 basic numerical types representing booleans (bool), integers (int),
+unsigned integers (uint), floating point (float) and complex. Those with numbers
+in their name indicate the bitsize of the type (i.e. how many bits are needed
+to represent a single value in memory). Some types, such as ``int`` and
+``intp``, have differing bitsizes, dependent on the platforms (e.g. 32-bit
+vs. 64-bit machines). This should be taken into account when interfacing
+with low-level code (such as C or Fortran) where the raw memory is addressed.
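+
+For example, the bit size of the indexing integer can be queried at run
+time (output shown for a 64-bit platform)::
+
+    >>> np.dtype(np.intp).itemsize * 8
+    64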
+
+Data-types can be used as functions to convert python numbers to array scalars
+(see the array scalar section for an explanation), python sequences of numbers
+to arrays of that type, or as arguments to the dtype keyword that many numpy
+functions or methods accept. Some examples::
+
+ >>> import numpy as np
+ >>> x = np.float32(1.0)
+ >>> x
+ 1.0
+ >>> y = np.int_([1,2,4])
+ >>> y
+ array([1, 2, 4])
+ >>> z = np.arange(3, dtype=np.uint8)
+ >>> z
+ array([0, 1, 2], dtype=uint8)
+
+Array types can also be referred to by character codes, mostly to retain
+backward compatibility with older packages such as Numeric. Some
+documentation may still refer to these, for example::
+
+ >>> np.array([1, 2, 3], dtype='f')
+ array([ 1., 2., 3.], dtype=float32)
+
+We recommend using dtype objects instead.
+
+To convert the type of an array, use the .astype() method (preferred) or
+the type itself as a function. For example: ::
+
+ >>> z.astype(float) #doctest: +NORMALIZE_WHITESPACE
+ array([ 0., 1., 2.])
+ >>> np.int8(z)
+ array([0, 1, 2], dtype=int8)
+
+Note that, above, we use the *Python* float object as a dtype. NumPy knows
+that ``int`` refers to ``np.int_``, ``bool`` means ``np.bool_``,
+that ``float`` is ``np.float_`` and ``complex`` is ``np.complex_``.
+The other data-types do not have Python equivalents.
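+
+For example::
+
+    >>> np.dtype(float) == np.dtype(np.float_)
+    True
+    >>> np.dtype(int) == np.dtype(np.int_)
+    True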
+
+To determine the type of an array, look at the dtype attribute::
+
+ >>> z.dtype
+ dtype('uint8')
+
+dtype objects also contain information about the type, such as its bit-width
+and its byte-order. The data type can also be used indirectly to query
+properties of the type, such as whether it is an integer::
+
+ >>> d = np.dtype(int)
+ >>> d
+ dtype('int32')
+
+ >>> np.issubdtype(d, np.integer)
+ True
+
+ >>> np.issubdtype(d, np.floating)
+ False
+
+
+Array Scalars
+=============
+
+NumPy generally returns elements of arrays as array scalars (a scalar
+with an associated dtype). Array scalars differ from Python scalars, but
+for the most part they can be used interchangeably. There are some
+exceptions, such as when code requires very specific attributes of a scalar
+or when it checks specifically whether a value is a Python scalar. Generally,
+problems are easily fixed by explicitly converting array scalars
+to Python scalars, using the corresponding Python type function
+(e.g., ``int``, ``float``, ``complex``, ``str``).
+
+The primary advantage of using array scalars is that
+they preserve the array type (Python may not have a matching scalar type
+available, e.g. ``int16``). Therefore, the use of array scalars ensures
+identical behaviour between arrays and scalars, irrespective of whether the
+value is inside an array or not. NumPy scalars also have many of the same
+methods arrays do.
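+
+For example, an element extracted from an array is an array scalar, which
+can be converted explicitly when a true Python scalar is required::
+
+    >>> a = np.array([1, 2, 3], dtype=np.int16)
+    >>> s = a[0]
+    >>> type(s)
+    <class 'numpy.int16'>
+    >>> int(s)  # explicit conversion to a Python scalar
+    1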
+
+Overflow Errors
+===============
+
+The fixed size of NumPy numeric types may cause overflow errors when a value
+requires more memory than available in the data type. For example,
+`numpy.power` evaluates ``100 ** 8`` correctly for 64-bit integers,
+but gives 1874919424 (incorrect) for a 32-bit integer.
+
+ >>> np.power(100, 8, dtype=np.int64)
+ 10000000000000000
+ >>> np.power(100, 8, dtype=np.int32)
+ 1874919424
+
+The behaviour of NumPy and Python integer types differs significantly for
+integer overflows and may confuse users expecting NumPy integers to behave
+similarly to Python's ``int``. Unlike NumPy, the size of Python's ``int`` is
+flexible. This means Python integers may expand to accommodate any integer and
+will not overflow.
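+
+For example, compare the same computation carried out with a Python ``int``
+and with a fixed-size NumPy integer (reusing the values from above)::
+
+    >>> 100 ** 8                           # Python int: arbitrary precision
+    10000000000000000
+    >>> np.power(100, 8, dtype=np.int32)   # fixed size: wraps around
+    1874919424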
+
+NumPy provides `numpy.iinfo` and `numpy.finfo` to verify the
+minimum or maximum values of NumPy integer and floating point values
+respectively ::
+
+ >>> np.iinfo(int) # Bounds of the default integer on this system.
+ iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
+ >>> np.iinfo(np.int32) # Bounds of a 32-bit integer
+ iinfo(min=-2147483648, max=2147483647, dtype=int32)
+ >>> np.iinfo(np.int64) # Bounds of a 64-bit integer
+ iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
+
+If 64-bit integers are still too small the result may be cast to a
+floating point number. Floating point numbers offer a larger, but inexact,
+range of possible values.
+
+ >>> np.power(100, 100, dtype=np.int64) # Incorrect even with 64-bit int
+ 0
+ >>> np.power(100, 100, dtype=np.float64)
+ 1e+200
+
+Extended Precision
+==================
+
+Python's floating-point numbers are usually 64-bit floating-point numbers,
+nearly equivalent to ``np.float64``. In some unusual situations it may be
+useful to use floating-point numbers with more precision. Whether this
+is possible in numpy depends on the hardware and on the development
+environment: specifically, x86 machines provide hardware floating-point
+with 80-bit precision, and while most C compilers provide this as their
+``long double`` type, MSVC (standard for Windows builds) makes
+``long double`` identical to ``double`` (64 bits). NumPy makes the
+compiler's ``long double`` available as ``np.longdouble`` (and
+``np.clongdouble`` for the complex numbers). You can find out what your
+numpy provides with ``np.finfo(np.longdouble)``.
+
+NumPy does not provide a dtype with more precision than C's
+``long double``; in particular, the 128-bit IEEE quad precision
+data type (FORTRAN's ``REAL*16``) is not available.
+
+For efficient memory alignment, ``np.longdouble`` is usually stored
+padded with zero bits, either to 96 or 128 bits. Which is more efficient
+depends on hardware and development environment; typically on 32-bit
+systems they are padded to 96 bits, while on 64-bit systems they are
+typically padded to 128 bits. ``np.longdouble`` is padded to the system
+default; ``np.float96`` and ``np.float128`` are provided for users who
+want specific padding. In spite of the names, ``np.float96`` and
+``np.float128`` provide only as much precision as ``np.longdouble``,
+that is, 80 bits on most x86 machines and 64 bits in standard
+Windows builds.
+
+Be warned that even if ``np.longdouble`` offers more precision than
+python ``float``, it is easy to lose that extra precision, since
+python often forces values to pass through ``float``. For example,
+the ``%`` formatting operator requires its arguments to be converted
+to standard python types, and it is therefore impossible to preserve
+extended precision even if many decimal places are requested. It can
+be useful to test your code with the value
+``1 + np.finfo(np.longdouble).eps``.
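+
+For example (the second result assumes ``np.longdouble`` has more precision
+than ``float64``, which is not the case on MSVC builds)::
+
+    >>> x = np.longdouble(1) + np.finfo(np.longdouble).eps
+    >>> x == 1
+    False
+    >>> float(x) == 1.0  # the extra precision is lost in the conversion
+    True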
+
+
diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst
index 4e6a29d9f..3a79f0f2e 100644
--- a/doc/source/user/index.rst
+++ b/doc/source/user/index.rst
@@ -24,3 +24,20 @@ classes contained in the package, see the :ref:`reference`.
c-info
tutorials_index
howtos_index
+
+
+.. These are stuck here to avoid the "WARNING: document isn't included in any
+ toctree" message
+
+.. toctree::
+ :hidden:
+
+ explanations_index
+ ../f2py/index
+ ../glossary
+ ../dev/underthehood
+ ../docs/index
+ ../bugs
+ ../release
+ ../about
+ ../license
diff --git a/doc/source/user/misc.rst b/doc/source/user/misc.rst
index c10aea486..031ce4efa 100644
--- a/doc/source/user/misc.rst
+++ b/doc/source/user/misc.rst
@@ -2,4 +2,224 @@
Miscellaneous
*************
-.. automodule:: numpy.doc.misc
+IEEE 754 Floating Point Special Values
+--------------------------------------
+
+Special values defined in numpy: ``nan``, ``inf``.
+
+NaNs can be used as a poor-man's mask (if you don't care what the
+original value was).
+
+Note: you cannot use equality to test for NaNs. E.g.: ::
+
+ >>> myarr = np.array([1., 0., np.nan, 3.])
+ >>> np.nonzero(myarr == np.nan)
+ (array([], dtype=int64),)
+ >>> np.nan == np.nan # is always False! Use special numpy functions instead.
+ False
+ >>> myarr[myarr == np.nan] = 0. # doesn't work
+ >>> myarr
+ array([ 1., 0., nan, 3.])
+ >>> myarr[np.isnan(myarr)] = 0. # use this instead
+ >>> myarr
+ array([ 1., 0., 0., 3.])
+
+Other related special value functions: ::
+
+ isinf(): True if value is inf
+ isfinite(): True if not nan or inf
+ nan_to_num(): Map nan to 0, inf to max float, -inf to min float
+
+The following corresponds to the usual functions except that nans are excluded
+from the results: ::
+
+ nansum()
+ nanmax()
+ nanmin()
+ nanargmax()
+ nanargmin()
+
+ >>> x = np.arange(10.)
+ >>> x[3] = np.nan
+ >>> x.sum()
+ nan
+ >>> np.nansum(x)
+ 42.0
+
+How numpy handles numerical exceptions
+--------------------------------------
+
+The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow``
+and ``'ignore'`` for ``underflow``. But this can be changed, and it can be
+set individually for different kinds of exceptions. The different behaviors
+are:
+
+ - 'ignore' : Take no action when the exception occurs.
+ - 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module).
+ - 'raise' : Raise a `FloatingPointError`.
+ - 'call' : Call a function specified using the `seterrcall` function.
+ - 'print' : Print a warning directly to ``stdout``.
+ - 'log' : Record error in a Log object specified by `seterrcall`.
+
+These behaviors can be set for all kinds of errors or specific ones:
+
+ - all : apply to all numeric exceptions
+ - invalid : when NaNs are generated
+ - divide : divide by zero (for integers as well!)
+ - overflow : floating point overflows
+ - underflow : floating point underflows
+
+Note that integer divide-by-zero is handled by the same machinery.
+These behaviors are set on a per-thread basis.
+
+Examples
+--------
+
+::
+
+ >>> oldsettings = np.seterr(all='warn')
+ >>> np.zeros(5,dtype=np.float32)/0.
+ invalid value encountered in divide
+ >>> j = np.seterr(under='ignore')
+ >>> np.array([1.e-100])**10
+ >>> j = np.seterr(invalid='raise')
+ >>> np.sqrt(np.array([-1.]))
+ FloatingPointError: invalid value encountered in sqrt
+ >>> def errorhandler(errstr, errflag):
+ ... print("saw stupid error!")
+ >>> np.seterrcall(errorhandler)
+ <function errorhandler at 0x...>
+ >>> j = np.seterr(all='call')
+ >>> np.zeros(5, dtype=np.int32)/0
+ saw stupid error!
+ array([nan, nan, nan, nan, nan])
+ >>> j = np.seterr(**oldsettings) # restore previous
+ ... # error-handling settings
+
+Interfacing to C
+----------------
+Only a survey of the choices. Little detail on how each works.
+
+1) Bare metal, wrap your own C-code manually.
+
+ - Plusses:
+
+ - Efficient
+ - No dependencies on other tools
+
+ - Minuses:
+
+ - Lots of learning overhead:
+
+ - need to learn basics of Python C API
+ - need to learn basics of numpy C API
+ - need to learn how to handle reference counting and love it.
+
+ - Reference counting often difficult to get right.
+
+ - getting it wrong leads to memory leaks, and worse, segfaults
+
+ - The Python C API changes over time (it changed substantially for Python 3)
+
+2) Cython
+
+ - Plusses:
+
+ - avoid learning C API's
+ - no dealing with reference counting
+ - can code in pseudo python and generate C code
+ - can also interface to existing C code
+ - should shield you from changes to the Python C API
+ - has become the de-facto standard within the scientific Python community
+ - fast indexing support for arrays
+
+ - Minuses:
+
+ - Can write code in non-standard form which may become obsolete
+ - Not as flexible as manual wrapping
+
+3) ctypes
+
+ - Plusses:
+
+ - part of Python standard library
+ - good for interfacing to existing sharable libraries, particularly
+ Windows DLLs
+ - avoids API/reference counting issues
+ - good numpy support: arrays have all these in their ctypes
+ attribute: ::
+
+ a.ctypes.data a.ctypes.get_strides
+ a.ctypes.data_as a.ctypes.shape
+ a.ctypes.get_as_parameter a.ctypes.shape_as
+ a.ctypes.get_data a.ctypes.strides
+ a.ctypes.get_shape a.ctypes.strides_as
+
+ - Minuses:
+
+ - can't use for writing code to be turned into C extensions, only a wrapper
+ tool.
+
+4) SWIG (automatic wrapper generator)
+
+ - Plusses:
+
+ - around a long time
+ - multiple scripting language support
+ - C++ support
+ - Good for wrapping large (many functions) existing C libraries
+
+ - Minuses:
+
+ - generates lots of code between Python and the C code
+ - can cause performance problems that are nearly impossible to optimize
+ out
+ - interface files can be hard to write
+ - doesn't necessarily avoid reference counting issues or needing to know
+ API's
+
+5) scipy.weave
+
+ - Plusses:
+
+ - can turn many numpy expressions into C code
+ - dynamic compiling and loading of generated C code
+ - can embed pure C code in Python module and have weave extract, generate
+ interfaces and compile, etc.
+
+ - Minuses:
+
+ - Future very uncertain: it's the only part of Scipy not ported to Python 3
+ and is effectively deprecated in favor of Cython.
+
+6) Psyco
+
+ - Plusses:
+
+ - Turns pure python into efficient machine code through jit-like
+ optimizations
+ - very fast when it optimizes well
+
+ - Minuses:
+
+ - Only on Intel (Windows?)
+ - Doesn't do much for numpy?
+
+Interfacing to Fortran:
+-----------------------
+The clear choice to wrap Fortran code is
+`f2py <https://docs.scipy.org/doc/numpy/f2py/>`_.
+
+Pyfort is an older alternative that is no longer supported.
+Fwrap is a newer project that looked promising but is no longer being
+developed.
+
+Interfacing to C++:
+-------------------
+ 1) Cython
+ 2) CXX
+ 3) Boost.python
+ 4) SWIG
+ 5) SIP (used mainly in PyQT)
+
+
diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst
index 602192ecd..547d5b2a0 100644
--- a/doc/source/user/numpy-for-matlab-users.rst
+++ b/doc/source/user/numpy-for-matlab-users.rst
@@ -1,7 +1,7 @@
.. _numpy-for-matlab-users:
======================
-NumPy for Matlab users
+NumPy for MATLAB users
======================
Introduction
@@ -9,9 +9,9 @@ Introduction
MATLAB® and NumPy/SciPy have a lot in common. But there are many
differences. NumPy and SciPy were created to do numerical and scientific
-computing in the most natural way with Python, not to be MATLAB® clones.
+computing in the most natural way with Python, not to be MATLAB clones.
This page is intended to be a place to collect wisdom about the
-differences, mostly for the purpose of helping proficient MATLAB® users
+differences, mostly for the purpose of helping proficient MATLAB users
become proficient NumPy and SciPy users.
.. raw:: html
@@ -25,7 +25,7 @@ Some Key Differences
.. list-table::
- * - In MATLAB®, the basic data type is a multidimensional array of
+ * - In MATLAB, the basic data type is a multidimensional array of
double precision floating point numbers. Most expressions take such
arrays and return such arrays. Operations on the 2-D instances of
these arrays are designed to act more or less like matrix operations
@@ -36,24 +36,24 @@ Some Key Differences
(though for matrix multiplication, one can use the ``@`` operator
in python 3.5 and above).
- * - MATLAB® uses 1 (one) based indexing. The initial element of a
+ * - MATLAB uses 1 (one) based indexing. The initial element of a
sequence is found using a(1).
:ref:`See note INDEXING <numpy-for-matlab-users.notes>`
- Python uses 0 (zero) based indexing. The initial element of a
sequence is found using a[0].
- * - MATLAB®'s scripting language was created for doing linear algebra.
+ * - MATLAB's scripting language was created for doing linear algebra.
The syntax for basic matrix operations is nice and clean, but the API
for adding GUIs and making full-fledged applications is more or less
an afterthought.
- NumPy is based on Python, which was designed from the outset to be
- an excellent general-purpose programming language. While Matlab's
+ an excellent general-purpose programming language. While MATLAB's
syntax for some array manipulations is more compact than
NumPy's, NumPy (by virtue of being an add-on to Python) can do many
- things that Matlab just cannot, for instance dealing properly with
+ things that MATLAB just cannot, for instance dealing properly with
stacks of matrices.
- * - In MATLAB®, arrays have pass-by-value semantics, with a lazy
+ * - In MATLAB, arrays have pass-by-value semantics, with a lazy
copy-on-write scheme to prevent actually creating copies until they
are actually needed. Slice operations copy parts of the array.
- In NumPy arrays have pass-by-reference semantics. Slice operations
@@ -158,7 +158,7 @@ There are pros and cons to using both:
- ``matrix``
- - ``:\\`` Behavior is more like that of MATLAB® matrices.
+ - ``:\\`` Behavior is more like that of MATLAB matrices.
- ``<:(`` Maximum of two-dimensional. To hold three-dimensional data you
need ``array`` or perhaps a Python list of ``matrix``.
- ``<:(`` Minimum of two-dimensional. You cannot have vectors. They must be
@@ -183,7 +183,7 @@ deprecate ``matrix`` eventually.
Table of Rough MATLAB-NumPy Equivalents
=======================================
-The table below gives rough equivalents for some common MATLAB®
+The table below gives rough equivalents for some common MATLAB
expressions. **These are not exact equivalents**, but rather should be
taken as hints to get you going in the right direction. For more detail
read the built-in documentation on the NumPy functions.
@@ -272,7 +272,7 @@ Linear Algebra Equivalents
* - ``size(a,n)``
- ``a.shape[n-1]``
- get the number of elements of the n-th dimension of array ``a``. (Note
- that MATLAB® uses 1 based indexing while Python uses 0 based indexing,
+ that MATLAB uses 1 based indexing while Python uses 0 based indexing,
See note :ref:`INDEXING <numpy-for-matlab-users.notes>`)
* - ``[ 1 2 3; 4 5 6 ]``
@@ -356,7 +356,7 @@ Linear Algebra Equivalents
* - ``(a>0.5)``
- ``(a>0.5)``
- - matrix whose i,jth element is (a_ij > 0.5). The Matlab result is an
+ - matrix whose i,jth element is (a_ij > 0.5). The MATLAB result is an
array of 0s and 1s. The NumPy result is an array of the boolean
values ``False`` and ``True``.
@@ -395,7 +395,7 @@ Linear Algebra Equivalents
* - ``y=x(:)``
- ``y = x.flatten()``
- turn array into vector (note that this forces a copy). To obtain the
- same data ordering as in Matlab, use ``x.flatten('F')``.
+ same data ordering as in MATLAB, use ``x.flatten('F')``.
* - ``1:10``
- ``arange(1.,11.)`` or ``r_[1.:11.]`` or ``r_[1:10:10j]``
@@ -475,7 +475,7 @@ Linear Algebra Equivalents
* - ``max(max(a))``
- ``a.max()``
- - maximum element of ``a`` (with ndims(a)<=2 for matlab)
+ - maximum element of ``a`` (with ndims(a)<=2 for MATLAB)
* - ``max(a)``
- ``a.max(0)``
@@ -539,7 +539,7 @@ Linear Algebra Equivalents
* - ``chol(a)``
- ``linalg.cholesky(a).T``
- - cholesky factorization of a matrix (``chol(a)`` in matlab returns an
+ - cholesky factorization of a matrix (``chol(a)`` in MATLAB returns an
upper triangular matrix, but ``linalg.cholesky(a)`` returns a lower
triangular matrix)
@@ -561,7 +561,7 @@ Linear Algebra Equivalents
* - ``[L,U,P]=lu(a)``
- ``L,U = scipy.linalg.lu(a)`` or ``LU,P=scipy.linalg.lu_factor(a)``
- - LU decomposition (note: P(Matlab) == transpose(P(numpy)) )
+ - LU decomposition (note: P(MATLAB) == transpose(P(numpy)) )
* - ``conjgrad``
- ``scipy.sparse.linalg.cg``
@@ -613,7 +613,7 @@ but the commands ``help`` and ``source`` will usually list the filename
where the function is located. Python also has an ``inspect`` module (do
``import inspect``) which provides a ``getfile`` that often works.
-\ **INDEXING**: MATLAB® uses one based indexing, so the initial element
+\ **INDEXING**: MATLAB uses one based indexing, so the initial element
of a sequence has index 1. Python uses zero based indexing, so the
initial element of a sequence has index 0. Confusion and flamewars arise
because each has advantages and disadvantages. One based indexing is
@@ -623,7 +623,7 @@ indexing <https://groups.google.com/group/comp.lang.python/msg/1bf4d925dfbf368?q
See also `a text by prof.dr. Edsger W.
Dijkstra <https://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html>`__.
-\ **RANGES**: In MATLAB®, ``0:5`` can be used as both a range literal
+\ **RANGES**: In MATLAB, ``0:5`` can be used as both a range literal
and a 'slice' index (inside parentheses); however, in Python, constructs
like ``0:5`` can *only* be used as a slice index (inside square
brackets). Thus the somewhat quirky ``r_`` object was created to allow
@@ -632,46 +632,46 @@ numpy to have a similarly terse range construction mechanism. Note that
*indexed* using square brackets, which allows the use of Python's slice
syntax in the arguments.
-\ **LOGICOPS**: & or \| in NumPy is bitwise AND/OR, while in Matlab &
+\ **LOGICOPS**: & or \| in NumPy is bitwise AND/OR, while in MATLAB &
and \| are logical AND/OR. The difference should be clear to anyone with
significant programming experience. The two can appear to work the same,
-but there are important differences. If you would have used Matlab's &
+but there are important differences. If you would have used MATLAB's &
or \| operators, you should use the NumPy ufuncs
-logical\_and/logical\_or. The notable differences between Matlab's and
+logical\_and/logical\_or. The notable differences between MATLAB's and
NumPy's & and \| operators are:
- Non-logical {0,1} inputs: NumPy's output is the bitwise AND of the
- inputs. Matlab treats any non-zero value as 1 and returns the logical
- AND. For example (3 & 4) in NumPy is 0, while in Matlab both 3 and 4
+ inputs. MATLAB treats any non-zero value as 1 and returns the logical
+ AND. For example (3 & 4) in NumPy is 0, while in MATLAB both 3 and 4
are considered logical true and (3 & 4) returns 1.
- Precedence: NumPy's & operator is higher precedence than logical
- operators like < and >; Matlab's is the reverse.
+ operators like < and >; MATLAB's is the reverse.
If you know you have boolean arguments, you can get away with using
NumPy's bitwise operators, but be careful with parentheses, like this: z
= (x > 1) & (x < 2). The absence of NumPy operator forms of logical\_and
and logical\_or is an unfortunate consequence of Python's design.
-**RESHAPE and LINEAR INDEXING**: Matlab always allows multi-dimensional
+**RESHAPE and LINEAR INDEXING**: MATLAB always allows multi-dimensional
arrays to be accessed using scalar or linear indices, NumPy does not.
-Linear indices are common in Matlab programs, e.g. find() on a matrix
+Linear indices are common in MATLAB programs, e.g. find() on a matrix
returns them, whereas NumPy's find behaves differently. When converting
-Matlab code it might be necessary to first reshape a matrix to a linear
+MATLAB code it might be necessary to first reshape a matrix to a linear
sequence, perform some indexing operations and then reshape back. As
reshape (usually) produces views onto the same storage, it should be
possible to do this fairly efficiently. Note that the scan order used by
-reshape in NumPy defaults to the 'C' order, whereas Matlab uses the
+reshape in NumPy defaults to the 'C' order, whereas MATLAB uses the
Fortran order. If you are simply converting to a linear sequence and
-back this doesn't matter. But if you are converting reshapes from Matlab
-code which relies on the scan order, then this Matlab code: z =
+back this doesn't matter. But if you are converting reshapes from MATLAB
+code which relies on the scan order, then this MATLAB code: z =
reshape(x,3,4); should become z = x.reshape(3,4,order='F').copy() in
NumPy.
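A quick check of the scan-order difference (illustrative session):

    >>> import numpy as np
    >>> x = np.arange(12)
    >>> x.reshape(3, 4)[0]             # NumPy's default 'C' order fills rows first
    array([0, 1, 2, 3])
    >>> x.reshape(3, 4, order='F')[0]  # Fortran order, matching MATLAB's reshape
    array([0, 3, 6, 9])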
Customizing Your Environment
============================
-In MATLAB® the main tool available to you for customizing the
+In MATLAB the main tool available to you for customizing the
environment is to modify the search path with the locations of your
favorite functions. You can put such customizations into a startup
script that MATLAB will run on startup.
@@ -685,7 +685,7 @@ NumPy, or rather Python, has similar facilities.
interpreter is started, define the ``PYTHONSTARTUP`` environment
variable to contain the name of your startup script.
-Unlike MATLAB®, where anything on your path can be called immediately,
+Unlike MATLAB, where anything on your path can be called immediately,
with Python you need to first do an 'import' statement to make functions
in a particular file accessible.
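For example, a minimal startup file (the path is hypothetical; any file
named by ``PYTHONSTARTUP`` works):

    # ~/.pythonrc.py: run whenever the interactive interpreter starts
    import numpy as np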
@@ -712,7 +712,7 @@ this is just an example, not a statement of "best practices"):
Links
=====
-See http://mathesaurus.sf.net/ for another MATLAB®/NumPy
+See http://mathesaurus.sf.net/ for another MATLAB/NumPy
cross-reference.
An extensive list of tools for scientific work with python can be
diff --git a/doc/source/user/tutorial-ma.rst b/doc/source/user/tutorial-ma.rst
index c28353371..88bad3cbe 100644
--- a/doc/source/user/tutorial-ma.rst
+++ b/doc/source/user/tutorial-ma.rst
@@ -9,7 +9,8 @@ Tutorial: Masked Arrays
import numpy as np
np.random.seed(1)
-**Prerequisites**
+Prerequisites
+-------------
Before reading this tutorial, you should know a bit of Python. If you
would like to refresh your memory, take a look at the
@@ -18,13 +19,15 @@ would like to refresh your memory, take a look at the
If you want to be able to run the examples in this tutorial, you should also
have `matplotlib <https://matplotlib.org/>`_ installed on your computer.
-**Learner profile**
+Learner profile
+---------------
This tutorial is for people who have a basic understanding of NumPy and want to
understand how masked arrays and the :mod:`numpy.ma` module can be used in
practice.
-**Learning Objectives**
+Learning Objectives
+-------------------
After this tutorial, you should be able to:
@@ -33,7 +36,8 @@ After this tutorial, you should be able to:
- Decide when the use of masked arrays is appropriate in some of your
applications
-**What are masked arrays?**
+What are masked arrays?
+-----------------------
Consider the following problem. You have a dataset with missing or invalid
entries. If you're doing any kind of processing on this data, and want to
@@ -63,7 +67,8 @@ combination of:
- A ``fill_value``, a value that may be used to replace the invalid entries
in order to return a standard :class:`numpy.ndarray`.
-**When can they be useful?**
+When can they be useful?
+------------------------
There are a few situations where masked arrays can be more useful than just
eliminating the invalid entries of an array:
@@ -84,7 +89,8 @@ comes with a specific implementation of most :term:`NumPy universal functions
functions and operations on masked data. The output is then a masked array.
We'll see some examples of how this works in practice below.
-**Using masked arrays to see COVID-19 data**
+Using masked arrays to see COVID-19 data
+----------------------------------------
From `Kaggle <https://www.kaggle.com/atilamadai/covid19>`_ it is possible to
download a dataset with initial data about the COVID-19 outbreak in the
@@ -149,7 +155,8 @@ can read more about the :func:`numpy.genfromtxt` function from
the :func:`Reference Documentation <numpy.genfromtxt>` or from the
:doc:`Basic IO tutorial <basics.io.genfromtxt>`.
-**Exploring the data**
+Exploring the data
+------------------
First of all, we can plot the whole set of data we have and see what it looks
like. In order to get a readable plot, we select only a few of the dates to
@@ -194,7 +201,8 @@ the :func:`numpy.sum` function to sum all the selected rows (``axis=0``):
Something's wrong with this data - we are not supposed to have negative values
in a cumulative data set. What's going on?
-**Missing data**
+Missing data
+------------
Looking at the data, here's what we find: there is a period with
**missing data**:
@@ -308,7 +316,8 @@ Mainland China:
It's clear that masked arrays are the right solution here. We cannot represent
the missing data without mischaracterizing the evolution of the curve.
-**Fitting Data**
+Fitting Data
+------------
One possibility we can think of is to interpolate the missing data to estimate
the number of cases in late January. Observe that we can select the masked
@@ -367,7 +376,8 @@ after the beginning of the records:
plt.title("COVID-19 cumulative cases from Jan 21 to Feb 3 2020 - Mainland China\n"
"Cubic estimate for 7 days after start");
-**More reading**
+More reading
+------------
Topics not covered in this tutorial can be found in the documentation:
diff --git a/doc/source/user/tutorial-svd.rst b/doc/source/user/tutorial-svd.rst
index 086e0a6de..fd9e366e0 100644
--- a/doc/source/user/tutorial-svd.rst
+++ b/doc/source/user/tutorial-svd.rst
@@ -9,7 +9,8 @@ Tutorial: Linear algebra on n-dimensional arrays
import numpy as np
np.random.seed(1)
-**Prerequisites**
+Prerequisites
+-------------
Before reading this tutorial, you should know a bit of Python. If you
would like to refresh your memory, take a look at the
@@ -19,7 +20,8 @@ If you want to be able to run the examples in this tutorial, you should also
have `matplotlib <https://matplotlib.org/>`_ and `SciPy <https://scipy.org>`_
installed on your computer.
-**Learner profile**
+Learner profile
+---------------
This tutorial is for people who have a basic understanding of linear
algebra and arrays in NumPy and want to understand how n-dimensional
@@ -28,7 +30,8 @@ you don't know how to apply common functions to n-dimensional arrays (without
using for-loops), or if you want to understand axis and shape properties for
n-dimensional arrays, this tutorial might be of help.
-**Learning Objectives**
+Learning Objectives
+-------------------
After this tutorial, you should be able to:
@@ -38,7 +41,8 @@ After this tutorial, you should be able to:
arrays without using for-loops;
- Understand axis and shape properties for n-dimensional arrays.
-**Content**
+Content
+-------
In this tutorial, we will use a `matrix decomposition
<https://en.wikipedia.org/wiki/Matrix_decomposition>`_ from linear algebra, the
@@ -78,7 +82,8 @@ We can see the image using the `matplotlib.pyplot.imshow` function::
If you are executing the commands above in the IPython shell, it might be
necessary to use the command ``plt.show()`` to show the image window.
-**Shape, axis and array properties**
+Shape, axis and array properties
+--------------------------------
Note that, in linear algebra, the dimension of a vector refers to the number of
entries in an array. In NumPy, it instead defines the number of axes. For
@@ -162,7 +167,8 @@ syntax::
>>> green_array = img_array[:, :, 1]
>>> blue_array = img_array[:, :, 2]
-**Operations on an axis**
+Operations on an axis
+---------------------
It is possible to use methods from linear algebra to approximate an existing set
of data. Here, we will use the `SVD (Singular Value Decomposition)
@@ -290,7 +296,8 @@ diagonal and with the appropriate dimensions for multiplying: in our case,
Now, we want to check if the reconstructed ``U @ Sigma @ Vt`` is
close to the original ``img_gray`` matrix.
-**Approximation**
+Approximation
+-------------
The `linalg` module includes a ``norm`` function, which
computes the norm of a vector or matrix represented in a NumPy array. For
@@ -360,7 +367,8 @@ Now, you can go ahead and repeat this experiment with other values of `k`, and
each of your experiments should give you a slightly better (or worse) image
depending on the value you choose.
-**Applying to all colors**
+Applying to all colors
+----------------------
Now we want to do the same kind of operation, but to all three colors. Our
first instinct might be to repeat the same operation we did above to each color
@@ -411,7 +419,8 @@ matrices into the approximation. Now, note that
To build the final approximation matrix, we must understand how multiplication
across different axes works.
-**Products with n-dimensional arrays**
+Products with n-dimensional arrays
+----------------------------------
If you have worked before with only one- or two-dimensional arrays in NumPy,
you might use `numpy.dot` and `numpy.matmul` (or the ``@`` operator)
@@ -495,7 +504,8 @@ Even though the image is not as sharp, using a small number of ``k`` singular
values (compared to the original set of 768 values), we can recover many of the
distinguishing features from this image.
-**Final words**
+Final words
+-----------
Of course, this is not the best method to *approximate* an image.
However, there is, in fact, a result in linear algebra that says that the
@@ -504,7 +514,8 @@ terms of the norm of the difference. For more information, see *G. H. Golub and
C. F. Van Loan, Matrix Computations, Baltimore, MD, Johns Hopkins University
Press, 1985*.
-**Further reading**
+Further reading
+---------------
- :doc:`Python tutorial <python:tutorial/index>`
- :ref:`reference`
diff --git a/doc/source/user/tutorials_index.rst b/doc/source/user/tutorials_index.rst
index 5e9419f96..20e2c256c 100644
--- a/doc/source/user/tutorials_index.rst
+++ b/doc/source/user/tutorials_index.rst
@@ -11,10 +11,6 @@ classes contained in the package, see the :ref:`API reference <reference>`.
.. toctree::
:maxdepth: 1
- basics
- misc
- numpy-for-matlab-users
tutorial-svd
tutorial-ma
- building
- c-info
+
diff --git a/doc/source/user/whatisnumpy.rst b/doc/source/user/whatisnumpy.rst
index 8478a77c4..154f91c84 100644
--- a/doc/source/user/whatisnumpy.rst
+++ b/doc/source/user/whatisnumpy.rst
@@ -125,7 +125,7 @@ same shape, or a scalar and an array, or even two arrays with
different shapes, provided that the smaller array is "expandable" to
the shape of the larger in such a way that the resulting broadcast is
unambiguous. For detailed "rules" of broadcasting see
-`numpy.doc.broadcasting`.
+`basics.broadcasting`.
Who Else Uses NumPy?
--------------------
diff --git a/doc_requirements.txt b/doc_requirements.txt
index 6947ec18f..e2694ba12 100644
--- a/doc_requirements.txt
+++ b/doc_requirements.txt
@@ -3,3 +3,4 @@ ipython
scipy
matplotlib
pandas
+pydata-sphinx-theme==0.4.0
diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd
index 55097888c..4d9ec1fed 100644
--- a/numpy/__init__.cython-30.pxd
+++ b/numpy/__init__.cython-30.pxd
@@ -329,8 +329,8 @@ cdef extern from "numpy/arrayobject.h":
ctypedef long double npy_float128
ctypedef struct npy_cfloat:
- double real
- double imag
+ float real
+ float imag
ctypedef struct npy_cdouble:
double real
diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd
index 206dbe8b9..bf4298e59 100644
--- a/numpy/__init__.pxd
+++ b/numpy/__init__.pxd
@@ -290,8 +290,8 @@ cdef extern from "numpy/arrayobject.h":
ctypedef long double npy_float128
ctypedef struct npy_cfloat:
- double real
- double imag
+ float real
+ float imag
ctypedef struct npy_cdouble:
double real
diff --git a/numpy/__init__.py b/numpy/__init__.py
index 6c4ac98bd..41c3dc42d 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -214,6 +214,18 @@ else:
__all__.remove('Arrayterator')
del Arrayterator
+ # These names were removed in NumPy 1.20. For at least one release,
+ # attempts to access these names in the numpy namespace will trigger
+ # a warning, and calling the function will raise an exception.
+ _financial_names = ['fv', 'ipmt', 'irr', 'mirr', 'nper', 'npv', 'pmt',
+ 'ppmt', 'pv', 'rate']
+ __expired_functions__ = {
+ name: (f'In accordance with NEP 32, the function {name} was removed '
+ 'from NumPy version 1.20. A replacement for this function '
+ 'is available in the numpy_financial library: '
+ 'https://pypi.org/project/numpy-financial')
+ for name in _financial_names}
+
# Filter out harmless Cython warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
@@ -228,6 +240,20 @@ else:
# module level getattr is only supported in 3.7 onwards
# https://www.python.org/dev/peps/pep-0562/
def __getattr__(attr):
+ # Warn for expired attributes, and return a dummy function
+ # that always raises an exception.
+ try:
+ msg = __expired_functions__[attr]
+ except KeyError:
+ pass
+ else:
+ warnings.warn(msg, RuntimeWarning)
+
+ def _expired(*args, **kwds):
+ raise RuntimeError(msg)
+
+ return _expired
+
# Emit warnings for deprecated attributes
try:
val, msg = __deprecated_attrs__[attr]
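The net effect of the two hunks above, sketched as an interactive session
(the trailing ellipses elide the rest of the NEP 32 message quoted in the
code):

    >>> import numpy as np
    >>> f = np.fv  # RuntimeWarning: In accordance with NEP 32, the function fv was removed ...
    >>> f(0.05, 10, -100, -100)
    Traceback (most recent call last):
        ...
    RuntimeError: In accordance with NEP 32, the function fv was removed from NumPy version 1.20. ...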
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index fad5e1774..bf54207a4 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -23,6 +23,7 @@ from typing import (
Sequence,
Sized,
SupportsAbs,
+ SupportsBytes,
SupportsComplex,
SupportsFloat,
SupportsInt,
@@ -33,21 +34,48 @@ from typing import (
Union,
)
-if sys.version_info[0] < 3:
- class SupportsBytes: ...
-
-else:
- from typing import SupportsBytes
-
if sys.version_info >= (3, 8):
- from typing import Literal, Protocol
+ from typing import Literal, Protocol, SupportsIndex
else:
from typing_extensions import Literal, Protocol
+ class SupportsIndex(Protocol):
+ def __index__(self) -> int: ...
+
+# Ensures that the stubs are picked up
+from . import (
+ char,
+ compat,
+ core,
+ ctypeslib,
+ emath,
+ fft,
+ lib,
+ linalg,
+ ma,
+ matrixlib,
+ polynomial,
+ random,
+ rec,
+ testing,
+ version,
+)
+
+from numpy.core.function_base import (
+ linspace,
+ logspace,
+ geomspace,
+)
+
+# Add an object to `__all__` if its stubs are defined in an external file;
+# the stubs will not be recognized otherwise.
+# NOTE: This is redundant for objects defined within this file.
+__all__ = ["linspace", "logspace", "geomspace"]
# TODO: remove when the full numpy namespace is defined
def __getattr__(name: str) -> Any: ...
_NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray)
+_ByteOrder = Literal["S", "<", ">", "=", "|", "L", "B", "N", "I"]
class dtype:
names: Optional[Tuple[str, ...]]
@@ -103,7 +131,7 @@ class dtype:
def ndim(self) -> int: ...
@property
def subdtype(self) -> Optional[Tuple[dtype, _Shape]]: ...
- def newbyteorder(self, new_order: str = ...) -> dtype: ...
+ def newbyteorder(self, __new_order: _ByteOrder = ...) -> dtype: ...
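# For reference, the _ByteOrder codes above mirror what dtype.newbyteorder
# already accepts at runtime (illustrative session):
#     >>> np.dtype('<i4').newbyteorder('>')
#     dtype('>i4')
#     >>> np.dtype('<i4').newbyteorder('S')  # 'S' swaps the current order
#     dtype('>i4')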
# Leave str and type for end to avoid having to use `builtins.str`
# everywhere. See https://github.com/python/mypy/issues/3775
@property
@@ -143,6 +171,14 @@ class _flagsobj:
def __getitem__(self, key: str) -> bool: ...
def __setitem__(self, key: str, value: bool) -> None: ...
+_ArrayLikeInt = Union[
+ int,
+ integer,
+ Sequence[Union[int, integer]],
+ Sequence[Sequence[Any]], # TODO: wait for support for recursive types
+ ndarray
+]
+
_FlatIterSelf = TypeVar("_FlatIterSelf", bound=flatiter)
class flatiter(Generic[_ArraySelf]):
@@ -155,6 +191,18 @@ class flatiter(Generic[_ArraySelf]):
def copy(self) -> _ArraySelf: ...
def __iter__(self: _FlatIterSelf) -> _FlatIterSelf: ...
def __next__(self) -> generic: ...
+ def __len__(self) -> int: ...
+ @overload
+ def __getitem__(self, key: Union[int, integer]) -> generic: ...
+ @overload
+ def __getitem__(
+ self, key: Union[_ArrayLikeInt, slice, ellipsis],
+ ) -> _ArraySelf: ...
+ def __array__(self, __dtype: DtypeLike = ...) -> ndarray: ...
+
+_OrderKACF = Optional[Literal["K", "A", "C", "F"]]
+_OrderACF = Optional[Literal["A", "C", "F"]]
+_OrderCF = Optional[Literal["C", "F"]]
_ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon)
@@ -187,18 +235,12 @@ class _ArrayOrScalarCommon(
def __int__(self) -> int: ...
def __float__(self) -> float: ...
def __complex__(self) -> complex: ...
- if sys.version_info[0] < 3:
- def __oct__(self) -> str: ...
- def __hex__(self) -> str: ...
- def __nonzero__(self) -> bool: ...
- def __unicode__(self) -> Text: ...
- else:
- def __bool__(self) -> bool: ...
- def __bytes__(self) -> bytes: ...
+ def __bool__(self) -> bool: ...
+ def __bytes__(self) -> bytes: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
- def __copy__(self: _ArraySelf, order: str = ...) -> _ArraySelf: ...
- def __deepcopy__(self: _ArraySelf, memo: dict) -> _ArraySelf: ...
+ def __copy__(self: _ArraySelf) -> _ArraySelf: ...
+ def __deepcopy__(self: _ArraySelf, __memo: Optional[dict] = ...) -> _ArraySelf: ...
def __lt__(self, other): ...
def __le__(self, other): ...
def __eq__(self, other): ...
@@ -207,58 +249,150 @@ class _ArrayOrScalarCommon(
def __ge__(self, other): ...
def __add__(self, other): ...
def __radd__(self, other): ...
- def __iadd__(self, other): ...
def __sub__(self, other): ...
def __rsub__(self, other): ...
- def __isub__(self, other): ...
def __mul__(self, other): ...
def __rmul__(self, other): ...
- def __imul__(self, other): ...
- if sys.version_info[0] < 3:
- def __div__(self, other): ...
- def __rdiv__(self, other): ...
- def __idiv__(self, other): ...
def __truediv__(self, other): ...
def __rtruediv__(self, other): ...
- def __itruediv__(self, other): ...
def __floordiv__(self, other): ...
def __rfloordiv__(self, other): ...
- def __ifloordiv__(self, other): ...
def __mod__(self, other): ...
def __rmod__(self, other): ...
- def __imod__(self, other): ...
def __divmod__(self, other): ...
def __rdivmod__(self, other): ...
# NumPy's __pow__ doesn't handle a third argument
def __pow__(self, other): ...
def __rpow__(self, other): ...
- def __ipow__(self, other): ...
def __lshift__(self, other): ...
def __rlshift__(self, other): ...
- def __ilshift__(self, other): ...
def __rshift__(self, other): ...
def __rrshift__(self, other): ...
- def __irshift__(self, other): ...
def __and__(self, other): ...
def __rand__(self, other): ...
- def __iand__(self, other): ...
def __xor__(self, other): ...
def __rxor__(self, other): ...
- def __ixor__(self, other): ...
def __or__(self, other): ...
def __ror__(self, other): ...
- def __ior__(self, other): ...
- if sys.version_info[:2] >= (3, 5):
- def __matmul__(self, other): ...
- def __rmatmul__(self, other): ...
def __neg__(self: _ArraySelf) -> _ArraySelf: ...
def __pos__(self: _ArraySelf) -> _ArraySelf: ...
def __abs__(self: _ArraySelf) -> _ArraySelf: ...
def __invert__(self: _ArraySelf) -> _ArraySelf: ...
- # TODO(shoyer): remove when all methods are defined
- def __getattr__(self, name) -> Any: ...
+ def astype(
+ self: _ArraySelf,
+ dtype: DtypeLike,
+ order: _OrderKACF = ...,
+ casting: _Casting = ...,
+ subok: bool = ...,
+ copy: bool = ...,
+ ) -> _ArraySelf: ...
+ def byteswap(self: _ArraySelf, inplace: bool = ...) -> _ArraySelf: ...
+ def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ...
+ def dump(self, file: str) -> None: ...
+ def dumps(self) -> bytes: ...
+ def fill(self, value: Any) -> None: ...
+ @property
+ def flat(self: _ArraySelf) -> flatiter[_ArraySelf]: ...
+ def flatten(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ...
+ def getfield(
+ self: _ArraySelf, dtype: DtypeLike, offset: int = ...
+ ) -> _ArraySelf: ...
+ @overload
+ def item(self, *args: int) -> Any: ...
+ @overload
+ def item(self, args: Tuple[int, ...]) -> Any: ...
+ @overload
+ def itemset(self, __value: Any) -> None: ...
+ @overload
+ def itemset(self, __item: _ShapeLike, __value: Any) -> None: ...
+ def ravel(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ...
+ @overload
+ def reshape(
+ self: _ArraySelf, shape: Sequence[int], *, order: _OrderACF = ...
+ ) -> _ArraySelf: ...
+ @overload
+ def reshape(
+ self: _ArraySelf, *shape: int, order: _OrderACF = ...
+ ) -> _ArraySelf: ...
+ @overload
+ def resize(self, new_shape: Sequence[int], *, refcheck: bool = ...) -> None: ...
+ @overload
+ def resize(self, *new_shape: int, refcheck: bool = ...) -> None: ...
+ def setflags(
+ self, write: bool = ..., align: bool = ..., uic: bool = ...
+ ) -> None: ...
+ def squeeze(
+ self: _ArraySelf, axis: Union[int, Tuple[int, ...]] = ...
+ ) -> _ArraySelf: ...
+ def swapaxes(self: _ArraySelf, axis1: int, axis2: int) -> _ArraySelf: ...
+ def tobytes(self, order: _OrderKACF = ...) -> bytes: ...
+ def tofile(
+ self, fid: Union[IO[bytes], str], sep: str = ..., format: str = ...
+ ) -> None: ...
+ # generics and 0d arrays return builtin scalars
+ def tolist(self) -> Any: ...
+ @overload
+ def transpose(self: _ArraySelf, axes: Sequence[int]) -> _ArraySelf: ...
+ @overload
+ def transpose(self: _ArraySelf, *axes: int) -> _ArraySelf: ...
+ @overload
+ def view(self, type: Type[_NdArraySubClass]) -> _NdArraySubClass: ...
+ @overload
+ def view(self: _ArraySelf, dtype: DtypeLike = ...) -> _ArraySelf: ...
+ @overload
+ def view(
+ self, dtype: DtypeLike, type: Type[_NdArraySubClass]
+ ) -> _NdArraySubClass: ...
+
+ # TODO: Add proper signatures
+ def __getitem__(self, key) -> Any: ...
+ @property
+ def __array_interface__(self): ...
+ @property
+ def __array_priority__(self): ...
+ @property
+ def __array_struct__(self): ...
+ def __array_wrap__(array, context=...): ...
+ def __setstate__(self, __state): ...
+ def all(self, axis=..., out=..., keepdims=...): ...
+ def any(self, axis=..., out=..., keepdims=...): ...
+ def argmax(self, axis=..., out=...): ...
+ def argmin(self, axis=..., out=...): ...
+ def argpartition(self, kth, axis=..., kind=..., order=...): ...
+ def argsort(self, axis=..., kind=..., order=...): ...
+ def choose(self, choices, out=..., mode=...): ...
+ def clip(self, min=..., max=..., out=..., **kwargs): ...
+ def compress(self, condition, axis=..., out=...): ...
+ def conj(self): ...
+ def conjugate(self): ...
+ def cumprod(self, axis=..., dtype=..., out=...): ...
+ def cumsum(self, axis=..., dtype=..., out=...): ...
+ def diagonal(self, offset=..., axis1=..., axis2=...): ...
+ def dot(self, b, out=...): ...
+ def max(self, axis=..., out=..., keepdims=..., initial=..., where=...): ...
+ def mean(self, axis=..., dtype=..., out=..., keepdims=...): ...
+ def min(self, axis=..., out=..., keepdims=..., initial=..., where=...): ...
+ def newbyteorder(self, new_order=...): ...
+ def nonzero(self): ...
+ def partition(self, kth, axis=..., kind=..., order=...): ...
+ def prod(self, axis=..., dtype=..., out=..., keepdims=..., initial=..., where=...): ...
+ def ptp(self, axis=..., out=..., keepdims=...): ...
+ def put(self, indices, values, mode=...): ...
+ def repeat(self, repeats, axis=...): ...
+ def round(self, decimals=..., out=...): ...
+ def searchsorted(self, v, side=..., sorter=...): ...
+ def setfield(self, val, dtype, offset=...): ...
+ def sort(self, axis=..., kind=..., order=...): ...
+ def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ...
+ def sum(self, axis=..., dtype=..., out=..., keepdims=..., initial=..., where=...): ...
+ def take(self, indices, axis=..., out=..., mode=...): ...
+ # NOTE: `tostring()` is deprecated and therefore excluded
+ # def tostring(self, order=...): ...
+ def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ...
+ def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ...
_BufferType = Union[ndarray, bytes, bytearray, memoryview]
+_Casting = Literal["no", "equiv", "safe", "same_kind", "unsafe"]
class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container):
@property
@@ -276,7 +410,7 @@ class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container):
buffer: _BufferType = ...,
offset: int = ...,
strides: _ShapeLike = ...,
- order: Optional[str] = ...,
+ order: _OrderKACF = ...,
) -> _ArraySelf: ...
@property
def dtype(self) -> _Dtype: ...
@@ -287,82 +421,33 @@ class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container):
@shape.setter
def shape(self, value: _ShapeLike): ...
@property
- def flat(self: _ArraySelf) -> flatiter[_ArraySelf]: ...
- @property
def strides(self) -> _Shape: ...
@strides.setter
def strides(self, value: _ShapeLike): ...
- # Array conversion
- @overload
- def item(self, *args: int) -> Any: ...
- @overload
- def item(self, args: Tuple[int, ...]) -> Any: ...
- def tolist(self) -> List[Any]: ...
- @overload
- def itemset(self, __value: Any) -> None: ...
- @overload
- def itemset(self, __item: _ShapeLike, __value: Any) -> None: ...
- def tobytes(self, order: Optional[str] = ...) -> bytes: ...
- def tofile(
- self, fid: Union[IO[bytes], str], sep: str = ..., format: str = ...
- ) -> None: ...
- def dump(self, file: str) -> None: ...
- def dumps(self) -> bytes: ...
- def astype(
- self: _ArraySelf,
- dtype: DtypeLike,
- order: str = ...,
- casting: str = ...,
- subok: bool = ...,
- copy: bool = ...,
- ) -> _ArraySelf: ...
- def byteswap(self: _ArraySelf, inplace: bool = ...) -> _ArraySelf: ...
- def copy(self: _ArraySelf, order: str = ...) -> _ArraySelf: ...
- @overload
- def view(self, type: Type[_NdArraySubClass]) -> _NdArraySubClass: ...
- @overload
- def view(self: _ArraySelf, dtype: DtypeLike = ...) -> _ArraySelf: ...
- @overload
- def view(
- self, dtype: DtypeLike, type: Type[_NdArraySubClass]
- ) -> _NdArraySubClass: ...
- def getfield(
- self: _ArraySelf, dtype: DtypeLike, offset: int = ...
- ) -> _ArraySelf: ...
- def setflags(
- self, write: bool = ..., align: bool = ..., uic: bool = ...
- ) -> None: ...
- def fill(self, value: Any) -> None: ...
- # Shape manipulation
- @overload
- def reshape(
- self: _ArraySelf, shape: Sequence[int], *, order: str = ...
- ) -> _ArraySelf: ...
- @overload
- def reshape(self: _ArraySelf, *shape: int, order: str = ...) -> _ArraySelf: ...
- @overload
- def resize(self, new_shape: Sequence[int], *, refcheck: bool = ...) -> None: ...
- @overload
- def resize(self, *new_shape: int, refcheck: bool = ...) -> None: ...
- @overload
- def transpose(self: _ArraySelf, axes: Sequence[int]) -> _ArraySelf: ...
- @overload
- def transpose(self: _ArraySelf, *axes: int) -> _ArraySelf: ...
- def swapaxes(self: _ArraySelf, axis1: int, axis2: int) -> _ArraySelf: ...
- def flatten(self: _ArraySelf, order: str = ...) -> _ArraySelf: ...
- def ravel(self: _ArraySelf, order: str = ...) -> _ArraySelf: ...
- def squeeze(
- self: _ArraySelf, axis: Union[int, Tuple[int, ...]] = ...
- ) -> _ArraySelf: ...
# Many of these special methods are irrelevant currently, since protocols
# aren't supported yet. That said, I'm adding them for completeness.
# https://docs.python.org/3/reference/datamodel.html
def __len__(self) -> int: ...
- def __getitem__(self, key) -> Any: ...
def __setitem__(self, key, value): ...
def __iter__(self) -> Any: ...
def __contains__(self, key) -> bool: ...
def __index__(self) -> int: ...
+ def __matmul__(self, other): ...
+ def __imatmul__(self, other): ...
+ def __rmatmul__(self, other): ...
+ # `np.generic` does not support inplace operations
+ def __iadd__(self, other): ...
+ def __isub__(self, other): ...
+ def __imul__(self, other): ...
+ def __itruediv__(self, other): ...
+ def __ifloordiv__(self, other): ...
+ def __imod__(self, other): ...
+ def __ipow__(self, other): ...
+ def __ilshift__(self, other): ...
+ def __irshift__(self, other): ...
+ def __iand__(self, other): ...
+ def __ixor__(self, other): ...
+ def __ior__(self, other): ...
# NOTE: while `np.generic` is not technically an instance of `ABCMeta`,
# the `@abstractmethod` decorator is herein used to (forcefully) deny
@@ -372,65 +457,87 @@ class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container):
# See https://github.com/numpy/numpy-stubs/pull/80 for more details.
+_CharLike = Union[str, bytes]
+
class generic(_ArrayOrScalarCommon):
@abstractmethod
def __init__(self, *args: Any, **kwargs: Any) -> None: ...
@property
def base(self) -> None: ...
-class _real_generic(generic): # type: ignore
+class number(generic): # type: ignore
@property
def real(self: _ArraySelf) -> _ArraySelf: ...
@property
def imag(self: _ArraySelf) -> _ArraySelf: ...
-class number(generic): ... # type: ignore
-
-class bool_(_real_generic):
+class bool_(generic):
def __init__(self, __value: object = ...) -> None: ...
+ @property
+ def real(self: _ArraySelf) -> _ArraySelf: ...
+ @property
+ def imag(self: _ArraySelf) -> _ArraySelf: ...
class object_(generic):
def __init__(self, __value: object = ...) -> None: ...
+ @property
+ def real(self: _ArraySelf) -> _ArraySelf: ...
+ @property
+ def imag(self: _ArraySelf) -> _ArraySelf: ...
-class datetime64:
+class datetime64(generic):
@overload
def __init__(
self,
- __value: Union[None, datetime64, str, dt.datetime] = ...,
- __format: str = ...
+ __value: Union[None, datetime64, _CharLike, dt.datetime] = ...,
+ __format: Union[_CharLike, Tuple[_CharLike, _IntLike]] = ...,
) -> None: ...
@overload
- def __init__(self, __value: int, __format: str) -> None: ...
+ def __init__(self, __value: int, __format: Union[_CharLike, Tuple[_CharLike, _IntLike]]) -> None: ...
def __add__(self, other: Union[timedelta64, int]) -> datetime64: ...
def __sub__(self, other: Union[timedelta64, datetime64, int]) -> timedelta64: ...
+ def __rsub__(self, other: Union[datetime64, int]) -> timedelta64: ...
+
+# Support for `__index__` was added in python 3.8 (bpo-20092)
+if sys.version_info >= (3, 8):
+ _IntValue = Union[SupportsInt, _CharLike, SupportsIndex]
+ _FloatValue = Union[None, _CharLike, SupportsFloat, SupportsIndex]
+ _ComplexValue = Union[None, _CharLike, SupportsFloat, SupportsComplex, SupportsIndex]
+else:
+ _IntValue = Union[SupportsInt, _CharLike]
+ _FloatValue = Union[None, _CharLike, SupportsFloat]
+ _ComplexValue = Union[None, _CharLike, SupportsFloat, SupportsComplex]
+
+class integer(number): # type: ignore
+ # NOTE: `__index__` is technically defined in the bottom-most
+ # sub-classes (`int64`, `uint32`, etc)
+ def __index__(self) -> int: ...
-class integer(number, _real_generic): ... # type: ignore
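# In practice, __index__ is what lets any NumPy integer stand in for a
# built-in int index (illustrative):
#     >>> ['a', 'b', 'c'][np.int64(1)]
#     'b'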
class signedinteger(integer): ... # type: ignore
class int8(signedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class int16(signedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class int32(signedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class int64(signedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class timedelta64(signedinteger):
- def __init__(self, __value: Any = ..., __format: str = ...) -> None: ...
+ def __init__(
+ self,
+ __value: Union[None, int, _CharLike, dt.timedelta, timedelta64] = ...,
+ __format: Union[_CharLike, Tuple[_CharLike, _IntLike]] = ...,
+ ) -> None: ...
@overload
def __add__(self, other: Union[timedelta64, int]) -> timedelta64: ...
@overload
def __add__(self, other: datetime64) -> datetime64: ...
def __sub__(self, other: Union[timedelta64, int]) -> timedelta64: ...
- if sys.version_info[0] < 3:
- @overload
- def __div__(self, other: timedelta64) -> float: ...
- @overload
- def __div__(self, other: float) -> timedelta64: ...
@overload
def __truediv__(self, other: timedelta64) -> float: ...
@overload
@@ -440,72 +547,69 @@ class timedelta64(signedinteger):
class unsignedinteger(integer): ... # type: ignore
class uint8(unsignedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class uint16(unsignedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class uint32(unsignedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class uint64(unsignedinteger):
- def __init__(self, __value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: _IntValue = ...) -> None: ...
class inexact(number): ... # type: ignore
-class floating(inexact, _real_generic): ... # type: ignore
+class floating(inexact): ... # type: ignore
+
+_FloatType = TypeVar('_FloatType', bound=floating)
class float16(floating):
- def __init__(self, __value: Optional[SupportsFloat] = ...) -> None: ...
+ def __init__(self, __value: _FloatValue = ...) -> None: ...
class float32(floating):
- def __init__(self, __value: Optional[SupportsFloat] = ...) -> None: ...
+ def __init__(self, __value: _FloatValue = ...) -> None: ...
-class float64(floating):
- def __init__(self, __value: Optional[SupportsFloat] = ...) -> None: ...
+class float64(floating, float):
+ def __init__(self, __value: _FloatValue = ...) -> None: ...
-class complexfloating(inexact): ... # type: ignore
-
-class complex64(complexfloating):
- def __init__(
- self,
- __value: Union[None, SupportsInt, SupportsFloat, SupportsComplex] = ...
- ) -> None: ...
+class complexfloating(inexact, Generic[_FloatType]): # type: ignore
@property
- def real(self) -> float32: ...
+ def real(self) -> _FloatType: ... # type: ignore[override]
@property
- def imag(self) -> float32: ...
+ def imag(self) -> _FloatType: ... # type: ignore[override]
+ def __abs__(self) -> _FloatType: ... # type: ignore[override]
-class complex128(complexfloating):
- def __init__(
- self,
- __value: Union[None, SupportsInt, SupportsFloat, SupportsComplex] = ...
- ) -> None: ...
- @property
- def real(self) -> float64: ...
- @property
- def imag(self) -> float64: ...
+class complex64(complexfloating[float32]):
+ def __init__(self, __value: _ComplexValue = ...) -> None: ...
-class flexible(_real_generic): ... # type: ignore
+class complex128(complexfloating[float64], complex):
+ def __init__(self, __value: _ComplexValue = ...) -> None: ...
+
+class flexible(generic): ... # type: ignore
class void(flexible):
- def __init__(self, __value: Union[int, integer, bool_, bytes, bytes_]): ...
+ def __init__(self, __value: Union[int, integer, bool_, bytes]): ...
+ @property
+ def real(self: _ArraySelf) -> _ArraySelf: ...
+ @property
+ def imag(self: _ArraySelf) -> _ArraySelf: ...
-class character(_real_generic): ... # type: ignore
+class character(flexible): ... # type: ignore
-class bytes_(character):
+class bytes_(character, bytes):
@overload
def __init__(self, __value: object = ...) -> None: ...
@overload
def __init__(
- self, __value: Union[str, str_], encoding: str = ..., errors: str = ...
+ self, __value: str, encoding: str = ..., errors: str = ...
) -> None: ...
-class str_(character):
+class str_(character, str):
@overload
def __init__(self, __value: object = ...) -> None: ...
@overload
def __init__(
- self, __value: Union[bytes, bytes_], encoding: str = ..., errors: str = ...
+ self, __value: bytes, encoding: str = ..., errors: str = ...
) -> None: ...
# TODO(alan): Platform dependent types
@@ -521,48 +625,66 @@ def array(
dtype: DtypeLike = ...,
*,
copy: bool = ...,
- order: Optional[str] = ...,
+ order: _OrderKACF = ...,
subok: bool = ...,
ndmin: int = ...,
+ like: ArrayLike = ...,
) -> ndarray: ...
def zeros(
- shape: _ShapeLike, dtype: DtypeLike = ..., order: Optional[str] = ...
+ shape: _ShapeLike,
+ dtype: DtypeLike = ...,
+ order: _OrderCF = ...,
+ *,
+ like: ArrayLike = ...,
) -> ndarray: ...
def ones(
- shape: _ShapeLike, dtype: DtypeLike = ..., order: Optional[str] = ...
+ shape: _ShapeLike,
+ dtype: DtypeLike = ...,
+ order: _OrderCF = ...,
+ *,
+ like: ArrayLike = ...,
) -> ndarray: ...
def empty(
- shape: _ShapeLike, dtype: DtypeLike = ..., order: Optional[str] = ...
+ shape: _ShapeLike,
+ dtype: DtypeLike = ...,
+ order: _OrderCF = ...,
+ *,
+ like: ArrayLike = ...,
) -> ndarray: ...
def zeros_like(
a: ArrayLike,
dtype: DtypeLike = ...,
- order: str = ...,
+ order: _OrderKACF = ...,
subok: bool = ...,
shape: Optional[Union[int, Sequence[int]]] = ...,
) -> ndarray: ...
def ones_like(
a: ArrayLike,
dtype: DtypeLike = ...,
- order: str = ...,
+ order: _OrderKACF = ...,
subok: bool = ...,
shape: Optional[_ShapeLike] = ...,
) -> ndarray: ...
def empty_like(
a: ArrayLike,
dtype: DtypeLike = ...,
- order: str = ...,
+ order: _OrderKACF = ...,
subok: bool = ...,
shape: Optional[_ShapeLike] = ...,
) -> ndarray: ...
def full(
- shape: _ShapeLike, fill_value: Any, dtype: DtypeLike = ..., order: str = ...
+ shape: _ShapeLike,
+ fill_value: Any,
+ dtype: DtypeLike = ...,
+ order: _OrderCF = ...,
+ *,
+ like: ArrayLike = ...,
) -> ndarray: ...
def full_like(
a: ArrayLike,
fill_value: Any,
dtype: DtypeLike = ...,
- order: str = ...,
+ order: _OrderKACF = ...,
subok: bool = ...,
shape: Optional[_ShapeLike] = ...,
) -> ndarray: ...
@@ -572,8 +694,11 @@ def count_nonzero(
def isfortran(a: ndarray) -> bool: ...
def argwhere(a: ArrayLike) -> ndarray: ...
def flatnonzero(a: ArrayLike) -> ndarray: ...
-def correlate(a: ArrayLike, v: ArrayLike, mode: str = ...) -> ndarray: ...
-def convolve(a: ArrayLike, v: ArrayLike, mode: str = ...) -> ndarray: ...
+
+_CorrelateMode = Literal["valid", "same", "full"]
+
+def correlate(a: ArrayLike, v: ArrayLike, mode: _CorrelateMode = ...) -> ndarray: ...
+def convolve(a: ArrayLike, v: ArrayLike, mode: _CorrelateMode = ...) -> ndarray: ...
def outer(a: ArrayLike, b: ArrayLike, out: ndarray = ...) -> ndarray: ...
def tensordot(
a: ArrayLike,
@@ -604,11 +729,17 @@ def cross(
def indices(
dimensions: Sequence[int], dtype: dtype = ..., sparse: bool = ...
) -> Union[ndarray, Tuple[ndarray, ...]]: ...
-def fromfunction(function: Callable, shape: Tuple[int, int], **kwargs) -> Any: ...
+def fromfunction(
+ function: Callable,
+ shape: Tuple[int, int],
+ *,
+ like: ArrayLike = ...,
+ **kwargs,
+) -> Any: ...
def isscalar(element: Any) -> bool: ...
def binary_repr(num: int, width: Optional[int] = ...) -> str: ...
def base_repr(number: int, base: int = ..., padding: int = ...) -> str: ...
-def identity(n: int, dtype: DtypeLike = ...) -> ndarray: ...
+def identity(n: int, dtype: DtypeLike = ..., *, like: ArrayLike = ...) -> ndarray: ...
def allclose(
a: ArrayLike,
b: ArrayLike,
@@ -689,10 +820,8 @@ class ufunc:
axes: List[Any] = ...,
axis: int = ...,
keepdims: bool = ...,
- # TODO: make this precise when we can use Literal.
- casting: str = ...,
- # TODO: make this precise when we can use Literal.
- order: Optional[str] = ...,
+ casting: _Casting = ...,
+ order: _OrderKACF = ...,
dtype: DtypeLike = ...,
subok: bool = ...,
signature: Union[str, Tuple[str]] = ...,
@@ -871,7 +1000,6 @@ def find_common_type(
# Functions from np.core.fromnumeric
_Mode = Literal["raise", "wrap", "clip"]
-_Order = Literal["C", "F", "A"]
_PartitionKind = Literal["introselect"]
_SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"]
_Side = Literal["left", "right"]
@@ -896,9 +1024,9 @@ _Number = TypeVar('_Number', bound=number)
_NumberLike = Union[int, float, complex, number, bool_]
# An array-like object consisting of integers
-_Int = Union[int, integer]
-_Bool = Union[bool, bool_]
-_IntOrBool = Union[_Int, _Bool]
+_IntLike = Union[int, integer]
+_BoolLike = Union[bool, bool_]
+_IntOrBool = Union[_IntLike, _BoolLike]
_ArrayLikeIntNested = ArrayLike # TODO: wait for support for recursive types
_ArrayLikeBoolNested = ArrayLike # TODO: wait for support for recursive types
@@ -911,8 +1039,8 @@ _ArrayLikeIntOrBool = Union[
Sequence[_ArrayLikeBoolNested],
]
_ArrayLikeBool = Union[
- _Bool,
- Sequence[_Bool],
+ _BoolLike,
+ Sequence[_BoolLike],
ndarray
]
@@ -953,7 +1081,7 @@ def take(
out: Optional[ndarray] = ...,
mode: _Mode = ...,
) -> Union[_ScalarNumpy, ndarray]: ...
-def reshape(a: ArrayLike, newshape: _ShapeLike, order: _Order = ...) -> ndarray: ...
+def reshape(a: ArrayLike, newshape: _ShapeLike, order: _OrderACF = ...) -> ndarray: ...
@overload
def choose(
a: _ScalarIntOrBool,
@@ -1067,7 +1195,7 @@ def trace(
dtype: DtypeLike = ...,
out: Optional[ndarray] = ...,
) -> Union[number, ndarray]: ...
-def ravel(a: ArrayLike, order: _Order = ...) -> ndarray: ...
+def ravel(a: ArrayLike, order: _OrderKACF = ...) -> ndarray: ...
def nonzero(a: ArrayLike) -> Tuple[ndarray, ...]: ...
def shape(a: ArrayLike) -> _Shape: ...
def compress(
@@ -1237,3 +1365,114 @@ def amin(
initial: _NumberLike = ...,
where: _ArrayLikeBool = ...,
) -> Union[number, ndarray]: ...
+
+# TODO: `np.prod()`: For object arrays `initial` does not necessarily
+# have to be a numerical scalar.
+# The only requirement is that it is compatible
+# with the `.__mul__()` method(s) of the passed array's elements.
+
+# Note that the same situation holds for all wrappers around
+# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`).
+
+@overload
+def prod(
+ a: _Number,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+) -> _Number: ...
+@overload
+def prod(
+ a: ArrayLike,
+ axis: None = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+) -> number: ...
+@overload
+def prod(
+ a: ArrayLike,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: Optional[ndarray] = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+) -> Union[number, ndarray]: ...
+def cumprod(
+ a: ArrayLike,
+ axis: Optional[int] = ...,
+ dtype: DtypeLike = ...,
+ out: Optional[ndarray] = ...,
+) -> ndarray: ...
+def ndim(a: ArrayLike) -> int: ...
+def size(a: ArrayLike, axis: Optional[int] = ...) -> int: ...
+@overload
+def around(
+ a: _Number, decimals: int = ..., out: Optional[ndarray] = ...
+) -> _Number: ...
+@overload
+def around(
+ a: _NumberLike, decimals: int = ..., out: Optional[ndarray] = ...
+) -> number: ...
+@overload
+def around(
+ a: ArrayLike, decimals: int = ..., out: Optional[ndarray] = ...
+) -> ndarray: ...
+@overload
+def mean(
+ a: ArrayLike,
+ axis: None = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+) -> number: ...
+@overload
+def mean(
+ a: ArrayLike,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: Optional[ndarray] = ...,
+ keepdims: bool = ...,
+) -> Union[number, ndarray]: ...
+@overload
+def std(
+ a: ArrayLike,
+ axis: None = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ ddof: int = ...,
+ keepdims: Literal[False] = ...,
+) -> number: ...
+@overload
+def std(
+ a: ArrayLike,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: Optional[ndarray] = ...,
+ ddof: int = ...,
+ keepdims: bool = ...,
+) -> Union[number, ndarray]: ...
+@overload
+def var(
+ a: ArrayLike,
+ axis: None = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ ddof: int = ...,
+ keepdims: Literal[False] = ...,
+) -> number: ...
+@overload
+def var(
+ a: ArrayLike,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: Optional[ndarray] = ...,
+ ddof: int = ...,
+ keepdims: bool = ...,
+) -> Union[number, ndarray]: ...
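With these overloads, a type checker can separate the scalar-result case
from the possibly-array case. A sketch of what mypy would infer
(``reveal_type`` is a mypy-only construct; the comments state the expected
resolution under the overloads above):

    import numpy as np

    x = np.array([[1.0, 2.0], [3.0, 4.0]])
    reveal_type(np.var(x))          # number: axis=None and keepdims=False
    reveal_type(np.var(x, axis=0))  # Union[number, ndarray]: an axis was supplied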
diff --git a/numpy/char.pyi b/numpy/char.pyi
new file mode 100644
index 000000000..0e7342c0b
--- /dev/null
+++ b/numpy/char.pyi
@@ -0,0 +1,53 @@
+from typing import Any
+
+equal: Any
+not_equal: Any
+greater_equal: Any
+less_equal: Any
+greater: Any
+less: Any
+str_len: Any
+add: Any
+multiply: Any
+mod: Any
+capitalize: Any
+center: Any
+count: Any
+decode: Any
+encode: Any
+endswith: Any
+expandtabs: Any
+find: Any
+index: Any
+isalnum: Any
+isalpha: Any
+isdigit: Any
+islower: Any
+isspace: Any
+istitle: Any
+isupper: Any
+join: Any
+ljust: Any
+lower: Any
+lstrip: Any
+partition: Any
+replace: Any
+rfind: Any
+rindex: Any
+rjust: Any
+rpartition: Any
+rsplit: Any
+rstrip: Any
+split: Any
+splitlines: Any
+startswith: Any
+strip: Any
+swapcase: Any
+title: Any
+translate: Any
+upper: Any
+zfill: Any
+isnumeric: Any
+isdecimal: Any
+array: Any
+asarray: Any
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index c3b4374f4..879b3645d 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -12,6 +12,7 @@ NOTE: Many of the methods of ndarray have corresponding functions.
from numpy.core import numerictypes as _numerictypes
from numpy.core import dtype
from numpy.core.function_base import add_newdoc
+from numpy.core.overrides import array_function_like_doc
###############################################################################
#
@@ -786,7 +787,8 @@ add_newdoc('numpy.core', 'broadcast', ('reset',
add_newdoc('numpy.core.multiarray', 'array',
"""
- array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0)
+ array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0,
+ like=None)
Create an array.
@@ -829,6 +831,9 @@ add_newdoc('numpy.core.multiarray', 'array',
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -895,11 +900,14 @@ add_newdoc('numpy.core.multiarray', 'array',
matrix([[1, 2],
[3, 4]])
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
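The ``${ARRAY_FUNCTION_LIKE}`` placeholder is expanded once, at import time,
with plain ``str.replace``, so every creation function documents ``like``
with identical text. A standalone sketch of the pattern (the wording of the
substitute text below is a paraphrase; the real text lives in
``numpy.core.overrides.array_function_like_doc``):

    # Paraphrased stand-in for numpy.core.overrides.array_function_like_doc.
    array_function_like_doc = (
        "like : array_like\n"
        "    Reference object to allow the creation of arrays which are\n"
        "    not NumPy arrays."
    )

    doc = """
        ${ARRAY_FUNCTION_LIKE}
    """.replace("${ARRAY_FUNCTION_LIKE}", array_function_like_doc)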
add_newdoc('numpy.core.multiarray', 'empty',
"""
- empty(shape, dtype=float, order='C')
+ empty(shape, dtype=float, order='C', *, like=None)
Return a new array of given shape and type, without initializing entries.
@@ -914,6 +922,9 @@ add_newdoc('numpy.core.multiarray', 'empty',
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -946,7 +957,10 @@ add_newdoc('numpy.core.multiarray', 'empty',
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #uninitialized
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
add_newdoc('numpy.core.multiarray', 'scalar',
"""
@@ -964,7 +978,7 @@ add_newdoc('numpy.core.multiarray', 'scalar',
add_newdoc('numpy.core.multiarray', 'zeros',
"""
- zeros(shape, dtype=float, order='C')
+ zeros(shape, dtype=float, order='C', *, like=None)
Return a new array of given shape and type, filled with zeros.
@@ -979,6 +993,9 @@ add_newdoc('numpy.core.multiarray', 'zeros',
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -1013,7 +1030,10 @@ add_newdoc('numpy.core.multiarray', 'zeros',
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
@@ -1025,7 +1045,7 @@ add_newdoc('numpy.core.multiarray', 'set_typeDict',
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
- fromstring(string, dtype=float, count=-1, sep='')
+ fromstring(string, dtype=float, count=-1, sep='', *, like=None)
A new 1-D array initialized from text data in a string.
@@ -1058,6 +1078,9 @@ add_newdoc('numpy.core.multiarray', 'fromstring',
text, the binary mode of `fromstring` will first encode it into
bytes using either utf-8 (python 3) or the default encoding
(python 2), neither of which produces sane results.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -1081,7 +1104,10 @@ add_newdoc('numpy.core.multiarray', 'fromstring',
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
add_newdoc('numpy.core.multiarray', 'compare_chararrays',
"""
@@ -1122,7 +1148,7 @@ add_newdoc('numpy.core.multiarray', 'compare_chararrays',
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
- fromiter(iterable, dtype, count=-1)
+ fromiter(iterable, dtype, count=-1, *, like=None)
Create a new 1-dimensional array from an iterable object.
@@ -1135,6 +1161,9 @@ add_newdoc('numpy.core.multiarray', 'fromiter',
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -1152,11 +1181,14 @@ add_newdoc('numpy.core.multiarray', 'fromiter',
>>> np.fromiter(iterable, float)
array([ 0., 1., 4., 9., 16.])
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
- fromfile(file, dtype=float, count=-1, sep='', offset=0)
+ fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None)
Construct an array from data in a text or binary file.
@@ -1195,6 +1227,9 @@ add_newdoc('numpy.core.multiarray', 'fromfile',
Only permitted for binary files.
.. versionadded:: 1.17.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
See also
--------
@@ -1241,11 +1276,14 @@ add_newdoc('numpy.core.multiarray', 'fromfile',
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
- frombuffer(buffer, dtype=float, count=-1, offset=0)
+ frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None)
Interpret a buffer as a 1-dimensional array.
@@ -1259,6 +1297,9 @@ add_newdoc('numpy.core.multiarray', 'frombuffer',
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset (in bytes); default: 0.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Notes
-----
@@ -1283,7 +1324,10 @@ add_newdoc('numpy.core.multiarray', 'frombuffer',
>>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
@@ -1293,7 +1337,7 @@ add_newdoc('numpy.core.multiarray', 'correlate',
add_newdoc('numpy.core.multiarray', 'arange',
"""
- arange([start,] stop[, step,], dtype=None)
+ arange([start,] stop[, step,], dtype=None, *, like=None)
Return evenly spaced values within a given interval.
@@ -1322,6 +1366,9 @@ add_newdoc('numpy.core.multiarray', 'arange',
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -1350,7 +1397,10 @@ add_newdoc('numpy.core.multiarray', 'arange',
>>> np.arange(3,7,2)
array([3, 5])
- """)
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
@@ -3223,7 +3273,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
- arr.newbyteorder(new_order='S')
+ arr.newbyteorder(new_order='S', /)
Return the array with the same data viewed with a different byte order.
@@ -4689,14 +4739,14 @@ add_newdoc('numpy.core', 'ufunc', ('signature',
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
- reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
+ reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
- Reduces `a`'s dimension by one, by applying ufunc along one axis.
+ Reduces `array`'s dimension by one, by applying ufunc along one axis.
- Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
- :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
+ Let :math:`array.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
+ :math:`ufunc.reduce(array, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
- ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
+ ufunc to each :math:`array[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
@@ -4709,7 +4759,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
Parameters
----------
- a : array_like
+ array : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
@@ -4742,7 +4792,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
- the result will broadcast correctly against the original `arr`.
+ the result will broadcast correctly against the original `array`.
.. versionadded:: 1.7.0
initial : scalar, optional
@@ -4756,7 +4806,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
- of `a`, and selects elements to include in the reduction. Note
+ of `array`, and selects elements to include in the reduction. Note
that for ufuncs like ``minimum`` that do not have an identity
defined, one has to pass in also ``initial``.
@@ -4898,28 +4948,28 @@ add_newdoc('numpy.core', 'ufunc', ('accumulate',
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
- reduceat(a, indices, axis=0, dtype=None, out=None)
+ reduceat(array, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
- ``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
+ ``ufunc.reduce(array[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
- ``indices[i+1] = a.shape[axis]``.
+ ``indices[i+1] = array.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
- simply ``a[indices[i]]``.
- * if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised.
+ simply ``array[indices[i]]``.
+ * if ``indices[i] >= len(array)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
- larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
+ larger than `array` (this happens if ``len(indices) > array.shape[axis]``).
Parameters
----------
- a : array_like
+ array : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
@@ -4949,14 +4999,15 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat',
-----
A descriptive example:
- If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
- ``ufunc.reduceat(a, indices)[::2]`` where `indices` is
+ If `array` is 1-D, the function `ufunc.accumulate(array)` is the same as
+ ``ufunc.reduceat(array, indices)[::2]`` where `indices` is
``range(len(array) - 1)`` with a zero placed
in every other element:
- ``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
+ ``indices = zeros(2 * len(array) - 1)``,
+ ``indices[1::2] = range(1, len(array))``.
- Don't be fooled by this attribute's name: `reduceat(a)` is not
- necessarily smaller than `a`.
+ Don't be fooled by this attribute's name: `reduceat(array)` is not
+ necessarily smaller than `array`.
Examples
--------
@@ -5005,7 +5056,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat',
add_newdoc('numpy.core', 'ufunc', ('outer',
r"""
- outer(A, B, **kwargs)
+ outer(A, B, /, **kwargs)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
@@ -5075,7 +5126,7 @@ add_newdoc('numpy.core', 'ufunc', ('outer',
add_newdoc('numpy.core', 'ufunc', ('at',
"""
- at(a, indices, b=None)
+ at(a, indices, b=None, /)
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
@@ -5491,6 +5542,45 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""))
+add_newdoc('numpy.core.multiarray', 'dtype', ('metadata',
+ """
+ Either ``None`` or a readonly dictionary of metadata (mappingproxy).
+
+ The metadata field can be set using any dictionary at data-type
+ creation. NumPy currently has no uniform approach to propagating
+ metadata; although some array operations preserve it, there is no
+ guarantee that others will.
+
+ .. warning::
+
+ Although used in certain projects, this feature was long undocumented
+ and is not well supported. Some aspects of metadata propagation
+ are expected to change in the future.
+
+ Examples
+ --------
+
+ >>> dt = np.dtype(float, metadata={"key": "value"})
+ >>> dt.metadata["key"]
+ 'value'
+ >>> arr = np.array([1, 2, 3], dtype=dt)
+ >>> arr.dtype.metadata
+ mappingproxy({'key': 'value'})
+
+ Adding arrays with identical datatypes currently preserves the metadata:
+
+ >>> (arr + arr).dtype.metadata
+ mappingproxy({'key': 'value'})
+
+ But if the arrays have different dtype metadata, the metadata may be
+ dropped:
+
+ >>> dt2 = np.dtype(float, metadata={"key2": "value2"})
+ >>> arr2 = np.array([3, 2, 1], dtype=dt2)
+ >>> (arr + arr2).dtype.metadata is None
+ True # The metadata field is cleared so None is returned
+ """))
+
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
@@ -5647,7 +5737,7 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('type',
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
- newbyteorder(new_order='S')
+ newbyteorder(new_order='S', /)
Return a new dtype with a different byte order.
@@ -6023,7 +6113,7 @@ add_newdoc('numpy.core.numerictypes', 'generic',
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
- newbyteorder(new_order='S')
+ newbyteorder(new_order='S', /)
Return a new `dtype` with a different byte order.
diff --git a/numpy/core/_asarray.py b/numpy/core/_asarray.py
index 1b06c328f..a406308f3 100644
--- a/numpy/core/_asarray.py
+++ b/numpy/core/_asarray.py
@@ -3,7 +3,11 @@ Functions in the ``as*array`` family that promote array-likes into arrays.
`require` fits this category despite its name not matching this pattern.
"""
-from .overrides import set_module
+from .overrides import (
+ array_function_dispatch,
+ set_array_function_like_doc,
+ set_module,
+)
from .multiarray import array
@@ -11,8 +15,14 @@ __all__ = [
"asarray", "asanyarray", "ascontiguousarray", "asfortranarray", "require",
]
+
+def _asarray_dispatcher(a, dtype=None, order=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def asarray(a, dtype=None, order=None):
+def asarray(a, dtype=None, order=None, *, like=None):
"""Convert the input to an array.
Parameters
@@ -30,6 +40,9 @@ def asarray(a, dtype=None, order=None):
'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
'K' (keep) preserve input order
Defaults to 'C'.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -83,11 +96,20 @@ def asarray(a, dtype=None, order=None):
True
"""
+ if like is not None:
+ return _asarray_with_like(a, dtype=dtype, order=order, like=like)
+
return array(a, dtype, copy=False, order=order)
+_asarray_with_like = array_function_dispatch(
+ _asarray_dispatcher
+)(asarray)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def asanyarray(a, dtype=None, order=None):
+def asanyarray(a, dtype=None, order=None, *, like=None):
"""Convert the input to an ndarray, but pass ndarray subclasses through.
Parameters
@@ -105,6 +127,9 @@ def asanyarray(a, dtype=None, order=None):
'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
'K' (keep) preserve input order
Defaults to 'C'.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -140,11 +165,24 @@ def asanyarray(a, dtype=None, order=None):
True
"""
+ if like is not None:
+ return _asanyarray_with_like(a, dtype=dtype, order=order, like=like)
+
return array(a, dtype, copy=False, order=order, subok=True)
+_asanyarray_with_like = array_function_dispatch(
+ _asarray_dispatcher
+)(asanyarray)
+
+
+def _asarray_contiguous_fortran_dispatcher(a, dtype=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def ascontiguousarray(a, dtype=None):
+def ascontiguousarray(a, dtype=None, *, like=None):
"""
Return a contiguous array (ndim >= 1) in memory (C order).
@@ -154,6 +192,9 @@ def ascontiguousarray(a, dtype=None):
Input array.
dtype : str or dtype object, optional
Data-type of returned array.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -181,11 +222,20 @@ def ascontiguousarray(a, dtype=None):
so it will not preserve 0-d arrays.
"""
+ if like is not None:
+ return _ascontiguousarray_with_like(a, dtype=dtype, like=like)
+
return array(a, dtype, copy=False, order='C', ndmin=1)
+_ascontiguousarray_with_like = array_function_dispatch(
+ _asarray_contiguous_fortran_dispatcher
+)(ascontiguousarray)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def asfortranarray(a, dtype=None):
+def asfortranarray(a, dtype=None, *, like=None):
"""
Return an array (ndim >= 1) laid out in Fortran order in memory.
@@ -195,6 +245,9 @@ def asfortranarray(a, dtype=None):
Input array.
dtype : str or dtype object, optional
By default, the data-type is inferred from the input data.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -222,11 +275,24 @@ def asfortranarray(a, dtype=None):
so it will not preserve 0-d arrays.
"""
+ if like is not None:
+ return _asfortranarray_with_like(a, dtype=dtype, like=like)
+
return array(a, dtype, copy=False, order='F', ndmin=1)
+_asfortranarray_with_like = array_function_dispatch(
+ _asarray_contiguous_fortran_dispatcher
+)(asfortranarray)
+
+
+def _require_dispatcher(a, dtype=None, requirements=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def require(a, dtype=None, requirements=None):
+def require(a, dtype=None, requirements=None, *, like=None):
"""
Return an ndarray of the provided type that satisfies requirements.
@@ -250,6 +316,9 @@ def require(a, dtype=None, requirements=None):
* 'WRITEABLE' ('W') - ensure a writable array
* 'OWNDATA' ('O') - ensure an array that owns its own data
* 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -293,6 +362,14 @@ def require(a, dtype=None, requirements=None):
UPDATEIFCOPY : False
"""
+ if like is not None:
+ return _require_with_like(
+ a,
+ dtype=dtype,
+ requirements=requirements,
+ like=like,
+ )
+
possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
'A': 'A', 'ALIGNED': 'A',
@@ -327,3 +404,8 @@ def require(a, dtype=None, requirements=None):
arr = arr.copy(order)
break
return arr
+
+
+_require_with_like = array_function_dispatch(
+ _require_dispatcher
+)(require)
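As a quick orientation for the ``like=`` plumbing added throughout this file: the pattern is always an early dispatch through ``__array_function__`` when ``like`` is given, otherwise the plain implementation runs. A rough caller-side sketch, not part of the patch — the ``MyArray`` class is hypothetical and only illustrates the protocol hook:

    import numpy as np

    class MyArray:
        # Hypothetical duck array: implementing __array_function__ is all
        # the like= dispatch needs to hand control over to this type.
        def __array_function__(self, func, types, args, kwargs):
            print("dispatched:", getattr(func, "__name__", func))
            # A real duck array would build and return its own container;
            # this stub strips like= and falls back to plain NumPy.
            kwargs.pop("like", None)
            return func(*args, **kwargs)

    a = np.asarray([1, 2, 3], like=MyArray())   # prints "dispatched: asarray"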
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index 85853622a..449926f58 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -17,26 +17,25 @@ except ImportError:
IS_PYPY = platform.python_implementation() == 'PyPy'
-if (sys.byteorder == 'little'):
+if sys.byteorder == 'little':
_nbo = '<'
else:
_nbo = '>'
def _makenames_list(adict, align):
allfields = []
- fnames = list(adict.keys())
- for fname in fnames:
- obj = adict[fname]
+
+ for fname, obj in adict.items():
n = len(obj)
- if not isinstance(obj, tuple) or n not in [2, 3]:
+ if not isinstance(obj, tuple) or n not in (2, 3):
raise ValueError("entry not a 2- or 3- tuple")
- if (n > 2) and (obj[2] == fname):
+ if n > 2 and obj[2] == fname:
continue
num = int(obj[1])
- if (num < 0):
+ if num < 0:
raise ValueError("invalid offset.")
format = dtype(obj[0], align=align)
- if (n > 2):
+ if n > 2:
title = obj[2]
else:
title = None
@@ -68,7 +67,7 @@ def _usefields(adict, align):
res = adict[name]
formats.append(res[0])
offsets.append(res[1])
- if (len(res) > 2):
+ if len(res) > 2:
titles.append(res[2])
else:
titles.append(None)
@@ -108,7 +107,7 @@ def _array_descr(descriptor):
for field in ordered_fields:
if field[1] > offset:
num = field[1] - offset
- result.append(('', '|V%d' % num))
+ result.append(('', f'|V{num}'))
offset += num
elif field[1] < offset:
raise ValueError(
@@ -128,7 +127,7 @@ def _array_descr(descriptor):
if descriptor.itemsize > offset:
num = descriptor.itemsize - offset
- result.append(('', '|V%d' % num))
+ result.append(('', f'|V{num}'))
return result
@@ -191,7 +190,7 @@ def _commastring(astr):
(order1, order2))
order = order1
- if order in ['|', '=', _nbo]:
+ if order in ('|', '=', _nbo):
order = ''
dtype = order + dtype
if (repeats == ''):
@@ -223,7 +222,7 @@ def _getintp_ctype():
val = dummy_ctype(np.intp)
else:
char = dtype('p').char
- if (char == 'i'):
+ if char == 'i':
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
@@ -379,7 +378,7 @@ def _newnames(datatype, order):
raise ValueError(f"unknown field name: {name}") from None
seen.add(name)
return tuple(list(order) + nameslist)
- raise ValueError("unsupported order value: %s" % (order,))
+ raise ValueError(f"unsupported order value: {order}")
def _copy_fields(ary):
"""Return copy of structured array with padding between fields removed.
@@ -680,8 +679,7 @@ def __dtype_from_pep3118(stream, is_subdtype):
if not (is_padding and name is None):
if name is not None and name in field_spec['names']:
- raise RuntimeError("Duplicate field name '%s' in PEP3118 format"
- % name)
+ raise RuntimeError(f"Duplicate field name '{name}' in PEP3118 format")
field_spec['names'].append(name)
field_spec['formats'].append(value)
field_spec['offsets'].append(offset)
@@ -717,7 +715,7 @@ def _fix_names(field_spec):
j = 0
while True:
- name = 'f{}'.format(j)
+ name = f'f{j}'
if name not in names:
break
j = j + 1
@@ -790,7 +788,7 @@ def _ufunc_doc_signature_formatter(ufunc):
if ufunc.nin == 1:
in_args = 'x'
else:
- in_args = ', '.join('x{}'.format(i+1) for i in range(ufunc.nin))
+ in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))
# output arguments are both keyword or positional
if ufunc.nout == 0:
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 5d9642ea8..ad1530419 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -1628,6 +1628,3 @@ def set_string_function(f, repr=True):
return multiarray.set_string_function(_default_array_str, 0)
else:
return multiarray.set_string_function(f, repr)
-
-set_string_function(_default_array_str, False)
-set_string_function(_default_array_repr, True)
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 86e28b104..8ef9392d3 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -1042,7 +1042,7 @@ def make_arrays(funcdict):
#ifndef NPY_DISABLE_OPTIMIZATION
#include "{dname}.dispatch.h"
#endif
- NPY_CPU_DISPATCH_CALL_XB({name}_functions[{k}] = {tname}_{name})
+ NPY_CPU_DISPATCH_CALL_XB({name}_functions[{k}] = {tname}_{name});
""").format(
dname=dname, name=name, tname=tname, k=k
))
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index f8c11c015..b07def736 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -2059,15 +2059,10 @@ def clip(a, a_min, a_max, out=None, **kwargs):
----------
a : array_like
Array containing elements to clip.
- a_min : scalar or array_like or None
- Minimum value. If None, clipping is not performed on lower
- interval edge. Not more than one of `a_min` and `a_max` may be
- None.
- a_max : scalar or array_like or None
- Maximum value. If None, clipping is not performed on upper
- interval edge. Not more than one of `a_min` and `a_max` may be
- None. If `a_min` or `a_max` are array_like, then the three
- arrays will be broadcasted to match their shapes.
+ a_min, a_max : array_like or None
+ Minimum and maximum value. If ``None``, clipping is not performed on
+ the corresponding edge. Only one of `a_min` and `a_max` may be
+ ``None``. Both are broadcast against `a`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
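The consolidated ``a_min``/``a_max`` description above is compact; a small sketch of the cases it covers:

    import numpy as np

    a = np.arange(5)                   # array([0, 1, 2, 3, 4])
    np.clip(a, 1, 3)                   # both edges      -> array([1, 1, 2, 3, 3])
    np.clip(a, None, 2)                # upper edge only -> array([0, 1, 2, 2, 2])
    np.clip(a, [3, 2, 1, 0, 0], None)  # array_like bound, broadcast against a
                                       #                 -> array([3, 2, 2, 3, 4])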
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index f57e95742..8a1fee99b 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -34,6 +34,11 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
.. versionchanged:: 1.16.0
Non-scalar `start` and `stop` are now supported.
+ .. versionchanged:: 1.20.0
+ Values are rounded towards ``-inf`` instead of ``0`` when an
+ integer ``dtype`` is specified. The old behavior can
+ still be obtained with ``np.linspace(start, stop, num).astype(int)``
+
Parameters
----------
start : array_like
@@ -161,6 +166,9 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
if axis != 0:
y = _nx.moveaxis(y, 0, axis)
+ if _nx.issubdtype(dtype, _nx.integer):
+ _nx.floor(y, out=y)
+
if retstep:
return y.astype(dtype, copy=False), step
else:
@@ -199,7 +207,7 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
- base : float, optional
+ base : array_like, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
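The ``versionchanged:: 1.20.0`` note on ``linspace`` above is easiest to see with samples that straddle zero; a short sketch of old versus new rounding:

    import numpy as np

    np.linspace(-1, 1, 4, dtype=int)   # floors towards -inf -> array([-1, -1,  0,  1])
    np.linspace(-1, 1, 4).astype(int)  # old truncation      -> array([-1,  0,  0,  1])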
diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi
new file mode 100644
index 000000000..c6ebbd5f5
--- /dev/null
+++ b/numpy/core/function_base.pyi
@@ -0,0 +1,56 @@
+import sys
+from typing import overload, Tuple, Union, Sequence, Any
+
+from numpy import ndarray, inexact, _NumberLike
+from numpy.typing import ArrayLike, DtypeLike, _SupportsArray
+
+if sys.version_info >= (3, 8):
+ from typing import SupportsIndex, Literal
+else:
+ from typing_extensions import Literal, Protocol
+
+ class SupportsIndex(Protocol):
+ def __index__(self) -> int: ...
+
+# TODO: wait for support for recursive types
+_ArrayLikeNested = Sequence[Sequence[Any]]
+_ArrayLikeNumber = Union[
+ _NumberLike, Sequence[_NumberLike], ndarray, _SupportsArray, _ArrayLikeNested
+]
+@overload
+def linspace(
+ start: _ArrayLikeNumber,
+ stop: _ArrayLikeNumber,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: Literal[False] = ...,
+ dtype: DtypeLike = ...,
+ axis: SupportsIndex = ...,
+) -> ndarray: ...
+@overload
+def linspace(
+ start: _ArrayLikeNumber,
+ stop: _ArrayLikeNumber,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: Literal[True] = ...,
+ dtype: DtypeLike = ...,
+ axis: SupportsIndex = ...,
+) -> Tuple[ndarray, inexact]: ...
+def logspace(
+ start: _ArrayLikeNumber,
+ stop: _ArrayLikeNumber,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ base: _ArrayLikeNumber = ...,
+ dtype: DtypeLike = ...,
+ axis: SupportsIndex = ...,
+) -> ndarray: ...
+def geomspace(
+ start: _ArrayLikeNumber,
+ stop: _ArrayLikeNumber,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ dtype: DtypeLike = ...,
+ axis: SupportsIndex = ...,
+) -> ndarray: ...
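For context, the two ``linspace`` overloads in this new stub let a type checker key the return type off the ``retstep`` literal; a sketch of what gets inferred:

    import numpy as np

    y = np.linspace(0.0, 1.0, 5)                       # inferred: ndarray
    y2, step = np.linspace(0.0, 1.0, 5, retstep=True)  # inferred: Tuple[ndarray, inexact]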
diff --git a/numpy/core/include/numpy/arrayscalars.h b/numpy/core/include/numpy/arrayscalars.h
index 6dce88df3..b282a2cd4 100644
--- a/numpy/core/include/numpy/arrayscalars.h
+++ b/numpy/core/include/numpy/arrayscalars.h
@@ -134,8 +134,7 @@ typedef struct {
char obval;
} PyScalarObject;
-#define PyStringScalarObject PyStringObject
-#define PyStringScalarObject PyStringObject
+#define PyStringScalarObject PyBytesObject
typedef struct {
/* note that the PyObject_HEAD macro lives right here */
PyUnicodeObject base;
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index bbcf468c1..6eca4afdb 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -1759,8 +1759,8 @@ typedef struct {
} npy_stride_sort_item;
/************************************************************
- * This is the form of the struct that's returned pointed by the
- * PyCObject attribute of an array __array_struct__. See
+ * This is the form of the struct that's stored in the
+ * PyCapsule returned by an array's __array_struct__ attribute. See
* https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full
* documentation.
************************************************************/
diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h
index 509e23a51..4dbf9d84e 100644
--- a/numpy/core/include/numpy/npy_cpu.h
+++ b/numpy/core/include/numpy/npy_cpu.h
@@ -24,7 +24,6 @@
#define _NPY_CPUARCH_H_
#include "numpyconfig.h"
-#include <string.h> /* for memcpy */
#if defined( __i386__ ) || defined(i386) || defined(_M_IX86)
/*
@@ -111,8 +110,6 @@
information about your platform (OS, CPU and compiler)
#endif
-#define NPY_COPY_PYOBJECT_PTR(dst, src) memcpy(dst, src, sizeof(PyObject *))
-
#if (defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64))
#define NPY_CPU_HAVE_UNALIGNED_ACCESS 1
#else
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index 10325050d..225c9554c 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -141,9 +141,9 @@ def empty_like(prototype, dtype=None, order=None, subok=None, shape=None):
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
-def concatenate(arrays, axis=None, out=None):
+def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None):
"""
- concatenate((a1, a2, ...), axis=0, out=None)
+ concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting="same_kind")
Join a sequence of arrays along an existing axis.
@@ -159,6 +159,16 @@ def concatenate(arrays, axis=None, out=None):
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
out argument were specified.
+ dtype : str or dtype
+ If provided, the destination array will have this dtype. Cannot be
+ provided together with `out`.
+
+ .. versionadded:: 1.20.0
+
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Defaults to 'same_kind'.
+
+ .. versionadded:: 1.20.0
Returns
-------
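A sketch of the two keywords added above, with values chosen so the casting rule matters:

    import numpy as np

    a = np.array([1.5, 2.5])
    b = np.array([3.5])
    np.concatenate((a, b), dtype=np.float32)   # same-kind cast, allowed by default
    # float64 -> int64 is not a 'same_kind' cast, so opt in explicitly:
    np.concatenate((a, b), dtype=np.int64, casting='unsafe')  # -> array([1, 2, 3])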
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 84066dd30..a023bf0da 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -21,7 +21,7 @@ from .multiarray import (
from . import overrides
from . import umath
from . import shape_base
-from .overrides import set_module
+from .overrides import set_array_function_like_doc, set_module
from .umath import (multiply, invert, sin, PINF, NAN)
from . import numerictypes
from .numerictypes import longlong, intc, int_, float_, complex_, bool_
@@ -141,8 +141,13 @@ def zeros_like(a, dtype=None, order='K', subok=True, shape=None):
return res
+def _ones_dispatcher(shape, dtype=None, order=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def ones(shape, dtype=None, order='C'):
+def ones(shape, dtype=None, order='C', *, like=None):
"""
Return a new array of given shape and type, filled with ones.
@@ -157,6 +162,9 @@ def ones(shape, dtype=None, order='C'):
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -189,11 +197,19 @@ def ones(shape, dtype=None, order='C'):
[1., 1.]])
"""
+ if like is not None:
+ return _ones_with_like(shape, dtype=dtype, order=order, like=like)
+
a = empty(shape, dtype, order)
multiarray.copyto(a, 1, casting='unsafe')
return a
+_ones_with_like = array_function_dispatch(
+ _ones_dispatcher
+)(ones)
+
+
def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
return (a,)
@@ -265,8 +281,13 @@ def ones_like(a, dtype=None, order='K', subok=True, shape=None):
return res
+def _full_dispatcher(shape, fill_value, dtype=None, order=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def full(shape, fill_value, dtype=None, order='C'):
+def full(shape, fill_value, dtype=None, order='C', *, like=None):
"""
Return a new array of given shape and type, filled with `fill_value`.
@@ -282,6 +303,9 @@ def full(shape, fill_value, dtype=None, order='C'):
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -309,6 +333,9 @@ def full(shape, fill_value, dtype=None, order='C'):
[1, 2]])
"""
+ if like is not None:
+ return _full_with_like(shape, fill_value, dtype=dtype, order=order, like=like)
+
if dtype is None:
fill_value = asarray(fill_value)
dtype = fill_value.dtype
@@ -317,6 +344,11 @@ def full(shape, fill_value, dtype=None, order='C'):
return a
+_full_with_like = array_function_dispatch(
+ _full_dispatcher
+)(full)
+
+
def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None):
return (a,)
@@ -1754,8 +1786,13 @@ def indices(dimensions, dtype=int, sparse=False):
return res
+def _fromfunction_dispatcher(function, shape, *, dtype=None, like=None, **kwargs):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def fromfunction(function, shape, *, dtype=float, **kwargs):
+def fromfunction(function, shape, *, dtype=float, like=None, **kwargs):
"""
Construct an array by executing a function over each coordinate.
@@ -1776,6 +1813,9 @@ def fromfunction(function, shape, *, dtype=float, **kwargs):
dtype : data-type, optional
Data-type of the coordinate arrays passed to `function`.
By default, `dtype` is float.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -1806,10 +1846,18 @@ def fromfunction(function, shape, *, dtype=float, **kwargs):
[2, 3, 4]])
"""
+ if like is not None:
+ return _fromfunction_with_like(function, shape, dtype=dtype, like=like, **kwargs)
+
args = indices(shape, dtype=dtype)
return function(*args, **kwargs)
+_fromfunction_with_like = array_function_dispatch(
+ _fromfunction_dispatcher
+)(fromfunction)
+
+
def _frombuffer(buf, dtype, shape, order):
return frombuffer(buf, dtype=dtype).reshape(shape, order=order)
@@ -2082,8 +2130,13 @@ def _maketup(descr, val):
return tuple(res)
+def _identity_dispatcher(n, dtype=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def identity(n, dtype=None):
+def identity(n, dtype=None, *, like=None):
"""
Return the identity array.
@@ -2096,6 +2149,9 @@ def identity(n, dtype=None):
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -2111,8 +2167,16 @@ def identity(n, dtype=None):
[0., 0., 1.]])
"""
+ if like is not None:
+ return _identity_with_like(n, dtype=dtype, like=like)
+
from numpy import eye
- return eye(n, dtype=dtype)
+ return eye(n, dtype=dtype, like=like)
+
+
+_identity_with_like = array_function_dispatch(
+ _identity_dispatcher
+)(identity)
def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py
index 816b11293..c2b5fb7fa 100644
--- a/numpy/core/overrides.py
+++ b/numpy/core/overrides.py
@@ -12,6 +12,27 @@ from numpy.compat._inspect import getargspec
ARRAY_FUNCTION_ENABLED = bool(
int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 1)))
+array_function_like_doc = (
+ """like : array_like
+ Reference object to allow the creation of arrays which are not
+ NumPy arrays. If an array-like passed in as ``like`` supports
+ the ``__array_function__`` protocol, the result will be defined
+ by it. In this case, it ensures the creation of an array object
+ compatible with the one passed in via this argument.
+
+ .. note::
+ The ``like`` keyword is an experimental feature pending
+ acceptance of :ref:`NEP 35 <NEP35>`.
+)
+
+def set_array_function_like_doc(public_api):
+ if public_api.__doc__ is not None:
+ public_api.__doc__ = public_api.__doc__.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ )
+ return public_api
+
add_docstring(
implement_array_function,
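A sketch of the substitution ``set_array_function_like_doc`` performs; the ``demo`` function here is hypothetical:

    def demo(a, *, like=None):
        """Process `a`.

        Parameters
        ----------
        ${ARRAY_FUNCTION_LIKE}
        """

    demo = set_array_function_like_doc(demo)
    assert "Reference object" in demo.__doc__   # placeholder expanded in place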
diff --git a/numpy/core/records.py b/numpy/core/records.py
index 0d3fd9118..e95be0e3f 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -40,7 +40,7 @@ from collections import Counter, OrderedDict
from . import numeric as sb
from . import numerictypes as nt
from numpy.compat import (
- isfileobj, os_fspath, contextlib_nullcontext
+ os_fspath, contextlib_nullcontext
)
from numpy.core.overrides import set_module
from .arrayprint import get_printoptions
@@ -847,13 +847,12 @@ def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
return _array
def get_remaining_size(fd):
+ pos = fd.tell()
try:
- fn = fd.fileno()
- except AttributeError:
- return os.path.getsize(fd.name) - fd.tell()
- st = os.fstat(fn)
- size = st.st_size - fd.tell()
- return size
+ fd.seek(0, 2)
+ return fd.tell() - pos
+ finally:
+ fd.seek(pos, 0)
def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
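The rewritten ``get_remaining_size`` above needs only ``seek``/``tell``, so it works for any seekable stream rather than only real file descriptors; a self-contained sketch of its behavior:

    import io

    def get_remaining_size(fd):      # mirror of the rewrite above
        pos = fd.tell()
        try:
            fd.seek(0, 2)            # 2 == os.SEEK_END
            return fd.tell() - pos
        finally:
            fd.seek(pos, 0)          # restore the caller's position

    buf = io.BytesIO(b"0123456789")
    buf.seek(3)
    assert get_remaining_size(buf) == 7
    assert buf.tell() == 3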
@@ -911,7 +910,9 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
elif isinstance(shape, int):
shape = (shape,)
- if isfileobj(fd):
+ if hasattr(fd, 'readinto'):
+ # GH issue 2504: fd supports the io.RawIOBase or io.BufferedIOBase interface.
+ # Example of fd: gzip, BytesIO, BufferedReader
# file already opened
ctx = contextlib_nullcontext(fd)
else:
@@ -1036,7 +1037,7 @@ def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
array('def', dtype='<U3')
"""
- if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and
+ if ((isinstance(obj, (type(None), str)) or hasattr(obj, 'readinto')) and
formats is None and dtype is None):
raise ValueError("Must define formats (or dtype) if object is "
"None, string, or an open file")
@@ -1078,7 +1079,7 @@ def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
new = new.copy()
return new
- elif isfileobj(obj):
+ elif hasattr(obj, 'readinto'):
return fromfile(obj, dtype=dtype, shape=shape, offset=offset)
elif isinstance(obj, ndarray):
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 39bfc56ca..39a787897 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -9,7 +9,7 @@ from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
-from distutils.sysconfig import get_config_var
+from sysconfig import get_config_var
from numpy.compat import npy_load_module
from setup_common import * # noqa: F403
@@ -687,26 +687,6 @@ def configuration(parent_package='',top_path=None):
subst_dict)
#######################################################################
- # npysort library #
- #######################################################################
-
- # This library is created for the build but it is not installed
- npysort_sources = [join('src', 'common', 'npy_sort.h.src'),
- join('src', 'npysort', 'quicksort.c.src'),
- join('src', 'npysort', 'mergesort.c.src'),
- join('src', 'npysort', 'timsort.c.src'),
- join('src', 'npysort', 'heapsort.c.src'),
- join('src', 'npysort', 'radixsort.c.src'),
- join('src', 'common', 'npy_partition.h.src'),
- join('src', 'npysort', 'selection.c.src'),
- join('src', 'common', 'npy_binsearch.h.src'),
- join('src', 'npysort', 'binsearch.c.src'),
- ]
- config.add_library('npysort',
- sources=npysort_sources,
- include_dirs=[])
-
- #######################################################################
# multiarray_tests module #
#######################################################################
@@ -790,6 +770,8 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'descriptor.h'),
join('src', 'multiarray', 'dtypemeta.h'),
join('src', 'multiarray', 'dragon4.h'),
+ join('src', 'multiarray', 'einsum_debug.h'),
+ join('src', 'multiarray', 'einsum_sumprod.h'),
join('src', 'multiarray', 'getset.h'),
join('src', 'multiarray', 'hashdescr.h'),
join('src', 'multiarray', 'iterators.h'),
@@ -825,7 +807,7 @@ def configuration(parent_package='',top_path=None):
join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
# add library sources as distuils does not consider libraries
# dependencies
- ] + npysort_sources + npymath_sources
+ ] + npymath_sources
multiarray_src = [
join('src', 'multiarray', 'abstractdtypes.c'),
@@ -854,6 +836,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'dragon4.c'),
join('src', 'multiarray', 'dtype_transfer.c'),
join('src', 'multiarray', 'einsum.c.src'),
+ join('src', 'multiarray', 'einsum_sumprod.c.src'),
join('src', 'multiarray', 'flagsobject.c'),
join('src', 'multiarray', 'getset.c'),
join('src', 'multiarray', 'hashdescr.c'),
@@ -878,6 +861,16 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'typeinfo.c'),
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'vdot.c'),
+ join('src', 'common', 'npy_sort.h.src'),
+ join('src', 'npysort', 'quicksort.c.src'),
+ join('src', 'npysort', 'mergesort.c.src'),
+ join('src', 'npysort', 'timsort.c.src'),
+ join('src', 'npysort', 'heapsort.c.src'),
+ join('src', 'npysort', 'radixsort.c.src'),
+ join('src', 'common', 'npy_partition.h.src'),
+ join('src', 'npysort', 'selection.c.src'),
+ join('src', 'common', 'npy_binsearch.h.src'),
+ join('src', 'npysort', 'binsearch.c.src'),
]
#######################################################################
@@ -928,7 +921,7 @@ def configuration(parent_package='',top_path=None):
config.add_extension('_multiarray_umath',
sources=multiarray_src + umath_src +
- npymath_sources + common_src +
+ common_src +
[generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
@@ -939,7 +932,7 @@ def configuration(parent_package='',top_path=None):
],
depends=deps + multiarray_deps + umath_deps +
common_deps,
- libraries=['npymath', 'npysort'],
+ libraries=['npymath'],
extra_info=extra_info)
#######################################################################
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index 7a76bbf9d..e4dc30d4c 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -539,7 +539,8 @@ def _accumulate(values):
def _concatenate_shapes(shapes, axis):
"""Given array shapes, return the resulting shape and slices prefixes.
- These help in nested concatation.
+ These help in nested concatenation.
+
Returns
-------
shape: tuple of int
diff --git a/numpy/core/src/common/array_assign.c b/numpy/core/src/common/array_assign.c
index d626d1260..67abcae24 100644
--- a/numpy/core/src/common/array_assign.c
+++ b/numpy/core/src/common/array_assign.c
@@ -14,7 +14,6 @@
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
#include <numpy/ndarraytypes.h>
-
#include "npy_config.h"
#include "npy_pycompat.h"
@@ -67,12 +66,12 @@ broadcast_strides(int ndim, npy_intp const *shape,
broadcast_error: {
PyObject *errmsg;
- errmsg = PyUString_FromFormat("could not broadcast %s from shape ",
+ errmsg = PyUnicode_FromFormat("could not broadcast %s from shape ",
strides_name);
PyUString_ConcatAndDel(&errmsg,
build_shape_string(strides_ndim, strides_shape));
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" into shape "));
+ PyUnicode_FromString(" into shape "));
PyUString_ConcatAndDel(&errmsg,
build_shape_string(ndim, shape));
PyErr_SetObject(PyExc_ValueError, errmsg);
diff --git a/numpy/core/src/common/lowlevel_strided_loops.h b/numpy/core/src/common/lowlevel_strided_loops.h
index f2f12a55b..12aa61822 100644
--- a/numpy/core/src/common/lowlevel_strided_loops.h
+++ b/numpy/core/src/common/lowlevel_strided_loops.h
@@ -30,10 +30,9 @@
* Use NPY_AUXDATA_CLONE and NPY_AUXDATA_FREE to deal with this data.
*
*/
-typedef void (PyArray_StridedUnaryOp)(char *dst, npy_intp dst_stride,
- char *src, npy_intp src_stride,
- npy_intp N, npy_intp src_itemsize,
- NpyAuxData *transferdata);
+typedef int (PyArray_StridedUnaryOp)(
+ char *dst, npy_intp dst_stride, char *src, npy_intp src_stride,
+ npy_intp N, npy_intp src_itemsize, NpyAuxData *transferdata);
/*
* This is for pointers to functions which behave exactly as
@@ -43,31 +42,10 @@ typedef void (PyArray_StridedUnaryOp)(char *dst, npy_intp dst_stride,
* In particular, the 'i'-th element is operated on if and only if
* mask[i*mask_stride] is true.
*/
-typedef void (PyArray_MaskedStridedUnaryOp)(char *dst, npy_intp dst_stride,
- char *src, npy_intp src_stride,
- npy_bool *mask, npy_intp mask_stride,
- npy_intp N, npy_intp src_itemsize,
- NpyAuxData *transferdata);
-
-/*
- * This function pointer is for binary operations that input two
- * arbitrarily strided one-dimensional array segments and output
- * an arbitrarily strided array segment of the same size.
- * It may be a fully general function, or a specialized function
- * when the strides or item size have particular known values.
- *
- * Examples of binary operations are the basic arithmetic operations,
- * logical operators AND, OR, and many others.
- *
- * The 'transferdata' parameter is slightly special, following a
- * generic auxiliary data pattern defined in ndarraytypes.h
- * Use NPY_AUXDATA_CLONE and NPY_AUXDATA_FREE to deal with this data.
- *
- */
-typedef void (PyArray_StridedBinaryOp)(char *dst, npy_intp dst_stride,
- char *src0, npy_intp src0_stride,
- char *src1, npy_intp src1_stride,
- npy_intp N, NpyAuxData *transferdata);
+typedef int (PyArray_MaskedStridedUnaryOp)(
+ char *dst, npy_intp dst_stride, char *src, npy_intp src_stride,
+ npy_bool *mask, npy_intp mask_stride,
+ npy_intp N, npy_intp src_itemsize, NpyAuxData *transferdata);
/*
* Gives back a function pointer to a specialized function for copying
@@ -271,6 +249,7 @@ PyArray_CastRawArrays(npy_intp count,
* The return value is the number of elements it couldn't copy. A return value
* of 0 means all elements were copied, a larger value means the end of
* the n-dimensional array was reached before 'count' elements were copied.
+ * A negative return value indicates an error occurred.
*
* ndim:
* The number of dimensions of the n-dimensional array.
diff --git a/numpy/core/src/common/npy_binsearch.h.src b/numpy/core/src/common/npy_binsearch.h.src
index ce3b34b0e..052c44482 100644
--- a/numpy/core/src/common/npy_binsearch.h.src
+++ b/numpy/core/src/common/npy_binsearch.h.src
@@ -40,12 +40,12 @@ typedef struct {
* cfloat, cdouble, clongdouble, datetime, timedelta#
*/
-NPY_VISIBILITY_HIDDEN void
+NPY_NO_EXPORT void
binsearch_@side@_@suff@(const char *arr, const char *key, char *ret,
npy_intp arr_len, npy_intp key_len,
npy_intp arr_str, npy_intp key_str, npy_intp ret_str,
PyArrayObject *unused);
-NPY_VISIBILITY_HIDDEN int
+NPY_NO_EXPORT int
argbinsearch_@side@_@suff@(const char *arr, const char *key,
const char *sort, char *ret,
npy_intp arr_len, npy_intp key_len,
@@ -54,12 +54,12 @@ argbinsearch_@side@_@suff@(const char *arr, const char *key,
PyArrayObject *unused);
/**end repeat1**/
-NPY_VISIBILITY_HIDDEN void
+NPY_NO_EXPORT void
npy_binsearch_@side@(const char *arr, const char *key, char *ret,
npy_intp arr_len, npy_intp key_len,
npy_intp arr_str, npy_intp key_str,
npy_intp ret_str, PyArrayObject *cmp);
-NPY_VISIBILITY_HIDDEN int
+NPY_NO_EXPORT int
npy_argbinsearch_@side@(const char *arr, const char *key,
const char *sort, char *ret,
npy_intp arr_len, npy_intp key_len,
diff --git a/numpy/core/src/common/npy_cblas.h b/numpy/core/src/common/npy_cblas.h
index 97308238a..072993ec2 100644
--- a/numpy/core/src/common/npy_cblas.h
+++ b/numpy/core/src/common/npy_cblas.h
@@ -47,8 +47,10 @@ enum CBLAS_SIDE {CblasLeft=141, CblasRight=142};
#ifdef HAVE_BLAS_ILP64
#define CBLAS_INT npy_int64
+#define CBLAS_INT_MAX NPY_MAX_INT64
#else
#define CBLAS_INT int
+#define CBLAS_INT_MAX INT_MAX
#endif
#define BLASNAME(name) CBLAS_FUNC(name)
@@ -59,6 +61,39 @@ enum CBLAS_SIDE {CblasLeft=141, CblasRight=142};
#undef BLASINT
#undef BLASNAME
+
+/*
+ * Convert NumPy stride to BLAS stride. Returns 0 if conversion cannot be done
+ * (BLAS won't handle negative or zero strides the way we want).
+ */
+static NPY_INLINE CBLAS_INT
+blas_stride(npy_intp stride, unsigned itemsize)
+{
+ /*
+ * Should probably check pointer alignment also, but this may cause
+ * problems if we require complex to be 16 byte aligned.
+ */
+ if (stride > 0 && (stride % itemsize) == 0) {
+ stride /= itemsize;
+ if (stride <= CBLAS_INT_MAX) {
+ return stride;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Define a chunksize for CBLAS.
+ *
+ * The chunksize is the greatest power of two less than CBLAS_INT_MAX.
+ */
+#if NPY_MAX_INTP > CBLAS_INT_MAX
+# define NPY_CBLAS_CHUNK (CBLAS_INT_MAX / 2 + 1)
+#else
+# define NPY_CBLAS_CHUNK NPY_MAX_INTP
+#endif
+
+
#ifdef __cplusplus
}
#endif
diff --git a/numpy/core/src/common/npy_cpu_dispatch.h b/numpy/core/src/common/npy_cpu_dispatch.h
index 846d1ebb9..274520852 100644
--- a/numpy/core/src/common/npy_cpu_dispatch.h
+++ b/numpy/core/src/common/npy_cpu_dispatch.h
@@ -217,44 +217,49 @@
* func_type the_callee(const int *src, int *dst, func_type *cb)
* {
* // direct call
- * NPY_CPU_DISPATCH_CALL(dispatch_me, (src, dst))
+ * NPY_CPU_DISPATCH_CALL(dispatch_me, (src, dst));
* // assign the pointer
- * NPY_CPU_DISPATCH_CALL(*cb = dispatch_me)
+ * *cb = NPY_CPU_DISPATCH_CALL(dispatch_me);
+ * // or
+ * NPY_CPU_DISPATCH_CALL(*cb = dispatch_me);
* // return the pointer
- * NPY_CPU_DISPATCH_CALL(return dispatch_me)
+ * return NPY_CPU_DISPATCH_CALL(dispatch_me);
* }
*/
#define NPY_CPU_DISPATCH_CALL(...) \
- if (0) {/*DUMMY*/} \
NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, NPY_CPU_DISPATCH_CALL_CB_, __VA_ARGS__) \
NPY__CPU_DISPATCH_BASELINE_CALL(NPY_CPU_DISPATCH_CALL_BASE_CB_, __VA_ARGS__)
// Preprocessor callbacks
#define NPY_CPU_DISPATCH_CALL_CB_(TESTED_FEATURES, TARGET_NAME, LEFT, ...) \
- else if (TESTED_FEATURES) { NPY_CAT(NPY_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__; }
+ (TESTED_FEATURES) ? (NPY_CAT(NPY_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__) :
#define NPY_CPU_DISPATCH_CALL_BASE_CB_(LEFT, ...) \
- else { LEFT __VA_ARGS__; }
+ (LEFT __VA_ARGS__)
/**
* Macro NPY_CPU_DISPATCH_CALL_XB(LEFT, ...)
*
- * Same as `NPY_CPU_DISPATCH_DECLARE` but exclude the baseline declration even
- * if it was provided within the configration statments.
+ * Same as `NPY_CPU_DISPATCH_CALL` but excludes the baseline declaration even
+ * if it was provided within the configuration statements.
+ * Returns void.
*/
+#define NPY_CPU_DISPATCH_CALL_XB_CB_(TESTED_FEATURES, TARGET_NAME, LEFT, ...) \
+ (TESTED_FEATURES) ? (void) (NPY_CAT(NPY_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__) :
#define NPY_CPU_DISPATCH_CALL_XB(...) \
- if (0) {/*DUMMY*/} \
- NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, NPY_CPU_DISPATCH_CALL_CB_, __VA_ARGS__)
+ NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, NPY_CPU_DISPATCH_CALL_XB_CB_, __VA_ARGS__) \
+ ((void) 0 /* discarded expression value */)
/**
* Macro NPY_CPU_DISPATCH_CALL_ALL(LEFT, ...)
*
* Same as `NPY_CPU_DISPATCH_CALL` but dispatching all the required optimizations for
* the exported functions and variables instead of highest interested one.
+ * Returns void.
*/
#define NPY_CPU_DISPATCH_CALL_ALL(...) \
- NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, NPY_CPU_DISPATCH_CALL_ALL_CB_, __VA_ARGS__) \
- NPY__CPU_DISPATCH_BASELINE_CALL(NPY_CPU_DISPATCH_CALL_ALL_BASE_CB_, __VA_ARGS__)
+ (NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, NPY_CPU_DISPATCH_CALL_ALL_CB_, __VA_ARGS__) \
+ NPY__CPU_DISPATCH_BASELINE_CALL(NPY_CPU_DISPATCH_CALL_ALL_BASE_CB_, __VA_ARGS__))
// Preprocessor callbacks
#define NPY_CPU_DISPATCH_CALL_ALL_CB_(TESTED_FEATURES, TARGET_NAME, LEFT, ...) \
- if (TESTED_FEATURES) { NPY_CAT(NPY_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__; }
+ ((TESTED_FEATURES) ? (NPY_CAT(NPY_CAT(LEFT, _), TARGET_NAME) __VA_ARGS__) : (void) 0),
#define NPY_CPU_DISPATCH_CALL_ALL_BASE_CB_(LEFT, ...) \
- { LEFT __VA_ARGS__; }
+ ( LEFT __VA_ARGS__ )
#endif // NPY_CPU_DISPATCH_H_
diff --git a/numpy/core/src/common/npy_cpu_features.c.src b/numpy/core/src/common/npy_cpu_features.c.src
index dfcf98c74..69bbc83a2 100644
--- a/numpy/core/src/common/npy_cpu_features.c.src
+++ b/numpy/core/src/common/npy_cpu_features.c.src
@@ -19,11 +19,11 @@ npy__cpu_init_features(void);
* Multiple features can be present, and separated by space, comma, or tab.
* Raises an error if parsing fails or if the feature was not enabled
*/
-static void
+static int
npy__cpu_try_disable_env(void);
/* Ensure the build's CPU baseline features are supported at runtime */
-static void
+static int
npy__cpu_validate_baseline(void);
/******************** Public Definitions *********************/
@@ -40,11 +40,12 @@ NPY_VISIBILITY_HIDDEN int
npy_cpu_init(void)
{
npy__cpu_init_features();
- npy__cpu_validate_baseline();
- npy__cpu_try_disable_env();
-
- if (PyErr_Occurred())
+ if (npy__cpu_validate_baseline() < 0) {
+ return -1;
+ }
+ if (npy__cpu_try_disable_env() < 0) {
return -1;
+ }
return 0;
}
@@ -142,7 +143,7 @@ npy__cpu_dispatch_fid(const char *feature)
return 0;
}
-static void
+static int
npy__cpu_validate_baseline(void)
{
#if !defined(NPY_DISABLE_OPTIMIZATION) && NPY_WITH_CPU_BASELINE_N > 0
@@ -165,16 +166,18 @@ npy__cpu_validate_baseline(void)
"(" NPY_WITH_CPU_BASELINE ") but your machine doesn't support:\n(%s).",
baseline_failure
);
+ return -1;
}
#endif
+ return 0;
}
-static void
+static int
npy__cpu_try_disable_env(void)
{
char *disenv = getenv("NPY_DISABLE_CPU_FEATURES");
if (disenv == NULL || disenv[0] == 0) {
- return;
+ return 0;
}
#define NPY__CPU_ENV_ERR_HEAD \
"During parsing environment variable 'NPY_DISABLE_CPU_FEATURES':\n"
@@ -187,7 +190,7 @@ npy__cpu_try_disable_env(void)
"Length of environment variable 'NPY_DISABLE_CPU_FEATURES' is %d, only %d accepted",
var_len, NPY__MAX_VAR_LEN - 1
);
- return;
+ return -1;
}
char disable_features[NPY__MAX_VAR_LEN];
memcpy(disable_features, disenv, var_len);
@@ -210,7 +213,7 @@ npy__cpu_try_disable_env(void)
"(" NPY_WITH_CPU_BASELINE ").",
feature
);
- break;
+ return -1;
}
// check if the feature is part of dispatched features
int feature_id = npy__cpu_dispatch_fid(feature);
@@ -236,36 +239,43 @@ npy__cpu_try_disable_env(void)
*nexist_cur = '\0';
if (nexist[0] != '\0') {
*(nexist_cur-1) = '\0'; // trim the last space
- PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
- NPY__CPU_ENV_ERR_HEAD
- "You cannot disable CPU features (%s), since "
- "they are not part of the dispatched optimizations\n"
- "(" NPY_WITH_CPU_DISPATCH ").",
- nexist
- );
+ if (PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
+ NPY__CPU_ENV_ERR_HEAD
+ "You cannot disable CPU features (%s), since "
+ "they are not part of the dispatched optimizations\n"
+ "(" NPY_WITH_CPU_DISPATCH ").",
+ nexist
+ ) < 0) {
+ return -1;
+ }
}
*notsupp_cur = '\0';
if (notsupp[0] != '\0') {
*(notsupp_cur-1) = '\0'; // trim the last space
- PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
- NPY__CPU_ENV_ERR_HEAD
- "You cannot disable CPU features (%s), since "
- "they are not supported by your machine.",
- notsupp
- );
+ if (PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
+ NPY__CPU_ENV_ERR_HEAD
+ "You cannot disable CPU features (%s), since "
+ "they are not supported by your machine.",
+ notsupp
+ ) < 0) {
+ return -1;
+ }
}
#else
- PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
- NPY__CPU_ENV_ERR_HEAD
- "You cannot use environment variable 'NPY_DISABLE_CPU_FEATURES', since "
- #ifdef NPY_DISABLE_OPTIMIZATION
- "the NumPy library was compiled with optimization disabled."
- #else
- "the NumPy library was compiled without any dispatched optimizations."
- #endif
- );
+ if (PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
+ NPY__CPU_ENV_ERR_HEAD
+ "You cannot use environment variable 'NPY_DISABLE_CPU_FEATURES', since "
+ #ifdef NPY_DISABLE_OPTIMIZATION
+ "the NumPy library was compiled with optimization disabled."
+ #else
+ "the NumPy library was compiled without any dispatched optimizations."
+ #endif
+ ) < 0) {
+ return -1;
+ }
#endif
+ return 0;
}
/****************************************************************
diff --git a/numpy/core/src/common/npy_partition.h.src b/numpy/core/src/common/npy_partition.h.src
index 97dc2536b..72c2095f1 100644
--- a/numpy/core/src/common/npy_partition.h.src
+++ b/numpy/core/src/common/npy_partition.h.src
@@ -42,12 +42,12 @@
* npy_cdouble, npy_clongdouble#
*/
-NPY_VISIBILITY_HIDDEN int introselect_@suff@(@type@ *v, npy_intp num,
+NPY_NO_EXPORT int introselect_@suff@(@type@ *v, npy_intp num,
npy_intp kth,
npy_intp * pivots,
npy_intp * npiv,
void *NOT_USED);
-NPY_VISIBILITY_HIDDEN int aintroselect_@suff@(@type@ *v, npy_intp* tosort, npy_intp num,
+NPY_NO_EXPORT int aintroselect_@suff@(@type@ *v, npy_intp* tosort, npy_intp num,
npy_intp kth,
npy_intp * pivots,
npy_intp * npiv,
diff --git a/numpy/core/src/common/npy_sort.h.src b/numpy/core/src/common/npy_sort.h.src
index 16a105499..ddbde0c9b 100644
--- a/numpy/core/src/common/npy_sort.h.src
+++ b/numpy/core/src/common/npy_sort.h.src
@@ -33,14 +33,14 @@ static NPY_INLINE int npy_get_msb(npy_uintp unum)
* cfloat, cdouble, clongdouble, datetime, timedelta#
*/
-int quicksort_@suff@(void *vec, npy_intp cnt, void *null);
-int heapsort_@suff@(void *vec, npy_intp cnt, void *null);
-int mergesort_@suff@(void *vec, npy_intp cnt, void *null);
-int timsort_@suff@(void *vec, npy_intp cnt, void *null);
-int aquicksort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+NPY_NO_EXPORT int quicksort_@suff@(void *vec, npy_intp cnt, void *null);
+NPY_NO_EXPORT int heapsort_@suff@(void *vec, npy_intp cnt, void *null);
+NPY_NO_EXPORT int mergesort_@suff@(void *vec, npy_intp cnt, void *null);
+NPY_NO_EXPORT int timsort_@suff@(void *vec, npy_intp cnt, void *null);
+NPY_NO_EXPORT int aquicksort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+NPY_NO_EXPORT int aheapsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+NPY_NO_EXPORT int amergesort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+NPY_NO_EXPORT int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
/**end repeat**/
@@ -50,8 +50,8 @@ int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
* longlong, ulonglong#
*/
-int radixsort_@suff@(void *vec, npy_intp cnt, void *null);
-int aradixsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+NPY_NO_EXPORT int radixsort_@suff@(void *vec, npy_intp cnt, void *null);
+NPY_NO_EXPORT int aradixsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
/**end repeat**/
@@ -69,14 +69,14 @@ int aradixsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
* #suff = string, unicode#
*/
-int quicksort_@suff@(void *vec, npy_intp cnt, void *arr);
-int heapsort_@suff@(void *vec, npy_intp cnt, void *arr);
-int mergesort_@suff@(void *vec, npy_intp cnt, void *arr);
-int timsort_@suff@(void *vec, npy_intp cnt, void *arr);
-int aquicksort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int aheapsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int amergesort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int quicksort_@suff@(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int heapsort_@suff@(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int mergesort_@suff@(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int timsort_@suff@(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int aquicksort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int aheapsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int amergesort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
/**end repeat**/
@@ -88,13 +88,13 @@ int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
*/
-int npy_quicksort(void *vec, npy_intp cnt, void *arr);
-int npy_heapsort(void *vec, npy_intp cnt, void *arr);
-int npy_mergesort(void *vec, npy_intp cnt, void *arr);
-int npy_timsort(void *vec, npy_intp cnt, void *arr);
-int npy_aquicksort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int npy_atimsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_quicksort(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_heapsort(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_mergesort(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_timsort(void *vec, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_aquicksort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+NPY_NO_EXPORT int npy_atimsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
#endif
diff --git a/numpy/core/src/common/simd/avx2/arithmetic.h b/numpy/core/src/common/simd/avx2/arithmetic.h
index 9d8b4ab5e..4af9e4d17 100644
--- a/numpy/core/src/common/simd/avx2/arithmetic.h
+++ b/numpy/core/src/common/simd/avx2/arithmetic.h
@@ -72,4 +72,48 @@
#define npyv_div_f32 _mm256_div_ps
#define npyv_div_f64 _mm256_div_pd
+/***************************
+ * FUSED
+ ***************************/
+#ifdef NPY_HAVE_FMA3
+ // multiply and add, a*b + c
+ #define npyv_muladd_f32 _mm256_fmadd_ps
+ #define npyv_muladd_f64 _mm256_fmadd_pd
+ // multiply and subtract, a*b - c
+ #define npyv_mulsub_f32 _mm256_fmsub_ps
+ #define npyv_mulsub_f64 _mm256_fmsub_pd
+ // negate multiply and add, -(a*b) + c
+ #define npyv_nmuladd_f32 _mm256_fnmadd_ps
+ #define npyv_nmuladd_f64 _mm256_fnmadd_pd
+ // negate multiply and subtract, -(a*b) - c
+ #define npyv_nmulsub_f32 _mm256_fnmsub_ps
+ #define npyv_nmulsub_f64 _mm256_fnmsub_pd
+#else
+ // multiply and add, a*b + c
+ NPY_FINLINE npyv_f32 npyv_muladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return npyv_add_f32(npyv_mul_f32(a, b), c); }
+ NPY_FINLINE npyv_f64 npyv_muladd_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return npyv_add_f64(npyv_mul_f64(a, b), c); }
+ // multiply and subtract, a*b - c
+ NPY_FINLINE npyv_f32 npyv_mulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return npyv_sub_f32(npyv_mul_f32(a, b), c); }
+ NPY_FINLINE npyv_f64 npyv_mulsub_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return npyv_sub_f64(npyv_mul_f64(a, b), c); }
+ // negate multiply and add, -(a*b) + c
+ NPY_FINLINE npyv_f32 npyv_nmuladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return npyv_sub_f32(c, npyv_mul_f32(a, b)); }
+ NPY_FINLINE npyv_f64 npyv_nmuladd_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return npyv_sub_f64(c, npyv_mul_f64(a, b)); }
+ // negate multiply and subtract, -(a*b) - c
+ NPY_FINLINE npyv_f32 npyv_nmulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ {
+ npyv_f32 neg_a = npyv_xor_f32(a, npyv_setall_f32(-0.0f));
+ return npyv_sub_f32(npyv_mul_f32(neg_a, b), c);
+ }
+ NPY_FINLINE npyv_f64 npyv_nmulsub_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ {
+ npyv_f64 neg_a = npyv_xor_f64(a, npyv_setall_f64(-0.0));
+ return npyv_sub_f64(npyv_mul_f64(neg_a, b), c);
+ }
+#endif // !NPY_HAVE_FMA3
#endif // _NPY_SIMD_AVX2_ARITHMETIC_H
diff --git a/numpy/core/src/common/simd/avx512/arithmetic.h b/numpy/core/src/common/simd/avx512/arithmetic.h
index fcaef0efd..824ae818e 100644
--- a/numpy/core/src/common/simd/avx512/arithmetic.h
+++ b/numpy/core/src/common/simd/avx512/arithmetic.h
@@ -113,4 +113,20 @@ NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b)
#define npyv_div_f32 _mm512_div_ps
#define npyv_div_f64 _mm512_div_pd
+/***************************
+ * FUSED
+ ***************************/
+// multiply and add, a*b + c
+#define npyv_muladd_f32 _mm512_fmadd_ps
+#define npyv_muladd_f64 _mm512_fmadd_pd
+// multiply and subtract, a*b - c
+#define npyv_mulsub_f32 _mm512_fmsub_ps
+#define npyv_mulsub_f64 _mm512_fmsub_pd
+// negate multiply and add, -(a*b) + c
+#define npyv_nmuladd_f32 _mm512_fnmadd_ps
+#define npyv_nmuladd_f64 _mm512_fnmadd_pd
+// negate multiply and subtract, -(a*b) - c
+#define npyv_nmulsub_f32 _mm512_fnmsub_ps
+#define npyv_nmulsub_f64 _mm512_fnmsub_pd
+
#endif // _NPY_SIMD_AVX512_ARITHMETIC_H
diff --git a/numpy/core/src/common/simd/neon/arithmetic.h b/numpy/core/src/common/simd/neon/arithmetic.h
index ec8b8ecd0..5eeee1bb6 100644
--- a/numpy/core/src/common/simd/neon/arithmetic.h
+++ b/numpy/core/src/common/simd/neon/arithmetic.h
@@ -75,4 +75,47 @@
#endif
#define npyv_div_f64 vdivq_f64
+/***************************
+ * FUSED F32
+ ***************************/
+#ifdef NPY_HAVE_NEON_VFPV4 // FMA
+ // multiply and add, a*b + c
+ NPY_FINLINE npyv_f32 npyv_muladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vfmaq_f32(c, a, b); }
+ // multiply and subtract, a*b - c
+ NPY_FINLINE npyv_f32 npyv_mulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vfmaq_f32(vnegq_f32(c), a, b); }
+ // negate multiply and add, -(a*b) + c
+ NPY_FINLINE npyv_f32 npyv_nmuladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vfmsq_f32(c, a, b); }
+ // negate multiply and subtract, -(a*b) - c
+ NPY_FINLINE npyv_f32 npyv_nmulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vfmsq_f32(vnegq_f32(c), a, b); }
+#else
+ // multiply and add, a*b + c
+ NPY_FINLINE npyv_f32 npyv_muladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vmlaq_f32(c, a, b); }
+ // multiply and subtract, a*b - c
+ NPY_FINLINE npyv_f32 npyv_mulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vmlaq_f32(vnegq_f32(c), a, b); }
+ // negate multiply and add, -(a*b) + c
+ NPY_FINLINE npyv_f32 npyv_nmuladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vmlsq_f32(c, a, b); }
+ // negate multiply and subtract, -(a*b) - c
+ NPY_FINLINE npyv_f32 npyv_nmulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return vmlsq_f32(vnegq_f32(c), a, b); }
+#endif
+/***************************
+ * FUSED F64
+ ***************************/
+#if NPY_SIMD_F64
+ NPY_FINLINE npyv_f64 npyv_muladd_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return vfmaq_f64(c, a, b); }
+ NPY_FINLINE npyv_f64 npyv_mulsub_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return vfmaq_f64(vnegq_f64(c), a, b); }
+ NPY_FINLINE npyv_f64 npyv_nmuladd_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return vfmsq_f64(c, a, b); }
+ NPY_FINLINE npyv_f64 npyv_nmulsub_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return vfmsq_f64(vnegq_f64(c), a, b); }
+#endif // NPY_SIMD_F64
#endif // _NPY_SIMD_NEON_ARITHMETIC_H
diff --git a/numpy/core/src/common/simd/sse/arithmetic.h b/numpy/core/src/common/simd/sse/arithmetic.h
index 12d0af05c..717dacd39 100644
--- a/numpy/core/src/common/simd/sse/arithmetic.h
+++ b/numpy/core/src/common/simd/sse/arithmetic.h
@@ -91,5 +91,60 @@ NPY_FINLINE __m128i npyv_mul_u8(__m128i a, __m128i b)
// TODO: emulate integer division
#define npyv_div_f32 _mm_div_ps
#define npyv_div_f64 _mm_div_pd
-
+/***************************
+ * FUSED
+ ***************************/
+#ifdef NPY_HAVE_FMA3
+ // multiply and add, a*b + c
+ #define npyv_muladd_f32 _mm_fmadd_ps
+ #define npyv_muladd_f64 _mm_fmadd_pd
+ // multiply and subtract, a*b - c
+ #define npyv_mulsub_f32 _mm_fmsub_ps
+ #define npyv_mulsub_f64 _mm_fmsub_pd
+ // negate multiply and add, -(a*b) + c
+ #define npyv_nmuladd_f32 _mm_fnmadd_ps
+ #define npyv_nmuladd_f64 _mm_fnmadd_pd
+ // negate multiply and subtract, -(a*b) - c
+ #define npyv_nmulsub_f32 _mm_fnmsub_ps
+ #define npyv_nmulsub_f64 _mm_fnmsub_pd
+#elif defined(NPY_HAVE_FMA4)
+ // multiply and add, a*b + c
+ #define npyv_muladd_f32 _mm_macc_ps
+ #define npyv_muladd_f64 _mm_macc_pd
+ // multiply and subtract, a*b - c
+ #define npyv_mulsub_f32 _mm_msub_ps
+ #define npyv_mulsub_f64 _mm_msub_pd
+ // negate multiply and add, -(a*b) + c
+ #define npyv_nmuladd_f32 _mm_nmacc_ps
+ #define npyv_nmuladd_f64 _mm_nmacc_pd
+#else
+ // multiply and add, a*b + c
+ NPY_FINLINE npyv_f32 npyv_muladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return npyv_add_f32(npyv_mul_f32(a, b), c); }
+ NPY_FINLINE npyv_f64 npyv_muladd_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return npyv_add_f64(npyv_mul_f64(a, b), c); }
+ // multiply and subtract, a*b - c
+ NPY_FINLINE npyv_f32 npyv_mulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return npyv_sub_f32(npyv_mul_f32(a, b), c); }
+ NPY_FINLINE npyv_f64 npyv_mulsub_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return npyv_sub_f64(npyv_mul_f64(a, b), c); }
+ // negate multiply and add, -(a*b) + c
+ NPY_FINLINE npyv_f32 npyv_nmuladd_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ { return npyv_sub_f32(c, npyv_mul_f32(a, b)); }
+ NPY_FINLINE npyv_f64 npyv_nmuladd_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ { return npyv_sub_f64(c, npyv_mul_f64(a, b)); }
+#endif // NPY_HAVE_FMA3
+#ifndef NPY_HAVE_FMA3 // for FMA4 and NON-FMA3
+ // negate multiply and subtract, -(a*b) - c
+ NPY_FINLINE npyv_f32 npyv_nmulsub_f32(npyv_f32 a, npyv_f32 b, npyv_f32 c)
+ {
+ npyv_f32 neg_a = npyv_xor_f32(a, npyv_setall_f32(-0.0f));
+ return npyv_sub_f32(npyv_mul_f32(neg_a, b), c);
+ }
+ NPY_FINLINE npyv_f64 npyv_nmulsub_f64(npyv_f64 a, npyv_f64 b, npyv_f64 c)
+ {
+ npyv_f64 neg_a = npyv_xor_f64(a, npyv_setall_f64(-0.0));
+ return npyv_sub_f64(npyv_mul_f64(neg_a, b), c);
+ }
+#endif // !NPY_HAVE_FMA3
#endif // _NPY_SIMD_SSE_ARITHMETIC_H
diff --git a/numpy/core/src/common/simd/vsx/arithmetic.h b/numpy/core/src/common/simd/vsx/arithmetic.h
index dd23b5b11..6ef007676 100644
--- a/numpy/core/src/common/simd/vsx/arithmetic.h
+++ b/numpy/core/src/common/simd/vsx/arithmetic.h
@@ -100,4 +100,20 @@
#define npyv_div_f32 vec_div
#define npyv_div_f64 vec_div
+/***************************
+ * FUSED
+ ***************************/
+// multiply and add, a*b + c
+#define npyv_muladd_f32 vec_madd
+#define npyv_muladd_f64 vec_madd
+// multiply and subtract, a*b - c
+#define npyv_mulsub_f32 vec_msub
+#define npyv_mulsub_f64 vec_msub
+// negate multiply and add, -(a*b) + c
+#define npyv_nmuladd_f32 vec_nmsub // equivalent to -(a*b - c)
+#define npyv_nmuladd_f64 vec_nmsub
+// negate multiply and subtract, -(a*b) - c
+#define npyv_nmulsub_f32 vec_nmadd // equivalent to -(a*b + c)
+#define npyv_nmulsub_f64 vec_nmadd
+
#endif // _NPY_SIMD_VSX_ARITHMETIC_H
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index da631c830..fc4b2647a 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -9,8 +9,6 @@
#include "common.h"
#include "mem_overlap.h"
#include "npy_extint128.h"
-#include "common.h"
-
#if defined(MS_WIN32) || defined(__CYGWIN__)
#define EXPORT(x) __declspec(dllexport) x
@@ -1902,7 +1900,7 @@ PrintFloat_Printf_g(PyObject *obj, int precision)
PyOS_snprintf(str, sizeof(str), "%.*g", precision, val);
}
- return PyUString_FromString(str);
+ return PyUnicode_FromString(str);
}
diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c
index 795fc7315..887deff53 100644
--- a/numpy/core/src/multiarray/alloc.c
+++ b/numpy/core/src/multiarray/alloc.c
@@ -2,17 +2,12 @@
#include <Python.h>
#include "structmember.h"
-#if PY_VERSION_HEX >= 0x03060000
#include <pymem.h>
/* public api in 3.7 */
#if PY_VERSION_HEX < 0x03070000
#define PyTraceMalloc_Track _PyTraceMalloc_Track
#define PyTraceMalloc_Untrack _PyTraceMalloc_Untrack
#endif
-#else
-#define PyTraceMalloc_Track(...)
-#define PyTraceMalloc_Untrack(...)
-#endif
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c
index b8dc7d516..361964a5c 100644
--- a/numpy/core/src/multiarray/array_assign_array.c
+++ b/numpy/core/src/multiarray/array_assign_array.c
@@ -132,17 +132,22 @@ raw_array_assign_array(int ndim, npy_intp const *shape,
NPY_RAW_ITER_START(idim, ndim, coord, shape_it) {
/* Process the innermost dimension */
- stransfer(dst_data, dst_strides_it[0], src_data, src_strides_it[0],
- shape_it[0], src_itemsize, transferdata);
+ if (stransfer(
+ dst_data, dst_strides_it[0], src_data, src_strides_it[0],
+ shape_it[0], src_itemsize, transferdata) < 0) {
+ goto fail;
+ }
} NPY_RAW_ITER_TWO_NEXT(idim, ndim, coord, shape_it,
dst_data, dst_strides_it,
src_data, src_strides_it);
NPY_END_THREADS;
-
NPY_AUXDATA_FREE(transferdata);
-
- return (needs_api && PyErr_Occurred()) ? -1 : 0;
+ return 0;
+fail:
+ NPY_END_THREADS;
+ NPY_AUXDATA_FREE(transferdata);
+ return -1;
}
/*
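
The raw-assignment hunk above (and the scalar variant in the next file) switches from calling stransfer and probing PyErr_Occurred() afterwards to checking a new int return value and unwinding through a shared `fail` label. A dependency-free sketch of that error-propagation shape, with a hypothetical transfer callback standing in for stransfer:

    #include <stdio.h>

    /* Hypothetical stand-in for the strided-transfer callback. */
    typedef int (*transfer_fn)(char *dst, char *src, long n);

    static int copy_block(char *dst, char *src, long n)
    {
        for (long i = 0; i < n; i++) {
            dst[i] = src[i];
        }
        return 0;            /* < 0 would signal an error, as in the diff */
    }

    static int assign_all(char *dst, char *src, long n, transfer_fn stransfer)
    {
        if (stransfer(dst, src, n) < 0) {
            goto fail;       /* propagate the error after shared cleanup */
        }
        /* ... release threads / auxdata here on success ... */
        return 0;
    fail:
        /* ... release the same resources on the error path ... */
        return -1;
    }

    int main(void)
    {
        char src[4] = "abc", dst[4] = {0};
        printf("%d %s\n", assign_all(dst, src, 4, copy_block), dst);
        return 0;
    }
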
diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c
index 41eb75f1c..023772776 100644
--- a/numpy/core/src/multiarray/array_assign_scalar.c
+++ b/numpy/core/src/multiarray/array_assign_scalar.c
@@ -82,16 +82,21 @@ raw_array_assign_scalar(int ndim, npy_intp const *shape,
NPY_RAW_ITER_START(idim, ndim, coord, shape_it) {
/* Process the innermost dimension */
- stransfer(dst_data, dst_strides_it[0], src_data, 0,
- shape_it[0], src_itemsize, transferdata);
+ if (stransfer(
+ dst_data, dst_strides_it[0], src_data, 0,
+ shape_it[0], src_itemsize, transferdata) < 0) {
+ goto fail;
+ }
} NPY_RAW_ITER_ONE_NEXT(idim, ndim, coord,
shape_it, dst_data, dst_strides_it);
NPY_END_THREADS;
-
NPY_AUXDATA_FREE(transferdata);
-
- return (needs_api && PyErr_Occurred()) ? -1 : 0;
+ return 0;
+fail:
+ NPY_END_THREADS;
+ NPY_AUXDATA_FREE(transferdata);
+ return -1;
}
/*
diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c
index 382645ff5..3f3fd1387 100644
--- a/numpy/core/src/multiarray/array_coercion.c
+++ b/numpy/core/src/multiarray/array_coercion.c
@@ -495,12 +495,10 @@ PyArray_Pack(PyArray_Descr *descr, char *item, PyObject *value)
res = -1;
goto finish;
}
- stransfer(item, 0, data, 0, 1, tmp_descr->elsize, transferdata);
- NPY_AUXDATA_FREE(transferdata);
-
- if (needs_api && PyErr_Occurred()) {
+ if (stransfer(item, 0, data, 0, 1, tmp_descr->elsize, transferdata) < 0) {
res = -1;
}
+ NPY_AUXDATA_FREE(transferdata);
finish:
if (PyDataType_REFCHK(tmp_descr)) {
@@ -550,7 +548,7 @@ update_shape(int curr_ndim, int *max_ndim,
success = -1;
if (!sequence) {
/* Remove dimensions that we cannot use: */
- *max_ndim -= new_ndim + i;
+ *max_ndim -= new_ndim - i;
}
else {
assert(i == 0);
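
The one-character fix above (`+` becomes `-`) matters: when the first i of new_ndim candidate dimensions matched before the mismatch, only the remaining new_ndim - i dimensions can no longer be used, so *max_ndim must shrink by exactly that amount. A worked check with illustrative numbers:

    #include <assert.h>

    int main(void)
    {
        int max_ndim = 5;      /* e.g. 2 outer dims + 3 candidate inner dims */
        int new_ndim = 3;      /* dimensions proposed by the current element */
        int i = 1;             /* dims that matched before the mismatch      */
        max_ndim -= new_ndim - i;   /* keep the i dims that did match        */
        assert(max_ndim == 3);      /* old code subtracted new_ndim + i == 4 */
        return 0;
    }
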
diff --git a/numpy/core/src/multiarray/arrayfunction_override.c b/numpy/core/src/multiarray/arrayfunction_override.c
index 9ea8efdd9..613fe6b3f 100644
--- a/numpy/core/src/multiarray/arrayfunction_override.c
+++ b/numpy/core/src/multiarray/arrayfunction_override.c
@@ -26,7 +26,6 @@ static PyObject *
get_array_function(PyObject *obj)
{
static PyObject *ndarray_array_function = NULL;
- PyObject *array_function;
if (ndarray_array_function == NULL) {
ndarray_array_function = get_ndarray_array_function();
@@ -38,7 +37,7 @@ get_array_function(PyObject *obj)
return ndarray_array_function;
}
- array_function = PyArray_LookupSpecial(obj, "__array_function__");
+ PyObject *array_function = PyArray_LookupSpecial(obj, "__array_function__");
if (array_function == NULL && PyErr_Occurred()) {
PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
}
@@ -53,9 +52,7 @@ get_array_function(PyObject *obj)
static void
pyobject_array_insert(PyObject **array, int length, int index, PyObject *item)
{
- int j;
-
- for (j = length; j > index; j--) {
+ for (int j = length; j > index; j--) {
array[j] = array[j - 1];
}
array[index] = item;
@@ -74,18 +71,16 @@ get_implementing_args_and_methods(PyObject *relevant_args,
PyObject **methods)
{
int num_implementing_args = 0;
- Py_ssize_t i;
- int j;
PyObject **items = PySequence_Fast_ITEMS(relevant_args);
Py_ssize_t length = PySequence_Fast_GET_SIZE(relevant_args);
- for (i = 0; i < length; i++) {
+ for (Py_ssize_t i = 0; i < length; i++) {
int new_class = 1;
PyObject *argument = items[i];
/* Have we seen this type before? */
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
if (Py_TYPE(argument) == Py_TYPE(implementing_args[j])) {
new_class = 0;
break;
@@ -109,7 +104,7 @@ get_implementing_args_and_methods(PyObject *relevant_args,
/* "subclasses before superclasses, otherwise left to right" */
arg_index = num_implementing_args;
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
PyObject *other_type;
other_type = (PyObject *)Py_TYPE(implementing_args[j]);
if (PyObject_IsInstance(argument, other_type)) {
@@ -129,7 +124,7 @@ get_implementing_args_and_methods(PyObject *relevant_args,
return num_implementing_args;
fail:
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
Py_DECREF(implementing_args[j]);
Py_DECREF(methods[j]);
}
@@ -161,13 +156,10 @@ NPY_NO_EXPORT PyObject *
array_function_method_impl(PyObject *func, PyObject *types, PyObject *args,
PyObject *kwargs)
{
- Py_ssize_t j;
- PyObject *implementation, *result;
-
PyObject **items = PySequence_Fast_ITEMS(types);
Py_ssize_t length = PySequence_Fast_GET_SIZE(types);
- for (j = 0; j < length; j++) {
+ for (Py_ssize_t j = 0; j < length; j++) {
int is_subclass = PyObject_IsSubclass(
items[j], (PyObject *)&PyArray_Type);
if (is_subclass == -1) {
@@ -179,11 +171,11 @@ array_function_method_impl(PyObject *func, PyObject *types, PyObject *args,
}
}
- implementation = PyObject_GetAttr(func, npy_ma_str_implementation);
+ PyObject *implementation = PyObject_GetAttr(func, npy_ma_str_implementation);
if (implementation == NULL) {
return NULL;
}
- result = PyObject_Call(implementation, args, kwargs);
+ PyObject *result = PyObject_Call(implementation, args, kwargs);
Py_DECREF(implementation);
return result;
}
@@ -208,32 +200,32 @@ call_array_function(PyObject* argument, PyObject* method,
}
-/*
- * Implements the __array_function__ protocol for a function, as described in
- * in NEP-18. See numpy.core.overrides for a full docstring.
+/**
+ * Internal handler for __array_function__ dispatching. The helper returns
+ * either the result, or NotImplemented (as a borrowed reference).
+ *
+ * @param public_api The public API symbol used for dispatching
+ * @param relevant_args Arguments which may implement __array_function__
+ * @param args Original arguments
+ * @param kwargs Original keyword arguments
+ *
+ * @returns The result of the dispatched version, or a borrowed reference
+ * to NotImplemented to indicate the default implementation should
+ * be used.
*/
NPY_NO_EXPORT PyObject *
-array_implement_array_function(
- PyObject *NPY_UNUSED(dummy), PyObject *positional_args)
+array_implement_array_function_internal(
+ PyObject *public_api, PyObject *relevant_args,
+ PyObject *args, PyObject *kwargs)
{
- PyObject *implementation, *public_api, *relevant_args, *args, *kwargs;
-
- PyObject *types = NULL;
PyObject *implementing_args[NPY_MAXARGS];
PyObject *array_function_methods[NPY_MAXARGS];
+ PyObject *types = NULL;
- int j, any_overrides;
- int num_implementing_args = 0;
PyObject *result = NULL;
static PyObject *errmsg_formatter = NULL;
- if (!PyArg_UnpackTuple(
- positional_args, "implement_array_function", 5, 5,
- &implementation, &public_api, &relevant_args, &args, &kwargs)) {
- return NULL;
- }
-
relevant_args = PySequence_Fast(
relevant_args,
"dispatcher for __array_function__ did not return an iterable");
@@ -242,7 +234,7 @@ array_implement_array_function(
}
/* Collect __array_function__ implementations */
- num_implementing_args = get_implementing_args_and_methods(
+ int num_implementing_args = get_implementing_args_and_methods(
relevant_args, implementing_args, array_function_methods);
if (num_implementing_args == -1) {
goto cleanup;
@@ -254,15 +246,19 @@ array_implement_array_function(
* arguments implement __array_function__ at all (e.g., if they are all
* built-in types).
*/
- any_overrides = 0;
- for (j = 0; j < num_implementing_args; j++) {
+ int any_overrides = 0;
+ for (int j = 0; j < num_implementing_args; j++) {
if (!is_default_array_function(array_function_methods[j])) {
any_overrides = 1;
break;
}
}
if (!any_overrides) {
- result = PyObject_Call(implementation, args, kwargs);
+ /*
+ * When the default implementation should be called, return
+ * `Py_NotImplemented` to indicate this.
+ */
+ result = Py_NotImplemented;
goto cleanup;
}
@@ -275,14 +271,14 @@ array_implement_array_function(
if (types == NULL) {
goto cleanup;
}
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
PyObject *arg_type = (PyObject *)Py_TYPE(implementing_args[j]);
Py_INCREF(arg_type);
PyTuple_SET_ITEM(types, j, arg_type);
}
/* Call __array_function__ methods */
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
PyObject *argument = implementing_args[j];
PyObject *method = array_function_methods[j];
@@ -319,7 +315,7 @@ array_implement_array_function(
}
cleanup:
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
Py_DECREF(implementing_args[j]);
Py_DECREF(array_function_methods[j]);
}
@@ -330,6 +326,92 @@ cleanup:
/*
+ * Implements the __array_function__ protocol for a Python function, as
+ * described in NEP-18. See numpy.core.overrides for a full docstring.
+ */
+NPY_NO_EXPORT PyObject *
+array_implement_array_function(
+ PyObject *NPY_UNUSED(dummy), PyObject *positional_args)
+{
+ PyObject *implementation, *public_api, *relevant_args, *args, *kwargs;
+
+ if (!PyArg_UnpackTuple(
+ positional_args, "implement_array_function", 5, 5,
+ &implementation, &public_api, &relevant_args, &args, &kwargs)) {
+ return NULL;
+ }
+
+ /* Remove `like=` kwarg, which is NumPy-exclusive and thus not present
+ * in downstream libraries.
+ */
+ if (kwargs != NULL && PyDict_Contains(kwargs, npy_ma_str_like)) {
+ PyDict_DelItem(kwargs, npy_ma_str_like);
+ }
+
+ PyObject *res = array_implement_array_function_internal(
+ public_api, relevant_args, args, kwargs);
+
+ if (res == Py_NotImplemented) {
+ return PyObject_Call(implementation, args, kwargs);
+ }
+ return res;
+}
+
+
+/*
+ * Implements the __array_function__ protocol for C array creation functions
+ * only. Added as an extension to NEP-18 in an effort to bring NEP-35 to
+ * life with minimal dispatch overhead.
+ */
+NPY_NO_EXPORT PyObject *
+array_implement_c_array_function_creation(
+ const char *function_name, PyObject *args, PyObject *kwargs)
+{
+ if (kwargs == NULL) {
+ return Py_NotImplemented;
+ }
+
+ /* Remove `like=` kwarg, which is NumPy-exclusive and thus not present
+     * in downstream libraries. If that key isn't present, return
+     * NotImplemented and let the originating call continue.
+ */
+ if (!PyDict_Contains(kwargs, npy_ma_str_like)) {
+ return Py_NotImplemented;
+ }
+
+ PyObject *relevant_args = PyTuple_Pack(1,
+ PyDict_GetItem(kwargs, npy_ma_str_like));
+ if (relevant_args == NULL) {
+ return NULL;
+ }
+ PyDict_DelItem(kwargs, npy_ma_str_like);
+
+ PyObject *numpy_module = PyImport_Import(npy_ma_str_numpy);
+ if (numpy_module == NULL) {
+ return NULL;
+ }
+
+ PyObject *public_api = PyObject_GetAttrString(numpy_module, function_name);
+ Py_DECREF(numpy_module);
+ if (public_api == NULL) {
+ return NULL;
+ }
+ if (!PyCallable_Check(public_api)) {
+ Py_DECREF(public_api);
+ return PyErr_Format(PyExc_RuntimeError,
+ "numpy.%s is not callable.",
+ function_name);
+ }
+
+ PyObject* result = array_implement_array_function_internal(
+ public_api, relevant_args, args, kwargs);
+
+ Py_DECREF(public_api);
+ return result;
+}
+
+
+/*
* Python wrapper for get_implementing_args_and_methods, for testing purposes.
*/
NPY_NO_EXPORT PyObject *
@@ -337,8 +419,6 @@ array__get_implementing_args(
PyObject *NPY_UNUSED(dummy), PyObject *positional_args)
{
PyObject *relevant_args;
- int j;
- int num_implementing_args = 0;
PyObject *implementing_args[NPY_MAXARGS];
PyObject *array_function_methods[NPY_MAXARGS];
PyObject *result = NULL;
@@ -355,7 +435,7 @@ array__get_implementing_args(
return NULL;
}
- num_implementing_args = get_implementing_args_and_methods(
+ int num_implementing_args = get_implementing_args_and_methods(
relevant_args, implementing_args, array_function_methods);
if (num_implementing_args == -1) {
goto cleanup;
@@ -366,14 +446,14 @@ array__get_implementing_args(
if (result == NULL) {
goto cleanup;
}
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
PyObject *argument = implementing_args[j];
Py_INCREF(argument);
PyList_SET_ITEM(result, j, argument);
}
cleanup:
- for (j = 0; j < num_implementing_args; j++) {
+ for (int j = 0; j < num_implementing_args; j++) {
Py_DECREF(implementing_args[j]);
Py_DECREF(array_function_methods[j]);
}
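
Both callers added above follow the same contract with the new internal helper: a result equal to Py_NotImplemented is a borrowed reference meaning "run the default implementation", so it is compared by pointer and never DECREF'd. A tiny dependency-free model of that sentinel contract (nothing below is the CPython API; NOT_IMPLEMENTED merely stands in for Py_NotImplemented):

    #include <stdio.h>

    /* A module-level sentinel object stands in for Py_NotImplemented. */
    static int not_implemented_sentinel;
    #define NOT_IMPLEMENTED ((void *)&not_implemented_sentinel)

    /* Hypothetical dispatcher: returns an owned result, NULL on error,
     * or the borrowed NOT_IMPLEMENTED sentinel to request the fallback. */
    static void *try_override(int have_override, void *result)
    {
        return have_override ? result : NOT_IMPLEMENTED;
    }

    int main(void)
    {
        int value = 42;
        void *res = try_override(0, &value);
        if (res == NOT_IMPLEMENTED) {
            /* Sentinel is borrowed: nothing to free, just take the default. */
            puts("falling back to the default implementation");
        }
        return 0;
    }
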
diff --git a/numpy/core/src/multiarray/arrayfunction_override.h b/numpy/core/src/multiarray/arrayfunction_override.h
index 0d224e2b6..fdcf1746d 100644
--- a/numpy/core/src/multiarray/arrayfunction_override.h
+++ b/numpy/core/src/multiarray/arrayfunction_override.h
@@ -10,6 +10,10 @@ array__get_implementing_args(
PyObject *NPY_UNUSED(dummy), PyObject *positional_args);
NPY_NO_EXPORT PyObject *
+array_implement_c_array_function_creation(
+ const char *function_name, PyObject *args, PyObject *kwargs);
+
+NPY_NO_EXPORT PyObject *
array_function_method_impl(PyObject *func, PyObject *types, PyObject *args,
PyObject *kwargs);
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 95c650674..5da1b5f29 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -416,7 +416,7 @@ WARN_IN_DEALLOC(PyObject* warning, const char * msg) {
if (PyErr_WarnEx(warning, msg, 1) < 0) {
PyObject * s;
- s = PyUString_FromString("array_dealloc");
+ s = PyUnicode_FromString("array_dealloc");
if (s) {
PyErr_WriteUnraisable(s);
Py_DECREF(s);
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 9508fb5ad..4767901ef 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -648,7 +648,7 @@ static PyObject *
OBJECT_getitem(void *ip, void *NPY_UNUSED(ap))
{
PyObject *obj;
- NPY_COPY_PYOBJECT_PTR(&obj, ip);
+ memcpy(&obj, ip, sizeof(obj));
if (obj == NULL) {
Py_RETURN_NONE;
}
@@ -664,12 +664,12 @@ OBJECT_setitem(PyObject *op, void *ov, void *NPY_UNUSED(ap))
{
PyObject *obj;
- NPY_COPY_PYOBJECT_PTR(&obj, ov);
+ memcpy(&obj, ov, sizeof(obj));
Py_INCREF(op);
Py_XDECREF(obj);
- NPY_COPY_PYOBJECT_PTR(ov, &op);
+ memcpy(ov, &op, sizeof(op));
return PyErr_Occurred() ? -1 : 0;
}
@@ -865,7 +865,7 @@ VOID_setitem(PyObject *op, void *input, void *vap)
npy_intp names_size = PyTuple_GET_SIZE(descr->names);
if (names_size != PyTuple_Size(op)) {
- errmsg = PyUString_FromFormat(
+ errmsg = PyUnicode_FromFormat(
"could not assign tuple of length %zd to structure "
"with %" NPY_INTP_FMT " fields.",
PyTuple_Size(op), names_size);
@@ -2237,11 +2237,11 @@ OBJECT_copyswapn(PyObject **dst, npy_intp dstride, PyObject **src,
dstp = (unsigned char*)dst;
srcp = (unsigned char*)src;
for (i = 0; i < n; i++) {
- NPY_COPY_PYOBJECT_PTR(&tmp, srcp);
+ memcpy(&tmp, srcp, sizeof(tmp));
Py_XINCREF(tmp);
- NPY_COPY_PYOBJECT_PTR(&tmp, dstp);
+ memcpy(&tmp, dstp, sizeof(tmp));
Py_XDECREF(tmp);
- NPY_COPY_PYOBJECT_PTR(dstp, srcp);
+ memcpy(dstp, srcp, sizeof(tmp));
dstp += dstride;
srcp += sstride;
}
@@ -2265,11 +2265,11 @@ OBJECT_copyswap(PyObject **dst, PyObject **src, int NPY_UNUSED(swap),
}
else {
PyObject *tmp;
- NPY_COPY_PYOBJECT_PTR(&tmp, src);
+ memcpy(&tmp, src, sizeof(tmp));
Py_XINCREF(tmp);
- NPY_COPY_PYOBJECT_PTR(&tmp, dst);
+ memcpy(&tmp, dst, sizeof(tmp));
Py_XDECREF(tmp);
- NPY_COPY_PYOBJECT_PTR(dst, src);
+ memcpy(dst, src, sizeof(tmp));
}
}
}
@@ -2686,7 +2686,7 @@ OBJECT_nonzero (PyObject **ip, PyArrayObject *ap)
}
else {
PyObject *obj;
- NPY_COPY_PYOBJECT_PTR(&obj, ip);
+ memcpy(&obj, ip, sizeof(obj));
if (obj == NULL) {
return NPY_FALSE;
}
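
Replacing NPY_COPY_PYOBJECT_PTR with plain memcpy above relies on memcpy being the portable way to move a pointer through a possibly misaligned char buffer; for a fixed sizeof(ptr) count, compilers lower it to a single load/store. A small demonstration with an int* standing in for PyObject*:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
        int value = 42;
        int *src = &value, *dst = NULL;
        char buffer[sizeof(int *) + 1];

        /* Store the pointer at an odd offset: alignment is not guaranteed,
         * which is exactly why memcpy is used instead of a direct cast. */
        memcpy(buffer + 1, &src, sizeof(src));
        memcpy(&dst, buffer + 1, sizeof(dst));
        assert(dst == &value && *dst == 42);
        return 0;
    }
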
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index 8b482dc03..af40cdc2c 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -267,7 +267,7 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
child = (PyArray_Descr*)PyTuple_GetItem(item, 0);
offset_obj = PyTuple_GetItem(item, 1);
- new_offset = PyInt_AsLong(offset_obj);
+ new_offset = PyLong_AsLong(offset_obj);
if (error_converting(new_offset)) {
return -1;
}
@@ -931,7 +931,7 @@ _descriptor_from_pep3118_format(char const *s)
}
*p = '\0';
- str = PyUString_FromStringAndSize(buf, strlen(buf));
+ str = PyUnicode_FromStringAndSize(buf, strlen(buf));
if (str == NULL) {
free(buf);
return NULL;
diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c
index 92ab75053..43d88271b 100644
--- a/numpy/core/src/multiarray/calculation.c
+++ b/numpy/core/src/multiarray/calculation.c
@@ -392,7 +392,7 @@ __New_PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject *out,
else {
val = PyArray_DIM(arrnew,i);
}
- PyTuple_SET_ITEM(newshape, i, PyInt_FromLong((long)val));
+ PyTuple_SET_ITEM(newshape, i, PyLong_FromLong((long)val));
}
arr2 = (PyArrayObject *)PyArray_Reshape(arr1, newshape);
Py_DECREF(arr1);
@@ -1023,7 +1023,7 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o
if (min != NULL) {
if (PyArray_ISUNSIGNED(self)) {
int cmp;
- zero = PyInt_FromLong(0);
+ zero = PyLong_FromLong(0);
cmp = PyObject_RichCompareBool(min, zero, Py_LT);
if (cmp == -1) {
Py_DECREF(zero);
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index 2abc79167..6af71f351 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -12,7 +12,6 @@
#include "abstractdtypes.h"
#include "usertypes.h"
-#include "common.h"
#include "npy_buffer.h"
#include "get_attr_string.h"
@@ -127,26 +126,6 @@ PyArray_DTypeFromObject(PyObject *obj, int maxdims, PyArray_Descr **out_dtype)
return 0;
}
-
-/* new reference */
-NPY_NO_EXPORT PyArray_Descr *
-_array_typedescr_fromstr(char const *c_str)
-{
- PyArray_Descr *descr = NULL;
- PyObject *stringobj = PyString_FromString(c_str);
-
- if (stringobj == NULL) {
- return NULL;
- }
- if (PyArray_DescrConverter(stringobj, &descr) != NPY_SUCCEED) {
- Py_DECREF(stringobj);
- return NULL;
- }
- Py_DECREF(stringobj);
- return descr;
-}
-
-
NPY_NO_EXPORT char *
index2ptr(PyArrayObject *mp, npy_intp i)
{
@@ -169,7 +148,7 @@ NPY_NO_EXPORT int
_zerofill(PyArrayObject *ret)
{
if (PyDataType_REFCHK(PyArray_DESCR(ret))) {
- PyObject *zero = PyInt_FromLong(0);
+ PyObject *zero = PyLong_FromLong(0);
PyArray_FillObjectArray(ret, zero);
Py_DECREF(zero);
if (PyErr_Occurred()) {
@@ -264,10 +243,10 @@ convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending)
for (i = 0; i < n && vals[i] < 0; i++);
if (i == n) {
- return PyUString_FromFormat("()%s", ending);
+ return PyUnicode_FromFormat("()%s", ending);
}
else {
- ret = PyUString_FromFormat("(%" NPY_INTP_FMT, vals[i++]);
+ ret = PyUnicode_FromFormat("(%" NPY_INTP_FMT, vals[i++]);
if (ret == NULL) {
return NULL;
}
@@ -275,10 +254,10 @@ convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending)
for (; i < n; ++i) {
if (vals[i] < 0) {
- tmp = PyUString_FromString(",newaxis");
+ tmp = PyUnicode_FromString(",newaxis");
}
else {
- tmp = PyUString_FromFormat(",%" NPY_INTP_FMT, vals[i]);
+ tmp = PyUnicode_FromFormat(",%" NPY_INTP_FMT, vals[i]);
}
if (tmp == NULL) {
Py_DECREF(ret);
@@ -292,10 +271,10 @@ convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending)
}
if (i == 1) {
- tmp = PyUString_FromFormat(",)%s", ending);
+ tmp = PyUnicode_FromFormat(",)%s", ending);
}
else {
- tmp = PyUString_FromFormat(")%s", ending);
+ tmp = PyUnicode_FromFormat(")%s", ending);
}
PyUString_ConcatAndDel(&ret, tmp);
return ret;
@@ -310,7 +289,7 @@ dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j)
*shape1 = NULL, *shape2 = NULL,
*shape1_i = NULL, *shape2_j = NULL;
- format = PyUString_FromString("shapes %s and %s not aligned:"
+ format = PyUnicode_FromString("shapes %s and %s not aligned:"
" %d (dim %d) != %d (dim %d)");
shape1 = convert_shape_to_string(PyArray_NDIM(a), PyArray_DIMS(a), "");
@@ -333,7 +312,7 @@ dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j)
goto end;
}
- errmsg = PyUString_Format(format, fmt_args);
+ errmsg = PyUnicode_Format(format, fmt_args);
if (errmsg != NULL) {
PyErr_SetObject(PyExc_ValueError, errmsg);
}
@@ -373,10 +352,7 @@ _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset)
*descr = (PyArray_Descr *)PyTuple_GET_ITEM(value, 0);
off = PyTuple_GET_ITEM(value, 1);
- if (PyInt_Check(off)) {
- *offset = PyInt_AsSsize_t(off);
- }
- else if (PyLong_Check(off)) {
+ if (PyLong_Check(off)) {
*offset = PyLong_AsSsize_t(off);
}
else {
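
The _unpack_field hunk above is representative of the mechanical Python-3-only cleanup running through this commit: PyInt_* calls become PyLong_*, PyString_*/PyUString_* become PyUnicode_*, and dual int/long branches collapse because Python 3 has a single integer type. A minimal embedded-interpreter sketch of the surviving path (link against libpython to run):

    #include <Python.h>

    int main(void)
    {
        Py_Initialize();
        PyObject *off = PyLong_FromLong(16);
        /* Python 3 has one integer type, so a lone PyLong_Check replaces
         * the removed PyInt_Check/PyLong_Check pair. */
        if (PyLong_Check(off)) {
            printf("offset = %zd\n", PyLong_AsSsize_t(off));
        }
        Py_DECREF(off);
        Py_Finalize();
        return 0;
    }
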
diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h
index 793cefaf8..ef9bc79da 100644
--- a/numpy/core/src/multiarray/common.h
+++ b/numpy/core/src/multiarray/common.h
@@ -2,7 +2,6 @@
#define _NPY_PRIVATE_COMMON_H_
#include "structmember.h"
#include <numpy/npy_common.h>
-#include <numpy/npy_cpu.h>
#include <numpy/ndarraytypes.h>
#include <limits.h>
#include "npy_import.h"
@@ -292,43 +291,6 @@ npy_memchr(char * haystack, char needle,
return p;
}
-/*
- * Convert NumPy stride to BLAS stride. Returns 0 if conversion cannot be done
- * (BLAS won't handle negative or zero strides the way we want).
- */
-static NPY_INLINE int
-blas_stride(npy_intp stride, unsigned itemsize)
-{
- /*
- * Should probably check pointer alignment also, but this may cause
- * problems if we require complex to be 16 byte aligned.
- */
- if (stride > 0 && npy_is_aligned((void *)stride, itemsize)) {
- stride /= itemsize;
-#ifndef HAVE_BLAS_ILP64
- if (stride <= INT_MAX) {
-#else
- if (stride <= NPY_MAX_INT64) {
-#endif
- return stride;
- }
- }
- return 0;
-}
-
-/*
- * Define a chunksize for CBLAS. CBLAS counts in integers.
- */
-#if NPY_MAX_INTP > INT_MAX
-# ifndef HAVE_BLAS_ILP64
-# define NPY_CBLAS_CHUNK (INT_MAX / 2 + 1)
-# else
-# define NPY_CBLAS_CHUNK (NPY_MAX_INT64 / 2 + 1)
-# endif
-#else
-# define NPY_CBLAS_CHUNK NPY_MAX_INTP
-#endif
-
#include "ucsnarrow.h"
/*
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index 36101fb81..db936d9e1 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -248,7 +248,7 @@ arr__monotonicity(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
NPY_END_THREADS
Py_DECREF(arr_x);
- return PyInt_FromLong(monotonic);
+ return PyLong_FromLong(monotonic);
}
/*
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index e41fdc8f1..dd18f71fd 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -6,7 +6,6 @@
#define _MULTIARRAYMODULE
#include "numpy/arrayobject.h"
#include "numpy/arrayscalars.h"
-#include "numpy/arrayobject.h"
#include "npy_config.h"
#include "npy_pycompat.h"
@@ -1152,7 +1151,7 @@ PyArray_IntTupleFromIntp(int len, npy_intp const *vals)
}
for (i = 0; i < len; i++) {
#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
- PyObject *o = PyInt_FromLong((long) vals[i]);
+ PyObject *o = PyLong_FromLong((long) vals[i]);
#else
PyObject *o = PyLong_FromLongLong((npy_longlong) vals[i]);
#endif
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index e7cbeaa77..29a2bb0e8 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -8,9 +8,6 @@
#define _MULTIARRAYMODULE
#include "numpy/arrayobject.h"
#include "numpy/arrayscalars.h"
-
-#include "npy_config.h"
-
#include "npy_pycompat.h"
#include "common.h"
@@ -248,13 +245,13 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
return -1;
}
PyTuple_SET_ITEM(tupobj,0,obj);
- obj = PyUString_FromString((const char *)format);
+ obj = PyUnicode_FromString((const char *)format);
if (obj == NULL) {
Py_DECREF(tupobj);
Py_DECREF(it);
return -1;
}
- strobj = PyUString_Format(obj, tupobj);
+ strobj = PyUnicode_Format(obj, tupobj);
Py_DECREF(obj);
Py_DECREF(tupobj);
if (strobj == NULL) {
@@ -403,7 +400,7 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
}
}
/* Python integer */
- else if (PyLong_Check(obj) || PyInt_Check(obj)) {
+ else if (PyLong_Check(obj)) {
/* Try long long before unsigned long long */
npy_longlong ll_v = PyLong_AsLongLong(obj);
if (error_converting(ll_v)) {
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index 94cd1e5fa..d9121707b 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -92,11 +92,14 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num)
PyObject *key;
PyObject *cobj;
- key = PyInt_FromLong(type_num);
+ key = PyLong_FromLong(type_num);
cobj = PyDict_GetItem(obj, key);
Py_DECREF(key);
- if (cobj && NpyCapsule_Check(cobj)) {
- castfunc = NpyCapsule_AsVoidPtr(cobj);
+ if (cobj && PyCapsule_CheckExact(cobj)) {
+ castfunc = PyCapsule_GetPointer(cobj, NULL);
+ if (castfunc == NULL) {
+ return NULL;
+ }
}
}
}
@@ -340,25 +343,6 @@ PyArray_CanCastSafely(int fromtype, int totype)
if (fromtype == totype) {
return 1;
}
- /* Special-cases for some types */
- switch (fromtype) {
- case NPY_DATETIME:
- case NPY_TIMEDELTA:
- case NPY_OBJECT:
- case NPY_VOID:
- return 0;
- case NPY_BOOL:
- return 1;
- }
- switch (totype) {
- case NPY_BOOL:
- case NPY_DATETIME:
- case NPY_TIMEDELTA:
- return 0;
- case NPY_OBJECT:
- case NPY_VOID:
- return 1;
- }
from = PyArray_DescrFromType(fromtype);
/*
@@ -1989,7 +1973,7 @@ PyArray_Zero(PyArrayObject *arr)
}
if (zero_obj == NULL) {
- zero_obj = PyInt_FromLong((long) 0);
+ zero_obj = PyLong_FromLong((long) 0);
if (zero_obj == NULL) {
return NULL;
}
@@ -2035,7 +2019,7 @@ PyArray_One(PyArrayObject *arr)
}
if (one_obj == NULL) {
- one_obj = PyInt_FromLong((long) 1);
+ one_obj = PyLong_FromLong((long) 1);
if (one_obj == NULL) {
return NULL;
}
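
The cast-function lookup above now uses the CPython capsule API directly: the legacy castdict stores bare function pointers in capsules created with a NULL name, and PyCapsule_GetPointer must be passed that same NULL name; on mismatch it sets an exception and returns NULL, which the added check propagates. A minimal round trip (embedded interpreter; the payload is an illustrative stub):

    #include <Python.h>

    static int the_castfunc_stub;  /* stand-in payload for a cast function */

    int main(void)
    {
        Py_Initialize();
        /* NULL-named capsule, matching how the castdict stores function
         * pointers; the name must also be NULL when reading it back. */
        PyObject *cobj = PyCapsule_New(&the_castfunc_stub, NULL, NULL);
        if (cobj && PyCapsule_CheckExact(cobj)) {
            void *castfunc = PyCapsule_GetPointer(cobj, NULL);
            printf("round trip ok: %d\n", castfunc == (void *)&the_castfunc_stub);
        }
        Py_XDECREF(cobj);
        Py_Finalize();
        return 0;
    }
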
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index dc451685a..956dfd3bb 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -300,12 +300,12 @@ _update_descr_and_dimensions(PyArray_Descr **des, npy_intp *newdims,
}
if (tuple) {
for (i = 0; i < numnew; i++) {
- mydim[i] = (npy_intp) PyInt_AsLong(
+ mydim[i] = (npy_intp) PyLong_AsLong(
PyTuple_GET_ITEM(old->subarray->shape, i));
}
}
else {
- mydim[0] = (npy_intp) PyInt_AsLong(old->subarray->shape);
+ mydim[0] = (npy_intp) PyLong_AsLong(old->subarray->shape);
}
if (newstrides) {
@@ -868,11 +868,14 @@ PyArray_NewFromDescr_int(
func = PyObject_GetAttr((PyObject *)fa, npy_ma_str_array_finalize);
if (func && func != Py_None) {
- if (NpyCapsule_Check(func)) {
+ if (PyCapsule_CheckExact(func)) {
/* A C-function is stored here */
PyArray_FinalizeFunc *cfunc;
- cfunc = NpyCapsule_AsVoidPtr(func);
+ cfunc = PyCapsule_GetPointer(func, NULL);
Py_DECREF(func);
+ if (cfunc == NULL) {
+ goto fail;
+ }
if (cfunc((PyArrayObject *)fa, obj) < 0) {
goto fail;
}
@@ -1575,6 +1578,7 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth,
return obj;
}
+
/*NUMPY_API
* steals reference to newtype --- acc. NULL
*/
@@ -1733,10 +1737,8 @@ NPY_NO_EXPORT PyObject *
PyArray_FromStructInterface(PyObject *input)
{
PyArray_Descr *thetype = NULL;
- char buf[40];
PyArrayInterface *inter;
PyObject *attr;
- PyArrayObject *ret;
char endian = NPY_NATBYTE;
attr = PyArray_LookupSpecial_OnInstance(input, "__array_struct__");
@@ -1747,7 +1749,7 @@ PyArray_FromStructInterface(PyObject *input)
return Py_NotImplemented;
}
}
- if (!NpyCapsule_Check(attr)) {
+ if (!PyCapsule_CheckExact(attr)) {
if (PyType_Check(input) && PyObject_HasAttrString(attr, "__get__")) {
/*
* If the input is a class `attr` should be a property-like object.
@@ -1759,7 +1761,10 @@ PyArray_FromStructInterface(PyObject *input)
}
goto fail;
}
- inter = NpyCapsule_AsVoidPtr(attr);
+ inter = PyCapsule_GetPointer(attr, NULL);
+ if (inter == NULL) {
+ goto fail;
+ }
if (inter->two != 2) {
goto fail;
}
@@ -1776,20 +1781,26 @@ PyArray_FromStructInterface(PyObject *input)
}
if (thetype == NULL) {
- PyOS_snprintf(buf, sizeof(buf),
- "%c%c%d", endian, inter->typekind, inter->itemsize);
- if (!(thetype=_array_typedescr_fromstr(buf))) {
+ PyObject *type_str = PyUnicode_FromFormat(
+ "%c%c%d", endian, inter->typekind, inter->itemsize);
+ if (type_str == NULL) {
+ Py_DECREF(attr);
+ return NULL;
+ }
+ int ok = PyArray_DescrConverter(type_str, &thetype);
+ Py_DECREF(type_str);
+ if (ok != NPY_SUCCEED) {
Py_DECREF(attr);
return NULL;
}
}
- ret = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ PyObject *ret = PyArray_NewFromDescrAndBase(
&PyArray_Type, thetype,
inter->nd, inter->shape, inter->strides, inter->data,
inter->flags, NULL, input);
Py_DECREF(attr);
- return (PyObject *)ret;
+ return ret;
fail:
PyErr_SetString(PyExc_ValueError, "invalid __array_struct__");
@@ -1803,41 +1814,21 @@ PyArray_FromStructInterface(PyObject *input)
*/
NPY_NO_EXPORT int
_is_default_descr(PyObject *descr, PyObject *typestr) {
- PyObject *tuple, *name, *typestr2;
- PyObject *tmp = NULL;
- int ret = 0;
-
if (!PyList_Check(descr) || PyList_GET_SIZE(descr) != 1) {
return 0;
}
- tuple = PyList_GET_ITEM(descr, 0);
+ PyObject *tuple = PyList_GET_ITEM(descr, 0);
if (!(PyTuple_Check(tuple) && PyTuple_GET_SIZE(tuple) == 2)) {
return 0;
}
- name = PyTuple_GET_ITEM(tuple, 0);
+ PyObject *name = PyTuple_GET_ITEM(tuple, 0);
if (!(PyUnicode_Check(name) && PyUnicode_GetLength(name) == 0)) {
return 0;
}
- typestr2 = PyTuple_GET_ITEM(tuple, 1);
- /* Allow unicode type strings */
- if (PyUnicode_Check(typestr2)) {
- tmp = PyUnicode_AsASCIIString(typestr2);
- if (tmp == NULL) {
- return 0;
- }
- typestr2 = tmp;
- }
- if (PyBytes_Check(typestr2) &&
- PyObject_RichCompareBool(typestr, typestr2, Py_EQ)) {
- ret = 1;
- }
- Py_XDECREF(tmp);
-
- return ret;
+ PyObject *typestr2 = PyTuple_GET_ITEM(tuple, 1);
+ return PyObject_RichCompareBool(typestr, typestr2, Py_EQ);
}
-#define PyIntOrLong_Check(obj) (PyInt_Check(obj) || PyLong_Check(obj))
-
/*NUMPY_API*/
NPY_NO_EXPORT PyObject *
PyArray_FromInterface(PyObject *origin)
@@ -1849,7 +1840,7 @@ PyArray_FromInterface(PyObject *origin)
PyArray_Descr *dtype = NULL;
char *data = NULL;
Py_buffer view;
- int res, i, n;
+ int i, n;
npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS];
int dataflags = NPY_ARRAY_BEHAVED;
@@ -1889,26 +1880,15 @@ PyArray_FromInterface(PyObject *origin)
return NULL;
}
- /* Allow unicode type strings */
- if (PyUnicode_Check(attr)) {
- PyObject *tmp = PyUnicode_AsASCIIString(attr);
- if (tmp == NULL) {
- goto fail;
- }
- attr = tmp;
- }
- else {
- Py_INCREF(attr);
- }
-
- if (!PyBytes_Check(attr)) {
+ /* allow bytes for backwards compatibility */
+ if (!PyBytes_Check(attr) && !PyUnicode_Check(attr)) {
PyErr_SetString(PyExc_TypeError,
"__array_interface__ typestr must be a string");
goto fail;
}
+
/* Get dtype from type string */
- dtype = _array_typedescr_fromstr(PyString_AS_STRING(attr));
- if (dtype == NULL) {
+ if (PyArray_DescrConverter(attr, &dtype) != NPY_SUCCEED) {
goto fail;
}
@@ -1922,16 +1902,24 @@ PyArray_FromInterface(PyObject *origin)
goto fail;
}
PyArray_Descr *new_dtype = NULL;
+ if (descr != NULL) {
+ int is_default = _is_default_descr(descr, attr);
+ if (is_default < 0) {
+ goto fail;
+ }
+ if (!is_default) {
+ if (PyArray_DescrConverter2(descr, &new_dtype) != NPY_SUCCEED) {
+ goto fail;
+ }
+ if (new_dtype != NULL) {
+ Py_DECREF(dtype);
+ dtype = new_dtype;
+ }
+ }
- if (descr != NULL && !_is_default_descr(descr, attr) &&
- PyArray_DescrConverter2(descr, &new_dtype) == NPY_SUCCEED &&
- new_dtype != NULL) {
- Py_DECREF(dtype);
- dtype = new_dtype;
}
- }
- Py_DECREF(attr); /* Pairs with the unicode handling above */
+ }
/* Get shape tuple from interface specification */
attr = _PyDict_GetItemStringWithError(iface, "shape");
@@ -1990,22 +1978,16 @@ PyArray_FromInterface(PyObject *origin)
goto fail;
}
dataptr = PyTuple_GET_ITEM(attr, 0);
- if (PyString_Check(dataptr)) {
- res = sscanf(PyString_AsString(dataptr),
- "%p", (void **)&data);
- if (res < 1) {
- PyErr_SetString(PyExc_TypeError,
- "__array_interface__ data string cannot be converted");
+ if (PyLong_Check(dataptr)) {
+ data = PyLong_AsVoidPtr(dataptr);
+ if (data == NULL && PyErr_Occurred()) {
goto fail;
}
}
- else if (PyIntOrLong_Check(dataptr)) {
- data = PyLong_AsVoidPtr(dataptr);
- }
else {
PyErr_SetString(PyExc_TypeError,
"first element of __array_interface__ data tuple "
- "must be integer or string.");
+ "must be an integer.");
goto fail;
}
if (PyObject_IsTrue(PyTuple_GET_ITEM(attr,1))) {
@@ -2265,7 +2247,10 @@ PyArray_EnsureAnyArray(PyObject *op)
return PyArray_EnsureArray(op);
}
-/* TODO: Put the order parameter in PyArray_CopyAnyInto and remove this */
+/*
+ * Private implementation of PyArray_CopyAnyInto with an additional order
+ * parameter.
+ */
NPY_NO_EXPORT int
PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order)
{
@@ -2391,16 +2376,21 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order)
src_count = *src_countptr;
dst_data = dst_dataptr[0];
src_data = src_dataptr[0];
+ int res = 0;
for(;;) {
/* Transfer the biggest amount that fits both */
count = (src_count < dst_count) ? src_count : dst_count;
- stransfer(dst_data, dst_stride,
- src_data, src_stride,
- count, src_itemsize, transferdata);
+ if (stransfer(
+ dst_data, dst_stride, src_data, src_stride,
+ count, src_itemsize, transferdata) < 0) {
+ res = -1;
+ break;
+ }
/* If we exhausted the dst block, refresh it */
if (dst_count == count) {
- if (!dst_iternext(dst_iter)) {
+ res = dst_iternext(dst_iter);
+ if (!res) {
break;
}
dst_count = *dst_countptr;
@@ -2413,7 +2403,8 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order)
/* If we exhausted the src block, refresh it */
if (src_count == count) {
- if (!src_iternext(src_iter)) {
+ res = src_iternext(src_iter);
+ if (!res) {
break;
}
src_count = *src_countptr;
@@ -2430,8 +2421,11 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order)
NPY_AUXDATA_FREE(transferdata);
NpyIter_Deallocate(dst_iter);
NpyIter_Deallocate(src_iter);
-
- return PyErr_Occurred() ? -1 : 0;
+ if (res > 0) {
+ /* The iteration stopped successfully, do not report an error */
+ return 0;
+ }
+ return res;
}
/*NUMPY_API
@@ -2741,7 +2735,7 @@ _calc_length(PyObject *start, PyObject *stop, PyObject *step, PyObject **next, i
return -1;
}
- zero = PyInt_FromLong(0);
+ zero = PyLong_FromLong(0);
if (!zero) {
Py_DECREF(*next);
*next = NULL;
@@ -2886,14 +2880,14 @@ PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject *step, PyArray_Descr
Py_INCREF(dtype);
}
if (!step || step == Py_None) {
- step = PyInt_FromLong(1);
+ step = PyLong_FromLong(1);
}
else {
Py_XINCREF(step);
}
if (!stop || stop == Py_None) {
stop = start;
- start = PyInt_FromLong(0);
+ start = PyLong_FromLong(0);
}
else {
Py_INCREF(start);
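
After the __array_interface__ hunks above, the data pointer may only be given as a Python integer, converted back with PyLong_AsVoidPtr; since a legitimate result could be NULL, the error check also consults PyErr_Occurred(). A short round trip showing the integer form of a pointer (embedded interpreter):

    #include <Python.h>

    int main(void)
    {
        Py_Initialize();
        double buf[4] = {0.0, 1.0, 2.0, 3.0};
        /* The integer form of a data pointer, as __array_interface__ now
         * requires: PyLong_FromVoidPtr / PyLong_AsVoidPtr round-trip it. */
        PyObject *dataptr = PyLong_FromVoidPtr(buf);
        void *data = PyLong_AsVoidPtr(dataptr);
        /* NULL alone is ambiguous, hence the PyErr_Occurred() check above. */
        printf("ok: %d\n", data == (void *)buf && !PyErr_Occurred());
        Py_DECREF(dataptr);
        Py_Finalize();
        return 0;
    }
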
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 8f3948c23..f2225809a 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -1435,14 +1435,14 @@ raise_if_datetime64_metadata_cast_error(char *object_type,
}
else {
PyObject *errmsg;
- errmsg = PyUString_FromFormat("Cannot cast %s "
+ errmsg = PyUnicode_FromFormat("Cannot cast %s "
"from metadata ", object_type);
errmsg = append_metastr_to_string(src_meta, 0, errmsg);
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" to "));
+ PyUnicode_FromString(" to "));
errmsg = append_metastr_to_string(dst_meta, 0, errmsg);
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromFormat(" according to the rule %s",
+ PyUnicode_FromFormat(" according to the rule %s",
npy_casting_to_string(casting)));
PyErr_SetObject(PyExc_TypeError, errmsg);
Py_DECREF(errmsg);
@@ -1467,14 +1467,14 @@ raise_if_timedelta64_metadata_cast_error(char *object_type,
}
else {
PyObject *errmsg;
- errmsg = PyUString_FromFormat("Cannot cast %s "
+ errmsg = PyUnicode_FromFormat("Cannot cast %s "
"from metadata ", object_type);
errmsg = append_metastr_to_string(src_meta, 0, errmsg);
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" to "));
+ PyUnicode_FromString(" to "));
errmsg = append_metastr_to_string(dst_meta, 0, errmsg);
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromFormat(" according to the rule %s",
+ PyUnicode_FromFormat(" according to the rule %s",
npy_casting_to_string(casting)));
PyErr_SetObject(PyExc_TypeError, errmsg);
Py_DECREF(errmsg);
@@ -1601,15 +1601,15 @@ compute_datetime_metadata_greatest_common_divisor(
incompatible_units: {
PyObject *errmsg;
- errmsg = PyUString_FromString("Cannot get "
+ errmsg = PyUnicode_FromString("Cannot get "
"a common metadata divisor for "
"NumPy datetime metadata ");
errmsg = append_metastr_to_string(meta1, 0, errmsg);
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" and "));
+ PyUnicode_FromString(" and "));
errmsg = append_metastr_to_string(meta2, 0, errmsg);
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" because they have "
+ PyUnicode_FromString(" because they have "
"incompatible nonlinear base time units"));
PyErr_SetObject(PyExc_TypeError, errmsg);
Py_DECREF(errmsg);
@@ -1617,12 +1617,12 @@ incompatible_units: {
}
units_overflow: {
PyObject *errmsg;
- errmsg = PyUString_FromString("Integer overflow "
+ errmsg = PyUnicode_FromString("Integer overflow "
"getting a common metadata divisor for "
"NumPy datetime metadata ");
errmsg = append_metastr_to_string(meta1, 0, errmsg);
PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" and "));
+ PyUnicode_FromString(" and "));
errmsg = append_metastr_to_string(meta2, 0, errmsg);
PyErr_SetObject(PyExc_OverflowError, errmsg);
Py_DECREF(errmsg);
@@ -1717,6 +1717,10 @@ parse_datetime_unit_from_string(char const *str, Py_ssize_t len, char const *met
return NPY_FR_as;
}
}
+ else if (len == 3 && !strncmp(str, "\xce\xbcs", 3)) {
+ /* greek small letter mu, utf8-encoded */
+ return NPY_FR_us;
+ }
else if (len == 7 && !strncmp(str, "generic", 7)) {
return NPY_FR_GENERIC;
}
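
The new branch above accepts the UTF-8 spelling of "μs" as microseconds: GREEK SMALL LETTER MU encodes as the two bytes 0xCE 0xBC, so together with 's' the unit string is exactly three bytes long. A quick check of that byte-level comparison:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
        const char *str = "\xce\xbcs";          /* "μs" encoded as UTF-8 */
        size_t len = strlen(str);
        /* Mirrors the new branch: 3 bytes total, mu is a 2-byte sequence. */
        assert(len == 3);
        assert(!strncmp(str, "\xce\xbcs", 3));  /* would map to NPY_FR_us */
        return 0;
    }
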
@@ -1747,9 +1751,9 @@ convert_datetime_metadata_to_tuple(PyArray_DatetimeMetaData *meta)
}
PyTuple_SET_ITEM(dt_tuple, 0,
- PyUString_FromString(_datetime_strings[meta->base]));
+ PyUnicode_FromString(_datetime_strings[meta->base]));
PyTuple_SET_ITEM(dt_tuple, 1,
- PyInt_FromLong(meta->num));
+ PyLong_FromLong(meta->num));
return dt_tuple;
}
@@ -1764,22 +1768,16 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
PyArray_DatetimeMetaData *out_meta,
npy_bool from_pickle)
{
- char *basestr = NULL;
- Py_ssize_t len = 0, tuple_size;
int den = 1;
- PyObject *unit_str = NULL;
if (!PyTuple_Check(tuple)) {
- PyObject *errmsg;
- errmsg = PyUString_FromString("Require tuple for tuple to NumPy "
- "datetime metadata conversion, not ");
- PyUString_ConcatAndDel(&errmsg, PyObject_Repr(tuple));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
+ PyErr_Format(PyExc_TypeError,
+ "Require tuple for tuple to NumPy "
+ "datetime metadata conversion, not %R", tuple);
return -1;
}
- tuple_size = PyTuple_GET_SIZE(tuple);
+ Py_ssize_t tuple_size = PyTuple_GET_SIZE(tuple);
if (tuple_size < 2 || tuple_size > 4) {
PyErr_SetString(PyExc_TypeError,
"Require tuple of size 2 to 4 for "
@@ -1787,18 +1785,22 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
return -1;
}
- unit_str = PyTuple_GET_ITEM(tuple, 0);
- Py_INCREF(unit_str);
- if (PyUnicode_Check(unit_str)) {
- /* Allow unicode format strings: convert to bytes */
- PyObject *tmp = PyUnicode_AsASCIIString(unit_str);
- Py_DECREF(unit_str);
+ PyObject *unit_str = PyTuple_GET_ITEM(tuple, 0);
+ if (PyBytes_Check(unit_str)) {
+ /* Allow bytes format strings: convert to unicode */
+ PyObject *tmp = PyUnicode_FromEncodedObject(unit_str, NULL, NULL);
if (tmp == NULL) {
return -1;
}
unit_str = tmp;
}
- if (PyBytes_AsStringAndSize(unit_str, &basestr, &len) < 0) {
+ else {
+ Py_INCREF(unit_str);
+ }
+
+ Py_ssize_t len;
+ char const *basestr = PyUnicode_AsUTF8AndSize(unit_str, &len);
+ if (basestr == NULL) {
Py_DECREF(unit_str);
return -1;
}
@@ -1812,7 +1814,7 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
Py_DECREF(unit_str);
/* Convert the values to longs */
- out_meta->num = PyInt_AsLong(PyTuple_GET_ITEM(tuple, 1));
+ out_meta->num = PyLong_AsLong(PyTuple_GET_ITEM(tuple, 1));
if (error_converting(out_meta->num)) {
return -1;
}
@@ -1837,11 +1839,10 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
if (from_pickle) {
/* if (event == 1) */
PyObject *one = PyLong_FromLong(1);
- int equal_one;
if (one == NULL) {
return -1;
}
- equal_one = PyObject_RichCompareBool(event, one, Py_EQ);
+ int equal_one = PyObject_RichCompareBool(event, one, Py_EQ);
Py_DECREF(one);
if (equal_one == -1) {
return -1;
@@ -1868,7 +1869,7 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
return -1;
}
}
- den = PyInt_AsLong(PyTuple_GET_ITEM(tuple, 2));
+ den = PyLong_AsLong(PyTuple_GET_ITEM(tuple, 2));
if (error_converting(den)) {
return -1;
}
@@ -1900,26 +1901,23 @@ NPY_NO_EXPORT int
convert_pyobject_to_datetime_metadata(PyObject *obj,
PyArray_DatetimeMetaData *out_meta)
{
- PyObject *ascii = NULL;
- char *str = NULL;
- Py_ssize_t len = 0;
-
if (PyTuple_Check(obj)) {
return convert_datetime_metadata_tuple_to_datetime_metadata(
obj, out_meta, NPY_FALSE);
}
- /* Get an ASCII string */
- if (PyUnicode_Check(obj)) {
- /* Allow unicode format strings: convert to bytes */
- ascii = PyUnicode_AsASCIIString(obj);
- if (ascii == NULL) {
+    /* Get a UTF-8 string */
+ PyObject *utf8 = NULL;
+ if (PyBytes_Check(obj)) {
+ /* Allow bytes format strings: convert to unicode */
+ utf8 = PyUnicode_FromEncodedObject(obj, NULL, NULL);
+ if (utf8 == NULL) {
return -1;
}
}
- else if (PyBytes_Check(obj)) {
- ascii = obj;
- Py_INCREF(ascii);
+ else if (PyUnicode_Check(obj)) {
+ utf8 = obj;
+ Py_INCREF(utf8);
}
else {
PyErr_SetString(PyExc_TypeError,
@@ -1927,24 +1925,26 @@ convert_pyobject_to_datetime_metadata(PyObject *obj,
return -1;
}
- if (PyBytes_AsStringAndSize(ascii, &str, &len) < 0) {
- Py_DECREF(ascii);
+ Py_ssize_t len = 0;
+ char const *str = PyUnicode_AsUTF8AndSize(utf8, &len);
+ if (str == NULL) {
+ Py_DECREF(utf8);
return -1;
}
if (len > 0 && str[0] == '[') {
int r = parse_datetime_metadata_from_metastr(str, len, out_meta);
- Py_DECREF(ascii);
+ Py_DECREF(utf8);
return r;
}
else {
if (parse_datetime_extended_unit_from_string(str, len,
NULL, out_meta) < 0) {
- Py_DECREF(ascii);
+ Py_DECREF(utf8);
return -1;
}
- Py_DECREF(ascii);
+ Py_DECREF(utf8);
return 0;
}
}
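
Several datetime paths in this commit invert the old normalization: instead of coercing everything to ASCII bytes, bytes input is decoded to str (PyUnicode_FromEncodedObject with a NULL encoding means UTF-8), and PyUnicode_AsUTF8AndSize then exposes a const UTF-8 buffer that stays valid while the unicode object is alive. The recurring shape, condensed into one sketch (the as_utf8 helper name is illustrative; embedded interpreter):

    #include <Python.h>

    /* Illustrative helper: normalize bytes-or-str to a UTF-8 buffer.
     * On success *owner holds a reference that must outlive the buffer. */
    static const char *
    as_utf8(PyObject *obj, PyObject **owner, Py_ssize_t *len)
    {
        if (PyBytes_Check(obj)) {
            *owner = PyUnicode_FromEncodedObject(obj, NULL, NULL);
            if (*owner == NULL) {
                return NULL;
            }
        }
        else {
            Py_INCREF(obj);
            *owner = obj;
        }
        const char *s = PyUnicode_AsUTF8AndSize(*owner, len);
        if (s == NULL) {
            Py_CLEAR(*owner);
        }
        return s;
    }

    int main(void)
    {
        Py_Initialize();
        PyObject *owner = NULL;
        Py_ssize_t len;
        PyObject *obj = PyBytes_FromString("2020-09-10");
        const char *s = as_utf8(obj, &owner, &len);
        printf("%s (%zd bytes)\n", s, len);
        Py_XDECREF(owner);
        Py_DECREF(obj);
        Py_Finalize();
        return 0;
    }
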
@@ -1973,7 +1973,7 @@ append_metastr_to_string(PyArray_DatetimeMetaData *meta,
if (meta->base == NPY_FR_GENERIC) {
/* Without brackets, give a string "generic" */
if (skip_brackets) {
- PyUString_ConcatAndDel(&ret, PyUString_FromString("generic"));
+ PyUString_ConcatAndDel(&ret, PyUnicode_FromString("generic"));
return ret;
}
/* But with brackets, append nothing */
@@ -1994,18 +1994,18 @@ append_metastr_to_string(PyArray_DatetimeMetaData *meta,
if (num == 1) {
if (skip_brackets) {
- res = PyUString_FromFormat("%s", basestr);
+ res = PyUnicode_FromFormat("%s", basestr);
}
else {
- res = PyUString_FromFormat("[%s]", basestr);
+ res = PyUnicode_FromFormat("[%s]", basestr);
}
}
else {
if (skip_brackets) {
- res = PyUString_FromFormat("%d%s", num, basestr);
+ res = PyUnicode_FromFormat("%d%s", num, basestr);
}
else {
- res = PyUString_FromFormat("[%d%s]", num, basestr);
+ res = PyUnicode_FromFormat("[%d%s]", num, basestr);
}
}
@@ -2108,7 +2108,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (tmp == NULL) {
return -1;
}
- out->year = PyInt_AsLong(tmp);
+ out->year = PyLong_AsLong(tmp);
if (error_converting(out->year)) {
Py_DECREF(tmp);
return -1;
@@ -2120,7 +2120,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (tmp == NULL) {
return -1;
}
- out->month = PyInt_AsLong(tmp);
+ out->month = PyLong_AsLong(tmp);
if (error_converting(out->month)) {
Py_DECREF(tmp);
return -1;
@@ -2132,7 +2132,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (tmp == NULL) {
return -1;
}
- out->day = PyInt_AsLong(tmp);
+ out->day = PyLong_AsLong(tmp);
if (error_converting(out->day)) {
Py_DECREF(tmp);
return -1;
@@ -2166,7 +2166,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (tmp == NULL) {
return -1;
}
- out->hour = PyInt_AsLong(tmp);
+ out->hour = PyLong_AsLong(tmp);
if (error_converting(out->hour)) {
Py_DECREF(tmp);
return -1;
@@ -2178,7 +2178,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (tmp == NULL) {
return -1;
}
- out->min = PyInt_AsLong(tmp);
+ out->min = PyLong_AsLong(tmp);
if (error_converting(out->min)) {
Py_DECREF(tmp);
return -1;
@@ -2190,7 +2190,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (tmp == NULL) {
return -1;
}
- out->sec = PyInt_AsLong(tmp);
+ out->sec = PyLong_AsLong(tmp);
if (error_converting(out->sec)) {
Py_DECREF(tmp);
return -1;
@@ -2202,7 +2202,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (tmp == NULL) {
return -1;
}
- out->us = PyInt_AsLong(tmp);
+ out->us = PyLong_AsLong(tmp);
if (error_converting(out->us)) {
Py_DECREF(tmp);
return -1;
@@ -2350,32 +2350,33 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
NPY_CASTING casting, npy_datetime *out)
{
if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
- PyObject *bytes = NULL;
- char *str = NULL;
- Py_ssize_t len = 0;
- npy_datetimestruct dts;
- NPY_DATETIMEUNIT bestunit = NPY_FR_ERROR;
+ PyObject *utf8 = NULL;
- /* Convert to an ASCII string for the date parser */
- if (PyUnicode_Check(obj)) {
- bytes = PyUnicode_AsASCIIString(obj);
- if (bytes == NULL) {
+    /* Convert to a UTF-8 string for the date parser */
+ if (PyBytes_Check(obj)) {
+ utf8 = PyUnicode_FromEncodedObject(obj, NULL, NULL);
+ if (utf8 == NULL) {
return -1;
}
}
else {
- bytes = obj;
- Py_INCREF(bytes);
+ utf8 = obj;
+ Py_INCREF(utf8);
}
- if (PyBytes_AsStringAndSize(bytes, &str, &len) < 0) {
- Py_DECREF(bytes);
+
+ Py_ssize_t len = 0;
+ char const *str = PyUnicode_AsUTF8AndSize(utf8, &len);
+ if (str == NULL) {
+ Py_DECREF(utf8);
return -1;
}
/* Parse the ISO date */
+ npy_datetimestruct dts;
+ NPY_DATETIMEUNIT bestunit = NPY_FR_ERROR;
if (parse_iso_8601_datetime(str, len, meta->base, casting,
&dts, &bestunit, NULL) < 0) {
- Py_DECREF(bytes);
+ Py_DECREF(utf8);
return -1;
}
@@ -2386,15 +2387,15 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
}
if (convert_datetimestruct_to_datetime(meta, &dts, out) < 0) {
- Py_DECREF(bytes);
+ Py_DECREF(utf8);
return -1;
}
- Py_DECREF(bytes);
+ Py_DECREF(utf8);
return 0;
}
/* Do no conversion on raw integers */
- else if (PyInt_Check(obj) || PyLong_Check(obj)) {
+ else if (PyLong_Check(obj)) {
/* Don't allow conversion from an integer without specifying a unit */
if (meta->base == NPY_FR_ERROR || meta->base == NPY_FR_GENERIC) {
PyErr_SetString(PyExc_ValueError, "Converting an integer to a "
@@ -2544,24 +2545,25 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
NPY_CASTING casting, npy_timedelta *out)
{
if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
- PyObject *bytes = NULL;
- char *str = NULL;
- Py_ssize_t len = 0;
+ PyObject *utf8 = NULL;
int succeeded = 0;
- /* Convert to an ASCII string for the date parser */
- if (PyUnicode_Check(obj)) {
- bytes = PyUnicode_AsASCIIString(obj);
- if (bytes == NULL) {
+    /* Convert to a UTF-8 string for the date parser */
+ if (PyBytes_Check(obj)) {
+ utf8 = PyUnicode_FromEncodedObject(obj, NULL, NULL);
+ if (utf8 == NULL) {
return -1;
}
}
else {
- bytes = obj;
- Py_INCREF(bytes);
+ utf8 = obj;
+ Py_INCREF(utf8);
}
- if (PyBytes_AsStringAndSize(bytes, &str, &len) < 0) {
- Py_DECREF(bytes);
+
+ Py_ssize_t len = 0;
+ char const *str = PyUnicode_AsUTF8AndSize(utf8, &len);
+ if (str == NULL) {
+ Py_DECREF(utf8);
return -1;
}
@@ -2582,7 +2584,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
succeeded = 1;
}
}
- Py_DECREF(bytes);
+ Py_DECREF(utf8);
if (succeeded) {
/* Use generic units if none was specified */
@@ -2595,7 +2597,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
}
}
/* Do no conversion on raw integers */
- else if (PyInt_Check(obj) || PyLong_Check(obj)) {
+ else if (PyLong_Check(obj)) {
/* Use the default unit if none was specified */
if (meta->base == NPY_FR_ERROR) {
meta->base = NPY_DATETIME_DEFAULTUNIT;
@@ -2699,7 +2701,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
if (tmp == NULL) {
return -1;
}
- seconds = PyInt_AsLong(tmp);
+ seconds = PyLong_AsLong(tmp);
if (error_converting(seconds)) {
Py_DECREF(tmp);
return -1;
@@ -2711,7 +2713,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
if (tmp == NULL) {
return -1;
}
- useconds = PyInt_AsLong(tmp);
+ useconds = PyLong_AsLong(tmp);
if (error_converting(useconds)) {
Py_DECREF(tmp);
return -1;
@@ -3320,8 +3322,7 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step,
type_nums[2] = NPY_TIMEDELTA;
}
else {
- if (PyInt_Check(objs[1]) ||
- PyLong_Check(objs[1]) ||
+ if (PyLong_Check(objs[1]) ||
PyArray_IsScalar(objs[1], Integer) ||
is_any_numpy_timedelta(objs[1])) {
type_nums[1] = NPY_TIMEDELTA;
diff --git a/numpy/core/src/multiarray/datetime_busdaycal.c b/numpy/core/src/multiarray/datetime_busdaycal.c
index 6936a803f..2374eaa63 100644
--- a/numpy/core/src/multiarray/datetime_busdaycal.c
+++ b/numpy/core/src/multiarray/datetime_busdaycal.c
@@ -168,7 +168,7 @@ invalid_weekmask_string:
return 0;
}
- val = PyInt_AsLong(f);
+ val = PyLong_AsLong(f);
if (error_converting(val)) {
Py_DECREF(f);
Py_DECREF(obj);
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 67d57975b..95597b812 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -386,7 +386,7 @@ _convert_from_tuple(PyObject *obj, int align)
}
for (int i=0; i < shape.len; i++) {
PyTuple_SET_ITEM(newdescr->subarray->shape, i,
- PyInt_FromLong((long)shape.ptr[i]));
+ PyLong_FromLong((long)shape.ptr[i]));
if (PyTuple_GET_ITEM(newdescr->subarray->shape, i) == NULL) {
Py_DECREF(newdescr);
@@ -472,7 +472,7 @@ _convert_from_array_descr(PyObject *obj, int align)
if (PyUnicode_GetLength(name) == 0) {
Py_DECREF(name);
if (title == NULL) {
- name = PyUString_FromFormat("f%d", i);
+ name = PyUnicode_FromFormat("f%d", i);
if (name == NULL) {
goto fail;
}
@@ -537,7 +537,7 @@ _convert_from_array_descr(PyObject *obj, int align)
goto fail;
}
PyTuple_SET_ITEM(tup, 0, (PyObject *)conv);
- PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize));
+ PyTuple_SET_ITEM(tup, 1, PyLong_FromLong((long) totalsize));
/*
* Title can be "meta-data". Only insert it
@@ -660,7 +660,7 @@ _convert_from_list(PyObject *obj, int align)
}
maxalign = PyArray_MAX(maxalign, _align);
}
- PyObject *size_obj = PyInt_FromLong((long) totalsize);
+ PyObject *size_obj = PyLong_FromLong((long) totalsize);
if (!size_obj) {
Py_DECREF(conv);
goto fail;
@@ -673,7 +673,7 @@ _convert_from_list(PyObject *obj, int align)
}
PyTuple_SET_ITEM(tup, 0, (PyObject *)conv);
PyTuple_SET_ITEM(tup, 1, size_obj);
- PyObject *key = PyUString_FromFormat("f%d", i);
+ PyObject *key = PyUnicode_FromFormat("f%d", i);
if (!key) {
Py_DECREF(tup);
goto fail;
@@ -1112,7 +1112,7 @@ _convert_from_dict(PyObject *obj, int align)
/* Build item to insert (descr, offset, [title])*/
int len = 2;
PyObject *title = NULL;
- PyObject *ind = PyInt_FromLong(i);
+ PyObject *ind = PyLong_FromLong(i);
if (titles) {
title=PyObject_GetItem(titles, ind);
if (title && title != Py_None) {
@@ -1166,7 +1166,7 @@ _convert_from_dict(PyObject *obj, int align)
goto fail;
}
- PyTuple_SET_ITEM(tup, 1, PyInt_FromLong(offset));
+ PyTuple_SET_ITEM(tup, 1, PyLong_FromLong(offset));
/* Flag whether the fields are specified out of order */
if (offset < totalsize) {
has_out_of_order_fields = 1;
@@ -1190,7 +1190,7 @@ _convert_from_dict(PyObject *obj, int align)
if (align && _align > 1) {
totalsize = NPY_NEXT_ALIGNED_OFFSET(totalsize, _align);
}
- PyTuple_SET_ITEM(tup, 1, PyInt_FromLong(totalsize));
+ PyTuple_SET_ITEM(tup, 1, PyLong_FromLong(totalsize));
totalsize += newdescr->elsize;
}
if (len == 3) {
@@ -1887,10 +1887,10 @@ arraydescr_protocol_typestr_get(PyArray_Descr *self)
size >>= 2;
}
if (self->type_num == NPY_OBJECT) {
- ret = PyUString_FromFormat("%c%c", endian, basic_);
+ ret = PyUnicode_FromFormat("%c%c", endian, basic_);
}
else {
- ret = PyUString_FromFormat("%c%c%d", endian, basic_, size);
+ ret = PyUnicode_FromFormat("%c%c%d", endian, basic_, size);
}
if (PyDataType_ISDATETIME(self)) {
PyArray_DatetimeMetaData *meta;
@@ -1950,7 +1950,7 @@ arraydescr_ndim_get(PyArray_Descr *self)
Py_ssize_t ndim;
if (!PyDataType_HASSUBARRAY(self)) {
- return PyInt_FromLong(0);
+ return PyLong_FromLong(0);
}
/*
@@ -1958,7 +1958,7 @@ arraydescr_ndim_get(PyArray_Descr *self)
* for tuple argument
*/
ndim = PyTuple_Size(self->subarray->shape);
- return PyInt_FromLong(ndim);
+ return PyLong_FromLong(ndim);
}
@@ -1974,7 +1974,7 @@ arraydescr_protocol_descr_get(PyArray_Descr *self)
if (dobj == NULL) {
return NULL;
}
- PyTuple_SET_ITEM(dobj, 0, PyUString_FromString(""));
+ PyTuple_SET_ITEM(dobj, 0, PyUnicode_FromString(""));
PyTuple_SET_ITEM(dobj, 1, arraydescr_protocol_typestr_get(self));
res = PyList_New(1);
if (res == NULL) {
@@ -2010,7 +2010,7 @@ arraydescr_isbuiltin_get(PyArray_Descr *self)
if (PyTypeNum_ISUSERDEF(self->type_num)) {
val = 2;
}
- return PyInt_FromLong(val);
+ return PyLong_FromLong(val);
}
static int
@@ -2391,11 +2391,11 @@ _get_pickleabletype_from_datetime_metadata(PyArray_Descr *dtype)
PyTuple_SET_ITEM(dt_tuple, 0,
PyBytes_FromString(_datetime_strings[meta->base]));
PyTuple_SET_ITEM(dt_tuple, 1,
- PyInt_FromLong(meta->num));
+ PyLong_FromLong(meta->num));
PyTuple_SET_ITEM(dt_tuple, 2,
- PyInt_FromLong(1));
+ PyLong_FromLong(1));
PyTuple_SET_ITEM(dt_tuple, 3,
- PyInt_FromLong(1));
+ PyLong_FromLong(1));
PyTuple_SET_ITEM(ret, 1, dt_tuple);
@@ -2450,7 +2450,7 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args))
if (self->type_num == NPY_UNICODE) {
elsize >>= 2;
}
- obj = PyUString_FromFormat("%c%d",self->kind, elsize);
+ obj = PyUnicode_FromFormat("%c%d",self->kind, elsize);
}
PyTuple_SET_ITEM(ret, 1, Py_BuildValue("(NOO)", obj, Py_False, Py_True));
@@ -2468,7 +2468,7 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args))
if (PyDataType_ISDATETIME(self)) {
PyObject *newobj;
state = PyTuple_New(9);
- PyTuple_SET_ITEM(state, 0, PyInt_FromLong(version));
+ PyTuple_SET_ITEM(state, 0, PyLong_FromLong(version));
/*
* newobj is a tuple of the Python metadata dictionary
* and tuple of date_time info (str, num)
@@ -2483,16 +2483,16 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args))
}
else if (self->metadata) {
state = PyTuple_New(9);
- PyTuple_SET_ITEM(state, 0, PyInt_FromLong(version));
+ PyTuple_SET_ITEM(state, 0, PyLong_FromLong(version));
Py_INCREF(self->metadata);
PyTuple_SET_ITEM(state, 8, self->metadata);
}
else { /* Use version 3 pickle format */
state = PyTuple_New(8);
- PyTuple_SET_ITEM(state, 0, PyInt_FromLong(3));
+ PyTuple_SET_ITEM(state, 0, PyLong_FromLong(3));
}
- PyTuple_SET_ITEM(state, 1, PyUString_FromFormat("%c", endian));
+ PyTuple_SET_ITEM(state, 1, PyUnicode_FromFormat("%c", endian));
PyTuple_SET_ITEM(state, 2, arraydescr_subdescr_get(self));
if (PyDataType_HASFIELDS(self)) {
Py_INCREF(self->names);
@@ -2516,9 +2516,9 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args))
elsize = -1;
alignment = -1;
}
- PyTuple_SET_ITEM(state, 5, PyInt_FromLong(elsize));
- PyTuple_SET_ITEM(state, 6, PyInt_FromLong(alignment));
- PyTuple_SET_ITEM(state, 7, PyInt_FromLong(self->flags));
+ PyTuple_SET_ITEM(state, 5, PyLong_FromLong(elsize));
+ PyTuple_SET_ITEM(state, 6, PyLong_FromLong(alignment));
+ PyTuple_SET_ITEM(state, 7, PyLong_FromLong(self->flags));
PyTuple_SET_ITEM(ret, 2, state);
return ret;
@@ -2628,7 +2628,7 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
default:
/* raise an error */
if (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0)) > 5) {
- version = PyInt_AsLong(PyTuple_GET_ITEM(args, 0));
+ version = PyLong_AsLong(PyTuple_GET_ITEM(args, 0));
}
else {
version = -1;
@@ -2651,7 +2651,7 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
if (version == 1 || version == 0) {
if (fields != Py_None) {
PyObject *key, *list;
- key = PyInt_FromLong(-1);
+ key = PyLong_FromLong(-1);
list = PyDict_GetItemWithError(fields, key);
if (!list) {
if (!PyErr_Occurred()) {
@@ -2894,7 +2894,7 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
PyArray_DatetimeMetaData temp_dt_data;
if ((! PyTuple_Check(metadata)) || (PyTuple_Size(metadata) != 2)) {
- errmsg = PyUString_FromString("Invalid datetime dtype (metadata, c_metadata): ");
+ errmsg = PyUnicode_FromString("Invalid datetime dtype (metadata, c_metadata): ");
PyUString_ConcatAndDel(&errmsg, PyObject_Repr(metadata));
PyErr_SetObject(PyExc_ValueError, errmsg);
Py_DECREF(errmsg);
@@ -3393,7 +3393,7 @@ arraydescr_field_subset_view(PyArray_Descr *self, PyObject *ind)
/* disallow duplicate field indices */
if (PyDict_Contains(fields, name)) {
PyObject *msg = NULL;
- PyObject *fmt = PyUString_FromString(
+ PyObject *fmt = PyUnicode_FromString(
"duplicate field of name {!r}");
if (fmt != NULL) {
msg = PyObject_CallMethod(fmt, "format", "O", name);
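Note: the descriptor.c hunks above are a mechanical Python-3 cleanup. `PyInt_*` does not exist in Python 3, and `PyUString_*` was a NumPy-internal alias that papered over the Python 2/3 string split; with Python 2 support dropped, calls go straight to the `PyLong_*`/`PyUnicode_*` API. A minimal sketch of what the retired aliases expanded to, paraphrased from the npy_3kcompat.h-style shims rather than quoted verbatim:

    /* Sketch only: paraphrased compatibility aliases, not the verbatim header. */
    #if PY_MAJOR_VERSION >= 3
        #define PyInt_FromLong        PyLong_FromLong
        #define PyInt_AsLong          PyLong_AsLong
        #define PyUString_FromString  PyUnicode_FromString
        #define PyUString_FromFormat  PyUnicode_FromFormat
    #endif

On Python 3 these aliases were identity macros, so the substitution cannot change behavior; the same swap repeats in the dragon4.c hunks below.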
diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c
index 553d0effb..a7b252a77 100644
--- a/numpy/core/src/multiarray/dragon4.c
+++ b/numpy/core/src/multiarray/dragon4.c
@@ -3093,7 +3093,7 @@ Dragon4_Positional_##Type##_opt(npy_type *val, Dragon4_Options *opt)\
free_dragon4_bigint_scratch(scratch);\
return NULL;\
}\
- ret = PyUString_FromString(scratch->repr);\
+ ret = PyUnicode_FromString(scratch->repr);\
free_dragon4_bigint_scratch(scratch);\
return ret;\
}\
@@ -3130,7 +3130,7 @@ Dragon4_Scientific_##Type##_opt(npy_type *val, Dragon4_Options *opt)\
free_dragon4_bigint_scratch(scratch);\
return NULL;\
}\
- ret = PyUString_FromString(scratch->repr);\
+ ret = PyUnicode_FromString(scratch->repr);\
free_dragon4_bigint_scratch(scratch);\
return ret;\
}\
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 3a58b5849..42c66ee7f 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -17,7 +17,6 @@
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
#include <numpy/arrayobject.h>
-#include <numpy/npy_cpu.h>
#include "npy_pycompat.h"
@@ -106,7 +105,7 @@ get_bool_setdstone_transfer_function(npy_intp dst_stride,
/*************************** COPY REFERENCES *******************************/
/* Moves references from src to dst */
-static void
+static int
_strided_to_strided_move_references(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -114,27 +113,28 @@ _strided_to_strided_move_references(char *dst, npy_intp dst_stride,
{
PyObject *src_ref = NULL, *dst_ref = NULL;
while (N > 0) {
- NPY_COPY_PYOBJECT_PTR(&src_ref, src);
- NPY_COPY_PYOBJECT_PTR(&dst_ref, dst);
+ memcpy(&src_ref, src, sizeof(src_ref));
+ memcpy(&dst_ref, dst, sizeof(dst_ref));
/* Release the reference in dst */
NPY_DT_DBG_REFTRACE("dec dst ref", dst_ref);
Py_XDECREF(dst_ref);
/* Move the reference */
NPY_DT_DBG_REFTRACE("move src ref", src_ref);
- NPY_COPY_PYOBJECT_PTR(dst, &src_ref);
+ memcpy(dst, &src_ref, sizeof(src_ref));
/* Set the source reference to NULL */
src_ref = NULL;
- NPY_COPY_PYOBJECT_PTR(src, &src_ref);
+ memcpy(src, &src_ref, sizeof(src_ref));
src += src_stride;
dst += dst_stride;
--N;
}
+ return 0;
}
/* Copies references from src to dst */
-static void
+static int
_strided_to_strided_copy_references(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -142,12 +142,12 @@ _strided_to_strided_copy_references(char *dst, npy_intp dst_stride,
{
PyObject *src_ref = NULL, *dst_ref = NULL;
while (N > 0) {
- NPY_COPY_PYOBJECT_PTR(&src_ref, src);
- NPY_COPY_PYOBJECT_PTR(&dst_ref, dst);
+ memcpy(&src_ref, src, sizeof(src_ref));
+ memcpy(&dst_ref, dst, sizeof(dst_ref));
/* Copy the reference */
NPY_DT_DBG_REFTRACE("copy src ref", src_ref);
- NPY_COPY_PYOBJECT_PTR(dst, &src_ref);
+ memcpy(dst, &src_ref, sizeof(src_ref));
/* Claim the reference */
Py_XINCREF(src_ref);
/* Release the reference in dst */
@@ -158,6 +158,7 @@ _strided_to_strided_copy_references(char *dst, npy_intp dst_stride,
dst += dst_stride;
--N;
}
+ return 0;
}
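Note: this is the pattern the rest of this file's hunks follow. The strided-transfer functions change their return type from `void` to `int`, returning 0 on success and -1 with a Python exception set on failure, so errors can finally propagate out of the copy machinery instead of being silently dropped. A sketch of the resulting caller-side convention (illustrative, not a verbatim NumPy call site):

    /* Sketch, assuming `stransfer` follows the new 0 / -1 convention: */
    if (stransfer(dst, dst_stride, src, src_stride,
                  N, src_itemsize, transferdata) < 0) {
        return -1;  /* the callee has already set the Python exception */
    }

The switch from `NPY_COPY_PYOBJECT_PTR` to `memcpy(..., sizeof(PyObject *))` is also deliberate: `memcpy` is well-defined for unaligned, type-punned buffer bytes, and compilers reduce a fixed-size `memcpy` to a single load or store.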
@@ -188,7 +189,7 @@ static NpyAuxData *_strided_zero_pad_data_clone(NpyAuxData *data)
* Does a strided to strided zero-padded copy for the case where
* dst_itemsize > src_itemsize
*/
-static void
+static int
_strided_to_strided_zero_pad_copy(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -205,13 +206,14 @@ _strided_to_strided_zero_pad_copy(char *dst, npy_intp dst_stride,
dst += dst_stride;
--N;
}
+ return 0;
}
/*
* Does a strided to strided zero-padded copy for the case where
* dst_itemsize < src_itemsize
*/
-static void
+static int
_strided_to_strided_truncate_copy(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -226,13 +228,14 @@ _strided_to_strided_truncate_copy(char *dst, npy_intp dst_stride,
dst += dst_stride;
--N;
}
+ return 0;
}
/*
* Does a strided to strided zero-padded or truncated copy for the case where
* unicode swapping is needed.
*/
-static void
+static int
_strided_to_strided_unicode_copyswap(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -260,6 +263,7 @@ _strided_to_strided_unicode_copyswap(char *dst, npy_intp dst_stride,
dst += dst_stride;
--N;
}
+ return 0;
}
@@ -379,7 +383,7 @@ static NpyAuxData *_align_wrap_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_strided_to_strided_contig_align_wrap(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -395,47 +399,50 @@ _strided_to_strided_contig_align_wrap(char *dst, npy_intp dst_stride,
*todata = d->todata,
*fromdata = d->fromdata;
char *bufferin = d->bufferin, *bufferout = d->bufferout;
- npy_bool init_dest = d->init_dest, out_needs_api = d->out_needs_api;
+ npy_bool init_dest = d->init_dest;
for(;;) {
- /*
- * The caller does not know if a previous call resulted in a Python
- * exception. Much of the Python API is unsafe while an exception is in
- * flight, so just skip all the work. Someone higher in the call stack
- * will check for errors and propagate them.
- */
- if (out_needs_api && PyErr_Occurred()) {
- return;
- }
if (N > NPY_LOWLEVEL_BUFFER_BLOCKSIZE) {
- tobuffer(bufferin, inner_src_itemsize, src, src_stride,
- NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
- src_itemsize, todata);
+ if (tobuffer(
+ bufferin, inner_src_itemsize, src, src_stride,
+ NPY_LOWLEVEL_BUFFER_BLOCKSIZE, src_itemsize, todata) < 0) {
+ return -1;
+ }
if (init_dest) {
memset(bufferout, 0,
- dst_itemsize*NPY_LOWLEVEL_BUFFER_BLOCKSIZE);
+ dst_itemsize*NPY_LOWLEVEL_BUFFER_BLOCKSIZE);
+ }
+ if (wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize,
+ NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
+ inner_src_itemsize, wrappeddata) < 0) {
+ return -1;
+ }
+ if (frombuffer(dst, dst_stride, bufferout, dst_itemsize,
+ NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
+ dst_itemsize, fromdata) < 0) {
+ return -1;
}
- wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize,
- NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
- inner_src_itemsize, wrappeddata);
- frombuffer(dst, dst_stride, bufferout, dst_itemsize,
- NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
- dst_itemsize, fromdata);
N -= NPY_LOWLEVEL_BUFFER_BLOCKSIZE;
src += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_stride;
dst += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*dst_stride;
}
else {
- tobuffer(bufferin, inner_src_itemsize, src, src_stride, N,
- src_itemsize, todata);
+ if (tobuffer(bufferin, inner_src_itemsize, src, src_stride,
+ N, src_itemsize, todata) < 0) {
+ return -1;
+ }
if (init_dest) {
memset(bufferout, 0, dst_itemsize*N);
}
- wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize, N,
- inner_src_itemsize, wrappeddata);
- frombuffer(dst, dst_stride, bufferout, dst_itemsize, N,
- dst_itemsize, fromdata);
- return;
+ if (wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize,
+ N, inner_src_itemsize, wrappeddata) < 0) {
+ return -1;
+ }
+ if (frombuffer(dst, dst_stride, bufferout, dst_itemsize,
+ N, dst_itemsize, fromdata) < 0) {
+ return -1;
+ }
+ return 0;
}
}
}
@@ -538,7 +545,7 @@ static NpyAuxData *_wrap_copy_swap_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_strided_to_strided_wrap_copy_swap(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
@@ -546,7 +553,9 @@ _strided_to_strided_wrap_copy_swap(char *dst, npy_intp dst_stride,
{
_wrap_copy_swap_data *d = (_wrap_copy_swap_data *)data;
+    /* We assume d->copyswapn cannot fail, since the legacy slot has no way to report an error. */
d->copyswapn(dst, dst_stride, src, src_stride, N, d->swap, d->arr);
+ return 0;
}
/* This only gets used for custom data types and for Unicode when swapping */
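Note: the unconditional `return 0` above is forced by the legacy API: the `copyswapn` dtype slot returns `void`, so the wrapper has no failure signal to forward. The slot's shape, with parameter names added here for illustration (only the types are part of the declaration in ndarraytypes.h):

    /* PyArray_CopySwapNFunc; parameter names are illustrative. */
    typedef void (PyArray_CopySwapNFunc)(void *dst, npy_intp dstride,
                                         void *src, npy_intp sstride,
                                         npy_intp n, int swap, void *arr);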
@@ -603,6 +612,7 @@ typedef struct {
NpyAuxData base;
PyArray_VectorUnaryFunc *castfunc;
PyArrayObject *aip, *aop;
+ npy_bool needs_api;
} _strided_cast_data;
/* strided cast data free function */
@@ -630,7 +640,7 @@ static NpyAuxData *_strided_cast_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_aligned_strided_to_strided_cast(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -639,17 +649,29 @@ _aligned_strided_to_strided_cast(char *dst, npy_intp dst_stride,
_strided_cast_data *d = (_strided_cast_data *)data;
PyArray_VectorUnaryFunc *castfunc = d->castfunc;
PyArrayObject *aip = d->aip, *aop = d->aop;
+ npy_bool needs_api = d->needs_api;
while (N > 0) {
castfunc(src, dst, 1, aip, aop);
+ /*
+         * Error handling in ufuncs is not ideal: at the time of writing,
+         * an error may already be in flight before this function is called.
+         * For most of NumPy's history these checks were missing entirely,
+         * so checking here is hopefully acceptable for the time being
+         * (until ufuncs are fixed).
+ */
+ if (needs_api && PyErr_Occurred()) {
+ return -1;
+ }
dst += dst_stride;
src += src_stride;
--N;
}
+ return 0;
}
/* This one requires src be of type NPY_OBJECT */
-static void
+static int
_aligned_strided_to_strided_cast_decref_src(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -658,31 +680,49 @@ _aligned_strided_to_strided_cast_decref_src(char *dst, npy_intp dst_stride,
_strided_cast_data *d = (_strided_cast_data *)data;
PyArray_VectorUnaryFunc *castfunc = d->castfunc;
PyArrayObject *aip = d->aip, *aop = d->aop;
+ npy_bool needs_api = d->needs_api;
PyObject *src_ref;
while (N > 0) {
castfunc(src, dst, 1, aip, aop);
-
- /* After casting, decrement the source ref */
- NPY_COPY_PYOBJECT_PTR(&src_ref, src);
- NPY_DT_DBG_REFTRACE("dec src ref (cast object -> not object)", src_ref);
+ /*
+         * See the comment in `_aligned_strided_to_strided_cast`; an error
+         * could in principle already be set before `castfunc` is called.
+ */
+ if (needs_api && PyErr_Occurred()) {
+ return -1;
+ }
+ /* After casting, decrement the source ref and set it to NULL */
+ memcpy(&src_ref, src, sizeof(src_ref));
Py_XDECREF(src_ref);
+ memset(src, 0, sizeof(PyObject *));
+ NPY_DT_DBG_REFTRACE("dec src ref (cast object -> not object)", src_ref);
dst += dst_stride;
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_aligned_contig_to_contig_cast(char *dst, npy_intp NPY_UNUSED(dst_stride),
char *src, npy_intp NPY_UNUSED(src_stride),
npy_intp N, npy_intp NPY_UNUSED(itemsize),
NpyAuxData *data)
{
_strided_cast_data *d = (_strided_cast_data *)data;
+ npy_bool needs_api = d->needs_api;
d->castfunc(src, dst, N, d->aip, d->aop);
+ /*
+     * See the comment in `_aligned_strided_to_strided_cast`; an error
+     * could in principle already be set before `castfunc` is called.
+ */
+ if (needs_api && PyErr_Occurred()) {
+ return -1;
+ }
+ return 0;
}
static int
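Note: `needs_api` is cached on `_strided_cast_data` because the legacy cast slot also returns `void` and cannot report failure itself; the loop instead polls `PyErr_Occurred()` after each call, and only when `needs_api` is set, so casts that never touch the Python API keep their tight inner loop. The slot being wrapped, again with illustrative parameter names:

    /* PyArray_VectorUnaryFunc as declared in ndarraytypes.h;
       parameter names are illustrative. */
    typedef void (PyArray_VectorUnaryFunc)(void *from, void *to, npy_intp n,
                                           void *fromarr, void *toarr);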
@@ -777,7 +817,7 @@ static NpyAuxData *_strided_datetime_cast_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_strided_to_strided_datetime_general_cast(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -792,12 +832,12 @@ _strided_to_strided_datetime_general_cast(char *dst, npy_intp dst_stride,
if (convert_datetime_to_datetimestruct(&d->src_meta,
dt, &dts) < 0) {
- dt = NPY_DATETIME_NAT;
+ return -1;
}
else {
if (convert_datetimestruct_to_datetime(&d->dst_meta,
&dts, &dt) < 0) {
- dt = NPY_DATETIME_NAT;
+ return -1;
}
}
@@ -807,9 +847,10 @@ _strided_to_strided_datetime_general_cast(char *dst, npy_intp dst_stride,
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_strided_to_strided_datetime_cast(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -838,9 +879,10 @@ _strided_to_strided_datetime_cast(char *dst, npy_intp dst_stride,
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_aligned_strided_to_strided_datetime_cast(char *dst,
npy_intp dst_stride,
char *src, npy_intp src_stride,
@@ -870,9 +912,10 @@ _aligned_strided_to_strided_datetime_cast(char *dst,
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_strided_to_strided_datetime_to_string(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
@@ -888,28 +931,26 @@ _strided_to_strided_datetime_to_string(char *dst, npy_intp dst_stride,
if (convert_datetime_to_datetimestruct(&d->src_meta,
dt, &dts) < 0) {
- /* For an error, produce a 'NaT' string */
- dts.year = NPY_DATETIME_NAT;
+ return -1;
}
/* Initialize the destination to all zeros */
memset(dst, 0, dst_itemsize);
- /*
- * This may also raise an error, but the caller needs
- * to use PyErr_Occurred().
- */
- make_iso_8601_datetime(&dts, dst, dst_itemsize,
+ if (make_iso_8601_datetime(&dts, dst, dst_itemsize,
0, 0, d->src_meta.base, -1,
- NPY_UNSAFE_CASTING);
+ NPY_UNSAFE_CASTING) < 0) {
+ return -1;
+ }
dst += dst_stride;
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_strided_to_strided_string_to_datetime(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -934,7 +975,7 @@ _strided_to_strided_string_to_datetime(char *dst, npy_intp dst_stride,
if (parse_iso_8601_datetime(tmp_buffer, src_itemsize,
d->dst_meta.base, NPY_SAME_KIND_CASTING,
&dts, NULL, NULL) < 0) {
- dt = NPY_DATETIME_NAT;
+ return -1;
}
}
/* Otherwise parse the data in place */
@@ -942,7 +983,7 @@ _strided_to_strided_string_to_datetime(char *dst, npy_intp dst_stride,
if (parse_iso_8601_datetime(src, tmp - src,
d->dst_meta.base, NPY_SAME_KIND_CASTING,
&dts, NULL, NULL) < 0) {
- dt = NPY_DATETIME_NAT;
+ return -1;
}
}
@@ -950,7 +991,7 @@ _strided_to_strided_string_to_datetime(char *dst, npy_intp dst_stride,
if (dt != NPY_DATETIME_NAT &&
convert_datetimestruct_to_datetime(&d->dst_meta,
&dts, &dt) < 0) {
- dt = NPY_DATETIME_NAT;
+ return -1;
}
memcpy(dst, &dt, sizeof(dt));
@@ -959,6 +1000,7 @@ _strided_to_strided_string_to_datetime(char *dst, npy_intp dst_stride,
src += src_stride;
--N;
}
+ return 0;
}
/*
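Note: the datetime hunks above change behavior, not just plumbing. A failed conversion used to degrade silently to NaT (`dt = NPY_DATETIME_NAT;`, or a 'NaT' string) while the pending exception was left for a later `PyErr_Occurred()` check; it now returns -1 on the spot, so inputs that previously cast to NaT raise instead. The before/after shape, condensed from the hunks rather than quoted verbatim:

    /* Before: swallow the failure and keep looping. */
    if (convert_datetime_to_datetimestruct(&d->src_meta, dt, &dts) < 0) {
        dt = NPY_DATETIME_NAT;
    }

    /* After: propagate; the callee has already set the exception. */
    if (convert_datetime_to_datetimestruct(&d->src_meta, dt, &dts) < 0) {
        return -1;
    }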
@@ -1422,6 +1464,7 @@ get_nbo_cast_transfer_function(int aligned,
data->base.free = &_strided_cast_data_free;
data->base.clone = &_strided_cast_data_clone;
data->castfunc = castfunc;
+ data->needs_api = *out_needs_api;
/*
* TODO: This is a hack so the cast functions have an array.
* The cast functions shouldn't need that. Also, since we
@@ -1652,7 +1695,7 @@ static NpyAuxData *_one_to_n_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_strided_to_strided_one_to_n(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -1664,18 +1707,19 @@ _strided_to_strided_one_to_n(char *dst, npy_intp dst_stride,
npy_intp subN = d->N, dst_itemsize = d->dst_itemsize;
while (N > 0) {
- subtransfer(dst, dst_itemsize,
- src, 0,
- subN, src_itemsize,
- subdata);
+ if (subtransfer(
+ dst, dst_itemsize, src, 0, subN, src_itemsize, subdata) < 0) {
+ return -1;
+ }
src += src_stride;
dst += dst_stride;
--N;
}
+ return 0;
}
-static void
+static int
_strided_to_strided_one_to_n_with_finish(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -1688,21 +1732,21 @@ _strided_to_strided_one_to_n_with_finish(char *dst, npy_intp dst_stride,
npy_intp subN = d->N, dst_itemsize = d->dst_itemsize;
while (N > 0) {
- subtransfer(dst, dst_itemsize,
- src, 0,
- subN, src_itemsize,
- subdata);
-
+ if (subtransfer(
+ dst, dst_itemsize, src, 0, subN, src_itemsize, subdata) < 0) {
+ return -1;
+ }
- stransfer_finish_src(NULL, 0,
- src, 0,
- 1, src_itemsize,
- data_finish_src);
+ if (stransfer_finish_src(
+ NULL, 0, src, 0, 1, src_itemsize, data_finish_src) < 0) {
+ return -1;
+ }
src += src_stride;
dst += dst_stride;
--N;
}
+ return 0;
}
/*
@@ -1846,7 +1890,7 @@ static NpyAuxData *_n_to_n_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_strided_to_strided_n_to_n(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -1859,18 +1903,19 @@ _strided_to_strided_n_to_n(char *dst, npy_intp dst_stride,
dst_subitemsize = d->dst_itemsize;
while (N > 0) {
- subtransfer(dst, dst_subitemsize,
- src, src_subitemsize,
- subN, src_subitemsize,
- subdata);
-
+ if (subtransfer(
+ dst, dst_subitemsize, src, src_subitemsize,
+ subN, src_subitemsize, subdata) < 0) {
+ return -1;
+ }
src += src_stride;
dst += dst_stride;
--N;
}
+ return 0;
}
-static void
+static int
_contig_to_contig_n_to_n(char *dst, npy_intp NPY_UNUSED(dst_stride),
char *src, npy_intp NPY_UNUSED(src_stride),
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
@@ -1882,10 +1927,12 @@ _contig_to_contig_n_to_n(char *dst, npy_intp NPY_UNUSED(dst_stride),
npy_intp subN = d->N, src_subitemsize = d->src_itemsize,
dst_subitemsize = d->dst_itemsize;
- subtransfer(dst, dst_subitemsize,
- src, src_subitemsize,
- subN*N, src_subitemsize,
- subdata);
+ if (subtransfer(
+ dst, dst_subitemsize, src, src_subitemsize,
+ subN*N, src_subitemsize, subdata) < 0) {
+ return -1;
+ }
+ return 0;
}
/*
@@ -2049,7 +2096,7 @@ static NpyAuxData *_subarray_broadcast_data_clone( NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_strided_to_strided_subarray_broadcast(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
@@ -2072,10 +2119,11 @@ _strided_to_strided_subarray_broadcast(char *dst, npy_intp dst_stride,
count = offsetruns[run].count;
dst_ptr = dst + loop_index*dst_subitemsize;
if (offset != -1) {
- subtransfer(dst_ptr, dst_subitemsize,
- src + offset, src_subitemsize,
- count, src_subitemsize,
- subdata);
+ if (subtransfer(
+ dst_ptr, dst_subitemsize, src + offset, src_subitemsize,
+ count, src_subitemsize, subdata) < 0) {
+ return -1;
+ }
}
else {
memset(dst_ptr, 0, count*dst_subitemsize);
@@ -2087,10 +2135,11 @@ _strided_to_strided_subarray_broadcast(char *dst, npy_intp dst_stride,
dst += dst_stride;
--N;
}
+ return 0;
}
-static void
+static int
_strided_to_strided_subarray_broadcast_withrefs(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
@@ -2118,16 +2167,19 @@ _strided_to_strided_subarray_broadcast_withrefs(char *dst, npy_intp dst_stride,
count = offsetruns[run].count;
dst_ptr = dst + loop_index*dst_subitemsize;
if (offset != -1) {
- subtransfer(dst_ptr, dst_subitemsize,
- src + offset, src_subitemsize,
- count, src_subitemsize,
- subdata);
+ if (subtransfer(
+ dst_ptr, dst_subitemsize, src + offset, src_subitemsize,
+ count, src_subitemsize, subdata) < 0) {
+ return -1;
+ }
}
else {
if (stransfer_decdstref != NULL) {
- stransfer_decdstref(NULL, 0, dst_ptr, dst_subitemsize,
- count, dst_subitemsize,
- data_decdstref);
+ if (stransfer_decdstref(
+ NULL, 0, dst_ptr, dst_subitemsize,
+ count, dst_subitemsize, data_decdstref) < 0) {
+ return -1;
+ }
}
memset(dst_ptr, 0, count*dst_subitemsize);
}
@@ -2135,15 +2187,18 @@ _strided_to_strided_subarray_broadcast_withrefs(char *dst, npy_intp dst_stride,
}
if (stransfer_decsrcref != NULL) {
- stransfer_decsrcref(NULL, 0, src, src_subitemsize,
- src_subN, src_subitemsize,
- data_decsrcref);
+ if (stransfer_decsrcref(
+ NULL, 0, src, src_subitemsize,
+ src_subN, src_subitemsize, data_decsrcref) < 0) {
+ return -1;
+ }
}
src += src_stride;
dst += dst_stride;
--N;
}
+ return 0;
}
@@ -2500,7 +2555,7 @@ static NpyAuxData *_field_transfer_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_strided_to_strided_field_transfer(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
@@ -2515,11 +2570,13 @@ _strided_to_strided_field_transfer(char *dst, npy_intp dst_stride,
field = &d->fields;
if (N > NPY_LOWLEVEL_BUFFER_BLOCKSIZE) {
for (i = 0; i < field_count; ++i, ++field) {
- field->stransfer(dst + field->dst_offset, dst_stride,
- src + field->src_offset, src_stride,
- NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
- field->src_itemsize,
- field->data);
+ if (field->stransfer(
+ dst + field->dst_offset, dst_stride,
+ src + field->src_offset, src_stride,
+ NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
+ field->src_itemsize, field->data) < 0) {
+ return -1;
+ }
}
N -= NPY_LOWLEVEL_BUFFER_BLOCKSIZE;
src += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_stride;
@@ -2527,13 +2584,15 @@ _strided_to_strided_field_transfer(char *dst, npy_intp dst_stride,
}
else {
for (i = 0; i < field_count; ++i, ++field) {
- field->stransfer(dst + field->dst_offset, dst_stride,
- src + field->src_offset, src_stride,
- N,
- field->src_itemsize,
- field->data);
+ if (field->stransfer(
+ dst + field->dst_offset, dst_stride,
+ src + field->src_offset, src_stride,
+ N,
+ field->src_itemsize, field->data) < 0) {
+ return -1;
+ }
}
- return;
+ return 0;
}
}
}
@@ -2947,7 +3006,8 @@ static NpyAuxData *_masked_wrapper_transfer_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void _strided_masked_wrapper_decsrcref_transfer_function(
+static int
+_strided_masked_wrapper_decsrcref_transfer_function(
char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_bool *mask, npy_intp mask_stride,
@@ -2969,8 +3029,11 @@ static void _strided_masked_wrapper_decsrcref_transfer_function(
/* Skip masked values, still calling decsrcref for move_references */
mask = (npy_bool*)npy_memchr((char *)mask, 0, mask_stride, N,
&subloopsize, 1);
- decsrcref_stransfer(NULL, 0, src, src_stride,
- subloopsize, src_itemsize, decsrcref_transferdata);
+ if (decsrcref_stransfer(
+ NULL, 0, src, src_stride,
+ subloopsize, src_itemsize, decsrcref_transferdata) < 0) {
+ return -1;
+ }
dst += subloopsize * dst_stride;
src += subloopsize * src_stride;
N -= subloopsize;
@@ -2981,15 +3044,20 @@ static void _strided_masked_wrapper_decsrcref_transfer_function(
/* Process unmasked values */
mask = (npy_bool*)npy_memchr((char *)mask, 0, mask_stride, N,
&subloopsize, 0);
- unmasked_stransfer(dst, dst_stride, src, src_stride,
- subloopsize, src_itemsize, unmasked_transferdata);
+ if (unmasked_stransfer(
+ dst, dst_stride, src, src_stride,
+ subloopsize, src_itemsize, unmasked_transferdata) < 0) {
+ return -1;
+ }
dst += subloopsize * dst_stride;
src += subloopsize * src_stride;
N -= subloopsize;
}
+ return 0;
}
-static void _strided_masked_wrapper_transfer_function(
+static int
+_strided_masked_wrapper_transfer_function(
char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_bool *mask, npy_intp mask_stride,
@@ -3020,18 +3088,22 @@ static void _strided_masked_wrapper_transfer_function(
/* Process unmasked values */
mask = (npy_bool*)npy_memchr((char *)mask, 0, mask_stride, N,
&subloopsize, 0);
- unmasked_stransfer(dst, dst_stride, src, src_stride,
- subloopsize, src_itemsize, unmasked_transferdata);
+ if (unmasked_stransfer(
+ dst, dst_stride, src, src_stride,
+ subloopsize, src_itemsize, unmasked_transferdata) < 0) {
+ return -1;
+ }
dst += subloopsize * dst_stride;
src += subloopsize * src_stride;
N -= subloopsize;
}
+ return 0;
}
/************************* DEST BOOL SETONE *******************************/
-static void
+static int
_null_to_strided_set_bool_one(char *dst,
npy_intp dst_stride,
char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
@@ -3046,9 +3118,10 @@ _null_to_strided_set_bool_one(char *dst,
dst += dst_stride;
--N;
}
+ return 0;
}
-static void
+static int
_null_to_contig_set_bool_one(char *dst,
npy_intp NPY_UNUSED(dst_stride),
char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
@@ -3058,6 +3131,7 @@ _null_to_contig_set_bool_one(char *dst,
/* bool type is one byte, so can just use the char */
memset(dst, 1, N);
+ return 0;
}
/* Only for the bool type, sets the destination to 1 */
@@ -3101,7 +3175,7 @@ static NpyAuxData *_dst_memset_zero_data_clone(NpyAuxData *data)
return (NpyAuxData *)newdata;
}
-static void
+static int
_null_to_strided_memset_zero(char *dst,
npy_intp dst_stride,
char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
@@ -3116,9 +3190,10 @@ _null_to_strided_memset_zero(char *dst,
dst += dst_stride;
--N;
}
+ return 0;
}
-static void
+static int
_null_to_contig_memset_zero(char *dst,
npy_intp dst_stride,
char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
@@ -3129,9 +3204,10 @@ _null_to_contig_memset_zero(char *dst,
npy_intp dst_itemsize = d->dst_itemsize;
memset(dst, 0, N*dst_itemsize);
+ return 0;
}
-static void
+static int
_null_to_strided_reference_setzero(char *dst,
npy_intp dst_stride,
char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
@@ -3141,19 +3217,17 @@ _null_to_strided_reference_setzero(char *dst,
PyObject *dst_ref = NULL;
while (N > 0) {
- NPY_COPY_PYOBJECT_PTR(&dst_ref, dst);
+ memcpy(&dst_ref, dst, sizeof(dst_ref));
- /* Release the reference in dst */
+ /* Release the reference in dst and set it to NULL */
NPY_DT_DBG_REFTRACE("dec dest ref (to set zero)", dst_ref);
Py_XDECREF(dst_ref);
-
- /* Set it to zero */
- dst_ref = NULL;
- NPY_COPY_PYOBJECT_PTR(dst, &dst_ref);
+ memset(dst, 0, sizeof(PyObject *));
dst += dst_stride;
--N;
}
+ return 0;
}
NPY_NO_EXPORT int
@@ -3250,7 +3324,7 @@ get_setdstzero_transfer_function(int aligned,
return NPY_SUCCEED;
}
-static void
+static int
_dec_src_ref_nop(char *NPY_UNUSED(dst),
npy_intp NPY_UNUSED(dst_stride),
char *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
@@ -3259,9 +3333,10 @@ _dec_src_ref_nop(char *NPY_UNUSED(dst),
NpyAuxData *NPY_UNUSED(data))
{
/* NOP */
+ return 0;
}
-static void
+static int
_strided_to_null_dec_src_ref_reference(char *NPY_UNUSED(dst),
npy_intp NPY_UNUSED(dst_stride),
char *src, npy_intp src_stride,
@@ -3271,15 +3346,16 @@ _strided_to_null_dec_src_ref_reference(char *NPY_UNUSED(dst),
{
PyObject *src_ref = NULL;
while (N > 0) {
- NPY_COPY_PYOBJECT_PTR(&src_ref, src);
-
- /* Release the reference in src */
+        memcpy(&src_ref, src, sizeof(src_ref));
+        /* Release the reference in src and set it to NULL */
         NPY_DT_DBG_REFTRACE("dec src ref (null dst)", src_ref);
Py_XDECREF(src_ref);
+ memset(src, 0, sizeof(PyObject *));
src += src_stride;
--N;
}
+ return 0;
}
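Note: a second recurring fix in this file is visible in the reference-handling functions above: after `Py_XDECREF`, the object slot in the buffer is now cleared with `memset`, so a stale pointer cannot be decref'd a second time if the buffer is visited again (for example while unwinding after an error). The idiom, with `buf` standing in for the strided buffer position that holds a PyObject pointer:

    /* Sketch of the clear-after-decref idiom; `buf` is illustrative. */
    PyObject *ref;
    memcpy(&ref, buf, sizeof(ref));      /* safe even if buf is unaligned */
    Py_XDECREF(ref);
    memset(buf, 0, sizeof(PyObject *));  /* later passes see NULL, not junk */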
diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src
index 2538e05c6..6ad375f67 100644
--- a/numpy/core/src/multiarray/einsum.c.src
+++ b/numpy/core/src/multiarray/einsum.c.src
@@ -16,7 +16,6 @@
#define _MULTIARRAYMODULE
#include <numpy/npy_common.h>
#include <numpy/arrayobject.h>
-#include <numpy/halffloat.h>
#include <npy_pycompat.h>
#include <ctype.h>
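Note: the hunk below deletes roughly 1900 lines of hand-written scalar and SSE1/SSE2 sum-of-products inner loops from einsum.c.src; only part of the removal is shown in this excerpt, and the diff alone does not show where replacement kernels live, so they are presumably reimplemented elsewhere. Every removed variant computes the same fused multiply-accumulate, shown here as a plain scalar sketch:

    /* Illustrative scalar form of the two-operand kernel being removed:
       out[i] += a[i] * b[i] for i in [0, count). */
    static void
    sum_of_products_two(const double *a, const double *b,
                        double *out, npy_intp count)
    {
        while (count-- > 0) {
            *out = *a++ * *b++ + *out;
            out++;
        }
    }

The SSE paths only changed how this loop is unrolled and vectorized; as the removed comments note, the reduction variants additionally reorder the accumulation, so their results could differ slightly from the scalar sum.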
@@ -25,1898 +24,8 @@
#include "common.h"
#include "ctors.h"
-#ifdef NPY_HAVE_SSE_INTRINSICS
-#define EINSUM_USE_SSE1 1
-#else
-#define EINSUM_USE_SSE1 0
-#endif
-
-#ifdef NPY_HAVE_SSE2_INTRINSICS
-#define EINSUM_USE_SSE2 1
-#else
-#define EINSUM_USE_SSE2 0
-#endif
-
-#if EINSUM_USE_SSE1
-#include <xmmintrin.h>
-#endif
-
-#if EINSUM_USE_SSE2
-#include <emmintrin.h>
-#endif
-
-#define EINSUM_IS_SSE_ALIGNED(x) ((((npy_intp)x)&0xf) == 0)
-
-/********** PRINTF DEBUG TRACING **************/
-#define NPY_EINSUM_DBG_TRACING 0
-
-#if NPY_EINSUM_DBG_TRACING
-#define NPY_EINSUM_DBG_PRINT(s) printf("%s", s);
-#define NPY_EINSUM_DBG_PRINT1(s, p1) printf(s, p1);
-#define NPY_EINSUM_DBG_PRINT2(s, p1, p2) printf(s, p1, p2);
-#define NPY_EINSUM_DBG_PRINT3(s, p1, p2, p3) printf(s);
-#else
-#define NPY_EINSUM_DBG_PRINT(s)
-#define NPY_EINSUM_DBG_PRINT1(s, p1)
-#define NPY_EINSUM_DBG_PRINT2(s, p1, p2)
-#define NPY_EINSUM_DBG_PRINT3(s, p1, p2, p3)
-#endif
-/**********************************************/
-
-/**begin repeat
- * #name = byte, short, int, long, longlong,
- * ubyte, ushort, uint, ulong, ulonglong,
- * half, float, double, longdouble,
- * cfloat, cdouble, clongdouble#
- * #type = npy_byte, npy_short, npy_int, npy_long, npy_longlong,
- * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
- * npy_half, npy_float, npy_double, npy_longdouble,
- * npy_cfloat, npy_cdouble, npy_clongdouble#
- * #temptype = npy_byte, npy_short, npy_int, npy_long, npy_longlong,
- * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
- * npy_float, npy_float, npy_double, npy_longdouble,
- * npy_float, npy_double, npy_longdouble#
- * #to = ,,,,,
- * ,,,,,
- * npy_float_to_half,,,,
- * ,,#
- * #from = ,,,,,
- * ,,,,,
- * npy_half_to_float,,,,
- * ,,#
- * #complex = 0*5,
- * 0*5,
- * 0*4,
- * 1*3#
- * #float32 = 0*5,
- * 0*5,
- * 0,1,0,0,
- * 0*3#
- * #float64 = 0*5,
- * 0*5,
- * 0,0,1,0,
- * 0*3#
- */
-
-/**begin repeat1
- * #nop = 1, 2, 3, 1000#
- * #noplabel = one, two, three, any#
- */
-static void
-@name@_sum_of_products_@noplabel@(int nop, char **dataptr,
- npy_intp const *strides, npy_intp count)
-{
-#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@)
- char *data0 = dataptr[0];
- npy_intp stride0 = strides[0];
-#endif
-#if (@nop@ == 2 || @nop@ == 3) && !@complex@
- char *data1 = dataptr[1];
- npy_intp stride1 = strides[1];
-#endif
-#if (@nop@ == 3) && !@complex@
- char *data2 = dataptr[2];
- npy_intp stride2 = strides[2];
-#endif
-#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@)
- char *data_out = dataptr[@nop@];
- npy_intp stride_out = strides[@nop@];
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_@noplabel@ (%d)\n", (int)count);
-
- while (count--) {
-#if !@complex@
-# if @nop@ == 1
- *(@type@ *)data_out = @to@(@from@(*(@type@ *)data0) +
- @from@(*(@type@ *)data_out));
- data0 += stride0;
- data_out += stride_out;
-# elif @nop@ == 2
- *(@type@ *)data_out = @to@(@from@(*(@type@ *)data0) *
- @from@(*(@type@ *)data1) +
- @from@(*(@type@ *)data_out));
- data0 += stride0;
- data1 += stride1;
- data_out += stride_out;
-# elif @nop@ == 3
- *(@type@ *)data_out = @to@(@from@(*(@type@ *)data0) *
- @from@(*(@type@ *)data1) *
- @from@(*(@type@ *)data2) +
- @from@(*(@type@ *)data_out));
- data0 += stride0;
- data1 += stride1;
- data2 += stride2;
- data_out += stride_out;
-# else
- @temptype@ temp = @from@(*(@type@ *)dataptr[0]);
- int i;
- for (i = 1; i < nop; ++i) {
- temp *= @from@(*(@type@ *)dataptr[i]);
- }
- *(@type@ *)dataptr[nop] = @to@(temp +
- @from@(*(@type@ *)dataptr[i]));
- for (i = 0; i <= nop; ++i) {
- dataptr[i] += strides[i];
- }
-# endif
-#else /* complex */
-# if @nop@ == 1
- ((@temptype@ *)data_out)[0] = ((@temptype@ *)data0)[0] +
- ((@temptype@ *)data_out)[0];
- ((@temptype@ *)data_out)[1] = ((@temptype@ *)data0)[1] +
- ((@temptype@ *)data_out)[1];
- data0 += stride0;
- data_out += stride_out;
-# else
-# if @nop@ <= 3
-#define _SUMPROD_NOP @nop@
-# else
-#define _SUMPROD_NOP nop
-# endif
- @temptype@ re, im, tmp;
- int i;
- re = ((@temptype@ *)dataptr[0])[0];
- im = ((@temptype@ *)dataptr[0])[1];
- for (i = 1; i < _SUMPROD_NOP; ++i) {
- tmp = re * ((@temptype@ *)dataptr[i])[0] -
- im * ((@temptype@ *)dataptr[i])[1];
- im = re * ((@temptype@ *)dataptr[i])[1] +
- im * ((@temptype@ *)dataptr[i])[0];
- re = tmp;
- }
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[0] = re +
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[0];
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[1] = im +
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[1];
-
- for (i = 0; i <= _SUMPROD_NOP; ++i) {
- dataptr[i] += strides[i];
- }
-#undef _SUMPROD_NOP
-# endif
-#endif
- }
-}
-
-#if @nop@ == 1
-
-static void
-@name@_sum_of_products_contig_one(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @type@ *data0 = (@type@ *)dataptr[0];
- @type@ *data_out = (@type@ *)dataptr[1];
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_one (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
-#if !@complex@
- data_out[@i@] = @to@(@from@(data0[@i@]) +
- @from@(data_out[@i@]));
-#else
- ((@temptype@ *)data_out + 2*@i@)[0] =
- ((@temptype@ *)data0 + 2*@i@)[0] +
- ((@temptype@ *)data_out + 2*@i@)[0];
- ((@temptype@ *)data_out + 2*@i@)[1] =
- ((@temptype@ *)data0 + 2*@i@)[1] +
- ((@temptype@ *)data_out + 2*@i@)[1];
-#endif
-/**end repeat2**/
- case 0:
- return;
- }
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
-#if !@complex@
- data_out[@i@] = @to@(@from@(data0[@i@]) +
- @from@(data_out[@i@]));
-#else /* complex */
- ((@temptype@ *)data_out + 2*@i@)[0] =
- ((@temptype@ *)data0 + 2*@i@)[0] +
- ((@temptype@ *)data_out + 2*@i@)[0];
- ((@temptype@ *)data_out + 2*@i@)[1] =
- ((@temptype@ *)data0 + 2*@i@)[1] +
- ((@temptype@ *)data_out + 2*@i@)[1];
-#endif
-/**end repeat2**/
- data0 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
-}
-
-#elif @nop@ == 2 && !@complex@
-
-static void
-@name@_sum_of_products_contig_two(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @type@ *data0 = (@type@ *)dataptr[0];
- @type@ *data1 = (@type@ *)dataptr[1];
- @type@ *data_out = (@type@ *)dataptr[2];
-
-#if EINSUM_USE_SSE1 && @float32@
- __m128 a, b;
-#elif EINSUM_USE_SSE2 && @float64@
- __m128d a, b;
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_two (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
- data_out[@i@] = @to@(@from@(data0[@i@]) *
- @from@(data1[@i@]) +
- @from@(data_out[@i@]));
-/**end repeat2**/
- case 0:
- return;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1) &&
- EINSUM_IS_SSE_ALIGNED(data_out)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 4#
- */
- a = _mm_mul_ps(_mm_load_ps(data0+@i@), _mm_load_ps(data1+@i@));
- b = _mm_add_ps(a, _mm_load_ps(data_out+@i@));
- _mm_store_ps(data_out+@i@, b);
-/**end repeat2**/
- data0 += 8;
- data1 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#elif EINSUM_USE_SSE2 && @float64@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1) &&
- EINSUM_IS_SSE_ALIGNED(data_out)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- a = _mm_mul_pd(_mm_load_pd(data0+@i@), _mm_load_pd(data1+@i@));
- b = _mm_add_pd(a, _mm_load_pd(data_out+@i@));
- _mm_store_pd(data_out+@i@, b);
-/**end repeat2**/
- data0 += 8;
- data1 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#endif
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-#if EINSUM_USE_SSE1 && @float32@
-/**begin repeat2
- * #i = 0, 4#
- */
- a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), _mm_loadu_ps(data1+@i@));
- b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@));
- _mm_storeu_ps(data_out+@i@, b);
-/**end repeat2**/
-#elif EINSUM_USE_SSE2 && @float64@
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), _mm_loadu_pd(data1+@i@));
- b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@));
- _mm_storeu_pd(data_out+@i@, b);
-/**end repeat2**/
-#else
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- data_out[@i@] = @to@(@from@(data0[@i@]) *
- @from@(data1[@i@]) +
- @from@(data_out[@i@]));
-/**end repeat2**/
-#endif
- data0 += 8;
- data1 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
-}
-
-/* Some extra specializations for the two operand case */
-static void
-@name@_sum_of_products_stride0_contig_outcontig_two(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @temptype@ value0 = @from@(*(@type@ *)dataptr[0]);
- @type@ *data1 = (@type@ *)dataptr[1];
- @type@ *data_out = (@type@ *)dataptr[2];
-
-#if EINSUM_USE_SSE1 && @float32@
- __m128 a, b, value0_sse;
-#elif EINSUM_USE_SSE2 && @float64@
- __m128d a, b, value0_sse;
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_stride0_contig_outcontig_two (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
- data_out[@i@] = @to@(value0 *
- @from@(data1[@i@]) +
- @from@(data_out[@i@]));
-/**end repeat2**/
- case 0:
- return;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- value0_sse = _mm_set_ps1(value0);
-
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data1) && EINSUM_IS_SSE_ALIGNED(data_out)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 4#
- */
- a = _mm_mul_ps(value0_sse, _mm_load_ps(data1+@i@));
- b = _mm_add_ps(a, _mm_load_ps(data_out+@i@));
- _mm_store_ps(data_out+@i@, b);
-/**end repeat2**/
- data1 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- if (count > 0) {
- goto finish_after_unrolled_loop;
- }
- else {
- return;
- }
- }
-#elif EINSUM_USE_SSE2 && @float64@
- value0_sse = _mm_set1_pd(value0);
-
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data1) && EINSUM_IS_SSE_ALIGNED(data_out)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- a = _mm_mul_pd(value0_sse, _mm_load_pd(data1+@i@));
- b = _mm_add_pd(a, _mm_load_pd(data_out+@i@));
- _mm_store_pd(data_out+@i@, b);
-/**end repeat2**/
- data1 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- if (count > 0) {
- goto finish_after_unrolled_loop;
- }
- else {
- return;
- }
- }
-#endif
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-#if EINSUM_USE_SSE1 && @float32@
-/**begin repeat2
- * #i = 0, 4#
- */
- a = _mm_mul_ps(value0_sse, _mm_loadu_ps(data1+@i@));
- b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@));
- _mm_storeu_ps(data_out+@i@, b);
-/**end repeat2**/
-#elif EINSUM_USE_SSE2 && @float64@
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- a = _mm_mul_pd(value0_sse, _mm_loadu_pd(data1+@i@));
- b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@));
- _mm_storeu_pd(data_out+@i@, b);
-/**end repeat2**/
-#else
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- data_out[@i@] = @to@(value0 *
- @from@(data1[@i@]) +
- @from@(data_out[@i@]));
-/**end repeat2**/
-#endif
- data1 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- if (count > 0) {
- goto finish_after_unrolled_loop;
- }
-}
-
-static void
-@name@_sum_of_products_contig_stride0_outcontig_two(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @type@ *data0 = (@type@ *)dataptr[0];
- @temptype@ value1 = @from@(*(@type@ *)dataptr[1]);
- @type@ *data_out = (@type@ *)dataptr[2];
-
-#if EINSUM_USE_SSE1 && @float32@
- __m128 a, b, value1_sse;
-#elif EINSUM_USE_SSE2 && @float64@
- __m128d a, b, value1_sse;
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_stride0_outcontig_two (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
- data_out[@i@] = @to@(@from@(data0[@i@])*
- value1 +
- @from@(data_out[@i@]));
-/**end repeat2**/
- case 0:
- return;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- value1_sse = _mm_set_ps1(value1);
-
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data_out)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 4#
- */
- a = _mm_mul_ps(_mm_load_ps(data0+@i@), value1_sse);
- b = _mm_add_ps(a, _mm_load_ps(data_out+@i@));
- _mm_store_ps(data_out+@i@, b);
-/**end repeat2**/
- data0 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#elif EINSUM_USE_SSE2 && @float64@
- value1_sse = _mm_set1_pd(value1);
-
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data_out)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- a = _mm_mul_pd(_mm_load_pd(data0+@i@), value1_sse);
- b = _mm_add_pd(a, _mm_load_pd(data_out+@i@));
- _mm_store_pd(data_out+@i@, b);
-/**end repeat2**/
- data0 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#endif
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-#if EINSUM_USE_SSE1 && @float32@
-/**begin repeat2
- * #i = 0, 4#
- */
- a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), value1_sse);
- b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@));
- _mm_storeu_ps(data_out+@i@, b);
-/**end repeat2**/
-#elif EINSUM_USE_SSE2 && @float64@
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), value1_sse);
- b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@));
- _mm_storeu_pd(data_out+@i@, b);
-/**end repeat2**/
-#else
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- data_out[@i@] = @to@(@from@(data0[@i@])*
- value1 +
- @from@(data_out[@i@]));
-/**end repeat2**/
-#endif
- data0 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
-}
-
-static void
-@name@_sum_of_products_contig_contig_outstride0_two(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @type@ *data0 = (@type@ *)dataptr[0];
- @type@ *data1 = (@type@ *)dataptr[1];
- @temptype@ accum = 0;
-
-#if EINSUM_USE_SSE1 && @float32@
- __m128 a, accum_sse = _mm_setzero_ps();
-#elif EINSUM_USE_SSE2 && @float64@
- __m128d a, accum_sse = _mm_setzero_pd();
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_contig_outstride0_two (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
- accum += @from@(data0[@i@]) * @from@(data1[@i@]);
-/**end repeat2**/
- case 0:
- *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum);
- return;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
- _mm_prefetch(data1 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- a = _mm_mul_ps(_mm_load_ps(data0+@i@), _mm_load_ps(data1+@i@));
- accum_sse = _mm_add_ps(accum_sse, a);
-/**end repeat2**/
- data0 += 8;
- data1 += 8;
- }
-
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#elif EINSUM_USE_SSE2 && @float64@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
- _mm_prefetch(data1 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- a = _mm_mul_pd(_mm_load_pd(data0+@i@), _mm_load_pd(data1+@i@));
- accum_sse = _mm_add_pd(accum_sse, a);
-/**end repeat2**/
- data0 += 8;
- data1 += 8;
- }
-
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#endif
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-#if EINSUM_USE_SSE1 && @float32@
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
- _mm_prefetch(data1 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), _mm_loadu_ps(data1+@i@));
- accum_sse = _mm_add_ps(accum_sse, a);
-/**end repeat2**/
-#elif EINSUM_USE_SSE2 && @float64@
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
- _mm_prefetch(data1 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), _mm_loadu_pd(data1+@i@));
- accum_sse = _mm_add_pd(accum_sse, a);
-/**end repeat2**/
-#else
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- accum += @from@(data0[@i@]) * @from@(data1[@i@]);
-/**end repeat2**/
-#endif
- data0 += 8;
- data1 += 8;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-#elif EINSUM_USE_SSE2 && @float64@
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-#endif
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
-}
-
-static void
-@name@_sum_of_products_stride0_contig_outstride0_two(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @temptype@ value0 = @from@(*(@type@ *)dataptr[0]);
- @type@ *data1 = (@type@ *)dataptr[1];
- @temptype@ accum = 0;
-
-#if EINSUM_USE_SSE1 && @float32@
- __m128 a, accum_sse = _mm_setzero_ps();
-#elif EINSUM_USE_SSE2 && @float64@
- __m128d a, accum_sse = _mm_setzero_pd();
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_stride0_contig_outstride0_two (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
- accum += @from@(data1[@i@]);
-/**end repeat2**/
- case 0:
- *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + value0 * accum);
- return;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data1)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data1+@i@));
-/**end repeat2**/
- data1 += 8;
- }
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#elif EINSUM_USE_SSE2 && @float64@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data1)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data1+@i@));
-/**end repeat2**/
- data1 += 8;
- }
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#endif
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-#if EINSUM_USE_SSE1 && @float32@
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data1+@i@));
-/**end repeat2**/
-#elif EINSUM_USE_SSE2 && @float64@
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data1+@i@));
-/**end repeat2**/
-#else
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- accum += @from@(data1[@i@]);
-/**end repeat2**/
-#endif
- data1 += 8;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-#elif EINSUM_USE_SSE2 && @float64@
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-#endif
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
-}
-
-static void
-@name@_sum_of_products_contig_stride0_outstride0_two(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @type@ *data0 = (@type@ *)dataptr[0];
- @temptype@ value1 = @from@(*(@type@ *)dataptr[1]);
- @temptype@ accum = 0;
-
-#if EINSUM_USE_SSE1 && @float32@
- __m128 a, accum_sse = _mm_setzero_ps();
-#elif EINSUM_USE_SSE2 && @float64@
- __m128d a, accum_sse = _mm_setzero_pd();
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_stride0_outstride0_two (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
- accum += @from@(data0[@i@]);
-/**end repeat2**/
- case 0:
- *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum * value1);
- return;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data0+@i@));
-/**end repeat2**/
- data0 += 8;
- }
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#elif EINSUM_USE_SSE2 && @float64@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data0+@i@));
-/**end repeat2**/
- data0 += 8;
- }
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#endif
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-#if EINSUM_USE_SSE1 && @float32@
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data0+@i@));
-/**end repeat2**/
-#elif EINSUM_USE_SSE2 && @float64@
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data0+@i@));
-/**end repeat2**/
-#else
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- accum += @from@(data0[@i@]);
-/**end repeat2**/
-#endif
- data0 += 8;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-#elif EINSUM_USE_SSE2 && @float64@
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-#endif
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
-}
-
-#elif @nop@ == 3 && !@complex@
-
-static void
-@name@_sum_of_products_contig_three(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- @type@ *data0 = (@type@ *)dataptr[0];
- @type@ *data1 = (@type@ *)dataptr[1];
- @type@ *data2 = (@type@ *)dataptr[2];
- @type@ *data_out = (@type@ *)dataptr[3];
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- data_out[@i@] = @to@(@from@(data0[@i@]) *
- @from@(data1[@i@]) *
- @from@(data2[@i@]) +
- @from@(data_out[@i@]));
-/**end repeat2**/
- data0 += 8;
- data1 += 8;
- data2 += 8;
- data_out += 8;
- }
-
- /* Finish off the loop */
-
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- if (count-- == 0) {
- return;
- }
- data_out[@i@] = @to@(@from@(data0[@i@]) *
- @from@(data1[@i@]) *
- @from@(data2[@i@]) +
- @from@(data_out[@i@]));
-/**end repeat2**/
-}
-
-#else /* @nop@ > 3 || @complex@ */
-
-static void
-@name@_sum_of_products_contig_@noplabel@(int nop, char **dataptr,
- npy_intp const *NPY_UNUSED(strides), npy_intp count)
-{
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_@noplabel@ (%d)\n",
- (int)count);
-
- while (count--) {
-#if !@complex@
- @temptype@ temp = @from@(*(@type@ *)dataptr[0]);
- int i;
- for (i = 1; i < nop; ++i) {
- temp *= @from@(*(@type@ *)dataptr[i]);
- }
- *(@type@ *)dataptr[nop] = @to@(temp +
- @from@(*(@type@ *)dataptr[i]));
- for (i = 0; i <= nop; ++i) {
- dataptr[i] += sizeof(@type@);
- }
-#else /* complex */
-# if @nop@ <= 3
-# define _SUMPROD_NOP @nop@
-# else
-# define _SUMPROD_NOP nop
-# endif
- @temptype@ re, im, tmp;
- int i;
- re = ((@temptype@ *)dataptr[0])[0];
- im = ((@temptype@ *)dataptr[0])[1];
- for (i = 1; i < _SUMPROD_NOP; ++i) {
- tmp = re * ((@temptype@ *)dataptr[i])[0] -
- im * ((@temptype@ *)dataptr[i])[1];
- im = re * ((@temptype@ *)dataptr[i])[1] +
- im * ((@temptype@ *)dataptr[i])[0];
- re = tmp;
- }
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[0] = re +
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[0];
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[1] = im +
- ((@temptype@ *)dataptr[_SUMPROD_NOP])[1];
-
- for (i = 0; i <= _SUMPROD_NOP; ++i) {
- dataptr[i] += sizeof(@type@);
- }
-# undef _SUMPROD_NOP
-#endif
- }
-}
-
-#endif /* functions for various @nop@ */
-
-#if @nop@ == 1
-
-static void
-@name@_sum_of_products_contig_outstride0_one(int nop, char **dataptr,
- npy_intp const *strides, npy_intp count)
-{
-#if @complex@
- @temptype@ accum_re = 0, accum_im = 0;
- @temptype@ *data0 = (@temptype@ *)dataptr[0];
-#else
- @temptype@ accum = 0;
- @type@ *data0 = (@type@ *)dataptr[0];
-#endif
-
-#if EINSUM_USE_SSE1 && @float32@
- __m128 a, accum_sse = _mm_setzero_ps();
-#elif EINSUM_USE_SSE2 && @float64@
- __m128d a, accum_sse = _mm_setzero_pd();
-#endif
-
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_outstride0_one (%d)\n",
- (int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
-#if !@complex@
- accum += @from@(data0[@i@]);
-#else /* complex */
- accum_re += data0[2*@i@+0];
- accum_im += data0[2*@i@+1];
-#endif
-/**end repeat2**/
- case 0:
-#if @complex@
- ((@temptype@ *)dataptr[1])[0] += accum_re;
- ((@temptype@ *)dataptr[1])[1] += accum_im;
-#else
- *((@type@ *)dataptr[1]) = @to@(accum +
- @from@(*((@type@ *)dataptr[1])));
-#endif
- return;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data0+@i@));
-/**end repeat2**/
- data0 += 8;
- }
-
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#elif EINSUM_USE_SSE2 && @float64@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data0+@i@));
-/**end repeat2**/
- data0 += 8;
- }
-
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
- }
-#endif
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-#if EINSUM_USE_SSE1 && @float32@
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data0+@i@));
-/**end repeat2**/
-#elif EINSUM_USE_SSE2 && @float64@
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data0+@i@));
-/**end repeat2**/
-#else
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
-# if !@complex@
- accum += @from@(data0[@i@]);
-# else /* complex */
- accum_re += data0[2*@i@+0];
- accum_im += data0[2*@i@+1];
-# endif
-/**end repeat2**/
-#endif
-
-#if !@complex@
- data0 += 8;
-#else
- data0 += 8*2;
-#endif
- }
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-#elif EINSUM_USE_SSE2 && @float64@
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-#endif
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
-}
-
-#endif /* @nop@ == 1 */
-
-static void
-@name@_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr,
- npy_intp const *strides, npy_intp count)
-{
-#if @complex@
- @temptype@ accum_re = 0, accum_im = 0;
-#else
- @temptype@ accum = 0;
-#endif
-
-#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@)
- char *data0 = dataptr[0];
- npy_intp stride0 = strides[0];
-#endif
-#if (@nop@ == 2 || @nop@ == 3) && !@complex@
- char *data1 = dataptr[1];
- npy_intp stride1 = strides[1];
-#endif
-#if (@nop@ == 3) && !@complex@
- char *data2 = dataptr[2];
- npy_intp stride2 = strides[2];
-#endif
-
- NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_outstride0_@noplabel@ (%d)\n",
- (int)count);
-
- while (count--) {
-#if !@complex@
-# if @nop@ == 1
- accum += @from@(*(@type@ *)data0);
- data0 += stride0;
-# elif @nop@ == 2
- accum += @from@(*(@type@ *)data0) *
- @from@(*(@type@ *)data1);
- data0 += stride0;
- data1 += stride1;
-# elif @nop@ == 3
- accum += @from@(*(@type@ *)data0) *
- @from@(*(@type@ *)data1) *
- @from@(*(@type@ *)data2);
- data0 += stride0;
- data1 += stride1;
- data2 += stride2;
-# else
- @temptype@ temp = @from@(*(@type@ *)dataptr[0]);
- int i;
- for (i = 1; i < nop; ++i) {
- temp *= @from@(*(@type@ *)dataptr[i]);
- }
- accum += temp;
- for (i = 0; i < nop; ++i) {
- dataptr[i] += strides[i];
- }
-# endif
-#else /* complex */
-# if @nop@ == 1
- accum_re += ((@temptype@ *)data0)[0];
- accum_im += ((@temptype@ *)data0)[1];
- data0 += stride0;
-# else
-# if @nop@ <= 3
-#define _SUMPROD_NOP @nop@
-# else
-#define _SUMPROD_NOP nop
-# endif
- @temptype@ re, im, tmp;
- int i;
- re = ((@temptype@ *)dataptr[0])[0];
- im = ((@temptype@ *)dataptr[0])[1];
- for (i = 1; i < _SUMPROD_NOP; ++i) {
- tmp = re * ((@temptype@ *)dataptr[i])[0] -
- im * ((@temptype@ *)dataptr[i])[1];
- im = re * ((@temptype@ *)dataptr[i])[1] +
- im * ((@temptype@ *)dataptr[i])[0];
- re = tmp;
- }
- accum_re += re;
- accum_im += im;
- for (i = 0; i < _SUMPROD_NOP; ++i) {
- dataptr[i] += strides[i];
- }
-#undef _SUMPROD_NOP
-# endif
-#endif
- }
-
-#if @complex@
-# if @nop@ <= 3
- ((@temptype@ *)dataptr[@nop@])[0] += accum_re;
- ((@temptype@ *)dataptr[@nop@])[1] += accum_im;
-# else
- ((@temptype@ *)dataptr[nop])[0] += accum_re;
- ((@temptype@ *)dataptr[nop])[1] += accum_im;
-# endif
-#else
-# if @nop@ <= 3
- *((@type@ *)dataptr[@nop@]) = @to@(accum +
- @from@(*((@type@ *)dataptr[@nop@])));
-# else
- *((@type@ *)dataptr[nop]) = @to@(accum +
- @from@(*((@type@ *)dataptr[nop])));
-# endif
-#endif
-
-}
-
-/**end repeat1**/
-
-/**end repeat**/
-
-
-/* Do OR of ANDs for the boolean type */
-
-/**begin repeat
- * #nop = 1, 2, 3, 1000#
- * #noplabel = one, two, three, any#
- */
-
-static void
-bool_sum_of_products_@noplabel@(int nop, char **dataptr,
- npy_intp const *strides, npy_intp count)
-{
-#if (@nop@ <= 3)
- char *data0 = dataptr[0];
- npy_intp stride0 = strides[0];
-#endif
-#if (@nop@ == 2 || @nop@ == 3)
- char *data1 = dataptr[1];
- npy_intp stride1 = strides[1];
-#endif
-#if (@nop@ == 3)
- char *data2 = dataptr[2];
- npy_intp stride2 = strides[2];
-#endif
-#if (@nop@ <= 3)
- char *data_out = dataptr[@nop@];
- npy_intp stride_out = strides[@nop@];
-#endif
-
- while (count--) {
-#if @nop@ == 1
- *(npy_bool *)data_out = *(npy_bool *)data0 ||
- *(npy_bool *)data_out;
- data0 += stride0;
- data_out += stride_out;
-#elif @nop@ == 2
- *(npy_bool *)data_out = (*(npy_bool *)data0 &&
- *(npy_bool *)data1) ||
- *(npy_bool *)data_out;
- data0 += stride0;
- data1 += stride1;
- data_out += stride_out;
-#elif @nop@ == 3
- *(npy_bool *)data_out = (*(npy_bool *)data0 &&
- *(npy_bool *)data1 &&
- *(npy_bool *)data2) ||
- *(npy_bool *)data_out;
- data0 += stride0;
- data1 += stride1;
- data2 += stride2;
- data_out += stride_out;
-#else
- npy_bool temp = *(npy_bool *)dataptr[0];
- int i;
- for (i = 1; i < nop; ++i) {
- temp = temp && *(npy_bool *)dataptr[i];
- }
- *(npy_bool *)dataptr[nop] = temp || *(npy_bool *)dataptr[i];
- for (i = 0; i <= nop; ++i) {
- dataptr[i] += strides[i];
- }
-#endif
- }
-}
-
-static void
-bool_sum_of_products_contig_@noplabel@(int nop, char **dataptr,
- npy_intp const *strides, npy_intp count)
-{
-#if (@nop@ <= 3)
- char *data0 = dataptr[0];
-#endif
-#if (@nop@ == 2 || @nop@ == 3)
- char *data1 = dataptr[1];
-#endif
-#if (@nop@ == 3)
- char *data2 = dataptr[2];
-#endif
-#if (@nop@ <= 3)
- char *data_out = dataptr[@nop@];
-#endif
-
-#if (@nop@ <= 3)
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat1
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
-# if @nop@ == 1
- ((npy_bool *)data_out)[@i@] = ((npy_bool *)data0)[@i@] ||
- ((npy_bool *)data_out)[@i@];
-# elif @nop@ == 2
- ((npy_bool *)data_out)[@i@] =
- (((npy_bool *)data0)[@i@] &&
- ((npy_bool *)data1)[@i@]) ||
- ((npy_bool *)data_out)[@i@];
-# elif @nop@ == 3
- ((npy_bool *)data_out)[@i@] =
- (((npy_bool *)data0)[@i@] &&
- ((npy_bool *)data1)[@i@] &&
- ((npy_bool *)data2)[@i@]) ||
- ((npy_bool *)data_out)[@i@];
-# endif
-/**end repeat1**/
- case 0:
- return;
- }
-#endif
-
-/* Unroll the loop by 8 for fixed-size nop */
-#if (@nop@ <= 3)
- while (count >= 8) {
- count -= 8;
-#else
- while (count--) {
-#endif
-
-# if @nop@ == 1
-/**begin repeat1
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- *((npy_bool *)data_out + @i@) = (*((npy_bool *)data0 + @i@)) ||
- (*((npy_bool *)data_out + @i@));
-/**end repeat1**/
- data0 += 8*sizeof(npy_bool);
- data_out += 8*sizeof(npy_bool);
-# elif @nop@ == 2
-/**begin repeat1
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- *((npy_bool *)data_out + @i@) =
- ((*((npy_bool *)data0 + @i@)) &&
- (*((npy_bool *)data1 + @i@))) ||
- (*((npy_bool *)data_out + @i@));
-/**end repeat1**/
- data0 += 8*sizeof(npy_bool);
- data1 += 8*sizeof(npy_bool);
- data_out += 8*sizeof(npy_bool);
-# elif @nop@ == 3
-/**begin repeat1
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- *((npy_bool *)data_out + @i@) =
- ((*((npy_bool *)data0 + @i@)) &&
- (*((npy_bool *)data1 + @i@)) &&
- (*((npy_bool *)data2 + @i@))) ||
- (*((npy_bool *)data_out + @i@));
-/**end repeat1**/
- data0 += 8*sizeof(npy_bool);
- data1 += 8*sizeof(npy_bool);
- data2 += 8*sizeof(npy_bool);
- data_out += 8*sizeof(npy_bool);
-# else
- npy_bool temp = *(npy_bool *)dataptr[0];
- int i;
- for (i = 1; i < nop; ++i) {
- temp = temp && *(npy_bool *)dataptr[i];
- }
- *(npy_bool *)dataptr[nop] = temp || *(npy_bool *)dataptr[i];
- for (i = 0; i <= nop; ++i) {
- dataptr[i] += sizeof(npy_bool);
- }
-# endif
- }
-
- /* If the loop was unrolled, we need to finish it off */
-#if (@nop@ <= 3)
- goto finish_after_unrolled_loop;
-#endif
-}
-
-static void
-bool_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr,
- npy_intp const *strides, npy_intp count)
-{
- npy_bool accum = 0;
-
-#if (@nop@ <= 3)
- char *data0 = dataptr[0];
- npy_intp stride0 = strides[0];
-#endif
-#if (@nop@ == 2 || @nop@ == 3)
- char *data1 = dataptr[1];
- npy_intp stride1 = strides[1];
-#endif
-#if (@nop@ == 3)
- char *data2 = dataptr[2];
- npy_intp stride2 = strides[2];
-#endif
-
- while (count--) {
-#if @nop@ == 1
- accum = *(npy_bool *)data0 || accum;
- data0 += stride0;
-#elif @nop@ == 2
- accum = (*(npy_bool *)data0 && *(npy_bool *)data1) || accum;
- data0 += stride0;
- data1 += stride1;
-#elif @nop@ == 3
- accum = (*(npy_bool *)data0 &&
- *(npy_bool *)data1 &&
- *(npy_bool *)data2) || accum;
- data0 += stride0;
- data1 += stride1;
- data2 += stride2;
-#else
- npy_bool temp = *(npy_bool *)dataptr[0];
- int i;
- for (i = 1; i < nop; ++i) {
- temp = temp && *(npy_bool *)dataptr[i];
- }
- accum = temp || accum;
- for (i = 0; i <= nop; ++i) {
- dataptr[i] += strides[i];
- }
-#endif
- }
-
-# if @nop@ <= 3
- *((npy_bool *)dataptr[@nop@]) = accum || *((npy_bool *)dataptr[@nop@]);
-# else
- *((npy_bool *)dataptr[nop]) = accum || *((npy_bool *)dataptr[nop]);
-# endif
-}
-
-/**end repeat**/
-
-typedef void (*sum_of_products_fn)(int, char **, npy_intp const*, npy_intp);
-
-/* These tables need to match up with the type enum */
-static sum_of_products_fn
-_contig_outstride0_unary_specialization_table[NPY_NTYPES] = {
-/**begin repeat
- * #name = bool,
- * byte, ubyte,
- * short, ushort,
- * int, uint,
- * long, ulong,
- * longlong, ulonglong,
- * float, double, longdouble,
- * cfloat, cdouble, clongdouble,
- * object, string, unicode, void,
- * datetime, timedelta, half#
- * #use = 0,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1, 1,
- * 1, 1, 1,
- * 0, 0, 0, 0,
- * 0, 0, 1#
- */
-#if @use@
- &@name@_sum_of_products_contig_outstride0_one,
-#else
- NULL,
-#endif
-/**end repeat**/
-}; /* End of _contig_outstride0_unary_specialization_table */
-
-static sum_of_products_fn _binary_specialization_table[NPY_NTYPES][5] = {
-/**begin repeat
- * #name = bool,
- * byte, ubyte,
- * short, ushort,
- * int, uint,
- * long, ulong,
- * longlong, ulonglong,
- * float, double, longdouble,
- * cfloat, cdouble, clongdouble,
- * object, string, unicode, void,
- * datetime, timedelta, half#
- * #use = 0,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1, 1,
- * 0, 0, 0,
- * 0, 0, 0, 0,
- * 0, 0, 1#
- */
-#if @use@
-{
- &@name@_sum_of_products_stride0_contig_outstride0_two,
- &@name@_sum_of_products_stride0_contig_outcontig_two,
- &@name@_sum_of_products_contig_stride0_outstride0_two,
- &@name@_sum_of_products_contig_stride0_outcontig_two,
- &@name@_sum_of_products_contig_contig_outstride0_two,
-},
-#else
- {NULL, NULL, NULL, NULL, NULL},
-#endif
-/**end repeat**/
-}; /* End of _binary_specialization_table */
-
-static sum_of_products_fn _outstride0_specialized_table[NPY_NTYPES][4] = {
-/**begin repeat
- * #name = bool,
- * byte, ubyte,
- * short, ushort,
- * int, uint,
- * long, ulong,
- * longlong, ulonglong,
- * float, double, longdouble,
- * cfloat, cdouble, clongdouble,
- * object, string, unicode, void,
- * datetime, timedelta, half#
- * #use = 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1, 1,
- * 1, 1, 1,
- * 0, 0, 0, 0,
- * 0, 0, 1#
- */
-#if @use@
-{
- &@name@_sum_of_products_outstride0_any,
- &@name@_sum_of_products_outstride0_one,
- &@name@_sum_of_products_outstride0_two,
- &@name@_sum_of_products_outstride0_three
-},
-#else
- {NULL, NULL, NULL, NULL},
-#endif
-/**end repeat**/
-}; /* End of _outstride0_specialized_table */
-
-static sum_of_products_fn _allcontig_specialized_table[NPY_NTYPES][4] = {
-/**begin repeat
- * #name = bool,
- * byte, ubyte,
- * short, ushort,
- * int, uint,
- * long, ulong,
- * longlong, ulonglong,
- * float, double, longdouble,
- * cfloat, cdouble, clongdouble,
- * object, string, unicode, void,
- * datetime, timedelta, half#
- * #use = 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1, 1,
- * 1, 1, 1,
- * 0, 0, 0, 0,
- * 0, 0, 1#
- */
-#if @use@
-{
- &@name@_sum_of_products_contig_any,
- &@name@_sum_of_products_contig_one,
- &@name@_sum_of_products_contig_two,
- &@name@_sum_of_products_contig_three
-},
-#else
- {NULL, NULL, NULL, NULL},
-#endif
-/**end repeat**/
-}; /* End of _allcontig_specialized_table */
-
-static sum_of_products_fn _unspecialized_table[NPY_NTYPES][4] = {
-/**begin repeat
- * #name = bool,
- * byte, ubyte,
- * short, ushort,
- * int, uint,
- * long, ulong,
- * longlong, ulonglong,
- * float, double, longdouble,
- * cfloat, cdouble, clongdouble,
- * object, string, unicode, void,
- * datetime, timedelta, half#
- * #use = 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1,
- * 1, 1, 1,
- * 1, 1, 1,
- * 0, 0, 0, 0,
- * 0, 0, 1#
- */
-#if @use@
-{
- &@name@_sum_of_products_any,
- &@name@_sum_of_products_one,
- &@name@_sum_of_products_two,
- &@name@_sum_of_products_three
-},
-#else
- {NULL, NULL, NULL, NULL},
-#endif
-/**end repeat**/
-}; /* End of _unspecialized_table */
-
-static sum_of_products_fn
-get_sum_of_products_function(int nop, int type_num,
- npy_intp itemsize, npy_intp const *fixed_strides)
-{
- int iop;
-
- if (type_num >= NPY_NTYPES) {
- return NULL;
- }
-
- /* contiguous reduction */
- if (nop == 1 && fixed_strides[0] == itemsize && fixed_strides[1] == 0) {
- sum_of_products_fn ret =
- _contig_outstride0_unary_specialization_table[type_num];
- if (ret != NULL) {
- return ret;
- }
- }
-
- /* nop of 2 has more specializations */
- if (nop == 2) {
- /* Encode the zero/contiguous strides */
- int code;
- code = (fixed_strides[0] == 0) ? 0 :
- (fixed_strides[0] == itemsize) ? 2*2*1 : 8;
- code += (fixed_strides[1] == 0) ? 0 :
- (fixed_strides[1] == itemsize) ? 2*1 : 8;
- code += (fixed_strides[2] == 0) ? 0 :
- (fixed_strides[2] == itemsize) ? 1 : 8;
- if (code >= 2 && code < 7) {
- sum_of_products_fn ret =
- _binary_specialization_table[type_num][code-2];
- if (ret != NULL) {
- return ret;
- }
- }
- }
-
- /* Inner loop with an output stride of 0 */
- if (fixed_strides[nop] == 0) {
- return _outstride0_specialized_table[type_num][nop <= 3 ? nop : 0];
- }
-
- /* Check for all contiguous */
- for (iop = 0; iop < nop + 1; ++iop) {
- if (fixed_strides[iop] != itemsize) {
- break;
- }
- }
-
- /* Contiguous loop */
- if (iop == nop + 1) {
- return _allcontig_specialized_table[type_num][nop <= 3 ? nop : 0];
- }
-
- /* None of the above specializations caught it, general loops */
- return _unspecialized_table[type_num][nop <= 3 ? nop : 0];
-}
+#include "einsum_sumprod.h"
+#include "einsum_debug.h"
/*
diff --git a/numpy/core/src/multiarray/einsum_debug.h b/numpy/core/src/multiarray/einsum_debug.h
new file mode 100644
index 000000000..9aa81fcbd
--- /dev/null
+++ b/numpy/core/src/multiarray/einsum_debug.h
@@ -0,0 +1,28 @@
+/*
+ * This file provides debug macros used by the other einsum files.
+ *
+ * Copyright (c) 2011 by Mark Wiebe (mwwiebe@gmail.com)
+ * The University of British Columbia
+ *
+ * See LICENSE.txt for the license.
+ */
+#ifndef _NPY_MULTIARRAY_EINSUM_DEBUG_H
+#define _NPY_MULTIARRAY_EINSUM_DEBUG_H
+
+/********** PRINTF DEBUG TRACING **************/
+#define NPY_EINSUM_DBG_TRACING 0
+
+#if NPY_EINSUM_DBG_TRACING
+#include <stdio.h>
+#define NPY_EINSUM_DBG_PRINT(s) printf("%s", s);
+#define NPY_EINSUM_DBG_PRINT1(s, p1) printf(s, p1);
+#define NPY_EINSUM_DBG_PRINT2(s, p1, p2) printf(s, p1, p2);
+#define NPY_EINSUM_DBG_PRINT3(s, p1, p2, p3) printf(s, p1, p2, p3);
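+/*
+ * The enabled macros take printf-style format strings, e.g.
+ * NPY_EINSUM_DBG_PRINT2("nop=%d count=%d\n", nop, (int)count);
+ */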
+#else
+#define NPY_EINSUM_DBG_PRINT(s)
+#define NPY_EINSUM_DBG_PRINT1(s, p1)
+#define NPY_EINSUM_DBG_PRINT2(s, p1, p2)
+#define NPY_EINSUM_DBG_PRINT3(s, p1, p2, p3)
+#endif
+
+#endif
diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src
new file mode 100644
index 000000000..c58e74287
--- /dev/null
+++ b/numpy/core/src/multiarray/einsum_sumprod.c.src
@@ -0,0 +1,1897 @@
+/*
+ * This file provides optimized sum-of-products implementations used internally
+ * by einsum.
+ *
+ * Copyright (c) 2011 by Mark Wiebe (mwwiebe@gmail.com)
+ * The University of British Columbia
+ *
+ * See LICENSE.txt for the license.
+ */
+
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _MULTIARRAYMODULE
+
+#include <numpy/npy_common.h>
+#include <numpy/ndarraytypes.h> /* for NPY_NTYPES */
+#include <numpy/halffloat.h>
+
+#include "einsum_sumprod.h"
+#include "einsum_debug.h"
+
+
+#ifdef NPY_HAVE_SSE_INTRINSICS
+#define EINSUM_USE_SSE1 1
+#else
+#define EINSUM_USE_SSE1 0
+#endif
+
+#ifdef NPY_HAVE_SSE2_INTRINSICS
+#define EINSUM_USE_SSE2 1
+#else
+#define EINSUM_USE_SSE2 0
+#endif
+
+#if EINSUM_USE_SSE1
+#include <xmmintrin.h>
+#endif
+
+#if EINSUM_USE_SSE2
+#include <emmintrin.h>
+#endif
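+
+/*
+ * SSE1 is only used for the float32 loops and SSE2 only for the float64
+ * loops below; every other type always takes the scalar fallback paths.
+ */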
+
+#define EINSUM_IS_SSE_ALIGNED(x) ((((npy_intp)x)&0xf) == 0)
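+/*
+ * A pointer is 16-byte aligned exactly when its low four address bits are
+ * zero, which is what the 0xf mask tests; the aligned loads and stores
+ * (_mm_load_ps, _mm_store_pd, ...) require this alignment.
+ */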
+
+/**********************************************/
+
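+/*
+ * The repeat block below stamps out one copy of each inner loop per type:
+ * e.g. for float it expands @name@ -> float, @type@ -> npy_float and
+ * @temptype@ -> npy_float with empty @to@/@from@, so @to@(@from@(x)) is
+ * just (x); only half has a real conversion pair
+ * (npy_half_to_float/npy_float_to_half) and accumulates in npy_float.
+ */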
+/**begin repeat
+ * #name = byte, short, int, long, longlong,
+ * ubyte, ushort, uint, ulong, ulonglong,
+ * half, float, double, longdouble,
+ * cfloat, cdouble, clongdouble#
+ * #type = npy_byte, npy_short, npy_int, npy_long, npy_longlong,
+ * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
+ * npy_half, npy_float, npy_double, npy_longdouble,
+ * npy_cfloat, npy_cdouble, npy_clongdouble#
+ * #temptype = npy_byte, npy_short, npy_int, npy_long, npy_longlong,
+ * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
+ * npy_float, npy_float, npy_double, npy_longdouble,
+ * npy_float, npy_double, npy_longdouble#
+ * #to = ,,,,,
+ * ,,,,,
+ * npy_float_to_half,,,,
+ * ,,#
+ * #from = ,,,,,
+ * ,,,,,
+ * npy_half_to_float,,,,
+ * ,,#
+ * #complex = 0*5,
+ * 0*5,
+ * 0*4,
+ * 1*3#
+ * #float32 = 0*5,
+ * 0*5,
+ * 0,1,0,0,
+ * 0*3#
+ * #float64 = 0*5,
+ * 0*5,
+ * 0,0,1,0,
+ * 0*3#
+ */
+
+/**begin repeat1
+ * #nop = 1, 2, 3, 1000#
+ * #noplabel = one, two, three, any#
+ */
+static void
+@name@_sum_of_products_@noplabel@(int nop, char **dataptr,
+ npy_intp const *strides, npy_intp count)
+{
+#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@)
+ char *data0 = dataptr[0];
+ npy_intp stride0 = strides[0];
+#endif
+#if (@nop@ == 2 || @nop@ == 3) && !@complex@
+ char *data1 = dataptr[1];
+ npy_intp stride1 = strides[1];
+#endif
+#if (@nop@ == 3) && !@complex@
+ char *data2 = dataptr[2];
+ npy_intp stride2 = strides[2];
+#endif
+#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@)
+ char *data_out = dataptr[@nop@];
+ npy_intp stride_out = strides[@nop@];
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_@noplabel@ (%d)\n", (int)count);
+
+ while (count--) {
+#if !@complex@
+# if @nop@ == 1
+ *(@type@ *)data_out = @to@(@from@(*(@type@ *)data0) +
+ @from@(*(@type@ *)data_out));
+ data0 += stride0;
+ data_out += stride_out;
+# elif @nop@ == 2
+ *(@type@ *)data_out = @to@(@from@(*(@type@ *)data0) *
+ @from@(*(@type@ *)data1) +
+ @from@(*(@type@ *)data_out));
+ data0 += stride0;
+ data1 += stride1;
+ data_out += stride_out;
+# elif @nop@ == 3
+ *(@type@ *)data_out = @to@(@from@(*(@type@ *)data0) *
+ @from@(*(@type@ *)data1) *
+ @from@(*(@type@ *)data2) +
+ @from@(*(@type@ *)data_out));
+ data0 += stride0;
+ data1 += stride1;
+ data2 += stride2;
+ data_out += stride_out;
+# else
+ @temptype@ temp = @from@(*(@type@ *)dataptr[0]);
+ int i;
+ for (i = 1; i < nop; ++i) {
+ temp *= @from@(*(@type@ *)dataptr[i]);
+ }
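+        /* After the loop i == nop, so dataptr[i] below is the output operand. */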
+ *(@type@ *)dataptr[nop] = @to@(temp +
+ @from@(*(@type@ *)dataptr[i]));
+ for (i = 0; i <= nop; ++i) {
+ dataptr[i] += strides[i];
+ }
+# endif
+#else /* complex */
+# if @nop@ == 1
+ ((@temptype@ *)data_out)[0] = ((@temptype@ *)data0)[0] +
+ ((@temptype@ *)data_out)[0];
+ ((@temptype@ *)data_out)[1] = ((@temptype@ *)data0)[1] +
+ ((@temptype@ *)data_out)[1];
+ data0 += stride0;
+ data_out += stride_out;
+# else
+# if @nop@ <= 3
+#define _SUMPROD_NOP @nop@
+# else
+#define _SUMPROD_NOP nop
+# endif
+ @temptype@ re, im, tmp;
+ int i;
+ re = ((@temptype@ *)dataptr[0])[0];
+ im = ((@temptype@ *)dataptr[0])[1];
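+        /*
+         * Accumulate the complex product across the operands using
+         * (re + im*i)*(c + d*i) = (re*c - im*d) + (re*d + im*c)*i.
+         */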
+ for (i = 1; i < _SUMPROD_NOP; ++i) {
+ tmp = re * ((@temptype@ *)dataptr[i])[0] -
+ im * ((@temptype@ *)dataptr[i])[1];
+ im = re * ((@temptype@ *)dataptr[i])[1] +
+ im * ((@temptype@ *)dataptr[i])[0];
+ re = tmp;
+ }
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[0] = re +
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[0];
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[1] = im +
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[1];
+
+ for (i = 0; i <= _SUMPROD_NOP; ++i) {
+ dataptr[i] += strides[i];
+ }
+#undef _SUMPROD_NOP
+# endif
+#endif
+ }
+}
+
+#if @nop@ == 1
+
+static void
+@name@_sum_of_products_contig_one(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @type@ *data0 = (@type@ *)dataptr[0];
+ @type@ *data_out = (@type@ *)dataptr[1];
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_one (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+#if !@complex@
+ data_out[@i@] = @to@(@from@(data0[@i@]) +
+ @from@(data_out[@i@]));
+#else
+ ((@temptype@ *)data_out + 2*@i@)[0] =
+ ((@temptype@ *)data0 + 2*@i@)[0] +
+ ((@temptype@ *)data_out + 2*@i@)[0];
+ ((@temptype@ *)data_out + 2*@i@)[1] =
+ ((@temptype@ *)data0 + 2*@i@)[1] +
+ ((@temptype@ *)data_out + 2*@i@)[1];
+#endif
+/**end repeat2**/
+ case 0:
+ return;
+ }
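+    /*
+     * The switch above falls through from case 7 down to case 0, so a
+     * remainder count of k (< 8) handles elements k-1, ..., 0 and then
+     * returns; the unrolled loop below jumps back up to it to finish.
+     */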
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+#if !@complex@
+ data_out[@i@] = @to@(@from@(data0[@i@]) +
+ @from@(data_out[@i@]));
+#else /* complex */
+ ((@temptype@ *)data_out + 2*@i@)[0] =
+ ((@temptype@ *)data0 + 2*@i@)[0] +
+ ((@temptype@ *)data_out + 2*@i@)[0];
+ ((@temptype@ *)data_out + 2*@i@)[1] =
+ ((@temptype@ *)data0 + 2*@i@)[1] +
+ ((@temptype@ *)data_out + 2*@i@)[1];
+#endif
+/**end repeat2**/
+ data0 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+}
+
+#elif @nop@ == 2 && !@complex@
+
+static void
+@name@_sum_of_products_contig_two(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @type@ *data0 = (@type@ *)dataptr[0];
+ @type@ *data1 = (@type@ *)dataptr[1];
+ @type@ *data_out = (@type@ *)dataptr[2];
+
+#if EINSUM_USE_SSE1 && @float32@
+ __m128 a, b;
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, b;
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_two (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+ data_out[@i@] = @to@(@from@(data0[@i@]) *
+ @from@(data1[@i@]) +
+ @from@(data_out[@i@]));
+/**end repeat2**/
+ case 0:
+ return;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1) &&
+ EINSUM_IS_SSE_ALIGNED(data_out)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ a = _mm_mul_ps(_mm_load_ps(data0+@i@), _mm_load_ps(data1+@i@));
+ b = _mm_add_ps(a, _mm_load_ps(data_out+@i@));
+ _mm_store_ps(data_out+@i@, b);
+/**end repeat2**/
+ data0 += 8;
+ data1 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1) &&
+ EINSUM_IS_SSE_ALIGNED(data_out)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(_mm_load_pd(data0+@i@), _mm_load_pd(data1+@i@));
+ b = _mm_add_pd(a, _mm_load_pd(data_out+@i@));
+ _mm_store_pd(data_out+@i@, b);
+/**end repeat2**/
+ data0 += 8;
+ data1 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#endif
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+#if EINSUM_USE_SSE1 && @float32@
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), _mm_loadu_ps(data1+@i@));
+ b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@));
+ _mm_storeu_ps(data_out+@i@, b);
+/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), _mm_loadu_pd(data1+@i@));
+ b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@));
+ _mm_storeu_pd(data_out+@i@, b);
+/**end repeat2**/
+#else
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ data_out[@i@] = @to@(@from@(data0[@i@]) *
+ @from@(data1[@i@]) +
+ @from@(data_out[@i@]));
+/**end repeat2**/
+#endif
+ data0 += 8;
+ data1 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+}
+
+/* Some extra specializations for the two operand case */
+static void
+@name@_sum_of_products_stride0_contig_outcontig_two(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @temptype@ value0 = @from@(*(@type@ *)dataptr[0]);
+ @type@ *data1 = (@type@ *)dataptr[1];
+ @type@ *data_out = (@type@ *)dataptr[2];
+
+#if EINSUM_USE_SSE1 && @float32@
+ __m128 a, b, value0_sse;
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, b, value0_sse;
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_stride0_contig_outcontig_two (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+ data_out[@i@] = @to@(value0 *
+ @from@(data1[@i@]) +
+ @from@(data_out[@i@]));
+/**end repeat2**/
+ case 0:
+ return;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ value0_sse = _mm_set_ps1(value0);
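+    /* Broadcast the stride-0 operand into all four lanes once, up front. */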
+
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data1) && EINSUM_IS_SSE_ALIGNED(data_out)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ a = _mm_mul_ps(value0_sse, _mm_load_ps(data1+@i@));
+ b = _mm_add_ps(a, _mm_load_ps(data_out+@i@));
+ _mm_store_ps(data_out+@i@, b);
+/**end repeat2**/
+ data1 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ if (count > 0) {
+ goto finish_after_unrolled_loop;
+ }
+ else {
+ return;
+ }
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ value0_sse = _mm_set1_pd(value0);
+
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data1) && EINSUM_IS_SSE_ALIGNED(data_out)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(value0_sse, _mm_load_pd(data1+@i@));
+ b = _mm_add_pd(a, _mm_load_pd(data_out+@i@));
+ _mm_store_pd(data_out+@i@, b);
+/**end repeat2**/
+ data1 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ if (count > 0) {
+ goto finish_after_unrolled_loop;
+ }
+ else {
+ return;
+ }
+ }
+#endif
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+#if EINSUM_USE_SSE1 && @float32@
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ a = _mm_mul_ps(value0_sse, _mm_loadu_ps(data1+@i@));
+ b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@));
+ _mm_storeu_ps(data_out+@i@, b);
+/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(value0_sse, _mm_loadu_pd(data1+@i@));
+ b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@));
+ _mm_storeu_pd(data_out+@i@, b);
+/**end repeat2**/
+#else
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ data_out[@i@] = @to@(value0 *
+ @from@(data1[@i@]) +
+ @from@(data_out[@i@]));
+/**end repeat2**/
+#endif
+ data1 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ if (count > 0) {
+ goto finish_after_unrolled_loop;
+ }
+}
+
+static void
+@name@_sum_of_products_contig_stride0_outcontig_two(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @type@ *data0 = (@type@ *)dataptr[0];
+ @temptype@ value1 = @from@(*(@type@ *)dataptr[1]);
+ @type@ *data_out = (@type@ *)dataptr[2];
+
+#if EINSUM_USE_SSE1 && @float32@
+ __m128 a, b, value1_sse;
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, b, value1_sse;
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_stride0_outcontig_two (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+        data_out[@i@] = @to@(@from@(data0[@i@]) *
+ value1 +
+ @from@(data_out[@i@]));
+/**end repeat2**/
+ case 0:
+ return;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ value1_sse = _mm_set_ps1(value1);
+
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data_out)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ a = _mm_mul_ps(_mm_load_ps(data0+@i@), value1_sse);
+ b = _mm_add_ps(a, _mm_load_ps(data_out+@i@));
+ _mm_store_ps(data_out+@i@, b);
+/**end repeat2**/
+ data0 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ value1_sse = _mm_set1_pd(value1);
+
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data_out)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(_mm_load_pd(data0+@i@), value1_sse);
+ b = _mm_add_pd(a, _mm_load_pd(data_out+@i@));
+ _mm_store_pd(data_out+@i@, b);
+/**end repeat2**/
+ data0 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#endif
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+#if EINSUM_USE_SSE1 && @float32@
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), value1_sse);
+ b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@));
+ _mm_storeu_ps(data_out+@i@, b);
+/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), value1_sse);
+ b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@));
+ _mm_storeu_pd(data_out+@i@, b);
+/**end repeat2**/
+#else
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+        data_out[@i@] = @to@(@from@(data0[@i@]) *
+ value1 +
+ @from@(data_out[@i@]));
+/**end repeat2**/
+#endif
+ data0 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+}
+
+static void
+@name@_sum_of_products_contig_contig_outstride0_two(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @type@ *data0 = (@type@ *)dataptr[0];
+ @type@ *data1 = (@type@ *)dataptr[1];
+ @temptype@ accum = 0;
+
+#if EINSUM_USE_SSE1 && @float32@
+ __m128 a, accum_sse = _mm_setzero_ps();
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, accum_sse = _mm_setzero_pd();
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_contig_outstride0_two (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+ accum += @from@(data0[@i@]) * @from@(data1[@i@]);
+/**end repeat2**/
+ case 0:
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum);
+ return;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+ _mm_prefetch(data1 + 512, _MM_HINT_T0);
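+            /*
+             * Prefetch 512 elements ahead; _MM_HINT_T0 pulls the data
+             * into every cache level before the streaming loads need it.
+             */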
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ a = _mm_mul_ps(_mm_load_ps(data0+@i@), _mm_load_ps(data1+@i@));
+ accum_sse = _mm_add_ps(accum_sse, a);
+/**end repeat2**/
+ data0 += 8;
+ data1 += 8;
+ }
+
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
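+        /*
+         * The shuffle/add pairs above are a horizontal reduction: the
+         * first add combines lanes (0,1) and (2,3), the second folds the
+         * two pairwise sums together, so every lane holds the total and
+         * the low lane is stored into accum.
+         */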
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+ _mm_prefetch(data1 + 512, _MM_HINT_T0);
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ a = _mm_mul_pd(_mm_load_pd(data0+@i@), _mm_load_pd(data1+@i@));
+ accum_sse = _mm_add_pd(accum_sse, a);
+/**end repeat2**/
+ data0 += 8;
+ data1 += 8;
+ }
+
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#endif
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+#if EINSUM_USE_SSE1 && @float32@
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+ _mm_prefetch(data1 + 512, _MM_HINT_T0);
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), _mm_loadu_ps(data1+@i@));
+ accum_sse = _mm_add_ps(accum_sse, a);
+/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+ _mm_prefetch(data1 + 512, _MM_HINT_T0);
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), _mm_loadu_pd(data1+@i@));
+ accum_sse = _mm_add_pd(accum_sse, a);
+/**end repeat2**/
+#else
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ accum += @from@(data0[@i@]) * @from@(data1[@i@]);
+/**end repeat2**/
+#endif
+ data0 += 8;
+ data1 += 8;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+#endif
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+}
+
+static void
+@name@_sum_of_products_stride0_contig_outstride0_two(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @temptype@ value0 = @from@(*(@type@ *)dataptr[0]);
+ @type@ *data1 = (@type@ *)dataptr[1];
+ @temptype@ accum = 0;
+
+#if EINSUM_USE_SSE1 && @float32@
+ __m128 a, accum_sse = _mm_setzero_ps();
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, accum_sse = _mm_setzero_pd();
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_stride0_contig_outstride0_two (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+ accum += @from@(data1[@i@]);
+/**end repeat2**/
+ case 0:
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + value0 * accum);
+ return;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data1)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data1+@i@));
+/**end repeat2**/
+ data1 += 8;
+ }
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data1)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data1+@i@));
+/**end repeat2**/
+ data1 += 8;
+ }
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#endif
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+#if EINSUM_USE_SSE1 && @float32@
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data1+@i@));
+/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data1+@i@));
+/**end repeat2**/
+#else
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ accum += @from@(data1[@i@]);
+/**end repeat2**/
+#endif
+ data1 += 8;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+#endif
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+}
+
+static void
+@name@_sum_of_products_contig_stride0_outstride0_two(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @type@ *data0 = (@type@ *)dataptr[0];
+ @temptype@ value1 = @from@(*(@type@ *)dataptr[1]);
+ @temptype@ accum = 0;
+
+#if EINSUM_USE_SSE1 && @float32@
+ __m128 a, accum_sse = _mm_setzero_ps();
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, accum_sse = _mm_setzero_pd();
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_stride0_outstride0_two (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+ accum += @from@(data0[@i@]);
+/**end repeat2**/
+ case 0:
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum * value1);
+ return;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data0+@i@));
+/**end repeat2**/
+ data0 += 8;
+ }
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data0+@i@));
+/**end repeat2**/
+ data0 += 8;
+ }
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#endif
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+#if EINSUM_USE_SSE1 && @float32@
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data0+@i@));
+/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data0+@i@));
+/**end repeat2**/
+#else
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ accum += @from@(data0[@i@]);
+/**end repeat2**/
+#endif
+ data0 += 8;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+#endif
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+}
+
+#elif @nop@ == 3 && !@complex@
+
+static void
+@name@_sum_of_products_contig_three(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ @type@ *data0 = (@type@ *)dataptr[0];
+ @type@ *data1 = (@type@ *)dataptr[1];
+ @type@ *data2 = (@type@ *)dataptr[2];
+ @type@ *data_out = (@type@ *)dataptr[3];
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ data_out[@i@] = @to@(@from@(data0[@i@]) *
+ @from@(data1[@i@]) *
+ @from@(data2[@i@]) +
+ @from@(data_out[@i@]));
+/**end repeat2**/
+ data0 += 8;
+ data1 += 8;
+ data2 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ if (count-- == 0) {
+ return;
+ }
+ data_out[@i@] = @to@(@from@(data0[@i@]) *
+ @from@(data1[@i@]) *
+ @from@(data2[@i@]) +
+ @from@(data_out[@i@]));
+/**end repeat2**/
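+    /*
+     * count is now < 8; each step above first checks whether any work
+     * remains, so exactly the leftover elements 0..count-1 are written.
+     */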
+}
+
+#else /* @nop@ > 3 || @complex@ */
+
+static void
+@name@_sum_of_products_contig_@noplabel@(int nop, char **dataptr,
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
+{
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_@noplabel@ (%d)\n",
+ (int)count);
+
+ while (count--) {
+#if !@complex@
+ @temptype@ temp = @from@(*(@type@ *)dataptr[0]);
+ int i;
+ for (i = 1; i < nop; ++i) {
+ temp *= @from@(*(@type@ *)dataptr[i]);
+ }
+ *(@type@ *)dataptr[nop] = @to@(temp +
+ @from@(*(@type@ *)dataptr[i]));
+ for (i = 0; i <= nop; ++i) {
+ dataptr[i] += sizeof(@type@);
+ }
+#else /* complex */
+# if @nop@ <= 3
+# define _SUMPROD_NOP @nop@
+# else
+# define _SUMPROD_NOP nop
+# endif
+ @temptype@ re, im, tmp;
+ int i;
+ re = ((@temptype@ *)dataptr[0])[0];
+ im = ((@temptype@ *)dataptr[0])[1];
+ for (i = 1; i < _SUMPROD_NOP; ++i) {
+ tmp = re * ((@temptype@ *)dataptr[i])[0] -
+ im * ((@temptype@ *)dataptr[i])[1];
+ im = re * ((@temptype@ *)dataptr[i])[1] +
+ im * ((@temptype@ *)dataptr[i])[0];
+ re = tmp;
+ }
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[0] = re +
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[0];
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[1] = im +
+ ((@temptype@ *)dataptr[_SUMPROD_NOP])[1];
+
+ for (i = 0; i <= _SUMPROD_NOP; ++i) {
+ dataptr[i] += sizeof(@type@);
+ }
+# undef _SUMPROD_NOP
+#endif
+ }
+}
+
+#endif /* functions for various @nop@ */
+
+#if @nop@ == 1
+
+static void
+@name@_sum_of_products_contig_outstride0_one(int nop, char **dataptr,
+ npy_intp const *strides, npy_intp count)
+{
+#if @complex@
+ @temptype@ accum_re = 0, accum_im = 0;
+ @temptype@ *data0 = (@temptype@ *)dataptr[0];
+#else
+ @temptype@ accum = 0;
+ @type@ *data0 = (@type@ *)dataptr[0];
+#endif
+
+#if EINSUM_USE_SSE1 && @float32@
+ __m128 a, accum_sse = _mm_setzero_ps();
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, accum_sse = _mm_setzero_pd();
+#endif
+
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_outstride0_one (%d)\n",
+ (int)count);
+
+/* This is placed before the main loop to make small counts faster */
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat2
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+#if !@complex@
+ accum += @from@(data0[@i@]);
+#else /* complex */
+ accum_re += data0[2*@i@+0];
+ accum_im += data0[2*@i@+1];
+#endif
+/**end repeat2**/
+ case 0:
+#if @complex@
+ ((@temptype@ *)dataptr[1])[0] += accum_re;
+ ((@temptype@ *)dataptr[1])[1] += accum_im;
+#else
+ *((@type@ *)dataptr[1]) = @to@(accum +
+ @from@(*((@type@ *)dataptr[1])));
+#endif
+ return;
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_ps(accum_sse, _mm_load_ps(data0+@i@));
+/**end repeat2**/
+ data0 += 8;
+ }
+
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data0+@i@));
+/**end repeat2**/
+ data0 += 8;
+ }
+
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#endif
+
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+#if EINSUM_USE_SSE1 && @float32@
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+
+/**begin repeat2
+ * #i = 0, 4#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data0+@i@));
+/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+ _mm_prefetch(data0 + 512, _MM_HINT_T0);
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data0+@i@));
+/**end repeat2**/
+#else
+/**begin repeat2
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+# if !@complex@
+ accum += @from@(data0[@i@]);
+# else /* complex */
+ accum_re += data0[2*@i@+0];
+ accum_im += data0[2*@i@+1];
+# endif
+/**end repeat2**/
+#endif
+
+#if !@complex@
+ data0 += 8;
+#else
+ data0 += 8*2;
+#endif
+ }
+
+#if EINSUM_USE_SSE1 && @float32@
+ /* Add the four SSE values and put in accum */
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
+ accum_sse = _mm_add_ps(a, accum_sse);
+ _mm_store_ss(&accum, accum_sse);
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
+#endif
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+}
+
+#endif /* @nop@ == 1 */
+
+static void
+@name@_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr,
+ npy_intp const *strides, npy_intp count)
+{
+#if @complex@
+ @temptype@ accum_re = 0, accum_im = 0;
+#else
+ @temptype@ accum = 0;
+#endif
+
+#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@)
+ char *data0 = dataptr[0];
+ npy_intp stride0 = strides[0];
+#endif
+#if (@nop@ == 2 || @nop@ == 3) && !@complex@
+ char *data1 = dataptr[1];
+ npy_intp stride1 = strides[1];
+#endif
+#if (@nop@ == 3) && !@complex@
+ char *data2 = dataptr[2];
+ npy_intp stride2 = strides[2];
+#endif
+
+ NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_outstride0_@noplabel@ (%d)\n",
+ (int)count);
+
+ while (count--) {
+#if !@complex@
+# if @nop@ == 1
+ accum += @from@(*(@type@ *)data0);
+ data0 += stride0;
+# elif @nop@ == 2
+ accum += @from@(*(@type@ *)data0) *
+ @from@(*(@type@ *)data1);
+ data0 += stride0;
+ data1 += stride1;
+# elif @nop@ == 3
+ accum += @from@(*(@type@ *)data0) *
+ @from@(*(@type@ *)data1) *
+ @from@(*(@type@ *)data2);
+ data0 += stride0;
+ data1 += stride1;
+ data2 += stride2;
+# else
+ @temptype@ temp = @from@(*(@type@ *)dataptr[0]);
+ int i;
+ for (i = 1; i < nop; ++i) {
+ temp *= @from@(*(@type@ *)dataptr[i]);
+ }
+ accum += temp;
+ for (i = 0; i < nop; ++i) {
+ dataptr[i] += strides[i];
+ }
+# endif
+#else /* complex */
+# if @nop@ == 1
+ accum_re += ((@temptype@ *)data0)[0];
+ accum_im += ((@temptype@ *)data0)[1];
+ data0 += stride0;
+# else
+# if @nop@ <= 3
+#define _SUMPROD_NOP @nop@
+# else
+#define _SUMPROD_NOP nop
+# endif
+ @temptype@ re, im, tmp;
+ int i;
+ re = ((@temptype@ *)dataptr[0])[0];
+ im = ((@temptype@ *)dataptr[0])[1];
+ for (i = 1; i < _SUMPROD_NOP; ++i) {
+ tmp = re * ((@temptype@ *)dataptr[i])[0] -
+ im * ((@temptype@ *)dataptr[i])[1];
+ im = re * ((@temptype@ *)dataptr[i])[1] +
+ im * ((@temptype@ *)dataptr[i])[0];
+ re = tmp;
+ }
+ accum_re += re;
+ accum_im += im;
+ for (i = 0; i < _SUMPROD_NOP; ++i) {
+ dataptr[i] += strides[i];
+ }
+#undef _SUMPROD_NOP
+# endif
+#endif
+ }
+
+#if @complex@
+# if @nop@ <= 3
+ ((@temptype@ *)dataptr[@nop@])[0] += accum_re;
+ ((@temptype@ *)dataptr[@nop@])[1] += accum_im;
+# else
+ ((@temptype@ *)dataptr[nop])[0] += accum_re;
+ ((@temptype@ *)dataptr[nop])[1] += accum_im;
+# endif
+#else
+# if @nop@ <= 3
+ *((@type@ *)dataptr[@nop@]) = @to@(accum +
+ @from@(*((@type@ *)dataptr[@nop@])));
+# else
+ *((@type@ *)dataptr[nop]) = @to@(accum +
+ @from@(*((@type@ *)dataptr[nop])));
+# endif
+#endif
+
+}
+
+/**end repeat1**/
+
+/**end repeat**/
+
+
+/* Do OR of ANDs for the boolean type */
+
+/**begin repeat
+ * #nop = 1, 2, 3, 1000#
+ * #noplabel = one, two, three, any#
+ */
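+/*
+ * For booleans einsum's "multiply" is logical AND and its "sum" is
+ * logical OR, so every inner loop computes out = (op0 && op1 && ...) || out.
+ */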
+
+static void
+bool_sum_of_products_@noplabel@(int nop, char **dataptr,
+ npy_intp const *strides, npy_intp count)
+{
+#if (@nop@ <= 3)
+ char *data0 = dataptr[0];
+ npy_intp stride0 = strides[0];
+#endif
+#if (@nop@ == 2 || @nop@ == 3)
+ char *data1 = dataptr[1];
+ npy_intp stride1 = strides[1];
+#endif
+#if (@nop@ == 3)
+ char *data2 = dataptr[2];
+ npy_intp stride2 = strides[2];
+#endif
+#if (@nop@ <= 3)
+ char *data_out = dataptr[@nop@];
+ npy_intp stride_out = strides[@nop@];
+#endif
+
+ while (count--) {
+#if @nop@ == 1
+ *(npy_bool *)data_out = *(npy_bool *)data0 ||
+ *(npy_bool *)data_out;
+ data0 += stride0;
+ data_out += stride_out;
+#elif @nop@ == 2
+ *(npy_bool *)data_out = (*(npy_bool *)data0 &&
+ *(npy_bool *)data1) ||
+ *(npy_bool *)data_out;
+ data0 += stride0;
+ data1 += stride1;
+ data_out += stride_out;
+#elif @nop@ == 3
+ *(npy_bool *)data_out = (*(npy_bool *)data0 &&
+ *(npy_bool *)data1 &&
+ *(npy_bool *)data2) ||
+ *(npy_bool *)data_out;
+ data0 += stride0;
+ data1 += stride1;
+ data2 += stride2;
+ data_out += stride_out;
+#else
+ npy_bool temp = *(npy_bool *)dataptr[0];
+ int i;
+ for (i = 1; i < nop; ++i) {
+ temp = temp && *(npy_bool *)dataptr[i];
+ }
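+        /* i == nop when the loop exits, so dataptr[i] is the output */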
+ *(npy_bool *)dataptr[nop] = temp || *(npy_bool *)dataptr[i];
+ for (i = 0; i <= nop; ++i) {
+ dataptr[i] += strides[i];
+ }
+#endif
+ }
+}
+
+static void
+bool_sum_of_products_contig_@noplabel@(int nop, char **dataptr,
+ npy_intp const *strides, npy_intp count)
+{
+#if (@nop@ <= 3)
+ char *data0 = dataptr[0];
+#endif
+#if (@nop@ == 2 || @nop@ == 3)
+ char *data1 = dataptr[1];
+#endif
+#if (@nop@ == 3)
+ char *data2 = dataptr[2];
+#endif
+#if (@nop@ <= 3)
+ char *data_out = dataptr[@nop@];
+#endif
+
+#if (@nop@ <= 3)
+/* This is placed before the main loop to make small counts faster */
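+/*
+ * The cases below intentionally fall through: entering the switch at
+ * `case count` handles elements count-1 down to 0 and then returns,
+ * which also finishes off the tail of the unrolled loop below.
+ */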
+finish_after_unrolled_loop:
+ switch (count) {
+/**begin repeat1
+ * #i = 6, 5, 4, 3, 2, 1, 0#
+ */
+ case @i@+1:
+# if @nop@ == 1
+ ((npy_bool *)data_out)[@i@] = ((npy_bool *)data0)[@i@] ||
+ ((npy_bool *)data_out)[@i@];
+# elif @nop@ == 2
+ ((npy_bool *)data_out)[@i@] =
+ (((npy_bool *)data0)[@i@] &&
+ ((npy_bool *)data1)[@i@]) ||
+ ((npy_bool *)data_out)[@i@];
+# elif @nop@ == 3
+ ((npy_bool *)data_out)[@i@] =
+ (((npy_bool *)data0)[@i@] &&
+ ((npy_bool *)data1)[@i@] &&
+ ((npy_bool *)data2)[@i@]) ||
+ ((npy_bool *)data_out)[@i@];
+# endif
+/**end repeat1**/
+ case 0:
+ return;
+ }
+#endif
+
+/* Unroll the loop by 8 for fixed-size nop */
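+/*
+ * For the fixed-size cases the loop body processes 8 booleans per
+ * iteration and jumps back to the switch above for the remainder;
+ * the general (@nop@ > 3) case handles one element per iteration.
+ */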
+#if (@nop@ <= 3)
+ while (count >= 8) {
+ count -= 8;
+#else
+ while (count--) {
+#endif
+
+# if @nop@ == 1
+/**begin repeat1
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ *((npy_bool *)data_out + @i@) = (*((npy_bool *)data0 + @i@)) ||
+ (*((npy_bool *)data_out + @i@));
+/**end repeat1**/
+ data0 += 8*sizeof(npy_bool);
+ data_out += 8*sizeof(npy_bool);
+# elif @nop@ == 2
+/**begin repeat1
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ *((npy_bool *)data_out + @i@) =
+ ((*((npy_bool *)data0 + @i@)) &&
+ (*((npy_bool *)data1 + @i@))) ||
+ (*((npy_bool *)data_out + @i@));
+/**end repeat1**/
+ data0 += 8*sizeof(npy_bool);
+ data1 += 8*sizeof(npy_bool);
+ data_out += 8*sizeof(npy_bool);
+# elif @nop@ == 3
+/**begin repeat1
+ * #i = 0, 1, 2, 3, 4, 5, 6, 7#
+ */
+ *((npy_bool *)data_out + @i@) =
+ ((*((npy_bool *)data0 + @i@)) &&
+ (*((npy_bool *)data1 + @i@)) &&
+ (*((npy_bool *)data2 + @i@))) ||
+ (*((npy_bool *)data_out + @i@));
+/**end repeat1**/
+ data0 += 8*sizeof(npy_bool);
+ data1 += 8*sizeof(npy_bool);
+ data2 += 8*sizeof(npy_bool);
+ data_out += 8*sizeof(npy_bool);
+# else
+ npy_bool temp = *(npy_bool *)dataptr[0];
+ int i;
+ for (i = 1; i < nop; ++i) {
+ temp = temp && *(npy_bool *)dataptr[i];
+ }
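+        /* as above, i == nop here, so dataptr[i] is the output */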
+ *(npy_bool *)dataptr[nop] = temp || *(npy_bool *)dataptr[i];
+ for (i = 0; i <= nop; ++i) {
+ dataptr[i] += sizeof(npy_bool);
+ }
+# endif
+ }
+
+ /* If the loop was unrolled, we need to finish it off */
+#if (@nop@ <= 3)
+ goto finish_after_unrolled_loop;
+#endif
+}
+
+static void
+bool_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr,
+ npy_intp const *strides, npy_intp count)
+{
+ npy_bool accum = 0;
+
+#if (@nop@ <= 3)
+ char *data0 = dataptr[0];
+ npy_intp stride0 = strides[0];
+#endif
+#if (@nop@ == 2 || @nop@ == 3)
+ char *data1 = dataptr[1];
+ npy_intp stride1 = strides[1];
+#endif
+#if (@nop@ == 3)
+ char *data2 = dataptr[2];
+ npy_intp stride2 = strides[2];
+#endif
+
+ while (count--) {
+#if @nop@ == 1
+ accum = *(npy_bool *)data0 || accum;
+ data0 += stride0;
+#elif @nop@ == 2
+ accum = (*(npy_bool *)data0 && *(npy_bool *)data1) || accum;
+ data0 += stride0;
+ data1 += stride1;
+#elif @nop@ == 3
+ accum = (*(npy_bool *)data0 &&
+ *(npy_bool *)data1 &&
+ *(npy_bool *)data2) || accum;
+ data0 += stride0;
+ data1 += stride1;
+ data2 += stride2;
+#else
+ npy_bool temp = *(npy_bool *)dataptr[0];
+ int i;
+ for (i = 1; i < nop; ++i) {
+ temp = temp && *(npy_bool *)dataptr[i];
+ }
+ accum = temp || accum;
+ for (i = 0; i <= nop; ++i) {
+ dataptr[i] += strides[i];
+ }
+#endif
+ }
+
+# if @nop@ <= 3
+ *((npy_bool *)dataptr[@nop@]) = accum || *((npy_bool *)dataptr[@nop@]);
+# else
+ *((npy_bool *)dataptr[nop]) = accum || *((npy_bool *)dataptr[nop]);
+# endif
+}
+
+/**end repeat**/
+
+/* These tables need to match up with the type enum */
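+/*
+ * Each table below has one row per entry of the type enum, in enum
+ * order; the #use# flags in the repeat headers select, per type,
+ * either the specialized kernels or NULL when no sum-of-products
+ * implementation exists for that type.
+ */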
+static sum_of_products_fn
+_contig_outstride0_unary_specialization_table[NPY_NTYPES] = {
+/**begin repeat
+ * #name = bool,
+ * byte, ubyte,
+ * short, ushort,
+ * int, uint,
+ * long, ulong,
+ * longlong, ulonglong,
+ * float, double, longdouble,
+ * cfloat, cdouble, clongdouble,
+ * object, string, unicode, void,
+ * datetime, timedelta, half#
+ * #use = 0,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1, 1,
+ * 1, 1, 1,
+ * 0, 0, 0, 0,
+ * 0, 0, 1#
+ */
+#if @use@
+ &@name@_sum_of_products_contig_outstride0_one,
+#else
+ NULL,
+#endif
+/**end repeat**/
+}; /* End of _contig_outstride0_unary_specialization_table */
+
+static sum_of_products_fn _binary_specialization_table[NPY_NTYPES][5] = {
+/**begin repeat
+ * #name = bool,
+ * byte, ubyte,
+ * short, ushort,
+ * int, uint,
+ * long, ulong,
+ * longlong, ulonglong,
+ * float, double, longdouble,
+ * cfloat, cdouble, clongdouble,
+ * object, string, unicode, void,
+ * datetime, timedelta, half#
+ * #use = 0,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1, 1,
+ * 0, 0, 0,
+ * 0, 0, 0, 0,
+ * 0, 0, 1#
+ */
+#if @use@
+{
+ &@name@_sum_of_products_stride0_contig_outstride0_two,
+ &@name@_sum_of_products_stride0_contig_outcontig_two,
+ &@name@_sum_of_products_contig_stride0_outstride0_two,
+ &@name@_sum_of_products_contig_stride0_outcontig_two,
+ &@name@_sum_of_products_contig_contig_outstride0_two,
+},
+#else
+ {NULL, NULL, NULL, NULL, NULL},
+#endif
+/**end repeat**/
+}; /* End of _binary_specialization_table */
+
+static sum_of_products_fn _outstride0_specialized_table[NPY_NTYPES][4] = {
+/**begin repeat
+ * #name = bool,
+ * byte, ubyte,
+ * short, ushort,
+ * int, uint,
+ * long, ulong,
+ * longlong, ulonglong,
+ * float, double, longdouble,
+ * cfloat, cdouble, clongdouble,
+ * object, string, unicode, void,
+ * datetime, timedelta, half#
+ * #use = 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1, 1,
+ * 1, 1, 1,
+ * 0, 0, 0, 0,
+ * 0, 0, 1#
+ */
+#if @use@
+{
+ &@name@_sum_of_products_outstride0_any,
+ &@name@_sum_of_products_outstride0_one,
+ &@name@_sum_of_products_outstride0_two,
+ &@name@_sum_of_products_outstride0_three
+},
+#else
+ {NULL, NULL, NULL, NULL},
+#endif
+/**end repeat**/
+}; /* End of _outstride0_specialized_table */
+
+static sum_of_products_fn _allcontig_specialized_table[NPY_NTYPES][4] = {
+/**begin repeat
+ * #name = bool,
+ * byte, ubyte,
+ * short, ushort,
+ * int, uint,
+ * long, ulong,
+ * longlong, ulonglong,
+ * float, double, longdouble,
+ * cfloat, cdouble, clongdouble,
+ * object, string, unicode, void,
+ * datetime, timedelta, half#
+ * #use = 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1, 1,
+ * 1, 1, 1,
+ * 0, 0, 0, 0,
+ * 0, 0, 1#
+ */
+#if @use@
+{
+ &@name@_sum_of_products_contig_any,
+ &@name@_sum_of_products_contig_one,
+ &@name@_sum_of_products_contig_two,
+ &@name@_sum_of_products_contig_three
+},
+#else
+ {NULL, NULL, NULL, NULL},
+#endif
+/**end repeat**/
+}; /* End of _allcontig_specialized_table */
+
+static sum_of_products_fn _unspecialized_table[NPY_NTYPES][4] = {
+/**begin repeat
+ * #name = bool,
+ * byte, ubyte,
+ * short, ushort,
+ * int, uint,
+ * long, ulong,
+ * longlong, ulonglong,
+ * float, double, longdouble,
+ * cfloat, cdouble, clongdouble,
+ * object, string, unicode, void,
+ * datetime, timedelta, half#
+ * #use = 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1,
+ * 1, 1, 1,
+ * 1, 1, 1,
+ * 0, 0, 0, 0,
+ * 0, 0, 1#
+ */
+#if @use@
+{
+ &@name@_sum_of_products_any,
+ &@name@_sum_of_products_one,
+ &@name@_sum_of_products_two,
+ &@name@_sum_of_products_three
+},
+#else
+ {NULL, NULL, NULL, NULL},
+#endif
+/**end repeat**/
+}; /* End of _unspecialized_table */
+
+NPY_VISIBILITY_HIDDEN sum_of_products_fn
+get_sum_of_products_function(int nop, int type_num,
+ npy_intp itemsize, npy_intp const *fixed_strides)
+{
+ int iop;
+
+ if (type_num >= NPY_NTYPES) {
+ return NULL;
+ }
+
+ /* contiguous reduction */
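+    /*
+     * nop == 1 with a contiguous input and an output stride of zero is
+     * a plain reduction (a sum, for the numeric types), for which the
+     * unary table holds the vectorized kernels.
+     */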
+ if (nop == 1 && fixed_strides[0] == itemsize && fixed_strides[1] == 0) {
+ sum_of_products_fn ret =
+ _contig_outstride0_unary_specialization_table[type_num];
+ if (ret != NULL) {
+ return ret;
+ }
+ }
+
+ /* nop of 2 has more specializations */
+ if (nop == 2) {
+ /* Encode the zero/contiguous strides */
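+        /*
+         * Each of the three strides (two inputs, one output) contributes
+         * a field to `code`: 0 for a zero stride, a distinct power of two
+         * (4, 2, 1) for a contiguous stride, and 8 as a sentinel for any
+         * other stride.  Codes 2..6 select one of the five mixed
+         * zero/contiguous kernels in the binary table; everything else
+         * falls through to the generic paths below.
+         */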
+ int code;
+ code = (fixed_strides[0] == 0) ? 0 :
+ (fixed_strides[0] == itemsize) ? 2*2*1 : 8;
+ code += (fixed_strides[1] == 0) ? 0 :
+ (fixed_strides[1] == itemsize) ? 2*1 : 8;
+ code += (fixed_strides[2] == 0) ? 0 :
+ (fixed_strides[2] == itemsize) ? 1 : 8;
+ if (code >= 2 && code < 7) {
+ sum_of_products_fn ret =
+ _binary_specialization_table[type_num][code-2];
+ if (ret != NULL) {
+ return ret;
+ }
+ }
+ }
+
+ /* Inner loop with an output stride of 0 */
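+    /*
+     * The remaining tables are laid out {any, one, two, three}, so
+     * nop > 3 selects slot 0, the variable-operand fallback.
+     */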
+ if (fixed_strides[nop] == 0) {
+ return _outstride0_specialized_table[type_num][nop <= 3 ? nop : 0];
+ }
+
+ /* Check for all contiguous */
+ for (iop = 0; iop < nop + 1; ++iop) {
+ if (fixed_strides[iop] != itemsize) {
+ break;
+ }
+ }
+
+ /* Contiguous loop */
+ if (iop == nop + 1) {
+ return _allcontig_specialized_table[type_num][nop <= 3 ? nop : 0];
+ }
+
+ /* None of the above specializations caught it, general loops */
+ return _unspecialized_table[type_num][nop <= 3 ? nop : 0];
+}
diff --git a/numpy/core/src/multiarray/einsum_sumprod.h b/numpy/core/src/multiarray/einsum_sumprod.h
new file mode 100644
index 000000000..c6cf18ec6
--- /dev/null
+++ b/numpy/core/src/multiarray/einsum_sumprod.h
@@ -0,0 +1,12 @@
+#ifndef _NPY_MULTIARRAY_EINSUM_SUMPROD_H
+#define _NPY_MULTIARRAY_EINSUM_SUMPROD_H
+
+#include <numpy/npy_common.h>
+
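+/*
+ * Inner-loop function: (nop, dataptr, strides, count).  dataptr and
+ * strides have nop + 1 entries; index nop refers to the output operand.
+ */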
+typedef void (*sum_of_products_fn)(int, char **, npy_intp const*, npy_intp);
+
+NPY_VISIBILITY_HIDDEN sum_of_products_fn
+get_sum_of_products_function(int nop, int type_num,
+ npy_intp itemsize, npy_intp const *fixed_strides);
+
+#endif
diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c
index d5f24e75a..9b7d8deae 100644
--- a/numpy/core/src/multiarray/flagsobject.c
+++ b/numpy/core/src/multiarray/flagsobject.c
@@ -307,7 +307,7 @@ arrayflags_farray_get(PyArrayFlagsObject *self)
static PyObject *
arrayflags_num_get(PyArrayFlagsObject *self)
{
- return PyInt_FromLong(self->flags);
+ return PyLong_FromLong(self->flags);
}
/* relies on setflags order being write, align, uic */
@@ -711,7 +711,7 @@ arrayflags_print(PyArrayFlagsObject *self)
if (fl & NPY_ARRAY_WARN_ON_WRITE) {
_warn_on_write = " (with WARN_ON_WRITE=True)";
}
- return PyUString_FromFormat(
+ return PyUnicode_FromFormat(
" %s : %s\n %s : %s\n"
" %s : %s\n %s : %s%s\n"
" %s : %s\n %s : %s\n"
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index 9066f52a8..3575d6fad 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -28,7 +28,7 @@
static PyObject *
array_ndim_get(PyArrayObject *self)
{
- return PyInt_FromLong(PyArray_NDIM(self));
+ return PyLong_FromLong(PyArray_NDIM(self));
}
static PyObject *
@@ -217,7 +217,7 @@ array_protocol_descr_get(PyArrayObject *self)
if (dobj == NULL) {
return NULL;
}
- PyTuple_SET_ITEM(dobj, 0, PyString_FromString(""));
+ PyTuple_SET_ITEM(dobj, 0, PyUnicode_FromString(""));
PyTuple_SET_ITEM(dobj, 1, array_typestr_get(self));
res = PyList_New(1);
if (res == NULL) {
@@ -318,7 +318,7 @@ array_interface_get(PyArrayObject *self)
return NULL;
}
- obj = PyInt_FromLong(3);
+ obj = PyLong_FromLong(3);
ret = PyDict_SetItemString(dict, "version", obj);
Py_DECREF(obj);
if (ret < 0) {
@@ -413,7 +413,7 @@ array_data_set(PyArrayObject *self, PyObject *op)
static PyObject *
array_itemsize_get(PyArrayObject *self)
{
- return PyInt_FromLong((long) PyArray_DESCR(self)->elsize);
+ return PyLong_FromLong((long) PyArray_DESCR(self)->elsize);
}
static PyObject *
@@ -421,13 +421,13 @@ array_size_get(PyArrayObject *self)
{
npy_intp size=PyArray_SIZE(self);
#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
- return PyInt_FromLong((long) size);
+ return PyLong_FromLong((long) size);
#else
if (size > NPY_MAX_LONG || size < NPY_MIN_LONG) {
return PyLong_FromLongLong(size);
}
else {
- return PyInt_FromLong((long) size);
+ return PyLong_FromLong((long) size);
}
#endif
}
@@ -437,13 +437,13 @@ array_nbytes_get(PyArrayObject *self)
{
npy_intp nbytes = PyArray_NBYTES(self);
#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
- return PyInt_FromLong((long) nbytes);
+ return PyLong_FromLong((long) nbytes);
#else
if (nbytes > NPY_MAX_LONG || nbytes < NPY_MIN_LONG) {
return PyLong_FromLongLong(nbytes);
}
else {
- return PyInt_FromLong((long) nbytes);
+ return PyLong_FromLong((long) nbytes);
}
#endif
}
@@ -621,7 +621,6 @@ static PyObject *
array_struct_get(PyArrayObject *self)
{
PyArrayInterface *inter;
- PyObject *ret;
inter = (PyArrayInterface *)PyArray_malloc(sizeof(PyArrayInterface));
if (inter==NULL) {
@@ -673,8 +672,14 @@ array_struct_get(PyArrayObject *self)
else {
inter->descr = NULL;
}
+ PyObject *ret = PyCapsule_New(inter, NULL, gentype_struct_free);
+ if (ret == NULL) {
+ return NULL;
+ }
Py_INCREF(self);
- ret = NpyCapsule_FromVoidPtrAndDesc(inter, self, gentype_struct_free);
+ if (PyCapsule_SetContext(ret, self) < 0) {
+ return NULL;
+ }
return ret;
}
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index ac5b90400..96f501c55 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -1411,10 +1411,10 @@ static PyObject *
arraymultiter_size_get(PyArrayMultiIterObject *self)
{
#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
- return PyInt_FromLong((long) self->size);
+ return PyLong_FromLong((long) self->size);
#else
if (self->size < NPY_MAX_LONG) {
- return PyInt_FromLong((long) self->size);
+ return PyLong_FromLong((long) self->size);
}
else {
return PyLong_FromLongLong((npy_longlong) self->size);
@@ -1426,10 +1426,10 @@ static PyObject *
arraymultiter_index_get(PyArrayMultiIterObject *self)
{
#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
- return PyInt_FromLong((long) self->index);
+ return PyLong_FromLong((long) self->index);
#else
if (self->size < NPY_MAX_LONG) {
- return PyInt_FromLong((long) self->index);
+ return PyLong_FromLong((long) self->index);
}
else {
return PyLong_FromLongLong((npy_longlong) self->index);
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index d234c366c..9dc802508 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -110,7 +110,7 @@
* if not it can decrease performance
* tested to improve performance on intel xeon 5x/7x, core2duo, amd phenom x4
*/
-static void
+static int
#if @is_aligned@ && @is_swap@ == 0 && @elsize@ <= NPY_SIZEOF_INTP
NPY_GCC_UNROLL_LOOPS
#endif
@@ -171,6 +171,7 @@ static void
--N;
}
+ return 0;
}
#endif
@@ -182,7 +183,7 @@ static void
* but it profits from vectorization enabled with -O3
*/
#if (@src_contig@ == 0) && @is_aligned@
-static NPY_GCC_OPT_3 void
+static NPY_GCC_OPT_3 int
@prefix@_@oper@_size@elsize@_srcstride0(char *dst,
npy_intp dst_stride,
char *src, npy_intp NPY_UNUSED(src_stride),
@@ -197,7 +198,7 @@ static NPY_GCC_OPT_3 void
npy_uint64 temp0, temp1;
#endif
if (N == 0) {
- return;
+ return 0;
}
#if @is_aligned@ && @elsize@ != 16
/* sanity check */
@@ -238,6 +239,7 @@ static NPY_GCC_OPT_3 void
--N;
}
#endif/* @elsize == 1 && @dst_contig@ -- else */
+ return 0;
}
#endif/* (@src_contig@ == 0) && @is_aligned@ */
@@ -247,7 +249,7 @@ static NPY_GCC_OPT_3 void
/**end repeat1**/
/**end repeat**/
-static void
+static int
_strided_to_strided(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -259,9 +261,10 @@ _strided_to_strided(char *dst, npy_intp dst_stride,
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_swap_strided_to_strided(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -284,9 +287,10 @@ _swap_strided_to_strided(char *dst, npy_intp dst_stride,
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_swap_pair_strided_to_strided(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp src_itemsize,
@@ -319,15 +323,17 @@ _swap_pair_strided_to_strided(char *dst, npy_intp dst_stride,
src += src_stride;
--N;
}
+ return 0;
}
-static void
+static int
_contig_to_contig(char *dst, npy_intp NPY_UNUSED(dst_stride),
char *src, npy_intp NPY_UNUSED(src_stride),
npy_intp N, npy_intp src_itemsize,
NpyAuxData *NPY_UNUSED(data))
{
memmove(dst, src, src_itemsize*N);
+ return 0;
}
@@ -787,7 +793,7 @@ NPY_NO_EXPORT PyArray_StridedUnaryOp *
#endif
-static NPY_GCC_OPT_3 void
+static NPY_GCC_OPT_3 int
@prefix@_cast_@name1@_to_@name2@(
char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
@@ -873,6 +879,7 @@ static NPY_GCC_OPT_3 void
src += src_stride;
#endif
}
+ return 0;
}
#undef _CONVERT_FN
@@ -989,10 +996,14 @@ PyArray_TransferNDimToStrided(npy_intp ndim,
src_stride0 = src_strides[0];
N = shape0 - coord0;
if (N >= count) {
- stransfer(dst, dst_stride, src, src_stride0, count, src_itemsize, data);
- return 0;
+ return stransfer(dst, dst_stride, src, src_stride0,
+ count, src_itemsize, data);
+ }
+ int res = stransfer(dst, dst_stride, src, src_stride0,
+ N, src_itemsize, data);
+ if (res < 0) {
+ return -1;
}
- stransfer(dst, dst_stride, src, src_stride0, N, src_itemsize, data);
count -= N;
/* If it's 1-dimensional, there's no more to copy */
@@ -1012,13 +1023,15 @@ PyArray_TransferNDimToStrided(npy_intp ndim,
N = shape0*M;
for (i = 0; i < M; ++i) {
if (shape0 >= count) {
- stransfer(dst, dst_stride, src, src_stride0,
- count, src_itemsize, data);
- return 0;
+ return stransfer(dst, dst_stride, src, src_stride0,
+ count, src_itemsize, data);
}
else {
- stransfer(dst, dst_stride, src, src_stride0,
- shape0, src_itemsize, data);
+ res = stransfer(dst, dst_stride, src, src_stride0,
+ shape0, src_itemsize, data);
+ if (res < 0) {
+ return -1;
+ }
}
count -= shape0;
src += src_stride1;
@@ -1073,13 +1086,15 @@ PyArray_TransferNDimToStrided(npy_intp ndim,
/* A loop for dimensions 0 and 1 */
for (i = 0; i < shape1; ++i) {
if (shape0 >= count) {
- stransfer(dst, dst_stride, src, src_stride0,
- count, src_itemsize, data);
- return 0;
+ return stransfer(dst, dst_stride, src, src_stride0,
+ count, src_itemsize, data);
}
else {
- stransfer(dst, dst_stride, src, src_stride0,
- shape0, src_itemsize, data);
+ res = stransfer(dst, dst_stride, src, src_stride0,
+ shape0, src_itemsize, data);
+ if (res < 0) {
+ return -1;
+ }
}
count -= shape0;
src += src_stride1;
@@ -1108,10 +1123,14 @@ PyArray_TransferStridedToNDim(npy_intp ndim,
dst_stride0 = dst_strides[0];
N = shape0 - coord0;
if (N >= count) {
- stransfer(dst, dst_stride0, src, src_stride, count, src_itemsize, data);
- return 0;
+ return stransfer(dst, dst_stride0, src, src_stride,
+ count, src_itemsize, data);
+ }
+ int res = stransfer(dst, dst_stride0, src, src_stride,
+ N, src_itemsize, data);
+ if (res < 0) {
+ return -1;
}
- stransfer(dst, dst_stride0, src, src_stride, N, src_itemsize, data);
count -= N;
/* If it's 1-dimensional, there's no more to copy */
@@ -1131,13 +1150,15 @@ PyArray_TransferStridedToNDim(npy_intp ndim,
N = shape0*M;
for (i = 0; i < M; ++i) {
if (shape0 >= count) {
- stransfer(dst, dst_stride0, src, src_stride,
- count, src_itemsize, data);
- return 0;
+ return stransfer(dst, dst_stride0, src, src_stride,
+ count, src_itemsize, data);
}
else {
- stransfer(dst, dst_stride0, src, src_stride,
- shape0, src_itemsize, data);
+ res = stransfer(dst, dst_stride0, src, src_stride,
+ shape0, src_itemsize, data);
+ if (res < 0) {
+ return -1;
+ }
}
count -= shape0;
dst += dst_stride1;
@@ -1192,13 +1213,15 @@ PyArray_TransferStridedToNDim(npy_intp ndim,
/* A loop for dimensions 0 and 1 */
for (i = 0; i < shape1; ++i) {
if (shape0 >= count) {
- stransfer(dst, dst_stride0, src, src_stride,
- count, src_itemsize, data);
- return 0;
+ return stransfer(dst, dst_stride0, src, src_stride,
+ count, src_itemsize, data);
}
else {
- stransfer(dst, dst_stride0, src, src_stride,
- shape0, src_itemsize, data);
+ res = stransfer(dst, dst_stride0, src, src_stride,
+ shape0, src_itemsize, data);
+ if (res < 0) {
+ return -1;
+ }
}
count -= shape0;
dst += dst_stride1;
@@ -1228,16 +1251,18 @@ PyArray_TransferMaskedStridedToNDim(npy_intp ndim,
dst_stride0 = dst_strides[0];
N = shape0 - coord0;
if (N >= count) {
- stransfer(dst, dst_stride0,
- src, src_stride,
- mask, mask_stride,
- count, src_itemsize, data);
- return 0;
- }
- stransfer(dst, dst_stride0,
- src, src_stride,
+ return stransfer(
+ dst, dst_stride0, src, src_stride,
mask, mask_stride,
- N, src_itemsize, data);
+ count, src_itemsize, data);
+ }
+ int res = stransfer(
+ dst, dst_stride0, src, src_stride,
+ mask, mask_stride,
+ N, src_itemsize, data);
+ if (res < 0) {
+ return -1;
+ }
count -= N;
/* If it's 1-dimensional, there's no more to copy */
@@ -1258,17 +1283,19 @@ PyArray_TransferMaskedStridedToNDim(npy_intp ndim,
N = shape0*M;
for (i = 0; i < M; ++i) {
if (shape0 >= count) {
- stransfer(dst, dst_stride0,
- src, src_stride,
- mask, mask_stride,
- count, src_itemsize, data);
- return 0;
+ return stransfer(
+ dst, dst_stride0, src, src_stride,
+ mask, mask_stride,
+ count, src_itemsize, data);
}
else {
- stransfer(dst, dst_stride0,
- src, src_stride,
- mask, mask_stride,
- shape0, src_itemsize, data);
+ int res = stransfer(
+ dst, dst_stride0, src, src_stride,
+ mask, mask_stride,
+                    shape0, src_itemsize, data);
+ if (res < 0) {
+ return -1;
+ }
}
count -= shape0;
dst += dst_stride1;
@@ -1324,17 +1351,19 @@ PyArray_TransferMaskedStridedToNDim(npy_intp ndim,
/* A loop for dimensions 0 and 1 */
for (i = 0; i < shape1; ++i) {
if (shape0 >= count) {
- stransfer(dst, dst_stride0,
- src, src_stride,
- mask, mask_stride,
- count, src_itemsize, data);
- return 0;
+ return stransfer(
+ dst, dst_stride0, src, src_stride,
+ mask, mask_stride,
+ count, src_itemsize, data);
}
else {
- stransfer(dst, dst_stride0,
- src, src_stride,
- mask, mask_stride,
- shape0, src_itemsize, data);
+ res = stransfer(
+ dst, dst_stride0, src, src_stride,
+ mask, mask_stride,
+ shape0, src_itemsize, data);
+ if (res < 0) {
+ return -1;
+ }
}
count -= shape0;
dst += dst_stride1;
@@ -1760,13 +1789,23 @@ mapiter_@name@(PyArrayMapIterObject *mit)
do {
#if @isget@
- stransfer(subspace_ptrs[1], subspace_strides[1],
- subspace_ptrs[0], subspace_strides[0],
- *counter, src_itemsize, transferdata);
+ if (NPY_UNLIKELY(stransfer(
+ subspace_ptrs[1], subspace_strides[1],
+ subspace_ptrs[0], subspace_strides[0],
+ *counter, src_itemsize, transferdata) < 0)) {
+ NPY_END_THREADS;
+ NPY_AUXDATA_FREE(transferdata);
+ return -1;
+ }
#else
- stransfer(subspace_ptrs[0], subspace_strides[0],
- subspace_ptrs[1], subspace_strides[1],
- *counter, src_itemsize, transferdata);
+ if (NPY_UNLIKELY(stransfer(
+ subspace_ptrs[0], subspace_strides[0],
+ subspace_ptrs[1], subspace_strides[1],
+ *counter, src_itemsize, transferdata) < 0)) {
+ NPY_END_THREADS;
+ NPY_AUXDATA_FREE(transferdata);
+ return -1;
+ }
#endif
} while (mit->subspace_next(mit->subspace_iter));
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 1f2bec8b1..fdf248c97 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -946,9 +946,9 @@ get_view_from_index(PyArrayObject *self, PyArrayObject **view,
}
break;
case HAS_SLICE:
- if (NpySlice_GetIndicesEx(indices[i].object,
- PyArray_DIMS(self)[orig_dim],
- &start, &stop, &step, &n_steps) < 0) {
+ if (PySlice_GetIndicesEx(indices[i].object,
+ PyArray_DIMS(self)[orig_dim],
+ &start, &stop, &step, &n_steps) < 0) {
return -1;
}
if (n_steps <= 0) {
@@ -1091,6 +1091,7 @@ array_boolean_subscript(PyArrayObject *self,
self_stride = innerstrides[0];
bmask_stride = innerstrides[1];
+ int res = 0;
do {
innersize = *NpyIter_GetInnerLoopSizePtr(iter);
self_data = dataptrs[0];
@@ -1105,8 +1106,11 @@ array_boolean_subscript(PyArrayObject *self,
/* Process unmasked values */
bmask_data = npy_memchr(bmask_data, 0, bmask_stride, innersize,
&subloopsize, 0);
- stransfer(ret_data, itemsize, self_data, self_stride,
- subloopsize, itemsize, transferdata);
+ res = stransfer(ret_data, itemsize, self_data, self_stride,
+ subloopsize, itemsize, transferdata);
+ if (res < 0) {
+ break;
+ }
innersize -= subloopsize;
self_data += subloopsize * self_stride;
ret_data += subloopsize * itemsize;
@@ -1115,8 +1119,15 @@ array_boolean_subscript(PyArrayObject *self,
NPY_END_THREADS;
- NpyIter_Deallocate(iter);
+ if (!NpyIter_Deallocate(iter)) {
+ res = -1;
+ }
NPY_AUXDATA_FREE(transferdata);
+ if (res < 0) {
+ /* Should be practically impossible, since there is no cast */
+ Py_DECREF(ret);
+ return NULL;
+ }
}
if (!PyArray_CheckExact(self)) {
@@ -1209,6 +1220,7 @@ array_assign_boolean_subscript(PyArrayObject *self,
v_data = PyArray_DATA(v);
/* Create an iterator for the data */
+ int res = 0;
if (size > 0) {
NpyIter *iter;
PyArrayObject *op[2] = {self, bmask};
@@ -1253,7 +1265,7 @@ array_assign_boolean_subscript(PyArrayObject *self,
/* Get a dtype transfer function */
NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
if (PyArray_GetDTypeTransferFunction(
- IsUintAligned(self) && IsAligned(self) &&
+ IsUintAligned(self) && IsAligned(self) &&
IsUintAligned(v) && IsAligned(v),
v_stride, fixed_strides[0],
PyArray_DESCR(v), PyArray_DESCR(self),
@@ -1282,8 +1294,11 @@ array_assign_boolean_subscript(PyArrayObject *self,
/* Process unmasked values */
bmask_data = npy_memchr(bmask_data, 0, bmask_stride, innersize,
&subloopsize, 0);
- stransfer(self_data, self_stride, v_data, v_stride,
- subloopsize, src_itemsize, transferdata);
+ res = stransfer(self_data, self_stride, v_data, v_stride,
+ subloopsize, src_itemsize, transferdata);
+ if (res < 0) {
+ break;
+ }
innersize -= subloopsize;
self_data += subloopsize * self_stride;
v_data += subloopsize * v_stride;
@@ -1295,22 +1310,12 @@ array_assign_boolean_subscript(PyArrayObject *self,
}
NPY_AUXDATA_FREE(transferdata);
- NpyIter_Deallocate(iter);
- }
-
- if (needs_api) {
- /*
- * FIXME?: most assignment operations stop after the first occurrence
- * of an error. Boolean does not currently, but should at least
- * report the error. (This is only relevant for things like str->int
- * casts which call into python)
- */
- if (PyErr_Occurred()) {
- return -1;
+ if (!NpyIter_Deallocate(iter)) {
+ res = -1;
}
}
- return 0;
+ return res;
}
@@ -1413,7 +1418,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
return 0;
}
else if (tup == NULL){
- PyObject *errmsg = PyUString_FromString("no field of name ");
+ PyObject *errmsg = PyUnicode_FromString("no field of name ");
PyUString_Concat(&errmsg, ind);
PyErr_SetObject(PyExc_ValueError, errmsg);
Py_DECREF(errmsg);
@@ -2433,7 +2438,7 @@ mapiter_fill_info(PyArrayMapIterObject *mit, npy_index_info *indices,
* Attempt to set a meaningful exception. Could also find out
* if a boolean index was converted.
*/
- errmsg = PyUString_FromString("shape mismatch: indexing arrays could not "
+ errmsg = PyUnicode_FromString("shape mismatch: indexing arrays could not "
"be broadcast together with shapes ");
if (errmsg == NULL) {
return -1;
@@ -3178,7 +3183,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
goto finish;
broadcast_error:
- errmsg = PyUString_FromString("shape mismatch: value array "
+ errmsg = PyUnicode_FromString("shape mismatch: value array "
"of shape ");
if (errmsg == NULL) {
goto finish;
@@ -3199,7 +3204,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
goto finish;
}
- tmp = PyUString_FromString("could not be broadcast to indexing "
+ tmp = PyUnicode_FromString("could not be broadcast to indexing "
"result of shape ");
PyUString_ConcatAndDel(&errmsg, tmp);
if (errmsg == NULL) {
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index a2db8042f..f7cb2185b 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -1508,14 +1508,14 @@ _deepcopy_call(char *iptr, char *optr, PyArray_Descr *dtype,
else {
PyObject *itemp, *otemp;
PyObject *res;
- NPY_COPY_PYOBJECT_PTR(&itemp, iptr);
- NPY_COPY_PYOBJECT_PTR(&otemp, optr);
+ memcpy(&itemp, iptr, sizeof(itemp));
+ memcpy(&otemp, optr, sizeof(otemp));
Py_XINCREF(itemp);
/* call deepcopy on this argument */
res = PyObject_CallFunctionObjArgs(deepcopy, itemp, visit, NULL);
Py_XDECREF(itemp);
Py_XDECREF(otemp);
- NPY_COPY_PYOBJECT_PTR(optr, &res);
+ memcpy(optr, &res, sizeof(res));
}
}
@@ -1616,7 +1616,7 @@ _getlist_pkl(PyArrayObject *self)
}
while (iter->index < iter->size) {
theobject = getitem(iter->dataptr, self);
- PyList_SET_ITEM(list, (int) iter->index, theobject);
+ PyList_SET_ITEM(list, iter->index, theobject);
PyArray_ITER_NEXT(iter);
}
Py_DECREF(iter);
@@ -1636,7 +1636,7 @@ _setlist_pkl(PyArrayObject *self, PyObject *list)
return -1;
}
while(iter->index < iter->size) {
- theobject = PyList_GET_ITEM(list, (int) iter->index);
+ theobject = PyList_GET_ITEM(list, iter->index);
setitem(theobject, iter->dataptr, self);
PyArray_ITER_NEXT(iter);
}
@@ -1676,7 +1676,7 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args))
Py_BuildValue("ONc",
(PyObject *)Py_TYPE(self),
Py_BuildValue("(N)",
- PyInt_FromLong(0)),
+ PyLong_FromLong(0)),
/* dummy data-type */
'b'));
@@ -1701,7 +1701,7 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args))
Py_DECREF(ret);
return NULL;
}
- PyTuple_SET_ITEM(state, 0, PyInt_FromLong(version));
+ PyTuple_SET_ITEM(state, 0, PyLong_FromLong(version));
PyTuple_SET_ITEM(state, 1, PyObject_GetAttrString((PyObject *)self,
"shape"));
descr = PyArray_DESCR(self);
@@ -1763,7 +1763,7 @@ array_reduce_ex_picklebuffer(PyArrayObject *self, int protocol)
#if PY_VERSION_HEX >= 0x03080000
/* we expect protocol 5 to be available in Python 3.8 */
pickle_module = PyImport_ImportModule("pickle");
-#elif PY_VERSION_HEX >= 0x03060000
+#else
pickle_module = PyImport_ImportModule("pickle5");
if (pickle_module == NULL) {
/* for protocol 5, raise a clear ImportError if pickle5 is not found
@@ -1772,10 +1772,6 @@ array_reduce_ex_picklebuffer(PyArrayObject *self, int protocol)
"requires the pickle5 module for Python >=3.6 and <3.8");
return NULL;
}
-#else
- PyErr_SetString(PyExc_ValueError, "pickle protocol 5 is not available "
- "for Python < 3.6");
- return NULL;
#endif
if (pickle_module == NULL){
return NULL;
@@ -2585,9 +2581,10 @@ array_complex(PyArrayObject *self, PyObject *NPY_UNUSED(args))
PyArrayObject *arr;
PyArray_Descr *dtype;
PyObject *c;
+
if (PyArray_SIZE(self) != 1) {
- PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\
- "be converted to Python scalars");
+ PyErr_SetString(PyExc_TypeError,
+ "only length-1 arrays can be converted to Python scalars");
return NULL;
}
@@ -2598,38 +2595,18 @@ array_complex(PyArrayObject *self, PyObject *NPY_UNUSED(args))
if (!PyArray_CanCastArrayTo(self, dtype, NPY_SAME_KIND_CASTING) &&
!(PyArray_TYPE(self) == NPY_OBJECT)) {
- PyObject *err, *msg_part;
+ PyObject *descr = (PyObject*)PyArray_DESCR(self);
+
Py_DECREF(dtype);
- err = PyString_FromString("unable to convert ");
- if (err == NULL) {
- return NULL;
- }
- msg_part = PyObject_Repr((PyObject*)PyArray_DESCR(self));
- if (msg_part == NULL) {
- Py_DECREF(err);
- return NULL;
- }
- PyString_ConcatAndDel(&err, msg_part);
- if (err == NULL) {
- return NULL;
- }
- msg_part = PyString_FromString(", to complex.");
- if (msg_part == NULL) {
- Py_DECREF(err);
- return NULL;
- }
- PyString_ConcatAndDel(&err, msg_part);
- if (err == NULL) {
- return NULL;
- }
- PyErr_SetObject(PyExc_TypeError, err);
- Py_DECREF(err);
+ PyErr_Format(PyExc_TypeError,
+ "Unable to convert %R to complex", descr);
return NULL;
}
if (PyArray_TYPE(self) == NPY_OBJECT) {
/* let python try calling __complex__ on the object. */
PyObject *args, *res;
+
Py_DECREF(dtype);
args = Py_BuildValue("(O)", *((PyObject**)PyArray_DATA(self)));
if (args == NULL) {
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 7c5ceb962..276ceabc4 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -65,7 +65,6 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0;
#include "templ_common.h" /* for npy_mul_with_overflow_intp */
#include "compiled_base.h"
#include "mem_overlap.h"
-#include "alloc.h"
#include "typeinfo.h"
#include "get_attr_string.h"
@@ -363,7 +362,8 @@ PyArray_GetSubType(int narrays, PyArrayObject **arrays) {
*/
NPY_NO_EXPORT PyArrayObject *
PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis,
- PyArrayObject* ret)
+ PyArrayObject* ret, PyArray_Descr *dtype,
+ NPY_CASTING casting)
{
int iarrays, idim, ndim;
npy_intp shape[NPY_MAXDIMS];
@@ -427,6 +427,7 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis,
}
if (ret != NULL) {
+ assert(dtype == NULL);
if (PyArray_NDIM(ret) != ndim) {
PyErr_SetString(PyExc_ValueError,
"Output array has wrong dimensionality");
@@ -446,10 +447,16 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis,
/* Get the priority subtype for the array */
PyTypeObject *subtype = PyArray_GetSubType(narrays, arrays);
- /* Get the resulting dtype from combining all the arrays */
- PyArray_Descr *dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
if (dtype == NULL) {
- return NULL;
+ /* Get the resulting dtype from combining all the arrays */
+ dtype = (PyArray_Descr *)PyArray_ResultType(
+ narrays, arrays, 0, NULL);
+ if (dtype == NULL) {
+ return NULL;
+ }
+ }
+ else {
+ Py_INCREF(dtype);
}
/*
@@ -495,7 +502,7 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis,
/* Copy the data for this array */
if (PyArray_AssignArray((PyArrayObject *)sliding_view, arrays[iarrays],
- NULL, NPY_SAME_KIND_CASTING) < 0) {
+ NULL, casting) < 0) {
Py_DECREF(sliding_view);
Py_DECREF(ret);
return NULL;
@@ -515,7 +522,9 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis,
*/
NPY_NO_EXPORT PyArrayObject *
PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
- NPY_ORDER order, PyArrayObject *ret)
+ NPY_ORDER order, PyArrayObject *ret,
+ PyArray_Descr *dtype, NPY_CASTING casting,
+ npy_bool casting_not_passed)
{
int iarrays;
npy_intp shape = 0;
@@ -542,7 +551,10 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
}
}
+ int out_passed = 0;
if (ret != NULL) {
+ assert(dtype == NULL);
+ out_passed = 1;
if (PyArray_NDIM(ret) != 1) {
PyErr_SetString(PyExc_ValueError,
"Output array must be 1D");
@@ -561,10 +573,16 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
/* Get the priority subtype for the array */
PyTypeObject *subtype = PyArray_GetSubType(narrays, arrays);
- /* Get the resulting dtype from combining all the arrays */
- PyArray_Descr *dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
if (dtype == NULL) {
- return NULL;
+ /* Get the resulting dtype from combining all the arrays */
+ dtype = (PyArray_Descr *)PyArray_ResultType(
+ narrays, arrays, 0, NULL);
+ if (dtype == NULL) {
+ return NULL;
+ }
+ }
+ else {
+ Py_INCREF(dtype);
}
stride = dtype->elsize;
@@ -594,10 +612,37 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
return NULL;
}
+    int give_deprecation_warning = 1;  /* warn at most once, not once per input array */
for (iarrays = 0; iarrays < narrays; ++iarrays) {
/* Adjust the window dimensions for this array */
sliding_view->dimensions[0] = PyArray_SIZE(arrays[iarrays]);
+ if (!PyArray_CanCastArrayTo(
+ arrays[iarrays], PyArray_DESCR(ret), casting)) {
+ /* This should be an error, but was previously allowed here. */
+ if (casting_not_passed && out_passed) {
+ /* NumPy 1.20, 2020-09-03 */
+ if (give_deprecation_warning && DEPRECATE(
+ "concatenate() with `axis=None` will use same-kind "
+ "casting by default in the future. Please use "
+ "`casting='unsafe'` to retain the old behaviour. "
+ "In the future this will be a TypeError.") < 0) {
+ Py_DECREF(sliding_view);
+ Py_DECREF(ret);
+ return NULL;
+ }
+ give_deprecation_warning = 0;
+ }
+ else {
+ npy_set_invalid_cast_error(
+ PyArray_DESCR(arrays[iarrays]), PyArray_DESCR(ret),
+ casting, PyArray_NDIM(arrays[iarrays]) == 0);
+ Py_DECREF(sliding_view);
+ Py_DECREF(ret);
+ return NULL;
+ }
+ }
+
/* Copy the data for this array */
if (PyArray_CopyAsFlat((PyArrayObject *)sliding_view, arrays[iarrays],
order) < 0) {
@@ -615,8 +660,21 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
return ret;
}
+
+/**
+ * Implementation for np.concatenate
+ *
+ * @param op Sequence of arrays to concatenate
+ * @param axis Axis to concatenate along
+ * @param ret Output array to fill
+ * @param dtype Forced output array dtype (cannot be combined with ret)
+ * @param casting Casting mode used
+ * @param casting_not_passed Deprecation helper, set when casting was not explicitly passed by the caller
+ */
NPY_NO_EXPORT PyObject *
-PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret)
+PyArray_ConcatenateInto(PyObject *op,
+ int axis, PyArrayObject *ret, PyArray_Descr *dtype,
+ NPY_CASTING casting, npy_bool casting_not_passed)
{
int iarrays, narrays;
PyArrayObject **arrays;
@@ -626,6 +684,12 @@ PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret)
"The first input argument needs to be a sequence");
return NULL;
}
+ if (ret != NULL && dtype != NULL) {
+ PyErr_SetString(PyExc_TypeError,
+ "concatenate() only takes `out` or `dtype` as an "
+ "argument, but both were provided.");
+ return NULL;
+ }
/* Convert the input list into arrays */
narrays = PySequence_Size(op);
@@ -652,10 +716,13 @@ PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret)
}
if (axis >= NPY_MAXDIMS) {
- ret = PyArray_ConcatenateFlattenedArrays(narrays, arrays, NPY_CORDER, ret);
+ ret = PyArray_ConcatenateFlattenedArrays(
+ narrays, arrays, NPY_CORDER, ret, dtype,
+ casting, casting_not_passed);
}
else {
- ret = PyArray_ConcatenateArrays(narrays, arrays, axis, ret);
+ ret = PyArray_ConcatenateArrays(
+ narrays, arrays, axis, ret, dtype, casting);
}
for (iarrays = 0; iarrays < narrays; ++iarrays) {
@@ -687,7 +754,16 @@ fail:
NPY_NO_EXPORT PyObject *
PyArray_Concatenate(PyObject *op, int axis)
{
- return PyArray_ConcatenateInto(op, axis, NULL);
+ /* retain legacy behaviour for casting */
+ NPY_CASTING casting;
+ if (axis >= NPY_MAXDIMS) {
+ casting = NPY_UNSAFE_CASTING;
+ }
+ else {
+ casting = NPY_SAME_KIND_CASTING;
+ }
+ return PyArray_ConcatenateInto(
+ op, axis, NULL, NULL, casting, 0);
}
static int
@@ -1582,13 +1658,16 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
npy_bool subok = NPY_FALSE;
npy_bool copy = NPY_TRUE;
int ndmin = 0, nd;
+    PyObject *like = NULL;
PyArray_Descr *type = NULL;
PyArray_Descr *oldtype = NULL;
NPY_ORDER order = NPY_KEEPORDER;
int flags = 0;
- static char *kwd[]= {"object", "dtype", "copy", "order", "subok",
- "ndmin", NULL};
+ PyObject* array_function_result = NULL;
+
+ static char *kwd[] = {"object", "dtype", "copy", "order", "subok",
+ "ndmin", "like", NULL};
if (PyTuple_GET_SIZE(args) > 2) {
PyErr_Format(PyExc_TypeError,
@@ -1597,6 +1676,12 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
return NULL;
}
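+    /*
+     * NEP 35 `like=` dispatch: if an array-like was passed via the new
+     * `like` keyword, this routes creation through its
+     * __array_function__ implementation; Py_NotImplemented means no
+     * override, and the default path below runs.
+     */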
+ array_function_result = array_implement_c_array_function_creation(
+ "array", args, kws);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
/* super-fast path for ndarray argument calls */
if (PyTuple_GET_SIZE(args) == 0) {
goto full_path;
@@ -1674,13 +1759,14 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
}
full_path:
- if (!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i:array", kwd,
+ if (!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i$O:array", kwd,
&op,
PyArray_DescrConverter2, &type,
PyArray_BoolConverter, &copy,
PyArray_OrderConverter, &order,
PyArray_BoolConverter, &subok,
- &ndmin)) {
+ &ndmin,
+ &like)) {
goto clean_type;
}
@@ -1817,20 +1903,29 @@ static PyObject *
array_empty(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"shape", "dtype", "order", NULL};
+ static char *kwlist[] = {"shape", "dtype", "order", "like", NULL};
PyArray_Descr *typecode = NULL;
PyArray_Dims shape = {NULL, 0};
NPY_ORDER order = NPY_CORDER;
+ PyObject *like = NULL;
npy_bool is_f_order;
+ PyObject *array_function_result = NULL;
PyArrayObject *ret = NULL;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&:empty", kwlist,
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&$O:empty", kwlist,
PyArray_IntpConverter, &shape,
PyArray_DescrConverter, &typecode,
- PyArray_OrderConverter, &order)) {
+ PyArray_OrderConverter, &order,
+ &like)) {
goto fail;
}
+ array_function_result = array_implement_c_array_function_creation(
+ "empty", args, kwds);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
switch (order) {
case NPY_CORDER:
is_f_order = NPY_FALSE;
@@ -1956,9 +2051,9 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
return NULL;
}
}
- if (!PyString_Check(obj)) {
+ if (!PyBytes_Check(obj)) {
PyErr_SetString(PyExc_TypeError,
- "initializing object must be a string");
+ "initializing object must be a bytes object");
Py_XDECREF(tmpobj);
return NULL;
}
@@ -1968,7 +2063,7 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
Py_XDECREF(tmpobj);
return NULL;
}
- dptr = PyString_AS_STRING(obj);
+ dptr = PyBytes_AS_STRING(obj);
}
}
ret = PyArray_Scalar(dptr, typecode, NULL);
@@ -1984,20 +2079,29 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
static PyObject *
array_zeros(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"shape", "dtype", "order", NULL};
+ static char *kwlist[] = {"shape", "dtype", "order", "like", NULL};
PyArray_Descr *typecode = NULL;
PyArray_Dims shape = {NULL, 0};
NPY_ORDER order = NPY_CORDER;
+ PyObject *like = NULL;
npy_bool is_f_order = NPY_FALSE;
+ PyObject *array_function_result = NULL;
PyArrayObject *ret = NULL;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&:zeros", kwlist,
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&$O:zeros", kwlist,
PyArray_IntpConverter, &shape,
PyArray_DescrConverter, &typecode,
- PyArray_OrderConverter, &order)) {
+ PyArray_OrderConverter, &order,
+ &like)) {
goto fail;
}
+ array_function_result = array_implement_c_array_function_creation(
+ "zeros", args, kwds);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
switch (order) {
case NPY_CORDER:
is_f_order = NPY_FALSE;
@@ -2050,16 +2154,24 @@ array_fromstring(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds
Py_ssize_t nin = -1;
char *sep = NULL;
Py_ssize_t s;
- static char *kwlist[] = {"string", "dtype", "count", "sep", NULL};
+ static char *kwlist[] = {"string", "dtype", "count", "sep", "like", NULL};
+ PyObject *like = NULL;
PyArray_Descr *descr = NULL;
+ PyObject *array_function_result = NULL;
if (!PyArg_ParseTupleAndKeywords(args, keywds,
- "s#|O&" NPY_SSIZE_T_PYFMT "s:fromstring", kwlist,
- &data, &s, PyArray_DescrConverter, &descr, &nin, &sep)) {
+ "s#|O&" NPY_SSIZE_T_PYFMT "s$O:fromstring", kwlist,
+ &data, &s, PyArray_DescrConverter, &descr, &nin, &sep, &like)) {
Py_XDECREF(descr);
return NULL;
}
+ array_function_result = array_implement_c_array_function_creation(
+ "fromstring", args, keywds);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
/* binary mode, condition copied from PyArray_FromString */
if (sep == NULL || strlen(sep) == 0) {
/* Numpy 1.14, 2017-10-19 */
@@ -2082,19 +2194,27 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL;
char *sep = "";
Py_ssize_t nin = -1;
- static char *kwlist[] = {"file", "dtype", "count", "sep", "offset", NULL};
+ static char *kwlist[] = {"file", "dtype", "count", "sep", "offset", "like", NULL};
+ PyObject *like = NULL;
PyArray_Descr *type = NULL;
+ PyObject *array_function_result = NULL;
int own;
npy_off_t orig_pos = 0, offset = 0;
FILE *fp;
if (!PyArg_ParseTupleAndKeywords(args, keywds,
- "O|O&" NPY_SSIZE_T_PYFMT "s" NPY_OFF_T_PYFMT ":fromfile", kwlist,
- &file, PyArray_DescrConverter, &type, &nin, &sep, &offset)) {
+ "O|O&" NPY_SSIZE_T_PYFMT "s" NPY_OFF_T_PYFMT "$O:fromfile", kwlist,
+ &file, PyArray_DescrConverter, &type, &nin, &sep, &offset, &like)) {
Py_XDECREF(type);
return NULL;
}
+ array_function_result = array_implement_c_array_function_creation(
+ "fromfile", args, keywds);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
file = NpyPath_PathlikeToFspath(file);
if (file == NULL) {
return NULL;
@@ -2161,15 +2281,24 @@ array_fromiter(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
{
PyObject *iter;
Py_ssize_t nin = -1;
- static char *kwlist[] = {"iter", "dtype", "count", NULL};
+ static char *kwlist[] = {"iter", "dtype", "count", "like", NULL};
+ PyObject *like = NULL;
PyArray_Descr *descr = NULL;
+ PyObject *array_function_result = NULL;
if (!PyArg_ParseTupleAndKeywords(args, keywds,
- "OO&|" NPY_SSIZE_T_PYFMT ":fromiter", kwlist,
- &iter, PyArray_DescrConverter, &descr, &nin)) {
+ "OO&|" NPY_SSIZE_T_PYFMT "$O:fromiter", kwlist,
+ &iter, PyArray_DescrConverter, &descr, &nin, &like)) {
Py_XDECREF(descr);
return NULL;
}
+
+ array_function_result = array_implement_c_array_function_creation(
+ "fromiter", args, keywds);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
return PyArray_FromIter(iter, descr, (npy_intp)nin);
}
@@ -2178,15 +2307,24 @@ array_frombuffer(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds
{
PyObject *obj = NULL;
Py_ssize_t nin = -1, offset = 0;
- static char *kwlist[] = {"buffer", "dtype", "count", "offset", NULL};
+ static char *kwlist[] = {"buffer", "dtype", "count", "offset", "like", NULL};
+ PyObject *like = NULL;
PyArray_Descr *type = NULL;
+ PyObject *array_function_result = NULL;
if (!PyArg_ParseTupleAndKeywords(args, keywds,
- "O|O&" NPY_SSIZE_T_PYFMT NPY_SSIZE_T_PYFMT ":frombuffer", kwlist,
- &obj, PyArray_DescrConverter, &type, &nin, &offset)) {
+ "O|O&" NPY_SSIZE_T_PYFMT NPY_SSIZE_T_PYFMT "$O:frombuffer", kwlist,
+ &obj, PyArray_DescrConverter, &type, &nin, &offset, &like)) {
Py_XDECREF(type);
return NULL;
}
+
+ array_function_result = array_implement_c_array_function_creation(
+ "frombuffer", args, keywds);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
if (type == NULL) {
type = PyArray_DescrFromType(NPY_DEFAULT_TYPE);
}
@@ -2198,11 +2336,27 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
{
PyObject *a0;
PyObject *out = NULL;
+ PyArray_Descr *dtype = NULL;
+ NPY_CASTING casting = NPY_SAME_KIND_CASTING;
+ PyObject *casting_obj = NULL;
+ PyObject *res;
int axis = 0;
- static char *kwlist[] = {"seq", "axis", "out", NULL};
-
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O:concatenate", kwlist,
- &a0, PyArray_AxisConverter, &axis, &out)) {
+ static char *kwlist[] = {"seq", "axis", "out", "dtype", "casting", NULL};
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O$O&O:concatenate", kwlist,
+ &a0, PyArray_AxisConverter, &axis, &out,
+ PyArray_DescrConverter2, &dtype, &casting_obj)) {
+ return NULL;
+ }
+ int casting_not_passed = 0;
+ if (casting_obj == NULL) {
+ /*
+ * Casting was not passed in, needed for deprecation only.
+ * This should be simplified once the deprecation is finished.
+ */
+ casting_not_passed = 1;
+ }
+ else if (!PyArray_CastingConverter(casting_obj, &casting)) {
+ Py_XDECREF(dtype);
return NULL;
}
if (out != NULL) {
@@ -2211,10 +2365,14 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
}
else if (!PyArray_Check(out)) {
PyErr_SetString(PyExc_TypeError, "'out' must be an array");
+ Py_XDECREF(dtype);
return NULL;
}
}
- return PyArray_ConcatenateInto(a0, axis, (PyArrayObject *)out);
+ res = PyArray_ConcatenateInto(a0, axis, (PyArrayObject *)out, dtype,
+ casting, casting_not_passed);
+ Py_XDECREF(dtype);
+ return res;
}
static PyObject *
@@ -2766,17 +2924,27 @@ array_correlate2(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
static PyObject *
array_arange(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) {
PyObject *o_start = NULL, *o_stop = NULL, *o_step = NULL, *range=NULL;
- static char *kwd[]= {"start", "stop", "step", "dtype", NULL};
+ PyObject *like = NULL;
+ PyObject *array_function_result = NULL;
+ static char *kwd[] = {"start", "stop", "step", "dtype", "like", NULL};
PyArray_Descr *typecode = NULL;
- if (!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&:arange", kwd,
+ if (!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&$O:arange", kwd,
&o_start,
&o_stop,
&o_step,
- PyArray_DescrConverter2, &typecode)) {
+ PyArray_DescrConverter2, &typecode,
+ &like)) {
Py_XDECREF(typecode);
return NULL;
}
+
+ array_function_result = array_implement_c_array_function_creation(
+ "arange", args, kws);
+ if (array_function_result != Py_NotImplemented) {
+ return array_function_result;
+ }
+
range = PyArray_ArangeObj(o_start, o_stop, o_step, typecode);
Py_XDECREF(typecode);
@@ -2810,7 +2978,7 @@ array__get_ndarray_c_version(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObje
if (!PyArg_ParseTupleAndKeywords(args, kwds, "", kwlist )) {
return NULL;
}
- return PyInt_FromLong( (long) PyArray_GetNDArrayCVersion() );
+ return PyLong_FromLong( (long) PyArray_GetNDArrayCVersion() );
}
/*NUMPY_API
@@ -3950,7 +4118,7 @@ normalize_axis_index(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
return NULL;
}
- return PyInt_FromLong(axis);
+ return PyLong_FromLong(axis);
}
@@ -4292,13 +4460,13 @@ set_flaginfo(PyObject *d)
newd = PyDict_New();
#define _addnew(key, val, one) \
- PyDict_SetItemString(newd, #key, s=PyInt_FromLong(val)); \
+ PyDict_SetItemString(newd, #key, s=PyLong_FromLong(val)); \
Py_DECREF(s); \
- PyDict_SetItemString(newd, #one, s=PyInt_FromLong(val)); \
+ PyDict_SetItemString(newd, #one, s=PyLong_FromLong(val)); \
Py_DECREF(s)
#define _addone(key, val) \
- PyDict_SetItemString(newd, #key, s=PyInt_FromLong(val)); \
+ PyDict_SetItemString(newd, #key, s=PyLong_FromLong(val)); \
Py_DECREF(s)
_addnew(OWNDATA, NPY_ARRAY_OWNDATA, O);
@@ -4331,28 +4499,33 @@ NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_dtype = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_ndmin = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis1 = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis2 = NULL;
+NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_like = NULL;
+NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_numpy = NULL;
static int
intern_strings(void)
{
- npy_ma_str_array = PyUString_InternFromString("__array__");
- npy_ma_str_array_prepare = PyUString_InternFromString("__array_prepare__");
- npy_ma_str_array_wrap = PyUString_InternFromString("__array_wrap__");
- npy_ma_str_array_finalize = PyUString_InternFromString("__array_finalize__");
- npy_ma_str_ufunc = PyUString_InternFromString("__array_ufunc__");
- npy_ma_str_implementation = PyUString_InternFromString("_implementation");
- npy_ma_str_order = PyUString_InternFromString("order");
- npy_ma_str_copy = PyUString_InternFromString("copy");
- npy_ma_str_dtype = PyUString_InternFromString("dtype");
- npy_ma_str_ndmin = PyUString_InternFromString("ndmin");
- npy_ma_str_axis1 = PyUString_InternFromString("axis1");
- npy_ma_str_axis2 = PyUString_InternFromString("axis2");
+ npy_ma_str_array = PyUnicode_InternFromString("__array__");
+ npy_ma_str_array_prepare = PyUnicode_InternFromString("__array_prepare__");
+ npy_ma_str_array_wrap = PyUnicode_InternFromString("__array_wrap__");
+ npy_ma_str_array_finalize = PyUnicode_InternFromString("__array_finalize__");
+ npy_ma_str_ufunc = PyUnicode_InternFromString("__array_ufunc__");
+ npy_ma_str_implementation = PyUnicode_InternFromString("_implementation");
+ npy_ma_str_order = PyUnicode_InternFromString("order");
+ npy_ma_str_copy = PyUnicode_InternFromString("copy");
+ npy_ma_str_dtype = PyUnicode_InternFromString("dtype");
+ npy_ma_str_ndmin = PyUnicode_InternFromString("ndmin");
+ npy_ma_str_axis1 = PyUnicode_InternFromString("axis1");
+ npy_ma_str_axis2 = PyUnicode_InternFromString("axis2");
+ npy_ma_str_like = PyUnicode_InternFromString("like");
+ npy_ma_str_numpy = PyUnicode_InternFromString("numpy");
return npy_ma_str_array && npy_ma_str_array_prepare &&
npy_ma_str_array_wrap && npy_ma_str_array_finalize &&
npy_ma_str_ufunc && npy_ma_str_implementation &&
npy_ma_str_order && npy_ma_str_copy && npy_ma_str_dtype &&
- npy_ma_str_ndmin && npy_ma_str_axis1 && npy_ma_str_axis2;
+ npy_ma_str_ndmin && npy_ma_str_axis1 && npy_ma_str_axis2 &&
+ npy_ma_str_like && npy_ma_str_numpy;
}
static struct PyModuleDef moduledef = {
@@ -4477,14 +4650,14 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) {
goto err;
}
- c_api = NpyCapsule_FromVoidPtr((void *)PyArray_API, NULL);
+ c_api = PyCapsule_New((void *)PyArray_API, NULL, NULL);
if (c_api == NULL) {
goto err;
}
PyDict_SetItemString(d, "_ARRAY_API", c_api);
Py_DECREF(c_api);
- c_api = NpyCapsule_FromVoidPtr((void *)PyUFunc_API, NULL);
+ c_api = PyCapsule_New((void *)PyUFunc_API, NULL, NULL);
if (c_api == NULL) {
goto err;
}
@@ -4502,11 +4675,11 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) {
*/
PyDict_SetItemString (d, "error", PyExc_Exception);
- s = PyInt_FromLong(NPY_TRACE_DOMAIN);
+ s = PyLong_FromLong(NPY_TRACE_DOMAIN);
PyDict_SetItemString(d, "tracemalloc_domain", s);
Py_DECREF(s);
- s = PyUString_FromString("3.1");
+ s = PyUnicode_FromString("3.1");
PyDict_SetItemString(d, "__version__", s);
Py_DECREF(s);
@@ -4540,7 +4713,7 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) {
}
Py_DECREF(s);
- s = NpyCapsule_FromVoidPtr((void *)_datetime_strings, NULL);
+ s = PyCapsule_New((void *)_datetime_strings, NULL, NULL);
if (s == NULL) {
goto err;
}
@@ -4548,7 +4721,7 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) {
Py_DECREF(s);
#define ADDCONST(NAME) \
- s = PyInt_FromLong(NPY_##NAME); \
+ s = PyLong_FromLong(NPY_##NAME); \
PyDict_SetItemString(d, #NAME, s); \
Py_DECREF(s)
diff --git a/numpy/core/src/multiarray/multiarraymodule.h b/numpy/core/src/multiarray/multiarraymodule.h
index dd437e091..d3ee3337c 100644
--- a/numpy/core/src/multiarray/multiarraymodule.h
+++ b/numpy/core/src/multiarray/multiarraymodule.h
@@ -13,5 +13,7 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_dtype;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_ndmin;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis1;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis2;
+NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_like;
+NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_numpy;
#endif
diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c
index a5b5e5c51..059f2c437 100644
--- a/numpy/core/src/multiarray/nditer_api.c
+++ b/numpy/core/src/multiarray/nditer_api.c
@@ -229,13 +229,22 @@ NpyIter_EnableExternalLoop(NpyIter *iter)
return NpyIter_Reset(iter, NULL);
}
+
+static char *_reset_cast_error = (
+ "Iterator reset failed due to a casting failure. "
+ "This error is set as a Python error.");
+
/*NUMPY_API
* Resets the iterator to its initial state
*
+ * The use of errmsg is discouraged: even when it is passed, it cannot
+ * be guaranteed that the GIL will not be grabbed on casting errors.
+ *
* If errmsg is non-NULL, it should point to a variable which will
* receive the error message, and no Python exception will be set.
* This is so that the function can be called from code not holding
- * the GIL.
+ * the GIL. Note that cast errors may still lead to the GIL being
+ * grabbed temporarily.
*/
NPY_NO_EXPORT int
NpyIter_Reset(NpyIter *iter, char **errmsg)
@@ -250,6 +259,9 @@ NpyIter_Reset(NpyIter *iter, char **errmsg)
/* If buffer allocation was delayed, do it now */
if (itflags&NPY_ITFLAG_DELAYBUF) {
if (!npyiter_allocate_buffers(iter, errmsg)) {
+ if (errmsg != NULL) {
+ *errmsg = _reset_cast_error;
+ }
return NPY_FAIL;
}
NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_DELAYBUF;
@@ -257,7 +269,7 @@ NpyIter_Reset(NpyIter *iter, char **errmsg)
else {
/*
* If the iterindex is already right, no need to
- * do anything
+ * do anything (and no cast error has previously occurred).
*/
bufferdata = NIT_BUFFERDATA(iter);
if (NIT_ITERINDEX(iter) == NIT_ITERSTART(iter) &&
@@ -265,9 +277,12 @@ NpyIter_Reset(NpyIter *iter, char **errmsg)
NBF_SIZE(bufferdata) > 0) {
return NPY_SUCCEED;
}
-
- /* Copy any data from the buffers back to the arrays */
- npyiter_copy_from_buffers(iter);
+ if (npyiter_copy_from_buffers(iter) < 0) {
+ if (errmsg != NULL) {
+ *errmsg = _reset_cast_error;
+ }
+ return NPY_FAIL;
+ }
}
}
@@ -275,7 +290,12 @@ NpyIter_Reset(NpyIter *iter, char **errmsg)
if (itflags&NPY_ITFLAG_BUFFER) {
/* Prepare the next buffers and set iterend/size */
- npyiter_copy_to_buffers(iter, NULL);
+ if (npyiter_copy_to_buffers(iter, NULL) < 0) {
+ if (errmsg != NULL) {
+ *errmsg = _reset_cast_error;
+ }
+ return NPY_FAIL;
+ }
}
return NPY_SUCCEED;
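
In caller terms, the revised errmsg contract looks roughly like this (a sketch assuming a valid buffered `iter`; not taken from this patch):

    char *errmsg = NULL;
    NPY_BEGIN_THREADS_DEF;
    NPY_BEGIN_THREADS;
    if (NpyIter_Reset(iter, &errmsg) != NPY_SUCCEED) {
        NPY_END_THREADS;
        /* A cast failure may already have set a Python error while
         * temporarily grabbing the GIL; only raise from errmsg if not. */
        if (!PyErr_Occurred()) {
            PyErr_SetString(PyExc_RuntimeError, errmsg);
        }
        return NULL;
    }
    NPY_END_THREADS;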
@@ -288,7 +308,8 @@ NpyIter_Reset(NpyIter *iter, char **errmsg)
* If errmsg is non-NULL, it should point to a variable which will
* receive the error message, and no Python exception will be set.
* This is so that the function can be called from code not holding
- * the GIL.
+ * the GIL. Note that cast errors may still lead to the GIL being
+ * grabbed temporarily.
*/
NPY_NO_EXPORT int
NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char **errmsg)
@@ -309,8 +330,12 @@ NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char **errmsg)
NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_DELAYBUF;
}
else {
- /* Copy any data from the buffers back to the arrays */
- npyiter_copy_from_buffers(iter);
+ if (npyiter_copy_from_buffers(iter) < 0) {
+ if (errmsg != NULL) {
+ *errmsg = _reset_cast_error;
+ }
+ return NPY_FAIL;
+ }
}
}
@@ -323,7 +348,12 @@ NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char **errmsg)
if (itflags&NPY_ITFLAG_BUFFER) {
/* Prepare the next buffers and set iterend/size */
- npyiter_copy_to_buffers(iter, NULL);
+ if (npyiter_copy_to_buffers(iter, NULL) < 0) {
+ if (errmsg != NULL) {
+ *errmsg = _reset_cast_error;
+ }
+ return NPY_FAIL;
+ }
}
return NPY_SUCCEED;
@@ -335,7 +365,8 @@ NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char **errmsg)
* If errmsg is non-NULL, it should point to a variable which will
* receive the error message, and no Python exception will be set.
* This is so that the function can be called from code not holding
- * the GIL.
+ * the GIL. Note that cast errors may still lead to the GIL being
+ * grabbed temporarily.
*/
NPY_NO_EXPORT int
NpyIter_ResetToIterIndexRange(NpyIter *iter,
@@ -633,12 +664,16 @@ NpyIter_GotoIterIndex(NpyIter *iter, npy_intp iterindex)
/* Start the buffer at the provided iterindex */
else {
/* Write back to the arrays */
- npyiter_copy_from_buffers(iter);
+ if (npyiter_copy_from_buffers(iter) < 0) {
+ return NPY_FAIL;
+ }
npyiter_goto_iterindex(iter, iterindex);
/* Prepare the next buffers and set iterend/size */
- npyiter_copy_to_buffers(iter, NULL);
+ if (npyiter_copy_to_buffers(iter, NULL) < 0) {
+ return NPY_FAIL;
+ }
}
}
else {
@@ -1376,6 +1411,7 @@ NpyIter_GetInnerLoopSizePtr(NpyIter *iter)
}
}
+
/*NUMPY_API
* For debugging
*/
@@ -1828,7 +1864,7 @@ npyiter_goto_iterindex(NpyIter *iter, npy_intp iterindex)
* their data needs to be written back to the arrays. The multi-index
* must be positioned for the beginning of the buffer.
*/
-NPY_NO_EXPORT void
+NPY_NO_EXPORT int
npyiter_copy_from_buffers(NpyIter *iter)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
@@ -1861,7 +1897,7 @@ npyiter_copy_from_buffers(NpyIter *iter)
/* If we're past the end, nothing to copy */
if (NBF_SIZE(bufferdata) == 0) {
- return;
+ return 0;
}
NPY_IT_DBG_PRINT("Iterator: Copying buffers to outputs\n");
@@ -1968,7 +2004,7 @@ npyiter_copy_from_buffers(NpyIter *iter)
maskptr = (npy_bool *)ad_ptrs[maskop];
}
- PyArray_TransferMaskedStridedToNDim(ndim_transfer,
+ if (PyArray_TransferMaskedStridedToNDim(ndim_transfer,
ad_ptrs[iop], dst_strides, axisdata_incr,
buffer, src_stride,
maskptr, strides[maskop],
@@ -1976,18 +2012,22 @@ npyiter_copy_from_buffers(NpyIter *iter)
dst_shape, axisdata_incr,
op_transfersize, dtypes[iop]->elsize,
(PyArray_MaskedStridedUnaryOp *)stransfer,
- transferdata);
+ transferdata) < 0) {
+ return -1;
+ }
}
/* Regular operand */
else {
- PyArray_TransferStridedToNDim(ndim_transfer,
+ if (PyArray_TransferStridedToNDim(ndim_transfer,
ad_ptrs[iop], dst_strides, axisdata_incr,
buffer, src_stride,
dst_coords, axisdata_incr,
dst_shape, axisdata_incr,
op_transfersize, dtypes[iop]->elsize,
stransfer,
- transferdata);
+ transferdata) < 0) {
+ return -1;
+ }
}
}
/* If there's no copy back, we may have to decrement refs. In
@@ -2002,9 +2042,13 @@ npyiter_copy_from_buffers(NpyIter *iter)
NPY_IT_DBG_PRINT1("Iterator: Freeing refs and zeroing buffer "
"of operand %d\n", (int)iop);
/* Decrement refs */
- stransfer(NULL, 0, buffer, dtypes[iop]->elsize,
- transfersize, dtypes[iop]->elsize,
- transferdata);
+ if (stransfer(NULL, 0, buffer, dtypes[iop]->elsize,
+ transfersize, dtypes[iop]->elsize,
+ transferdata) < 0) {
+ /* Since this should only decrement, it should never error */
+ assert(0);
+ return -1;
+ }
/*
* Zero out the memory for safety. For instance,
* if during iteration some Python code copied an
@@ -2016,6 +2060,7 @@ npyiter_copy_from_buffers(NpyIter *iter)
}
NPY_IT_DBG_PRINT("Iterator: Finished copying buffers to outputs\n");
+ return 0;
}
/*
@@ -2023,7 +2068,7 @@ npyiter_copy_from_buffers(NpyIter *iter)
* for the start of a buffer. It decides which operands need a buffer,
* and copies the data into the buffers.
*/
-NPY_NO_EXPORT void
+NPY_NO_EXPORT int
npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
@@ -2142,7 +2187,7 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
NBF_BUFITEREND(bufferdata) = iterindex + reduce_innersize;
if (reduce_innersize == 0) {
NBF_REDUCE_OUTERSIZE(bufferdata) = 0;
- return;
+ return 0;
}
else {
NBF_REDUCE_OUTERSIZE(bufferdata) = transfersize/reduce_innersize;
@@ -2508,14 +2553,15 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
"buffer (%d items)\n",
(int)iop, (int)op_transfersize);
- PyArray_TransferNDimToStrided(ndim_transfer,
- ptrs[iop], dst_stride,
- ad_ptrs[iop], src_strides, axisdata_incr,
- src_coords, axisdata_incr,
- src_shape, axisdata_incr,
- op_transfersize, src_itemsize,
- stransfer,
- transferdata);
+ if (PyArray_TransferNDimToStrided(
+ ndim_transfer, ptrs[iop], dst_stride,
+ ad_ptrs[iop], src_strides, axisdata_incr,
+ src_coords, axisdata_incr,
+ src_shape, axisdata_incr,
+ op_transfersize, src_itemsize,
+ stransfer, transferdata) < 0) {
+ return -1;
+ }
}
}
else if (ptrs[iop] == buffers[iop]) {
@@ -2551,8 +2597,80 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
NPY_IT_DBG_PRINT1("Iterator: Finished copying inputs to buffers "
"(buffered size is %d)\n", (int)NBF_SIZE(bufferdata));
+ return 0;
}
+
+/**
+ * This function clears any references still held by the buffers and should
+ * only be used to discard buffers if an error occurred.
+ *
+ * @param iter Iterator
+ */
+NPY_NO_EXPORT void
+npyiter_clear_buffers(NpyIter *iter)
+{
+ int nop = iter->nop;
+ NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
+
+ if (NBF_SIZE(bufferdata) == 0) {
+ /* if the buffers are empty already, there is nothing to do */
+ return;
+ }
+
+ if (!(NIT_ITFLAGS(iter) & NPY_ITFLAG_NEEDSAPI)) {
+ /* Buffers do not require clearing, but should not be copied back */
+ NBF_SIZE(bufferdata) = 0;
+ return;
+ }
+
+ /*
+ * The iterator may be using a dtype with references, which always
+ * requires the API. In that case, further cleanup may be necessary.
+ *
+ * TODO: At this time, we assume that a dtype having references
+ * implies the need to hold the GIL at all times. In theory
+ * we could broaden this definition for a new
+     *       `PyArray_Item_XDECREF` API, in which case the assumption
+     *       would become incorrect.
+ */
+ PyObject *type, *value, *traceback;
+ PyErr_Fetch(&type, &value, &traceback);
+
+ /* Cleanup any buffers with references */
+ char **buffers = NBF_BUFFERS(bufferdata);
+ PyArray_Descr **dtypes = NIT_DTYPES(iter);
+ for (int iop = 0; iop < nop; ++iop, ++buffers) {
+ /*
+         * We may want to find a better way to do this; on the other hand,
+         * this cleanup seems rare and fairly special. A dtype using
+         * references (right now only ours) must always keep the buffer in
+         * a well-defined state (either NULL or owning the reference), and
+         * only we implement this cleanup.
+ */
+ if (!PyDataType_REFCHK(dtypes[iop])) {
+ continue;
+ }
+ if (*buffers == 0) {
+ continue;
+ }
+ int itemsize = dtypes[iop]->elsize;
+ for (npy_intp i = 0; i < NBF_SIZE(bufferdata); i++) {
+ /*
+ * See above comment, if this API is expanded the GIL assumption
+ * could become incorrect.
+ */
+ PyArray_Item_XDECREF(*buffers + (itemsize * i), dtypes[iop]);
+ }
+ /* Clear out the buffer just to be sure */
+ memset(*buffers, 0, NBF_SIZE(bufferdata) * itemsize);
+ }
+ /* Signal that the buffers are empty */
+ NBF_SIZE(bufferdata) = 0;
+ PyErr_Restore(type, value, traceback);
+}
+
+
/*
* This checks how much space can be buffered without encountering the
* same value twice, or for operands whose innermost stride is zero,
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index 7da17eafe..4bc6d2ca1 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -476,7 +476,10 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
}
/* Prepare the next buffers and set iterend/size */
- npyiter_copy_to_buffers(iter, NULL);
+ if (npyiter_copy_to_buffers(iter, NULL) < 0) {
+ NpyIter_Deallocate(iter);
+ return NULL;
+ }
}
}
@@ -642,21 +645,27 @@ NpyIter_Copy(NpyIter *iter)
}
/*NUMPY_API
- * Deallocate an iterator
+ * Deallocate an iterator.
+ *
+ * To work correctly when an error is in progress, we have to check
+ * `PyErr_Occurred()`. This is necessary when buffers are not finalized
+ * or WritebackIfCopy is used. We could avoid that check by exposing a new
+ * function that is told whether or not a Python error is already set.
*/
NPY_NO_EXPORT int
NpyIter_Deallocate(NpyIter *iter)
{
+ int success = PyErr_Occurred() == NULL;
+
npy_uint32 itflags;
/*int ndim = NIT_NDIM(iter);*/
int iop, nop;
PyArray_Descr **dtype;
PyArrayObject **object;
npyiter_opitflags *op_itflags;
- npy_bool resolve = 1;
if (iter == NULL) {
- return NPY_SUCCEED;
+ return success;
}
itflags = NIT_ITFLAGS(iter);
@@ -667,13 +676,23 @@ NpyIter_Deallocate(NpyIter *iter)
/* Deallocate any buffers and buffering data */
if (itflags & NPY_ITFLAG_BUFFER) {
+ /* Ensure no data is held by the buffers before they are cleared */
+ if (success) {
+ if (npyiter_copy_from_buffers(iter) < 0) {
+ success = NPY_FAIL;
+ }
+ }
+ else {
+ npyiter_clear_buffers(iter);
+ }
+
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
char **buffers;
NpyAuxData **transferdata;
/* buffers */
buffers = NBF_BUFFERS(bufferdata);
- for(iop = 0; iop < nop; ++iop, ++buffers) {
+ for (iop = 0; iop < nop; ++iop, ++buffers) {
PyArray_free(*buffers);
}
/* read bufferdata */
@@ -694,12 +713,12 @@ NpyIter_Deallocate(NpyIter *iter)
/*
* Deallocate all the dtypes and objects that were iterated and resolve
- * any writeback buffers created by the iterator
+ * any writeback buffers created by the iterator.
*/
- for(iop = 0; iop < nop; ++iop, ++dtype, ++object) {
+ for (iop = 0; iop < nop; ++iop, ++dtype, ++object) {
if (op_itflags[iop] & NPY_OP_ITFLAG_HAS_WRITEBACK) {
- if (resolve && PyArray_ResolveWritebackIfCopy(*object) < 0) {
- resolve = 0;
+ if (success && PyArray_ResolveWritebackIfCopy(*object) < 0) {
+ success = 0;
}
else {
PyArray_DiscardWritebackIfCopy(*object);
@@ -711,12 +730,10 @@ NpyIter_Deallocate(NpyIter *iter)
/* Deallocate the iterator memory */
PyObject_Free(iter);
- if (resolve == 0) {
- return NPY_FAIL;
- }
- return NPY_SUCCEED;
+ return success;
}
+
/* Checks 'flags' for (C|F)_ORDER_INDEX, MULTI_INDEX, and EXTERNAL_LOOP,
* setting the appropriate internal flags in 'itflags'.
*
@@ -1738,7 +1755,7 @@ broadcast_error: {
char *tmpstr;
if (op_axes == NULL) {
- errmsg = PyUString_FromString("operands could not be broadcast "
+ errmsg = PyUnicode_FromString("operands could not be broadcast "
"together with shapes ");
if (errmsg == NULL) {
return 0;
@@ -1759,7 +1776,7 @@ broadcast_error: {
}
}
if (itershape != NULL) {
- tmp = PyUString_FromString("and requested shape ");
+ tmp = PyUnicode_FromString("and requested shape ");
if (tmp == NULL) {
Py_DECREF(errmsg);
return 0;
@@ -1784,7 +1801,7 @@ broadcast_error: {
Py_DECREF(errmsg);
}
else {
- errmsg = PyUString_FromString("operands could not be broadcast "
+ errmsg = PyUnicode_FromString("operands could not be broadcast "
"together with remapped shapes "
"[original->remapped]: ");
for (iop = 0; iop < nop; ++iop) {
@@ -1826,7 +1843,7 @@ broadcast_error: {
}
}
if (itershape != NULL) {
- tmp = PyUString_FromString("and requested shape ");
+ tmp = PyUnicode_FromString("and requested shape ");
if (tmp == NULL) {
Py_DECREF(errmsg);
return 0;
@@ -1860,11 +1877,11 @@ operand_different_than_broadcast: {
/* Start of error message */
if (op_flags[iop] & NPY_ITER_READONLY) {
- errmsg = PyUString_FromString("non-broadcastable operand "
+ errmsg = PyUnicode_FromString("non-broadcastable operand "
"with shape ");
}
else {
- errmsg = PyUString_FromString("non-broadcastable output "
+ errmsg = PyUnicode_FromString("non-broadcastable output "
"operand with shape ");
}
if (errmsg == NULL) {
@@ -1896,7 +1913,7 @@ operand_different_than_broadcast: {
}
}
- tmp = PyUString_FromString(" [remapped to ");
+ tmp = PyUnicode_FromString(" [remapped to ");
if (tmp == NULL) {
return 0;
}
@@ -1915,7 +1932,7 @@ operand_different_than_broadcast: {
}
}
- tmp = PyUString_FromString(" doesn't match the broadcast shape ");
+ tmp = PyUnicode_FromString(" doesn't match the broadcast shape ");
if (tmp == NULL) {
return 0;
}
diff --git a/numpy/core/src/multiarray/nditer_impl.h b/numpy/core/src/multiarray/nditer_impl.h
index 1477c8631..bb483bb1f 100644
--- a/numpy/core/src/multiarray/nditer_impl.h
+++ b/numpy/core/src/multiarray/nditer_impl.h
@@ -342,10 +342,11 @@ NPY_NO_EXPORT int
npyiter_allocate_buffers(NpyIter *iter, char **errmsg);
NPY_NO_EXPORT void
npyiter_goto_iterindex(NpyIter *iter, npy_intp iterindex);
-NPY_NO_EXPORT void
+NPY_NO_EXPORT int
npyiter_copy_from_buffers(NpyIter *iter);
-NPY_NO_EXPORT void
+NPY_NO_EXPORT int
npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs);
-
+NPY_NO_EXPORT void
+npyiter_clear_buffers(NpyIter *iter);
#endif
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index 7f31a5096..8839d1be7 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -894,7 +894,7 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self),
Py_DECREF(item);
return NULL;
}
- axis = PyInt_AsLong(v);
+ axis = PyLong_AsLong(v);
Py_DECREF(v);
if (axis < 0 || axis >= NPY_MAXDIMS) {
PyErr_SetString(PyExc_ValueError,
@@ -1142,7 +1142,7 @@ npyiter_dealloc(NewNpyArrayIterObject *self)
"results.", 1) < 0) {
PyObject *s;
- s = PyUString_FromString("npyiter_dealloc");
+ s = PyUnicode_FromString("npyiter_dealloc");
if (s) {
PyErr_WriteUnraisable(s);
Py_DECREF(s);
@@ -1268,6 +1268,10 @@ npyiter_iternext(NewNpyArrayIterObject *self)
Py_RETURN_TRUE;
}
else {
+ if (PyErr_Occurred()) {
+            /* casting error; buffer cleanup will occur at reset or dealloc */
+ return NULL;
+ }
self->finished = 1;
Py_RETURN_FALSE;
}
@@ -1483,6 +1487,10 @@ npyiter_next(NewNpyArrayIterObject *self)
*/
if (self->started) {
if (!self->iternext(self->iter)) {
+ /*
+             * A casting error may be set here (or, if no error is set,
+             * this is a normal StopIteration). Buffers may only be
+             * cleaned up later.
+ */
self->finished = 1;
return NULL;
}
@@ -1514,7 +1522,7 @@ static PyObject *npyiter_shape_get(NewNpyArrayIterObject *self)
if (ret != NULL) {
for (idim = 0; idim < ndim; ++idim) {
PyTuple_SET_ITEM(ret, idim,
- PyInt_FromLong(shape[idim]));
+ PyLong_FromLong(shape[idim]));
}
return ret;
}
@@ -1543,7 +1551,7 @@ static PyObject *npyiter_multi_index_get(NewNpyArrayIterObject *self)
}
for (idim = 0; idim < ndim; ++idim) {
PyTuple_SET_ITEM(ret, idim,
- PyInt_FromLong(multi_index[idim]));
+ PyLong_FromLong(multi_index[idim]));
}
return ret;
}
@@ -1597,7 +1605,7 @@ npyiter_multi_index_set(NewNpyArrayIterObject *self, PyObject *value)
}
for (idim = 0; idim < ndim; ++idim) {
PyObject *v = PySequence_GetItem(value, idim);
- multi_index[idim] = PyInt_AsLong(v);
+ multi_index[idim] = PyLong_AsLong(v);
if (error_converting(multi_index[idim])) {
Py_XDECREF(v);
return -1;
@@ -1633,7 +1641,7 @@ static PyObject *npyiter_index_get(NewNpyArrayIterObject *self)
if (NpyIter_HasIndex(self->iter)) {
npy_intp ind = *NpyIter_GetIndexPtr(self->iter);
- return PyInt_FromLong(ind);
+ return PyLong_FromLong(ind);
}
else {
PyErr_SetString(PyExc_ValueError,
@@ -1657,7 +1665,7 @@ static int npyiter_index_set(NewNpyArrayIterObject *self, PyObject *value)
if (NpyIter_HasIndex(self->iter)) {
npy_intp ind;
- ind = PyInt_AsLong(value);
+ ind = PyLong_AsLong(value);
if (error_converting(ind)) {
return -1;
}
@@ -1689,7 +1697,7 @@ static PyObject *npyiter_iterindex_get(NewNpyArrayIterObject *self)
return NULL;
}
- return PyInt_FromLong(NpyIter_GetIterIndex(self->iter));
+ return PyLong_FromLong(NpyIter_GetIterIndex(self->iter));
}
static int npyiter_iterindex_set(NewNpyArrayIterObject *self, PyObject *value)
@@ -1707,7 +1715,7 @@ static int npyiter_iterindex_set(NewNpyArrayIterObject *self, PyObject *value)
return -1;
}
- iterindex = PyInt_AsLong(value);
+ iterindex = PyLong_AsLong(value);
if (error_converting(iterindex)) {
return -1;
}
@@ -1743,8 +1751,8 @@ static PyObject *npyiter_iterrange_get(NewNpyArrayIterObject *self)
return NULL;
}
- PyTuple_SET_ITEM(ret, 0, PyInt_FromLong(istart));
- PyTuple_SET_ITEM(ret, 1, PyInt_FromLong(iend));
+ PyTuple_SET_ITEM(ret, 0, PyLong_FromLong(istart));
+ PyTuple_SET_ITEM(ret, 1, PyLong_FromLong(iend));
return ret;
}
@@ -1892,7 +1900,7 @@ static PyObject *npyiter_ndim_get(NewNpyArrayIterObject *self)
return NULL;
}
- return PyInt_FromLong(NpyIter_GetNDim(self->iter));
+ return PyLong_FromLong(NpyIter_GetNDim(self->iter));
}
static PyObject *npyiter_nop_get(NewNpyArrayIterObject *self)
@@ -1903,7 +1911,7 @@ static PyObject *npyiter_nop_get(NewNpyArrayIterObject *self)
return NULL;
}
- return PyInt_FromLong(NpyIter_GetNOp(self->iter));
+ return PyLong_FromLong(NpyIter_GetNOp(self->iter));
}
static PyObject *npyiter_itersize_get(NewNpyArrayIterObject *self)
@@ -1914,7 +1922,7 @@ static PyObject *npyiter_itersize_get(NewNpyArrayIterObject *self)
return NULL;
}
- return PyInt_FromLong(NpyIter_GetIterSize(self->iter));
+ return PyLong_FromLong(NpyIter_GetIterSize(self->iter));
}
static PyObject *npyiter_finished_get(NewNpyArrayIterObject *self)
@@ -2213,7 +2221,7 @@ npyiter_subscript(NewNpyArrayIterObject *self, PyObject *op)
return NULL;
}
- if (PyInt_Check(op) || PyLong_Check(op) ||
+ if (PyLong_Check(op) ||
(PyIndex_Check(op) && !PySequence_Check(op))) {
npy_intp i = PyArray_PyIntAsIntp(op);
if (error_converting(i)) {
@@ -2223,8 +2231,8 @@ npyiter_subscript(NewNpyArrayIterObject *self, PyObject *op)
}
else if (PySlice_Check(op)) {
Py_ssize_t istart = 0, iend = 0, istep = 0, islicelength;
- if (NpySlice_GetIndicesEx(op, NpyIter_GetNOp(self->iter),
- &istart, &iend, &istep, &islicelength) < 0) {
+ if (PySlice_GetIndicesEx(op, NpyIter_GetNOp(self->iter),
+ &istart, &iend, &istep, &islicelength) < 0) {
return NULL;
}
if (istep != 1) {
@@ -2262,7 +2270,7 @@ npyiter_ass_subscript(NewNpyArrayIterObject *self, PyObject *op,
return -1;
}
- if (PyInt_Check(op) || PyLong_Check(op) ||
+ if (PyLong_Check(op) ||
(PyIndex_Check(op) && !PySequence_Check(op))) {
npy_intp i = PyArray_PyIntAsIntp(op);
if (error_converting(i)) {
@@ -2272,8 +2280,8 @@ npyiter_ass_subscript(NewNpyArrayIterObject *self, PyObject *op,
}
else if (PySlice_Check(op)) {
Py_ssize_t istart = 0, iend = 0, istep = 0, islicelength = 0;
- if (NpySlice_GetIndicesEx(op, NpyIter_GetNOp(self->iter),
- &istart, &iend, &istep, &islicelength) < 0) {
+ if (PySlice_GetIndicesEx(op, NpyIter_GetNOp(self->iter),
+ &istart, &iend, &istep, &islicelength) < 0) {
return -1;
}
if (istep != 1) {
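
For reference, the PySlice_GetIndicesEx calls that replace the old NpySlice compatibility wrapper follow the standard CPython signature; a sketch, where `op` and `seq_len` are placeholders:

    Py_ssize_t start, stop, step, slicelength;
    if (PySlice_GetIndicesEx(op, seq_len, &start, &stop, &step,
                             &slicelength) < 0) {
        return NULL;  /* exception already set */
    }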
diff --git a/numpy/core/src/multiarray/nditer_templ.c.src b/numpy/core/src/multiarray/nditer_templ.c.src
index 0f0d59972..05ce6ae75 100644
--- a/numpy/core/src/multiarray/nditer_templ.c.src
+++ b/numpy/core/src/multiarray/nditer_templ.c.src
@@ -249,7 +249,10 @@ npyiter_buffered_reduce_iternext_iters@tag_nop@(NpyIter *iter)
memcpy(prev_dataptrs, NAD_PTRS(axisdata), NPY_SIZEOF_INTP*nop);
/* Write back to the arrays */
- npyiter_copy_from_buffers(iter);
+ if (npyiter_copy_from_buffers(iter) < 0) {
+ npyiter_clear_buffers(iter);
+ return 0;
+ }
/* Check if we're past the end */
if (NIT_ITERINDEX(iter) >= NIT_ITEREND(iter)) {
@@ -262,7 +265,10 @@ npyiter_buffered_reduce_iternext_iters@tag_nop@(NpyIter *iter)
}
/* Prepare the next buffers and set iterend/size */
- npyiter_copy_to_buffers(iter, prev_dataptrs);
+ if (npyiter_copy_to_buffers(iter, prev_dataptrs) < 0) {
+ npyiter_clear_buffers(iter);
+ return 0;
+ }
return 1;
}
@@ -303,7 +309,10 @@ npyiter_buffered_iternext(NpyIter *iter)
}
/* Write back to the arrays */
- npyiter_copy_from_buffers(iter);
+ if (npyiter_copy_from_buffers(iter) < 0) {
+ npyiter_clear_buffers(iter);
+ return 0;
+ }
/* Check if we're past the end */
if (NIT_ITERINDEX(iter) >= NIT_ITEREND(iter)) {
@@ -316,7 +325,10 @@ npyiter_buffered_iternext(NpyIter *iter)
}
/* Prepare the next buffers and set iterend/size */
- npyiter_copy_to_buffers(iter, NULL);
+ if (npyiter_copy_to_buffers(iter, NULL) < 0) {
+ npyiter_clear_buffers(iter);
+ return 0;
+ }
return 1;
}
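
Both buffered iternext variants now follow the same failure protocol. From a caller's perspective it is roughly the following sketch, where `iternext` stands for the function pointer returned by NpyIter_GetIterNext (not part of the patch):

    /* iternext returning 0 is now ambiguous between normal exhaustion
     * and a casting error, so callers must consult PyErr_Occurred(). */
    if (!iternext(iter)) {
        if (PyErr_Occurred()) {
            goto fail;  /* cast error; buffers were already discarded */
        }
        /* otherwise: normal end of iteration */
    }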
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index 19ac7d7f9..87c3c9b0a 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -398,7 +398,7 @@ is_scalar_with_conversion(PyObject *o2, double* out_exponent)
const int optimize_fpexps = 1;
if (PyInt_Check(o2)) {
- *out_exponent = (double)PyInt_AsLong(o2);
+ *out_exponent = (double)PyLong_AsLong(o2);
return NPY_INTPOS_SCALAR;
}
if (optimize_fpexps && PyFloat_Check(o2)) {
@@ -448,7 +448,7 @@ is_scalar_with_conversion(PyObject *o2, double* out_exponent)
}
return NPY_NOSCALAR;
}
- val = PyInt_AsSsize_t(value);
+ val = PyLong_AsSsize_t(value);
if (error_converting(val)) {
PyErr_Clear();
return NPY_NOSCALAR;
@@ -826,7 +826,7 @@ _array_nonzero(PyArrayObject *mp)
n = PyArray_SIZE(mp);
if (n == 1) {
int res;
- if (Npy_EnterRecursiveCall(" while converting array to bool")) {
+ if (Py_EnterRecursiveCall(" while converting array to bool")) {
return -1;
}
res = PyArray_DESCR(mp)->f->nonzero(PyArray_DATA(mp), mp);
@@ -880,7 +880,7 @@ array_scalar_forward(PyArrayObject *v,
/* Need to guard against recursion if our array holds references */
if (PyDataType_REFCHK(PyArray_DESCR(v))) {
PyObject *res;
- if (Npy_EnterRecursiveCall(where) != 0) {
+ if (Py_EnterRecursiveCall(where) != 0) {
Py_DECREF(scalar);
return NULL;
}
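
Npy_EnterRecursiveCall was a pre-Python-3 shim; the replacement is the plain CPython pair, used like this (sketch):

    if (Py_EnterRecursiveCall(" while converting array to bool")) {
        return -1;  /* RecursionError is already set */
    }
    /* ... re-entrant work that may call back into Python ... */
    Py_LeaveRecursiveCall();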
diff --git a/numpy/core/src/multiarray/refcount.c b/numpy/core/src/multiarray/refcount.c
index c869b5eea..0f84449af 100644
--- a/numpy/core/src/multiarray/refcount.c
+++ b/numpy/core/src/multiarray/refcount.c
@@ -36,7 +36,7 @@ PyArray_Item_INCREF(char *data, PyArray_Descr *descr)
return;
}
if (descr->type_num == NPY_OBJECT) {
- NPY_COPY_PYOBJECT_PTR(&temp, data);
+ memcpy(&temp, data, sizeof(temp));
Py_XINCREF(temp);
}
else if (PyDataType_HASFIELDS(descr)) {
@@ -98,7 +98,7 @@ PyArray_Item_XDECREF(char *data, PyArray_Descr *descr)
}
if (descr->type_num == NPY_OBJECT) {
- NPY_COPY_PYOBJECT_PTR(&temp, data);
+ memcpy(&temp, data, sizeof(temp));
Py_XDECREF(temp);
}
else if (PyDataType_HASFIELDS(descr)) {
@@ -181,7 +181,7 @@ PyArray_INCREF(PyArrayObject *mp)
}
else {
for( i = 0; i < n; i++, data++) {
- NPY_COPY_PYOBJECT_PTR(&temp, data);
+ memcpy(&temp, data, sizeof(temp));
Py_XINCREF(temp);
}
}
@@ -192,7 +192,7 @@ PyArray_INCREF(PyArrayObject *mp)
return -1;
}
while(it->index < it->size) {
- NPY_COPY_PYOBJECT_PTR(&temp, it->dataptr);
+ memcpy(&temp, it->dataptr, sizeof(temp));
Py_XINCREF(temp);
PyArray_ITER_NEXT(it);
}
@@ -238,7 +238,7 @@ PyArray_XDECREF(PyArrayObject *mp)
}
else {
for (i = 0; i < n; i++, data++) {
- NPY_COPY_PYOBJECT_PTR(&temp, data);
+ memcpy(&temp, data, sizeof(temp));
Py_XDECREF(temp);
}
}
@@ -246,7 +246,7 @@ PyArray_XDECREF(PyArrayObject *mp)
else { /* handles misaligned data too */
PyArray_RawIterBaseInit(&it, mp);
while(it.index < it.size) {
- NPY_COPY_PYOBJECT_PTR(&temp, it.dataptr);
+ memcpy(&temp, it.dataptr, sizeof(temp));
Py_XDECREF(temp);
PyArray_ITER_NEXT(&it);
}
@@ -292,7 +292,7 @@ static void
_fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype)
{
if (!PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)) {
- if ((obj == Py_None) || (PyInt_Check(obj) && PyInt_AsLong(obj)==0)) {
+ if ((obj == Py_None) || (PyInt_Check(obj) && PyLong_AsLong(obj)==0)) {
return;
}
else {
@@ -309,7 +309,7 @@ _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype)
}
if (dtype->type_num == NPY_OBJECT) {
Py_XINCREF(obj);
- NPY_COPY_PYOBJECT_PTR(optr, &obj);
+ memcpy(optr, &obj, sizeof(obj));
}
else if (PyDataType_HASFIELDS(dtype)) {
PyObject *key, *value, *title = NULL;
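
The memcpy calls that replace NPY_COPY_PYOBJECT_PTR are behaviorally identical; the point of copying through memcpy is that array storage may be unaligned for a pointer. A sketch of the idiom:

    PyObject *temp;
    /* data may point into unaligned array storage; memcpy avoids an
     * unaligned pointer load and any strict-aliasing issues. */
    memcpy(&temp, data, sizeof(temp));
    Py_XINCREF(temp);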
diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c
index 6f3d102a4..b2f52f554 100644
--- a/numpy/core/src/multiarray/scalarapi.c
+++ b/numpy/core/src/multiarray/scalarapi.c
@@ -138,7 +138,7 @@ scalar_value(PyObject *scalar, PyArray_Descr *descr)
}
else if (_CHK(Flexible)) {
if (_CHK(String)) {
- return (void *)PyString_AS_STRING(scalar);
+ return (void *)PyBytes_AS_STRING(scalar);
}
if (_CHK(Unicode)) {
/* Treat this the same as the NPY_UNICODE base class */
@@ -380,7 +380,7 @@ PyArray_ScalarFromObject(PyObject *object)
}
/*
* Booleans in Python are implemented as a subclass of integers,
- * so PyBool_Check must be called before PyInt_Check.
+ * so PyBool_Check must be called before PyLong_Check.
*/
if (PyBool_Check(object)) {
if (object == Py_True) {
@@ -395,7 +395,7 @@ PyArray_ScalarFromObject(PyObject *object)
if (ret == NULL) {
return NULL;
}
- PyArrayScalar_VAL(ret, Long) = PyInt_AS_LONG(object);
+ PyArrayScalar_VAL(ret, Long) = PyLong_AsLong(object);
}
else if (PyFloat_Check(object)) {
ret = PyArrayScalar_New(Double);
@@ -755,8 +755,8 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
}
if (PyTypeNum_ISFLEXIBLE(type_num)) {
if (type_num == NPY_STRING) {
- destptr = PyString_AS_STRING(obj);
- ((PyStringObject *)obj)->ob_shash = -1;
+ destptr = PyBytes_AS_STRING(obj);
+ ((PyBytesObject *)obj)->ob_shash = -1;
memcpy(destptr, data, itemsize);
return obj;
}
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 58b9e2c30..c1bff1e42 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -447,7 +447,7 @@ _void_to_hex(const char* argbuf, const Py_ssize_t arglen,
}
memcpy(&retbuf[j], echars, strlen(echars));
- retval = PyUString_FromStringAndSize(retbuf, slen);
+ retval = PyUnicode_FromStringAndSize(retbuf, slen);
PyMem_Free(retbuf);
return retval;
@@ -518,21 +518,21 @@ datetimetype_repr(PyObject *self)
*/
if ((scal->obmeta.num == 1 && scal->obmeta.base != NPY_FR_h) ||
scal->obmeta.base == NPY_FR_GENERIC) {
- ret = PyUString_FromString("numpy.datetime64('");
+ ret = PyUnicode_FromString("numpy.datetime64('");
PyUString_ConcatAndDel(&ret,
- PyUString_FromString(iso));
+ PyUnicode_FromString(iso));
PyUString_ConcatAndDel(&ret,
- PyUString_FromString("')"));
+ PyUnicode_FromString("')"));
}
else {
- ret = PyUString_FromString("numpy.datetime64('");
+ ret = PyUnicode_FromString("numpy.datetime64('");
PyUString_ConcatAndDel(&ret,
- PyUString_FromString(iso));
+ PyUnicode_FromString(iso));
PyUString_ConcatAndDel(&ret,
- PyUString_FromString("','"));
+ PyUnicode_FromString("','"));
ret = append_metastr_to_string(&scal->obmeta, 1, ret);
PyUString_ConcatAndDel(&ret,
- PyUString_FromString("')"));
+ PyUnicode_FromString("')"));
}
return ret;
@@ -554,31 +554,31 @@ timedeltatype_repr(PyObject *self)
/* The value */
if (scal->obval == NPY_DATETIME_NAT) {
- ret = PyUString_FromString("numpy.timedelta64('NaT'");
+ ret = PyUnicode_FromString("numpy.timedelta64('NaT'");
}
else {
/*
* Can't use "%lld" if HAVE_LONG_LONG is not defined
*/
#if defined(HAVE_LONG_LONG)
- ret = PyUString_FromFormat("numpy.timedelta64(%lld",
+ ret = PyUnicode_FromFormat("numpy.timedelta64(%lld",
(long long)scal->obval);
#else
- ret = PyUString_FromFormat("numpy.timedelta64(%ld",
+ ret = PyUnicode_FromFormat("numpy.timedelta64(%ld",
(long)scal->obval);
#endif
}
/* The metadata unit */
if (scal->obmeta.base == NPY_FR_GENERIC) {
PyUString_ConcatAndDel(&ret,
- PyUString_FromString(")"));
+ PyUnicode_FromString(")"));
}
else {
PyUString_ConcatAndDel(&ret,
- PyUString_FromString(",'"));
+ PyUnicode_FromString(",'"));
ret = append_metastr_to_string(&scal->obmeta, 1, ret);
PyUString_ConcatAndDel(&ret,
- PyUString_FromString("')"));
+ PyUnicode_FromString("')"));
}
return ret;
@@ -611,7 +611,7 @@ datetimetype_str(PyObject *self)
return NULL;
}
- return PyUString_FromString(iso);
+ return PyUnicode_FromString(iso);
}
static char *_datetime_verbose_strings[NPY_DATETIME_NUMUNITS] = {
@@ -657,21 +657,21 @@ timedeltatype_str(PyObject *self)
}
if (scal->obval == NPY_DATETIME_NAT) {
- ret = PyUString_FromString("NaT");
+ ret = PyUnicode_FromString("NaT");
}
else {
/*
* Can't use "%lld" if HAVE_LONG_LONG is not defined
*/
#if defined(HAVE_LONG_LONG)
- ret = PyUString_FromFormat("%lld ",
+ ret = PyUnicode_FromFormat("%lld ",
(long long)(scal->obval * scal->obmeta.num));
#else
- ret = PyUString_FromFormat("%ld ",
+ ret = PyUnicode_FromFormat("%ld ",
(long)(scal->obval * scal->obmeta.num));
#endif
PyUString_ConcatAndDel(&ret,
- PyUString_FromString(basestr));
+ PyUnicode_FromString(basestr));
}
return ret;
@@ -795,7 +795,7 @@ legacy_@name@_format@kind@(@type@ val)
PyOS_snprintf(buf, sizeof(buf), "(%s%sj)", re, im);
}
- return PyUString_FromString(buf);
+ return PyUnicode_FromString(buf);
}
#undef _FMT1
@@ -836,7 +836,7 @@ legacy_@name@_format@kind@(npy_@name@ val){
strcpy(&buf[cnt],".0");
}
- return PyUString_FromString(buf);
+ return PyUnicode_FromString(buf);
}
#undef _FMT1
@@ -904,7 +904,7 @@ c@name@type_@kind@(PyObject *self)
return NULL;
}
- PyUString_ConcatAndDel(&istr, PyUString_FromString("j"));
+ PyUString_ConcatAndDel(&istr, PyUnicode_FromString("j"));
return istr;
}
@@ -915,13 +915,13 @@ c@name@type_@kind@(PyObject *self)
}
}
else if (npy_isnan(val.real)) {
- rstr = PyUString_FromString("nan");
+ rstr = PyUnicode_FromString("nan");
}
else if (val.real > 0){
- rstr = PyUString_FromString("inf");
+ rstr = PyUnicode_FromString("inf");
}
else {
- rstr = PyUString_FromString("-inf");
+ rstr = PyUnicode_FromString("-inf");
}
if (npy_isfinite(val.imag)) {
@@ -931,19 +931,19 @@ c@name@type_@kind@(PyObject *self)
}
}
else if (npy_isnan(val.imag)) {
- istr = PyUString_FromString("+nan");
+ istr = PyUnicode_FromString("+nan");
}
else if (val.imag > 0){
- istr = PyUString_FromString("+inf");
+ istr = PyUnicode_FromString("+inf");
}
else {
- istr = PyUString_FromString("-inf");
+ istr = PyUnicode_FromString("-inf");
}
- ret = PyUString_FromString("(");
+ ret = PyUnicode_FromString("(");
PyUString_ConcatAndDel(&ret, rstr);
PyUString_ConcatAndDel(&ret, istr);
- PyUString_ConcatAndDel(&ret, PyUString_FromString("j)"));
+ PyUString_ConcatAndDel(&ret, PyUnicode_FromString("j)"));
return ret;
}
@@ -1147,12 +1147,16 @@ gentype_sizeof(PyObject *self)
NPY_NO_EXPORT void
gentype_struct_free(PyObject *ptr)
{
- PyArrayInterface *arrif;
- PyObject *context;
-
- arrif = (PyArrayInterface*)PyCapsule_GetPointer(ptr, NULL);
- context = (PyObject *)PyCapsule_GetContext(ptr);
- Py_DECREF(context);
+ PyArrayInterface *arrif = (PyArrayInterface*)PyCapsule_GetPointer(ptr, NULL);
+ if (arrif == NULL) {
+ PyErr_WriteUnraisable(ptr);
+ return;
+ }
+ PyObject *context = (PyObject *)PyCapsule_GetContext(ptr);
+ if (context == NULL && PyErr_Occurred()) {
+ PyErr_WriteUnraisable(ptr);
+ }
+ Py_XDECREF(context);
Py_XDECREF(arrif->descr);
PyArray_free(arrif->shape);
PyArray_free(arrif);
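
The hardened gentype_struct_free illustrates a general rule: a PyCapsule destructor must not let an exception escape. A standalone sketch of the pattern (my_capsule_free and its payload are hypothetical):

    static void
    my_capsule_free(PyObject *capsule)
    {
        void *p = PyCapsule_GetPointer(capsule, NULL);
        if (p == NULL) {
            /* GetPointer failed; report instead of leaking the error */
            PyErr_WriteUnraisable(capsule);
            return;
        }
        free(p);
    }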
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index 30507112d..1a38fe956 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -133,7 +133,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
if (newnbytes > oldnbytes && PyArray_ISWRITEABLE(self)) {
/* Fill new memory with zeros */
if (PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_ITEM_REFCOUNT)) {
- PyObject *zero = PyInt_FromLong(0);
+ PyObject *zero = PyLong_FromLong(0);
char *optr;
optr = PyArray_BYTES(self) + oldnbytes;
npy_intp n_new = newsize - oldsize;
@@ -332,7 +332,7 @@ _putzero(char *optr, PyObject *zero, PyArray_Descr *dtype)
for (i = 0; i < nsize; i++) {
Py_INCREF(zero);
- NPY_COPY_PYOBJECT_PTR(optr, &zero);
+ memcpy(optr, &zero, sizeof(zero));
optr += sizeof(zero);
}
}
@@ -458,7 +458,7 @@ _attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims,
static void
raise_reshape_size_mismatch(PyArray_Dims *newshape, PyArrayObject *arr)
{
- PyObject *msg = PyUString_FromFormat("cannot reshape array of size %zd "
+ PyObject *msg = PyUnicode_FromFormat("cannot reshape array of size %zd "
"into shape ", PyArray_SIZE(arr));
PyObject *tmp = convert_shape_to_string(newshape->len, newshape->ptr, "");
@@ -997,10 +997,10 @@ build_shape_string(npy_intp n, npy_intp const *vals)
}
if (i == n) {
- return PyUString_FromFormat("()");
+ return PyUnicode_FromFormat("()");
}
else {
- ret = PyUString_FromFormat("(%" NPY_INTP_FMT, vals[i++]);
+ ret = PyUnicode_FromFormat("(%" NPY_INTP_FMT, vals[i++]);
if (ret == NULL) {
return NULL;
}
@@ -1008,10 +1008,10 @@ build_shape_string(npy_intp n, npy_intp const *vals)
for (; i < n; ++i) {
if (vals[i] < 0) {
- tmp = PyUString_FromString(",newaxis");
+ tmp = PyUnicode_FromString(",newaxis");
}
else {
- tmp = PyUString_FromFormat(",%" NPY_INTP_FMT, vals[i]);
+ tmp = PyUnicode_FromFormat(",%" NPY_INTP_FMT, vals[i]);
}
if (tmp == NULL) {
Py_DECREF(ret);
@@ -1024,7 +1024,7 @@ build_shape_string(npy_intp n, npy_intp const *vals)
}
}
- tmp = PyUString_FromFormat(")");
+ tmp = PyUnicode_FromFormat(")");
PyUString_ConcatAndDel(&ret, tmp);
return ret;
}
diff --git a/numpy/core/src/multiarray/strfuncs.c b/numpy/core/src/multiarray/strfuncs.c
index 363cbdba2..d9d9b7c0a 100644
--- a/numpy/core/src/multiarray/strfuncs.c
+++ b/numpy/core/src/multiarray/strfuncs.c
@@ -3,14 +3,25 @@
#include <Python.h>
#include <numpy/arrayobject.h>
-
#include "npy_pycompat.h"
-
+#include "npy_import.h"
#include "strfuncs.h"
static PyObject *PyArray_StrFunction = NULL;
static PyObject *PyArray_ReprFunction = NULL;
+
+static void
+npy_PyErr_SetStringChained(PyObject *type, const char *message)
+{
+ PyObject *exc, *val, *tb;
+
+ PyErr_Fetch(&exc, &val, &tb);
+ PyErr_SetString(type, message);
+ npy_PyErr_ChainExceptionsCause(exc, val, tb);
+}
+
+
/*NUMPY_API
* Set the array print function to be a Python function.
*/
@@ -36,164 +47,52 @@ PyArray_SetStringFunction(PyObject *op, int repr)
}
-/*
- * Extend string. On failure, returns NULL and leaves *strp alone.
- * XXX we do this in multiple places; time for a string library?
- */
-static char *
-extend_str(char **strp, Py_ssize_t n, Py_ssize_t *maxp)
-{
- char *str = *strp;
- Py_ssize_t new_cap;
-
- if (n >= *maxp - 16) {
- new_cap = *maxp * 2;
-
- if (new_cap <= *maxp) { /* overflow */
- return NULL;
- }
- str = PyArray_realloc(*strp, new_cap);
- if (str != NULL) {
- *strp = str;
- *maxp = new_cap;
- }
- }
- return str;
-}
-
-
-static int
-dump_data(char **string, Py_ssize_t *n, Py_ssize_t *max_n, char *data, int nd,
- npy_intp const *dimensions, npy_intp const *strides, PyArrayObject* self)
-{
- PyObject *op = NULL, *sp = NULL;
- char *ostring;
- npy_intp i, N, ret = 0;
-
-#define CHECK_MEMORY do { \
- if (extend_str(string, *n, max_n) == NULL) { \
- ret = -1; \
- goto end; \
- } \
- } while (0)
-
- if (nd == 0) {
- if ((op = PyArray_GETITEM(self, data)) == NULL) {
- return -1;
- }
- sp = PyObject_Repr(op);
- if (sp == NULL) {
- ret = -1;
- goto end;
- }
- ostring = PyString_AsString(sp);
- N = PyString_Size(sp)*sizeof(char);
- *n += N;
- CHECK_MEMORY;
- memmove(*string + (*n - N), ostring, N);
- }
- else {
- CHECK_MEMORY;
- (*string)[*n] = '[';
- *n += 1;
- for (i = 0; i < dimensions[0]; i++) {
- if (dump_data(string, n, max_n,
- data + (*strides)*i,
- nd - 1, dimensions + 1,
- strides + 1, self) < 0) {
- return -1;
- }
- CHECK_MEMORY;
- if (i < dimensions[0] - 1) {
- (*string)[*n] = ',';
- (*string)[*n+1] = ' ';
- *n += 2;
- }
- }
- CHECK_MEMORY;
- (*string)[*n] = ']';
- *n += 1;
- }
-
-#undef CHECK_MEMORY
-
-end:
- Py_XDECREF(op);
- Py_XDECREF(sp);
- return ret;
-}
-
-
-static PyObject *
-array_repr_builtin(PyArrayObject *self, int repr)
-{
- PyObject *ret;
- char *string;
- /* max_n initial value is arbitrary, dump_data will extend it */
- Py_ssize_t n = 0, max_n = PyArray_NBYTES(self) * 4 + 7;
-
- if ((string = PyArray_malloc(max_n)) == NULL) {
- return PyErr_NoMemory();
- }
-
- if (dump_data(&string, &n, &max_n, PyArray_DATA(self),
- PyArray_NDIM(self), PyArray_DIMS(self),
- PyArray_STRIDES(self), self) < 0) {
- PyArray_free(string);
- return NULL;
- }
-
- if (repr) {
- if (PyArray_ISEXTENDED(self)) {
- ret = PyUString_FromFormat("array(%s, '%c%d')",
- string,
- PyArray_DESCR(self)->type,
- PyArray_DESCR(self)->elsize);
- }
- else {
- ret = PyUString_FromFormat("array(%s, '%c')",
- string,
- PyArray_DESCR(self)->type);
- }
- }
- else {
- ret = PyUString_FromStringAndSize(string, n);
- }
-
- PyArray_free(string);
- return ret;
-}
-
-
NPY_NO_EXPORT PyObject *
array_repr(PyArrayObject *self)
{
- PyObject *s;
+ static PyObject *repr = NULL;
- if (PyArray_ReprFunction == NULL) {
- s = array_repr_builtin(self, 1);
+ if (PyArray_ReprFunction != NULL) {
+ return PyObject_CallFunctionObjArgs(PyArray_ReprFunction, self, NULL);
}
- else {
- s = PyObject_CallFunctionObjArgs(PyArray_ReprFunction, self, NULL);
+
+ /*
+ * We need to do a delayed import here as initialization on module load
+ * leads to circular import problems.
+ */
+ npy_cache_import("numpy.core.arrayprint", "_default_array_repr", &repr);
+ if (repr == NULL) {
+ npy_PyErr_SetStringChained(PyExc_RuntimeError,
+ "Unable to configure default ndarray.__repr__");
+ return NULL;
}
- return s;
+ return PyObject_CallFunctionObjArgs(repr, self, NULL);
}
NPY_NO_EXPORT PyObject *
array_str(PyArrayObject *self)
{
- PyObject *s;
+ static PyObject *str = NULL;
- if (PyArray_StrFunction == NULL) {
- s = array_repr_builtin(self, 0);
+ if (PyArray_StrFunction != NULL) {
+ return PyObject_CallFunctionObjArgs(PyArray_StrFunction, self, NULL);
}
- else {
- s = PyObject_CallFunctionObjArgs(PyArray_StrFunction, self, NULL);
+
+ /*
+ * We need to do a delayed import here as initialization on module load leads
+ * to circular import problems.
+ */
+ npy_cache_import("numpy.core.arrayprint", "_default_array_str", &str);
+ if (str == NULL) {
+ npy_PyErr_SetStringChained(PyExc_RuntimeError,
+ "Unable to configure default ndarray.__str__");
+ return NULL;
}
- return s;
+ return PyObject_CallFunctionObjArgs(str, self, NULL);
}
+
NPY_NO_EXPORT PyObject *
array_format(PyArrayObject *self, PyObject *args)
{
@@ -221,4 +120,3 @@ array_format(PyArrayObject *self, PyObject *args)
);
}
}
-
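
npy_cache_import (from npy_import.h) memoizes the attribute lookup in the static pointer, so the circular-import-sensitive import runs at most once per process. Its behavior is roughly equivalent to this sketch:

    static PyObject *func = NULL;
    if (func == NULL) {
        PyObject *mod = PyImport_ImportModule("numpy.core.arrayprint");
        if (mod != NULL) {
            func = PyObject_GetAttrString(mod, "_default_array_repr");
            Py_DECREF(mod);
        }
    }
    /* func stays NULL on failure, with the import error set */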
diff --git a/numpy/core/src/multiarray/temp_elide.c b/numpy/core/src/multiarray/temp_elide.c
index 09b948218..b19dee418 100644
--- a/numpy/core/src/multiarray/temp_elide.c
+++ b/numpy/core/src/multiarray/temp_elide.c
@@ -62,12 +62,8 @@
#define NPY_ELIDE_DEBUG 0
#define NPY_MAX_STACKSIZE 10
-#if PY_VERSION_HEX >= 0x03060000
 /* TODO: can PEP 523 be used here somehow? */
#define PYFRAMEEVAL_FUNC "_PyEval_EvalFrameDefault"
-#else
-#define PYFRAMEEVAL_FUNC "PyEval_EvalFrameEx"
-#endif
/*
* Heuristic size of the array in bytes at which backtrace overhead generation
* becomes less than speed gained by in-place operations. Depends on stack depth
diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c
index 0c8d49970..6b6c6bd9d 100644
--- a/numpy/core/src/multiarray/usertypes.c
+++ b/numpy/core/src/multiarray/usertypes.c
@@ -268,11 +268,11 @@ PyArray_RegisterCastFunc(PyArray_Descr *descr, int totype,
return -1;
}
}
- key = PyInt_FromLong(totype);
+ key = PyLong_FromLong(totype);
if (PyErr_Occurred()) {
return -1;
}
- cobj = NpyCapsule_FromVoidPtr((void *)castfunc, NULL);
+ cobj = PyCapsule_New((void *)castfunc, NULL, NULL);
if (cobj == NULL) {
Py_DECREF(key);
return -1;
diff --git a/numpy/core/src/npymath/npy_math_private.h b/numpy/core/src/npymath/npy_math_private.h
index e4a919db6..212d11a0b 100644
--- a/numpy/core/src/npymath/npy_math_private.h
+++ b/numpy/core/src/npymath/npy_math_private.h
@@ -25,7 +25,6 @@
#include "npy_fpmath.h"
#include "numpy/npy_math.h"
-#include "numpy/npy_cpu.h"
#include "numpy/npy_endian.h"
#include "numpy/npy_common.h"
diff --git a/numpy/core/src/npysort/binsearch.c.src b/numpy/core/src/npysort/binsearch.c.src
index c04e197b7..41165897b 100644
--- a/numpy/core/src/npysort/binsearch.c.src
+++ b/numpy/core/src/npysort/binsearch.c.src
@@ -35,7 +35,7 @@
* #CMP = LT, LTE#
*/
-NPY_VISIBILITY_HIDDEN void
+NPY_NO_EXPORT void
binsearch_@side@_@suff@(const char *arr, const char *key, char *ret,
npy_intp arr_len, npy_intp key_len,
npy_intp arr_str, npy_intp key_str, npy_intp ret_str,
@@ -81,7 +81,7 @@ binsearch_@side@_@suff@(const char *arr, const char *key, char *ret,
}
}
-NPY_VISIBILITY_HIDDEN int
+NPY_NO_EXPORT int
argbinsearch_@side@_@suff@(const char *arr, const char *key,
const char *sort, char *ret,
npy_intp arr_len, npy_intp key_len,
@@ -153,7 +153,7 @@ argbinsearch_@side@_@suff@(const char *arr, const char *key,
* #CMP = <, <=#
*/
-NPY_VISIBILITY_HIDDEN void
+NPY_NO_EXPORT void
npy_binsearch_@side@(const char *arr, const char *key, char *ret,
npy_intp arr_len, npy_intp key_len,
npy_intp arr_str, npy_intp key_str, npy_intp ret_str,
@@ -195,7 +195,7 @@ npy_binsearch_@side@(const char *arr, const char *key, char *ret,
}
}
-NPY_VISIBILITY_HIDDEN int
+NPY_NO_EXPORT int
npy_argbinsearch_@side@(const char *arr, const char *key,
const char *sort, char *ret,
npy_intp arr_len, npy_intp key_len,
diff --git a/numpy/core/src/npysort/heapsort.c.src b/numpy/core/src/npysort/heapsort.c.src
index c2e3b63cb..4bfea1388 100644
--- a/numpy/core/src/npysort/heapsort.c.src
+++ b/numpy/core/src/npysort/heapsort.c.src
@@ -60,7 +60,7 @@
* npy_cdouble, npy_clongdouble, npy_datetime, npy_timedelta#
*/
-int
+NPY_NO_EXPORT int
heapsort_@suff@(void *start, npy_intp n, void *NOT_USED)
{
@type@ tmp, *a;
@@ -111,7 +111,7 @@ heapsort_@suff@(void *start, npy_intp n, void *NOT_USED)
}
-int
+NPY_NO_EXPORT int
aheapsort_@suff@(void *vv, npy_intp *tosort, npy_intp n, void *NOT_USED)
{
@type@ *v = vv;
@@ -177,7 +177,7 @@ aheapsort_@suff@(void *vv, npy_intp *tosort, npy_intp n, void *NOT_USED)
* #type = npy_char, npy_ucs4#
*/
-int
+NPY_NO_EXPORT int
heapsort_@suff@(void *start, npy_intp n, void *varr)
{
PyArrayObject *arr = varr;
@@ -231,7 +231,7 @@ heapsort_@suff@(void *start, npy_intp n, void *varr)
}
-int
+NPY_NO_EXPORT int
aheapsort_@suff@(void *vv, npy_intp *tosort, npy_intp n, void *varr)
{
@type@ *v = vv;
@@ -291,7 +291,7 @@ aheapsort_@suff@(void *vv, npy_intp *tosort, npy_intp n, void *varr)
*/
-int
+NPY_NO_EXPORT int
npy_heapsort(void *start, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -348,7 +348,7 @@ npy_heapsort(void *start, npy_intp num, void *varr)
}
-int
+NPY_NO_EXPORT int
npy_aheapsort(void *vv, npy_intp *tosort, npy_intp n, void *varr)
{
char *v = vv;
diff --git a/numpy/core/src/npysort/mergesort.c.src b/numpy/core/src/npysort/mergesort.c.src
index 6f659617a..f83fbf758 100644
--- a/numpy/core/src/npysort/mergesort.c.src
+++ b/numpy/core/src/npysort/mergesort.c.src
@@ -103,7 +103,7 @@ mergesort0_@suff@(@type@ *pl, @type@ *pr, @type@ *pw)
}
-int
+NPY_NO_EXPORT int
mergesort_@suff@(void *start, npy_intp num, void *NOT_USED)
{
@type@ *pl, *pr, *pw;
@@ -166,7 +166,7 @@ amergesort0_@suff@(npy_intp *pl, npy_intp *pr, @type@ *v, npy_intp *pw)
}
-int
+NPY_NO_EXPORT int
amergesort_@suff@(void *v, npy_intp *tosort, npy_intp num, void *NOT_USED)
{
npy_intp *pl, *pr, *pw;
@@ -245,7 +245,7 @@ mergesort0_@suff@(@type@ *pl, @type@ *pr, @type@ *pw, @type@ *vp, size_t len)
}
-int
+NPY_NO_EXPORT int
mergesort_@suff@(void *start, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -326,7 +326,7 @@ amergesort0_@suff@(npy_intp *pl, npy_intp *pr, @type@ *v, npy_intp *pw, size_t l
}
-int
+NPY_NO_EXPORT int
amergesort_@suff@(void *v, npy_intp *tosort, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -407,7 +407,7 @@ npy_mergesort0(char *pl, char *pr, char *pw, char *vp, npy_intp elsize,
}
-int
+NPY_NO_EXPORT int
npy_mergesort(void *start, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -485,7 +485,7 @@ npy_amergesort0(npy_intp *pl, npy_intp *pr, char *v, npy_intp *pw,
}
-int
+NPY_NO_EXPORT int
npy_amergesort(void *v, npy_intp *tosort, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
diff --git a/numpy/core/src/npysort/quicksort.c.src b/numpy/core/src/npysort/quicksort.c.src
index 49a2c4906..933f75808 100644
--- a/numpy/core/src/npysort/quicksort.c.src
+++ b/numpy/core/src/npysort/quicksort.c.src
@@ -85,7 +85,7 @@
* npy_cdouble, npy_clongdouble, npy_datetime, npy_timedelta#
*/
-int
+NPY_NO_EXPORT int
quicksort_@suff@(void *start, npy_intp num, void *NOT_USED)
{
@type@ vp;
@@ -160,7 +160,7 @@ stack_pop:
}
-int
+NPY_NO_EXPORT int
aquicksort_@suff@(void *vv, npy_intp* tosort, npy_intp num, void *NOT_USED)
{
@type@ *v = vv;
@@ -253,7 +253,7 @@ stack_pop:
* #type = npy_char, npy_ucs4#
*/
-int
+NPY_NO_EXPORT int
quicksort_@suff@(void *start, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -341,7 +341,7 @@ stack_pop:
}
-int
+NPY_NO_EXPORT int
aquicksort_@suff@(void *vv, npy_intp* tosort, npy_intp num, void *varr)
{
@type@ *v = vv;
@@ -434,7 +434,7 @@ stack_pop:
*/
-int
+NPY_NO_EXPORT int
npy_quicksort(void *start, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -539,7 +539,7 @@ stack_pop:
}
-int
+NPY_NO_EXPORT int
npy_aquicksort(void *vv, npy_intp* tosort, npy_intp num, void *varr)
{
char *v = vv;
diff --git a/numpy/core/src/npysort/radixsort.c.src b/numpy/core/src/npysort/radixsort.c.src
index 72887d7e4..99d8ed42a 100644
--- a/numpy/core/src/npysort/radixsort.c.src
+++ b/numpy/core/src/npysort/radixsort.c.src
@@ -46,7 +46,7 @@ nth_byte_@suff@(@type@ key, npy_intp l) {
return (key >> (l << 3)) & 0xFF;
}
-@type@*
+static @type@*
radixsort0_@suff@(@type@ *arr, @type@ *aux, npy_intp num)
{
npy_intp cnt[sizeof(@type@)][1 << 8] = { { 0 } };
@@ -95,7 +95,7 @@ radixsort0_@suff@(@type@ *arr, @type@ *aux, npy_intp num)
return arr;
}
-int
+NPY_NO_EXPORT int
radixsort_@suff@(void *start, npy_intp num, void *NPY_UNUSED(varr))
{
void *sorted;
@@ -136,7 +136,7 @@ radixsort_@suff@(void *start, npy_intp num, void *NPY_UNUSED(varr))
return 0;
}
-npy_intp*
+static npy_intp*
aradixsort0_@suff@(@type@ *arr, npy_intp *aux, npy_intp *tosort, npy_intp num)
{
npy_intp cnt[sizeof(@type@)][1 << 8] = { { 0 } };
@@ -185,7 +185,7 @@ aradixsort0_@suff@(@type@ *arr, npy_intp *aux, npy_intp *tosort, npy_intp num)
return tosort;
}
-int
+NPY_NO_EXPORT int
aradixsort_@suff@(void *start, npy_intp* tosort, npy_intp num, void *NPY_UNUSED(varr))
{
npy_intp *sorted;
diff --git a/numpy/core/src/npysort/selection.c.src b/numpy/core/src/npysort/selection.c.src
index be645450f..4fd955200 100644
--- a/numpy/core/src/npysort/selection.c.src
+++ b/numpy/core/src/npysort/selection.c.src
@@ -280,7 +280,7 @@ static int
* kth 8: 0 1 2 3 4 5 6 [8 7] -> stack []
*
*/
-int
+NPY_NO_EXPORT int
@name@introselect_@suff@(@type@ *v,
#if @arg@
npy_intp* tosort,
diff --git a/numpy/core/src/npysort/timsort.c.src b/numpy/core/src/npysort/timsort.c.src
index 26313ca5b..3fdd46f61 100644
--- a/numpy/core/src/npysort/timsort.c.src
+++ b/numpy/core/src/npysort/timsort.c.src
@@ -42,7 +42,7 @@
-npy_intp compute_min_run(npy_intp num)
+static npy_intp compute_min_run(npy_intp num)
{
npy_intp r = 0;
@@ -476,7 +476,7 @@ force_collapse_@suff@(@type@ *arr, run *stack, npy_intp *stack_ptr,
}
-int
+NPY_NO_EXPORT int
timsort_@suff@(void *start, npy_intp num, void *NPY_UNUSED(varr))
{
int ret;
@@ -854,7 +854,7 @@ aforce_collapse_@suff@(@type@ *arr, npy_intp *tosort, run *stack,
}
-int
+NPY_NO_EXPORT int
atimsort_@suff@(void *v, npy_intp *tosort, npy_intp num,
void *NPY_UNUSED(varr))
{
@@ -904,7 +904,7 @@ cleanup:
* run length to reduce the cost of insertion sort.
*/
-npy_intp compute_min_run_short(npy_intp num)
+static npy_intp compute_min_run_short(npy_intp num)
{
npy_intp r = 0;
@@ -1303,7 +1303,7 @@ force_collapse_@suff@(@type@ *arr, run *stack, npy_intp *stack_ptr,
}
-int
+NPY_NO_EXPORT int
timsort_@suff@(void *start, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -1691,7 +1691,7 @@ aforce_collapse_@suff@(@type@ *arr, npy_intp *tosort, run *stack,
}
-int
+NPY_NO_EXPORT int
atimsort_@suff@(void *start, npy_intp *tosort, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -2128,7 +2128,7 @@ npy_force_collapse(char *arr, run *stack, npy_intp *stack_ptr,
}
-int
+NPY_NO_EXPORT int
npy_timsort(void *start, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
@@ -2524,7 +2524,7 @@ npy_aforce_collapse(char *arr, npy_intp *tosort, run *stack,
}
-int
+NPY_NO_EXPORT int
npy_atimsort(void *start, npy_intp *tosort, npy_intp num, void *varr)
{
PyArrayObject *arr = varr;
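
The npysort changes above are all symbol-visibility hygiene: sort entry points previously exported with default visibility become internal to the numpy binaries. A simplified sketch of what NPY_NO_EXPORT amounts to on GCC-compatible compilers (the real definition covers more compilers and build modes):

    #if defined(__GNUC__)
        #define NPY_NO_EXPORT __attribute__((visibility("hidden")))
    #else
        #define NPY_NO_EXPORT
    #endif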
diff --git a/numpy/core/src/umath/_rational_tests.c.src b/numpy/core/src/umath/_rational_tests.c.src
index 13e33d0a5..e611a0847 100644
--- a/numpy/core/src/umath/_rational_tests.c.src
+++ b/numpy/core/src/umath/_rational_tests.c.src
@@ -406,8 +406,9 @@ pyrational_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
Py_INCREF(x[0]);
return x[0];
}
- else if (PyString_Check(x[0])) {
- const char* s = PyString_AS_STRING(x[0]);
+ // TODO: allow construction from unicode strings
+ else if (PyBytes_Check(x[0])) {
+ const char* s = PyBytes_AS_STRING(x[0]);
rational x;
if (scan_rational(&s,&x)) {
const char* p;
@@ -526,11 +527,11 @@ static PyObject*
pyrational_repr(PyObject* self) {
rational x = ((PyRational*)self)->r;
if (d(x)!=1) {
- return PyUString_FromFormat(
+ return PyUnicode_FromFormat(
"rational(%ld,%ld)",(long)x.n,(long)d(x));
}
else {
- return PyUString_FromFormat(
+ return PyUnicode_FromFormat(
"rational(%ld)",(long)x.n);
}
}
@@ -539,11 +540,11 @@ static PyObject*
pyrational_str(PyObject* self) {
rational x = ((PyRational*)self)->r;
if (d(x)!=1) {
- return PyUString_FromFormat(
+ return PyUnicode_FromFormat(
"%ld/%ld",(long)x.n,(long)d(x));
}
else {
- return PyUString_FromFormat(
+ return PyUnicode_FromFormat(
"%ld",(long)x.n);
}
}
@@ -1126,7 +1127,7 @@ PyMODINIT_FUNC PyInit__rational_tests(void) {
if (PyErr_Occurred()) {
goto fail;
}
- numpy_str = PyUString_FromString("numpy");
+ numpy_str = PyUnicode_FromString("numpy");
if (!numpy_str) {
goto fail;
}
diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src
index d08aabd64..660c296d6 100644
--- a/numpy/core/src/umath/_umath_tests.c.src
+++ b/numpy/core/src/umath/_umath_tests.c.src
@@ -480,7 +480,7 @@ UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
return NULL;
}
- if (PyString_Check(signature)) {
+ if (PyBytes_Check(signature)) {
sig_str = signature;
} else if (PyUnicode_Check(signature)) {
sig_str = PyUnicode_AsUTF8String(signature);
@@ -493,7 +493,7 @@ UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
NULL, NULL, NULL,
0, nin, nout, PyUFunc_None, "no name",
"doc:none",
- 1, PyString_AS_STRING(sig_str));
+ 1, PyBytes_AS_STRING(sig_str));
if (sig_str != signature) {
Py_DECREF(sig_str);
}
@@ -588,11 +588,11 @@ static PyObject *
UMath_Tests_test_dispatch(PyObject *NPY_UNUSED(dummy), PyObject *NPY_UNUSED(dummy2))
{
const char *highest_func, *highest_var;
- NPY_CPU_DISPATCH_CALL(highest_func = _umath_tests_dispatch_func, ())
- NPY_CPU_DISPATCH_CALL(highest_var = _umath_tests_dispatch_var)
+ NPY_CPU_DISPATCH_CALL(highest_func = _umath_tests_dispatch_func, ());
+ NPY_CPU_DISPATCH_CALL(highest_var = _umath_tests_dispatch_var);
const char *highest_func_xb = "nobase", *highest_var_xb = "nobase";
- NPY_CPU_DISPATCH_CALL_XB(highest_func_xb = _umath_tests_dispatch_func, ())
- NPY_CPU_DISPATCH_CALL_XB(highest_var_xb = _umath_tests_dispatch_var)
+ NPY_CPU_DISPATCH_CALL_XB(highest_func_xb = _umath_tests_dispatch_func, ());
+ NPY_CPU_DISPATCH_CALL_XB(highest_var_xb = _umath_tests_dispatch_var);
PyObject *dict = PyDict_New(), *item;
if (dict == NULL) {
@@ -610,7 +610,7 @@ UMath_Tests_test_dispatch(PyObject *NPY_UNUSED(dummy), PyObject *NPY_UNUSED(dumm
if (item == NULL || PyDict_SetItemString(dict, "all", item) < 0) {
goto err;
}
- NPY_CPU_DISPATCH_CALL_ALL(_umath_tests_dispatch_attach, (item))
+ NPY_CPU_DISPATCH_CALL_ALL(_umath_tests_dispatch_attach, (item));
if (PyErr_Occurred()) {
goto err;
}
@@ -671,7 +671,7 @@ PyMODINIT_FUNC PyInit__umath_tests(void) {
d = PyModule_GetDict(m);
- version = PyString_FromString("0.1");
+ version = PyUnicode_FromString("0.1");
PyDict_SetItemString(d, "__version__", version);
Py_DECREF(version);
diff --git a/numpy/core/src/umath/extobj.c b/numpy/core/src/umath/extobj.c
index 3404a0c6a..cd81f7734 100644
--- a/numpy/core/src/umath/extobj.c
+++ b/numpy/core/src/umath/extobj.c
@@ -109,8 +109,8 @@ _error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *
errtype, name);
goto fail;
}
- args = Py_BuildValue("NN", PyUString_FromString(errtype),
- PyInt_FromLong((long) retstatus));
+ args = Py_BuildValue("NN", PyUnicode_FromString(errtype),
+ PyLong_FromLong((long) retstatus));
if (args == NULL) {
goto fail;
}
@@ -212,7 +212,7 @@ _extract_pyvals(PyObject *ref, const char *name, int *bufsize,
}
if (bufsize != NULL) {
- *bufsize = PyInt_AsLong(PyList_GET_ITEM(ref, 0));
+ *bufsize = PyLong_AsLong(PyList_GET_ITEM(ref, 0));
if (error_converting(*bufsize)) {
return -1;
}
@@ -229,7 +229,7 @@ _extract_pyvals(PyObject *ref, const char *name, int *bufsize,
}
if (errmask != NULL) {
- *errmask = PyInt_AsLong(PyList_GET_ITEM(ref, 1));
+ *errmask = PyLong_AsLong(PyList_GET_ITEM(ref, 1));
if (*errmask < 0) {
if (PyErr_Occurred()) {
return -1;
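
The PyLong_AsLong conversions above rely on numpy's error_converting() macro to disambiguate a legitimate -1 result from a conversion failure; a sketch, with `item` as a placeholder:

    long v = PyLong_AsLong(item);
    if (error_converting(v)) {  /* i.e. v == -1 && PyErr_Occurred() */
        return -1;
    }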
diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c
index bf6e5a698..a0090e302 100644
--- a/numpy/core/src/umath/override.c
+++ b/numpy/core/src/umath/override.c
@@ -605,7 +605,7 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
goto fail;
}
- method_name = PyUString_FromString(method);
+ method_name = PyUnicode_FromString(method);
if (method_name == NULL) {
goto fail;
}
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index 4037a4757..f1423d8b9 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -16,7 +16,6 @@
#include "npy_config.h"
#include <numpy/arrayobject.h>
-#include "npy_config.h"
#include "npy_pycompat.h"
#include "ctors.h"
@@ -254,7 +253,6 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
}
op_flags[2] = NPY_ITER_READONLY;
}
-
/* Set up result array axes mapping, operand and wheremask use default */
int result_axes[NPY_MAXDIMS];
int *op_axes[3] = {result_axes, NULL, NULL};
@@ -363,7 +361,6 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
if (loop(iter, dataptr, strideptr, countptr,
iternext, needs_api, skip_first_count, data) < 0) {
-
goto fail;
}
}
@@ -379,7 +376,10 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
}
Py_INCREF(result);
- NpyIter_Deallocate(iter);
+ if (!NpyIter_Deallocate(iter)) {
+ Py_DECREF(result);
+ return NULL;
+ }
return result;
fail:
diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index 90cc7a513..55bc958cb 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -794,15 +794,8 @@ static PyObject *
{
PyObject *ret;
@type@ arg1, arg2;
- /*
- * NOTE: In gcc >= 4.1, the compiler will reorder floating point
- * operations and floating point error state checks. In
- * particular, the arithmetic operations were being reordered
- * so that the errors weren't caught. Declaring this output
- * variable volatile was the minimal fix for the issue.
- * (Ticket #1671)
- */
- volatile @otype@ out;
+ @otype@ out;
+
#if @twoout@
@otype@ out2;
PyObject *obj;
@@ -932,96 +925,14 @@ static PyObject *
* Double, LongDouble,
* CFloat, CDouble, CLongDouble#
*
- * #isint = (1,0)*5,0*7#
+ * #isint = 1*10,0*7#
+ * #isuint = (0,1)*5,0*7#
* #cmplx = 0*14,1*3#
* #iszero = _IS_ZERO*10, npy_half_iszero, _IS_ZERO*6#
* #zero = 0*10, NPY_HALF_ZERO, 0*6#
* #one = 1*10, NPY_HALF_ONE, 1*6#
*/
-#if @cmplx@
-static PyObject *
-@name@_power(PyObject *a, PyObject *b, PyObject *modulo)
-{
- PyObject *ret;
- @type@ arg1, arg2;
- int retstatus;
- int first;
- @type@ out = {@zero@, @zero@};
-
- BINOP_GIVE_UP_IF_NEEDED(a, b, nb_power, @name@_power);
-
- switch(_@name@_convert2_to_ctypes(a, &arg1, b, &arg2)) {
- case 0:
- break;
- case -1:
- /* can't cast both safely mixed-types? */
- return PyArray_Type.tp_as_number->nb_power(a,b,modulo);
- case -2:
- /* use default handling */
- if (PyErr_Occurred()) {
- return NULL;
- }
- return PyGenericArrType_Type.tp_as_number->nb_power(a,b,modulo);
- case -3:
- default:
- /*
- * special case for longdouble and clongdouble
- * because they have a recursive getitem in their dtype
- */
- Py_INCREF(Py_NotImplemented);
- return Py_NotImplemented;
- }
-
- if (modulo != Py_None) {
- /* modular exponentiation is not implemented (gh-8804) */
- Py_INCREF(Py_NotImplemented);
- return Py_NotImplemented;
- }
-
- npy_clear_floatstatus_barrier((char*)&out);
-
- /*
- * here we do the actual calculation with arg1 and arg2
- * as a function call.
- */
- if (@iszero@(arg2.real) && @iszero@(arg2.imag)) {
- out.real = @one@;
- out.imag = @zero@;
- }
- else {
- @name@_ctype_power(arg1, arg2, &out);
- }
-
- /* Check status flag. If it is set, then look up what to do */
- retstatus = npy_get_floatstatus_barrier((char*)&out);
- if (retstatus) {
- int bufsize, errmask;
- PyObject *errobj;
-
- if (PyUFunc_GetPyValues("@name@_scalars", &bufsize, &errmask,
- &errobj) < 0) {
- return NULL;
- }
- first = 1;
- if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) {
- Py_XDECREF(errobj);
- return NULL;
- }
- Py_XDECREF(errobj);
- }
-
- ret = PyArrayScalar_New(@Name@);
- if (ret == NULL) {
- return NULL;
- }
- PyArrayScalar_ASSIGN(ret, @Name@, out);
-
- return ret;
-}
-
-#elif @isint@
-
static PyObject *
@name@_power(PyObject *a, PyObject *b, PyObject *modulo)
{
@@ -1058,85 +969,25 @@ static PyObject *
return Py_NotImplemented;
}
+#if !@isint@
npy_clear_floatstatus_barrier((char*)&out);
-
+#endif
/*
* here we do the actual calculation with arg1 and arg2
* as a function call.
*/
+#if @isint@ && !@isuint@
if (arg2 < 0) {
PyErr_SetString(PyExc_ValueError,
"Integers to negative integer powers are not allowed.");
return NULL;
}
+#endif
@name@_ctype_power(arg1, arg2, &out);
- ret = PyArrayScalar_New(@Name@);
- if (ret == NULL) {
- return NULL;
- }
- PyArrayScalar_ASSIGN(ret, @Name@, out);
-
- return ret;
-}
-
-#else
-
-static PyObject *
-@name@_power(PyObject *a, PyObject *b, PyObject *modulo)
-{
- PyObject *ret;
- @type@ arg1, arg2;
- int retstatus;
- int first;
-
- @type@ out = @zero@;
-
- BINOP_GIVE_UP_IF_NEEDED(a, b, nb_power, @name@_power);
-
- switch(_@name@_convert2_to_ctypes(a, &arg1, b, &arg2)) {
- case 0:
- break;
- case -1:
- /* can't cast both safely mixed-types? */
- return PyArray_Type.tp_as_number->nb_power(a,b,modulo);
- case -2:
- /* use default handling */
- if (PyErr_Occurred()) {
- return NULL;
- }
- return PyGenericArrType_Type.tp_as_number->nb_power(a,b,modulo);
- case -3:
- default:
- /*
- * special case for longdouble and clongdouble
- * because they have a recursive getitem in their dtype
- */
- Py_INCREF(Py_NotImplemented);
- return Py_NotImplemented;
- }
-
- if (modulo != Py_None) {
- /* modular exponentiation is not implemented (gh-8804) */
- Py_INCREF(Py_NotImplemented);
- return Py_NotImplemented;
- }
-
- npy_clear_floatstatus_barrier((char*)&out);
-
- /*
- * here we do the actual calculation with arg1 and arg2
- * as a function call.
- */
- if (@iszero@(arg2)) {
- out = @one@;
- }
- else {
- @name@_ctype_power(arg1, arg2, &out);
- }
-
+#if !@isint@
/* Check status flag. If it is set, then look up what to do */
- retstatus = npy_get_floatstatus_barrier((char*)&out);
+ int retstatus = npy_get_floatstatus_barrier((char*)&out);
if (retstatus) {
int bufsize, errmask;
PyObject *errobj;
@@ -1145,13 +996,14 @@ static PyObject *
&errobj) < 0) {
return NULL;
}
- first = 1;
+ int first = 1;
if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) {
Py_XDECREF(errobj);
return NULL;
}
Py_XDECREF(errobj);
}
+#endif
ret = PyArrayScalar_New(@Name@);
if (ret == NULL) {
@@ -1162,7 +1014,6 @@ static PyObject *
return ret;
}
-#endif
/**end repeat**/
#undef _IS_ZERO
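
The three power templates above collapse into one; the integer branch keeps the negative-exponent guard for signed types only, and the float-status handling is compiled out for integers. A quick sketch of the resulting scalar behaviour (values illustrative):

    import numpy as np

    # Signed integer scalars reject negative exponents (the @isint@ branch):
    try:
        np.int64(2) ** np.int64(-1)
    except ValueError as exc:
        print(exc)  # Integers to negative integer powers are not allowed.

    # Floating-point scalars take the FPE-checked path instead:
    print(np.float64(2.0) ** np.float64(-1))  # 0.5
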
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 8f841c6fa..f693eb5c2 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -1536,7 +1536,14 @@ iterator_loop(PyUFuncObject *ufunc,
NPY_END_THREADS;
}
- return NpyIter_Deallocate(iter);
+ /*
+ * Currently `innerloop` may leave an error set; in this case,
+ * NpyIter_Deallocate will always return an error as well.
+ */
+ if (NpyIter_Deallocate(iter) == NPY_FAIL) {
+ return -1;
+ }
+ return 0;
}
/*
@@ -2425,15 +2432,15 @@ _get_identity(PyUFuncObject *ufunc, npy_bool *reorderable) {
switch(ufunc->identity) {
case PyUFunc_One:
*reorderable = 1;
- return PyInt_FromLong(1);
+ return PyLong_FromLong(1);
case PyUFunc_Zero:
*reorderable = 1;
- return PyInt_FromLong(0);
+ return PyLong_FromLong(0);
case PyUFunc_MinusOne:
*reorderable = 1;
- return PyInt_FromLong(-1);
+ return PyLong_FromLong(-1);
case PyUFunc_ReorderableNone:
*reorderable = 1;
@@ -3233,9 +3240,13 @@ PyUFunc_GenericFunction_int(PyUFuncObject *ufunc,
goto fail;
}
- /* Check whether any errors occurred during the loop */
+ /*
+ * Check whether any errors occurred during the loop. The loops should
+ * indicate this in retval, but since the inner-loop currently does not
+ * report errors, this does not happen in all branches (at this time).
+ */
if (PyErr_Occurred() ||
- _check_ufunc_fperr(errormask, extobj, ufunc_name) < 0) {
+ _check_ufunc_fperr(errormask, extobj, ufunc_name) < 0) {
retval = -1;
goto fail;
}
@@ -3307,7 +3318,6 @@ get_binary_op_function(PyUFuncObject *ufunc, int *otype,
void **out_innerloopdata)
{
int i;
- PyUFunc_Loop1d *funcdata;
NPY_UF_DBG_PRINT1("Getting binary op function for type number %d\n",
*otype);
@@ -3315,7 +3325,7 @@ get_binary_op_function(PyUFuncObject *ufunc, int *otype,
/* If the type is custom and there are userloops, search for it here */
if (ufunc->userloops != NULL && PyTypeNum_ISUSERDEF(*otype)) {
PyObject *key, *obj;
- key = PyInt_FromLong(*otype);
+ key = PyLong_FromLong(*otype);
if (key == NULL) {
return -1;
}
@@ -3325,7 +3335,10 @@ get_binary_op_function(PyUFuncObject *ufunc, int *otype,
return -1;
}
else if (obj != NULL) {
- funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
+ PyUFunc_Loop1d *funcdata = PyCapsule_GetPointer(obj, NULL);
+ if (funcdata == NULL) {
+ return -1;
+ }
while (funcdata != NULL) {
int *types = funcdata->arg_types;
@@ -3997,8 +4010,17 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
finish:
Py_XDECREF(op_dtypes[0]);
- NpyIter_Deallocate(iter);
- NpyIter_Deallocate(iter_inner);
+ int res = 0;
+ if (!NpyIter_Deallocate(iter)) {
+ res = -1;
+ }
+ if (!NpyIter_Deallocate(iter_inner)) {
+ res = -1;
+ }
+ if (res < 0) {
+ Py_DECREF(out);
+ return NULL;
+ }
return (PyObject *)out;
@@ -4379,7 +4401,10 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
finish:
Py_XDECREF(op_dtypes[0]);
- NpyIter_Deallocate(iter);
+ if (!NpyIter_Deallocate(iter)) {
+ Py_DECREF(out);
+ return NULL;
+ }
return (PyObject *)out;
@@ -4388,7 +4413,6 @@ fail:
Py_XDECREF(op_dtypes[0]);
NpyIter_Deallocate(iter);
-
return NULL;
}
@@ -4812,8 +4836,8 @@ ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args)
if (res == NULL) {
return NULL;
}
- PyList_SET_ITEM(res, 0, PyInt_FromLong(NPY_BUFSIZE));
- PyList_SET_ITEM(res, 1, PyInt_FromLong(UFUNC_ERR_DEFAULT));
+ PyList_SET_ITEM(res, 0, PyLong_FromLong(NPY_BUFSIZE));
+ PyList_SET_ITEM(res, 1, PyLong_FromLong(UFUNC_ERR_DEFAULT));
PyList_SET_ITEM(res, 2, Py_None); Py_INCREF(Py_None);
return res;
}
@@ -5133,7 +5157,7 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc,
return -1;
}
- key = PyInt_FromLong((long) user_dtype->type_num);
+ key = PyLong_FromLong((long) user_dtype->type_num);
if (key == NULL) {
return -1;
}
@@ -5168,9 +5192,12 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc,
result = -1;
}
else {
- PyUFunc_Loop1d *current;
int cmp = 1;
- current = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(cobj);
+ PyUFunc_Loop1d *current = PyCapsule_GetPointer(cobj, NULL);
+ if (current == NULL) {
+ result = -1;
+ goto done;
+ }
while (current != NULL) {
cmp = cmp_arg_types(current->arg_types,
arg_typenums, ufunc->nargs);
@@ -5204,6 +5231,7 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc,
}
}
+done:
PyArray_free(arg_typenums);
Py_DECREF(key);
@@ -5235,7 +5263,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
if (ufunc->userloops == NULL) {
ufunc->userloops = PyDict_New();
}
- key = PyInt_FromLong((long) usertype);
+ key = PyLong_FromLong((long) usertype);
if (key == NULL) {
return -1;
}
@@ -5272,7 +5300,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
}
/* If it's not there, then make one and return. */
else if (cobj == NULL) {
- cobj = NpyCapsule_FromVoidPtr((void *)funcdata, _loop1d_list_free);
+ cobj = PyCapsule_New((void *)funcdata, NULL, _loop1d_list_free);
if (cobj == NULL) {
goto fail;
}
@@ -5290,7 +5318,10 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
* is exactly like this one, then just replace.
* Otherwise insert.
*/
- current = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(cobj);
+ current = PyCapsule_GetPointer(cobj, NULL);
+ if (current == NULL) {
+ goto fail;
+ }
while (current != NULL) {
cmp = cmp_arg_types(current->arg_types, newtypes, ufunc->nargs);
if (cmp >= 0) {
@@ -5361,7 +5392,7 @@ ufunc_dealloc(PyUFuncObject *ufunc)
static PyObject *
ufunc_repr(PyUFuncObject *ufunc)
{
- return PyUString_FromFormat("<ufunc '%s'>", ufunc->name);
+ return PyUnicode_FromFormat("<ufunc '%s'>", ufunc->name);
}
static int
@@ -5973,7 +6004,7 @@ ufunc_get_doc(PyUFuncObject *ufunc)
}
if (ufunc->doc != NULL) {
PyUString_ConcatAndDel(&doc,
- PyUString_FromFormat("\n\n%s", ufunc->doc));
+ PyUnicode_FromFormat("\n\n%s", ufunc->doc));
}
return doc;
}
@@ -5981,25 +6012,25 @@ ufunc_get_doc(PyUFuncObject *ufunc)
static PyObject *
ufunc_get_nin(PyUFuncObject *ufunc)
{
- return PyInt_FromLong(ufunc->nin);
+ return PyLong_FromLong(ufunc->nin);
}
static PyObject *
ufunc_get_nout(PyUFuncObject *ufunc)
{
- return PyInt_FromLong(ufunc->nout);
+ return PyLong_FromLong(ufunc->nout);
}
static PyObject *
ufunc_get_nargs(PyUFuncObject *ufunc)
{
- return PyInt_FromLong(ufunc->nargs);
+ return PyLong_FromLong(ufunc->nargs);
}
static PyObject *
ufunc_get_ntypes(PyUFuncObject *ufunc)
{
- return PyInt_FromLong(ufunc->ntypes);
+ return PyLong_FromLong(ufunc->ntypes);
}
static PyObject *
@@ -6029,7 +6060,7 @@ ufunc_get_types(PyUFuncObject *ufunc)
t[ni + 2 + j] = _typecharfromnum(ufunc->types[n]);
n++;
}
- str = PyUString_FromStringAndSize(t, no + ni + 2);
+ str = PyUnicode_FromStringAndSize(t, no + ni + 2);
PyList_SET_ITEM(list, k, str);
}
PyArray_free(t);
@@ -6039,7 +6070,7 @@ ufunc_get_types(PyUFuncObject *ufunc)
static PyObject *
ufunc_get_name(PyUFuncObject *ufunc)
{
- return PyUString_FromString(ufunc->name);
+ return PyUnicode_FromString(ufunc->name);
}
static PyObject *
@@ -6055,7 +6086,7 @@ ufunc_get_signature(PyUFuncObject *ufunc)
if (!ufunc->core_enabled) {
Py_RETURN_NONE;
}
- return PyUString_FromString(ufunc->core_signature);
+ return PyUnicode_FromString(ufunc->core_signature);
}
#undef _typecharfromnum
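
The `PyInt_*` → `PyLong_*` switch in `ufunc_geterr` keeps the Python-level behaviour unchanged; the defaults it assembles remain visible through `np.geterrobj()`. A sketch (the printed errmask value is an assumption based on the default warn/ignore error settings):

    import numpy as np

    # [bufsize, errmask, errfunc] as assembled by ufunc_geterr:
    bufsize, errmask, errfunc = np.geterrobj()
    print(bufsize, errmask, errfunc)  # e.g. 8192 521 None
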
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index ea20bb24f..aa6f34d59 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -36,17 +36,17 @@ npy_casting_to_py_object(NPY_CASTING casting)
{
switch (casting) {
case NPY_NO_CASTING:
- return PyUString_FromString("no");
+ return PyUnicode_FromString("no");
case NPY_EQUIV_CASTING:
- return PyUString_FromString("equiv");
+ return PyUnicode_FromString("equiv");
case NPY_SAFE_CASTING:
- return PyUString_FromString("safe");
+ return PyUnicode_FromString("safe");
case NPY_SAME_KIND_CASTING:
- return PyUString_FromString("same_kind");
+ return PyUnicode_FromString("same_kind");
case NPY_UNSAFE_CASTING:
- return PyUString_FromString("unsafe");
+ return PyUnicode_FromString("unsafe");
default:
- return PyInt_FromLong(casting);
+ return PyLong_FromLong(casting);
}
}
@@ -1336,7 +1336,6 @@ find_userloop(PyUFuncObject *ufunc,
void **out_innerloopdata)
{
npy_intp i, nin = ufunc->nin, j, nargs = nin + ufunc->nout;
- PyUFunc_Loop1d *funcdata;
/* Use this to try to avoid repeating the same userdef loop search */
int last_userdef = -1;
@@ -1356,7 +1355,7 @@ find_userloop(PyUFuncObject *ufunc,
last_userdef = type_num;
- key = PyInt_FromLong(type_num);
+ key = PyLong_FromLong(type_num);
if (key == NULL) {
return -1;
}
@@ -1368,9 +1367,11 @@ find_userloop(PyUFuncObject *ufunc,
else if (obj == NULL) {
continue;
}
- for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
- funcdata != NULL;
- funcdata = funcdata->next) {
+ PyUFunc_Loop1d *funcdata = PyCapsule_GetPointer(obj, NULL);
+ if (funcdata == NULL) {
+ return -1;
+ }
+ for (; funcdata != NULL; funcdata = funcdata->next) {
int *types = funcdata->arg_types;
for (j = 0; j < nargs; ++j) {
@@ -1744,7 +1745,6 @@ linear_search_userloop_type_resolver(PyUFuncObject *self,
char *out_err_dst_typecode)
{
npy_intp i, nop = self->nin + self->nout;
- PyUFunc_Loop1d *funcdata;
/* Use this to try to avoid repeating the same userdef loop search */
int last_userdef = -1;
@@ -1764,7 +1764,7 @@ linear_search_userloop_type_resolver(PyUFuncObject *self,
last_userdef = type_num;
- key = PyInt_FromLong(type_num);
+ key = PyLong_FromLong(type_num);
if (key == NULL) {
return -1;
}
@@ -1776,9 +1776,11 @@ linear_search_userloop_type_resolver(PyUFuncObject *self,
else if (obj == NULL) {
continue;
}
- for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
- funcdata != NULL;
- funcdata = funcdata->next) {
+ PyUFunc_Loop1d *funcdata = PyCapsule_GetPointer(obj, NULL);
+ if (funcdata == NULL) {
+ return -1;
+ }
+ for (; funcdata != NULL; funcdata = funcdata->next) {
int *types = funcdata->arg_types;
switch (ufunc_loop_matches(self, op,
input_casting, output_casting,
@@ -1816,7 +1818,6 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
PyArray_Descr **out_dtype)
{
int i, j, nin = self->nin, nop = nin + self->nout;
- PyUFunc_Loop1d *funcdata;
/* Use this to try to avoid repeating the same userdef loop search */
int last_userdef = -1;
@@ -1831,7 +1832,7 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
last_userdef = type_num;
- key = PyInt_FromLong(type_num);
+ key = PyLong_FromLong(type_num);
if (key == NULL) {
return -1;
}
@@ -1844,9 +1845,11 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
continue;
}
- for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
- funcdata != NULL;
- funcdata = funcdata->next) {
+ PyUFunc_Loop1d *funcdata = PyCapsule_GetPointer(obj, NULL);
+ if (funcdata == NULL) {
+ return -1;
+ }
+ for (; funcdata != NULL; funcdata = funcdata->next) {
int *types = funcdata->arg_types;
int matched = 1;
diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c
index 708a27ad0..474db0245 100644
--- a/numpy/core/src/umath/umathmodule.c
+++ b/numpy/core/src/umath/umathmodule.c
@@ -75,7 +75,8 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) {
int nin, nout, i, nargs;
PyUFunc_PyFuncData *fdata;
PyUFuncObject *self;
- char *fname, *str, *types, *doc;
+ const char *fname = NULL;
+ char *str, *types, *doc;
Py_ssize_t fname_len = -1;
void * ptr, **data;
int offset[2];
@@ -95,12 +96,12 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) {
pyname = PyObject_GetAttrString(function, "__name__");
if (pyname) {
- (void) PyString_AsStringAndSize(pyname, &fname, &fname_len);
+ fname = PyUnicode_AsUTF8AndSize(pyname, &fname_len);
}
- if (PyErr_Occurred()) {
+ if (fname == NULL) {
+ PyErr_Clear();
fname = "?";
fname_len = 1;
- PyErr_Clear();
}
/*
@@ -237,23 +238,23 @@ NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_pyvals_name = NULL;
static int
intern_strings(void)
{
- if (!(npy_um_str_out = PyUString_InternFromString("out"))) return -1;
- if (!(npy_um_str_where = PyUString_InternFromString("where"))) return -1;
- if (!(npy_um_str_axes = PyUString_InternFromString("axes"))) return -1;
- if (!(npy_um_str_axis = PyUString_InternFromString("axis"))) return -1;
- if (!(npy_um_str_keepdims = PyUString_InternFromString("keepdims"))) return -1;
- if (!(npy_um_str_casting = PyUString_InternFromString("casting"))) return -1;
- if (!(npy_um_str_order = PyUString_InternFromString("order"))) return -1;
- if (!(npy_um_str_dtype = PyUString_InternFromString("dtype"))) return -1;
- if (!(npy_um_str_subok = PyUString_InternFromString("subok"))) return -1;
- if (!(npy_um_str_signature = PyUString_InternFromString("signature"))) return -1;
- if (!(npy_um_str_sig = PyUString_InternFromString("sig"))) return -1;
- if (!(npy_um_str_extobj = PyUString_InternFromString("extobj"))) return -1;
- if (!(npy_um_str_array_prepare = PyUString_InternFromString("__array_prepare__"))) return -1;
- if (!(npy_um_str_array_wrap = PyUString_InternFromString("__array_wrap__"))) return -1;
- if (!(npy_um_str_array_finalize = PyUString_InternFromString("__array_finalize__"))) return -1;
- if (!(npy_um_str_ufunc = PyUString_InternFromString("__array_ufunc__"))) return -1;
- if (!(npy_um_str_pyvals_name = PyUString_InternFromString(UFUNC_PYVALS_NAME))) return -1;
+ if (!(npy_um_str_out = PyUnicode_InternFromString("out"))) return -1;
+ if (!(npy_um_str_where = PyUnicode_InternFromString("where"))) return -1;
+ if (!(npy_um_str_axes = PyUnicode_InternFromString("axes"))) return -1;
+ if (!(npy_um_str_axis = PyUnicode_InternFromString("axis"))) return -1;
+ if (!(npy_um_str_keepdims = PyUnicode_InternFromString("keepdims"))) return -1;
+ if (!(npy_um_str_casting = PyUnicode_InternFromString("casting"))) return -1;
+ if (!(npy_um_str_order = PyUnicode_InternFromString("order"))) return -1;
+ if (!(npy_um_str_dtype = PyUnicode_InternFromString("dtype"))) return -1;
+ if (!(npy_um_str_subok = PyUnicode_InternFromString("subok"))) return -1;
+ if (!(npy_um_str_signature = PyUnicode_InternFromString("signature"))) return -1;
+ if (!(npy_um_str_sig = PyUnicode_InternFromString("sig"))) return -1;
+ if (!(npy_um_str_extobj = PyUnicode_InternFromString("extobj"))) return -1;
+ if (!(npy_um_str_array_prepare = PyUnicode_InternFromString("__array_prepare__"))) return -1;
+ if (!(npy_um_str_array_wrap = PyUnicode_InternFromString("__array_wrap__"))) return -1;
+ if (!(npy_um_str_array_finalize = PyUnicode_InternFromString("__array_finalize__"))) return -1;
+ if (!(npy_um_str_ufunc = PyUnicode_InternFromString("__array_ufunc__"))) return -1;
+ if (!(npy_um_str_pyvals_name = PyUnicode_InternFromString(UFUNC_PYVALS_NAME))) return -1;
return 0;
}
diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py
index d18df2e9c..a6c8cc8b2 100644
--- a/numpy/core/tests/test_array_coercion.py
+++ b/numpy/core/tests/test_array_coercion.py
@@ -11,6 +11,7 @@ from itertools import product
import numpy as np
from numpy.core._rational_tests import rational
+from numpy.core._multiarray_umath import _discover_array_parameters
from numpy.testing import (
assert_array_equal, assert_warns, IS_PYPY)
@@ -478,6 +479,27 @@ class TestNested:
with pytest.raises(ValueError):
np.array([[], np.empty((0, 1))], dtype=object)
+ def test_array_of_different_depths(self):
+ # When multiple arrays (or array-likes) appear in a sequence
+ # and have different depths, we currently discover as many
+ # dimensions as they share. (see also gh-17224)
+ arr = np.zeros((3, 2))
+ mismatch_first_dim = np.zeros((1, 2))
+ mismatch_second_dim = np.zeros((3, 3))
+
+ dtype, shape = _discover_array_parameters(
+ [arr, mismatch_second_dim], dtype=np.dtype("O"))
+ assert shape == (2, 3)
+
+ dtype, shape = _discover_array_parameters(
+ [arr, mismatch_first_dim], dtype=np.dtype("O"))
+ assert shape == (2,)
+ # The second case is currently supported because the arrays
+ # can be stored as objects:
+ res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O"))
+ assert res[0] is arr
+ assert res[1] is mismatch_first_dim
+
class TestBadSequences:
# These are tests for bad objects passed into `np.array`, in general
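
Using only public API, the depth-mismatch rule tested above is visible through `np.asarray` with `dtype=object` (a sketch; the shapes are illustrative):

    import numpy as np

    a = np.zeros((3, 2))
    b = np.zeros((1, 2))  # first dimension mismatches
    res = np.asarray([a, b], dtype=object)
    print(res.shape)      # (2,) -- only the shared depth is discovered
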
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 59a3954fd..f725091c5 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -26,6 +26,7 @@ class TestDateTime:
def test_datetime_dtype_creation(self):
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
+ 'μs', # alias for us
'ns', 'ps', 'fs', 'as']:
dt1 = np.dtype('M8[750%s]' % unit)
assert_(dt1 == np.dtype('datetime64[750%s]' % unit))
@@ -2389,3 +2390,19 @@ class TestDateTimeData:
def test_basic(self):
a = np.array(['1980-03-23'], dtype=np.datetime64)
assert_equal(np.datetime_data(a.dtype), ('D', 1))
+
+ def test_bytes(self):
+ # byte units are converted to unicode
+ dt = np.datetime64('2000', (b'ms', 5))
+ assert np.datetime_data(dt.dtype) == ('ms', 5)
+
+ dt = np.datetime64('2000', b'5ms')
+ assert np.datetime_data(dt.dtype) == ('ms', 5)
+
+ def test_non_ascii(self):
+ # μs is normalized to us
+ dt = np.datetime64('2000', ('μs', 5))
+ assert np.datetime_data(dt.dtype) == ('us', 5)
+
+ dt = np.datetime64('2000', '5μs')
+ assert np.datetime_data(dt.dtype) == ('us', 5)
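
The two new tests correspond to this user-facing behaviour (a sketch):

    import numpy as np

    # Byte unit strings are decoded to unicode:
    dt = np.datetime64('2000', b'5ms')
    print(np.datetime_data(dt.dtype))   # ('ms', 5)

    # The non-ascii alias 'μs' normalizes to 'us':
    dt = np.datetime64('2000', ('μs', 5))
    print(np.datetime_data(dt.dtype))   # ('us', 5)
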
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 9004bef30..17391e80c 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -708,19 +708,23 @@ class TestRaggedArray(_DeprecationTestCase):
self.assert_deprecated(lambda: np.array([[0], arr], dtype=np.float64))
-class TestTrimZeros(_DeprecationTestCase):
- # Numpy 1.20.0, 2020-07-31
- @pytest.mark.parametrize("arr", [np.random.rand(10, 10).tolist(),
- np.random.rand(10).astype(str)])
- def test_deprecated(self, arr):
- with warnings.catch_warnings():
- warnings.simplefilter('error', DeprecationWarning)
- try:
- np.trim_zeros(arr)
- except DeprecationWarning as ex:
- assert_(isinstance(ex.__cause__, ValueError))
- else:
- raise AssertionError("No error raised during function call")
-
- out = np.lib.function_base._trim_zeros_old(arr)
- assert_array_equal(arr, out)
+class FlatteningConcatenateUnsafeCast(_DeprecationTestCase):
+ # NumPy 1.20, 2020-09-03
+ message = "concatenate with `axis=None` will use same-kind casting"
+
+ def test_deprecated(self):
+ self.assert_deprecated(np.concatenate,
+ args=(([0.], [1.]),),
+ kwargs=dict(axis=None, out=np.empty(2, dtype=np.int64)))
+
+ def test_not_deprecated(self):
+ self.assert_not_deprecated(np.concatenate,
+ args=(([0.], [1.]),),
+ kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64),
+ 'casting': "unsafe"})
+
+ with assert_raises(TypeError):
+ # Tests should notice if the deprecation warning is given first...
+ np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64),
+ casting="same_kind")
+
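
What the new deprecation looks like from user code (a sketch; NumPy 1.20 emits the warning, later versions may raise):

    import numpy as np

    out = np.empty(2, dtype=np.int64)
    # Deprecated: flattening concatenate used to cast unsafely by default.
    np.concatenate(([0.], [1.]), axis=None, out=out)   # DeprecationWarning
    # Opting in explicitly keeps working:
    np.concatenate(([0.], [1.]), axis=None, out=out, casting="unsafe")
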
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index 62a9772c8..dad7a5883 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -402,3 +402,8 @@ class TestLinspace:
stop = array(2, dtype='O')
y = linspace(start, stop, 3)
assert_array_equal(y, array([1., 1.5, 2.]))
+
+ def test_round_negative(self):
+ y = linspace(-1, 3, num=8, dtype=int)
+ t = array([-1, -1, 0, 0, 1, 1, 2, 3], dtype=int)
+ assert_array_equal(y, t)
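
The rounding fix is easiest to see directly (a sketch):

    from numpy import linspace

    # Integer linspace now floors consistently for negative values:
    print(linspace(-1, 3, num=8, dtype=int))   # [-1 -1  0  0  1  1  2  3]
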
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 03f10bf2d..6f8af1757 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -21,7 +21,6 @@ import builtins
from decimal import Decimal
import numpy as np
-from numpy.compat import strchar
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
@@ -2031,7 +2030,7 @@ class TestMethods:
strtype = '>i2'
else:
strtype = '<i2'
- mydtype = [('name', strchar + '5'), ('col2', strtype)]
+ mydtype = [('name', 'U5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
@@ -3868,13 +3867,6 @@ class TestPickling:
with pytest.raises(ImportError):
array.__reduce_ex__(5)
- elif sys.version_info[:2] < (3, 6):
- # when calling __reduce_ex__ explicitly with protocol=5 on python
- # raise a ValueError saying that protocol 5 is not available for
- # this python version
- with pytest.raises(ValueError):
- array.__reduce_ex__(5)
-
def test_record_array_with_object_dtype(self):
my_object = object()
@@ -7436,6 +7428,18 @@ def test_array_interface_offset():
arr1 = np.asarray(DummyArray())
assert_equal(arr1, arr[1:])
+def test_array_interface_unicode_typestr():
+ arr = np.array([1, 2, 3], dtype='int32')
+ interface = dict(arr.__array_interface__)
+ interface['typestr'] = '\N{check mark}'
+
+ class DummyArray:
+ __array_interface__ = interface
+
+ # should not be UnicodeEncodeError
+ with pytest.raises(TypeError):
+ np.asarray(DummyArray())
+
def test_flat_element_deletion():
it = np.ones(3).flat
try:
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 7b3c3a40d..e10c7ad92 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -2880,3 +2880,68 @@ def test_warn_noclose():
casting='equiv', op_dtypes=[np.dtype('f4')])
del it
assert len(sup.log) == 1
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+@pytest.mark.parametrize(["in_dtype", "buf_dtype"],
+ [("i", "O"), ("O", "i"), # most simple cases
+ ("i,O", "O,O"), # structured partially only copying O
+ ("O,i", "i,O"), # structured casting to and from O
+ ])
+@pytest.mark.parametrize("steps", [1, 2, 3])
+def test_partial_iteration_cleanup(in_dtype, buf_dtype, steps):
+ value = 123 # relies on python cache (leak-check will still find it)
+ arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
+ count = sys.getrefcount(value)
+
+ it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
+ flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
+ for step in range(steps):
+ # The iteration finishes in 3 steps; the first two are partial
+ next(it)
+
+ # Note that resetting does not free references
+ del it
+ assert count == sys.getrefcount(value)
+
+ # Repeat the test with `iternext`
+ it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
+ flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
+ for step in range(steps):
+ it.iternext()
+
+ del it # should ensure cleanup
+ assert count == sys.getrefcount(value)
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+@pytest.mark.parametrize(["in_dtype", "buf_dtype"],
+ [("O", "i"), # most simple cases
+ ("O,i", "i,O"), # structured casting to and from O
+ ])
+def test_partial_iteration_error(in_dtype, buf_dtype):
+ value = 123 # relies on python cache (leak-check will still find it)
+ arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
+ if in_dtype == "O":
+ arr[int(np.BUFSIZE * 1.5)] = None
+ else:
+ arr[int(np.BUFSIZE * 1.5)]["f0"] = None
+
+ count = sys.getrefcount(value)
+
+ it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
+ flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
+ with pytest.raises(TypeError):
+ # pytest.raises seems to have issues with the error originating
+ # in the for loop, so manually unravel:
+ next(it)
+ next(it) # raises TypeError
+
+ # Repeat the test with `iternext` after resetting, the buffers should
+ # already be cleared from any references, so resetting is sufficient.
+ it.reset()
+ with pytest.raises(TypeError):
+ it.iternext()
+ it.iternext()
+
+ assert count == sys.getrefcount(value)
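
The cleanup that the new tests verify, in minimal form (a sketch; the dtypes are illustrative):

    import numpy as np

    arr = np.full(int(np.BUFSIZE * 2.5), 123, dtype="O")
    it = np.nditer(arr, op_dtypes=[np.dtype("i")],
                   flags=["buffered", "external_loop", "refs_ok"],
                   casting="unsafe")
    next(it)   # stop after a partial step
    del it     # buffers are freed without leaking object references
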
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index badf48b33..ae5ee4c88 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -14,6 +14,7 @@ from numpy.testing import (
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_warns, assert_array_max_ulp, HAS_REFCOUNT
)
+from numpy.core._rational_tests import rational
from hypothesis import assume, given, strategies as st
from hypothesis.extra import numpy as hynp
@@ -863,6 +864,30 @@ class TestTypes:
assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
+ def test_can_cast_and_promote_usertypes(self):
+ # The rational type defines safe casting for signed integers and
+ # booleans. Rational itself *does* cast safely to double.
+ # (rational does not actually cast to all signed integers; e.g.
+ # int64 can be either long or longlong, and only the first is registered)
+ valid_types = ["int8", "int16", "int32", "int64", "bool"]
+ invalid_types = "BHILQP" + "FDG" + "mM" + "f" + "V"
+
+ rational_dt = np.dtype(rational)
+ for numpy_dtype in valid_types:
+ numpy_dtype = np.dtype(numpy_dtype)
+ assert np.can_cast(numpy_dtype, rational_dt)
+ assert np.promote_types(numpy_dtype, rational_dt) is rational_dt
+
+ for numpy_dtype in invalid_types:
+ numpy_dtype = np.dtype(numpy_dtype)
+ assert not np.can_cast(numpy_dtype, rational_dt)
+ with pytest.raises(TypeError):
+ np.promote_types(numpy_dtype, rational_dt)
+
+ double_dt = np.dtype("double")
+ assert np.can_cast(rational_dt, double_dt)
+ assert np.promote_types(double_dt, rational_dt) is double_dt
+
def test_promote_types_strings(self):
assert_equal(np.promote_types('bool', 'S'), np.dtype('S5'))
assert_equal(np.promote_types('b', 'S'), np.dtype('S4'))
@@ -897,6 +922,110 @@ class TestTypes:
assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20'))
assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30'))
+ @pytest.mark.parametrize("dtype",
+ list(np.typecodes["All"]) +
+ ["i,i", "S3", "S100", "U3", "U100", rational])
+ def test_promote_identical_types_metadata(self, dtype):
+ # The same type passed in twice to promote types always
+ # preserves metadata
+ metadata = {1: 1}
+ dtype = np.dtype(dtype, metadata=metadata)
+
+ res = np.promote_types(dtype, dtype)
+ assert res.metadata == dtype.metadata
+
+ # byte-swapping preserves and makes the dtype native:
+ dtype = dtype.newbyteorder()
+ if dtype.isnative:
+ # The type does not have byte swapping
+ return
+
+ res = np.promote_types(dtype, dtype)
+ if res.char in "?bhilqpBHILQPefdgFDGOmM":
+ # Metadata is lost for simple promotions (they create a new dtype)
+ assert res.metadata is None
+ else:
+ assert res.metadata == metadata
+ if dtype.kind != "V":
+ # the result is native (except for structured void)
+ assert res.isnative
+
+ @pytest.mark.slow
+ @pytest.mark.parametrize(["dtype1", "dtype2"],
+ itertools.product(
+ list(np.typecodes["All"]) +
+ ["i,i", "S3", "S100", "U3", "U100", rational],
+ repeat=2))
+ def test_promote_types_metadata(self, dtype1, dtype2):
+ """Metadata handling in promotion does not appear formalized
+ right now in NumPy. This test should thus be considered to
+ document behaviour, rather than test the correct definition of it.
+
+ This test is very ugly, it was useful for rewriting part of the
+ promotion, but probably should eventually be replaced/deleted
+ (i.e. when metadata handling in promotion is better defined).
+ """
+ metadata1 = {1: 1}
+ metadata2 = {2: 2}
+ dtype1 = np.dtype(dtype1, metadata=metadata1)
+ dtype2 = np.dtype(dtype2, metadata=metadata2)
+
+ try:
+ res = np.promote_types(dtype1, dtype2)
+ except TypeError:
+ # Promotion failed, this test only checks metadata
+ return
+
+ # The rules for when metadata is preserved, and which dtype's
+ # metadata will be used, are very confusing and depend on multiple
+ # paths. This long if statement attempts to reproduce this:
+ if dtype1.type is rational or dtype2.type is rational:
+ # User dtype promotion preserves byte-order here:
+ if np.can_cast(res, dtype1):
+ assert res.metadata == dtype1.metadata
+ else:
+ assert res.metadata == dtype2.metadata
+
+ elif res.char in "?bhilqpBHILQPefdgFDGOmM":
+ # All simple types lose metadata (due to using promotion table):
+ assert res.metadata is None
+ elif res.kind in "SU" and dtype1 == dtype2:
+ # Strings give precedence to the second dtype:
+ assert res is dtype2
+ elif res == dtype1:
+ # If one input is the result, it is usually returned unchanged:
+ assert res is dtype1
+ elif res == dtype2:
+ # If one input is the result, it is usually returned unchanged:
+ assert res is dtype2
+ elif dtype1.kind == "S" and dtype2.kind == "U":
+ # Promotion creates a new unicode dtype from scratch
+ assert res.metadata is None
+ elif dtype1.kind == "U" and dtype2.kind == "S":
+ # Promotion creates a new unicode dtype from scratch
+ assert res.metadata is None
+ elif res.kind in "SU" and dtype2.kind != res.kind:
+ # We build on top of dtype1:
+ assert res.metadata == dtype1.metadata
+ elif res.kind in "SU" and res.kind == dtype1.kind:
+ assert res.metadata == dtype1.metadata
+ elif res.kind in "SU" and res.kind == dtype2.kind:
+ assert res.metadata == dtype2.metadata
+ else:
+ assert res.metadata is None
+
+ # Try again for byteswapped version
+ dtype1 = dtype1.newbyteorder()
+ assert dtype1.metadata == metadata1
+ res_bs = np.promote_types(dtype1, dtype2)
+ if res_bs.names is not None:
+ # Structured promotion doesn't remove byteswap:
+ assert res_bs.newbyteorder() == res
+ else:
+ assert res_bs == res
+ assert res_bs.metadata == res.metadata
+
+
def test_can_cast(self):
assert_(np.can_cast(np.int32, np.int64))
assert_(np.can_cast(np.float64, complex))
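
A condensed view of the metadata rules the tests above document (a sketch; the behaviour is not formally specified):

    import numpy as np

    dt = np.dtype("i4", metadata={1: 1})
    # Identical inputs preserve metadata:
    print(np.promote_types(dt, dt).metadata)              # {1: 1}
    # Table-based promotions create a fresh dtype and drop it:
    print(np.promote_types(dt, np.dtype("i8")).metadata)  # None
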
diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py
index 7e73d8c03..42600a12b 100644
--- a/numpy/core/tests/test_overrides.py
+++ b/numpy/core/tests/test_overrides.py
@@ -1,5 +1,7 @@
import inspect
import sys
+import tempfile
+from io import StringIO
from unittest import mock
import numpy as np
@@ -425,3 +427,168 @@ class TestNumPyFunctions:
# note: the internal implementation of np.sum() calls the .sum() method
array = np.array(1).view(MyArray)
assert_equal(np.sum(array), 'summed')
+
+
+class TestArrayLike:
+
+ class MyArray():
+
+ def __init__(self, function=None):
+ self.function = function
+
+ def __array_function__(self, func, types, args, kwargs):
+ try:
+ my_func = getattr(TestArrayLike.MyArray, func.__name__)
+ except AttributeError:
+ return NotImplemented
+ return my_func(*args, **kwargs)
+
+ class MyNoArrayFunctionArray():
+
+ def __init__(self, function=None):
+ self.function = function
+
+ def add_method(name, arr_class, enable_value_error=False):
+ def _definition(*args, **kwargs):
+ # Check that `like=` isn't propagated downstream
+ assert 'like' not in kwargs
+
+ if enable_value_error and 'value_error' in kwargs:
+ raise ValueError
+
+ return arr_class(getattr(arr_class, name))
+ setattr(arr_class, name, _definition)
+
+ def func_args(*args, **kwargs):
+ return args, kwargs
+
+ @requires_array_function
+ def test_array_like_not_implemented(self):
+ TestArrayLike.add_method('array', TestArrayLike.MyArray)
+
+ ref = TestArrayLike.MyArray.array()
+
+ with assert_raises_regex(TypeError, 'no implementation found'):
+ array_like = np.asarray(1, like=ref)
+
+ _array_tests = [
+ ('array', *func_args((1,))),
+ ('asarray', *func_args((1,))),
+ ('asanyarray', *func_args((1,))),
+ ('ascontiguousarray', *func_args((2, 3))),
+ ('asfortranarray', *func_args((2, 3))),
+ ('require', *func_args((np.arange(6).reshape(2, 3),),
+ requirements=['A', 'F'])),
+ ('empty', *func_args((1,))),
+ ('full', *func_args((1,), 2)),
+ ('ones', *func_args((1,))),
+ ('zeros', *func_args((1,))),
+ ('arange', *func_args(3)),
+ ('frombuffer', *func_args(b'\x00' * 8, dtype=int)),
+ ('fromiter', *func_args(range(3), dtype=int)),
+ ('fromstring', *func_args('1,2', dtype=int, sep=',')),
+ ('loadtxt', *func_args(lambda: StringIO('0 1\n2 3'))),
+ ('genfromtxt', *func_args(lambda: StringIO(u'1,2.1'),
+ dtype=[('int', 'i8'), ('float', 'f8')],
+ delimiter=',')),
+ ]
+
+ @pytest.mark.parametrize('function, args, kwargs', _array_tests)
+ @pytest.mark.parametrize('numpy_ref', [True, False])
+ @requires_array_function
+ def test_array_like(self, function, args, kwargs, numpy_ref):
+ TestArrayLike.add_method('array', TestArrayLike.MyArray)
+ TestArrayLike.add_method(function, TestArrayLike.MyArray)
+ np_func = getattr(np, function)
+ my_func = getattr(TestArrayLike.MyArray, function)
+
+ if numpy_ref is True:
+ ref = np.array(1)
+ else:
+ ref = TestArrayLike.MyArray.array()
+
+ like_args = tuple(a() if callable(a) else a for a in args)
+ array_like = np_func(*like_args, **kwargs, like=ref)
+
+ if numpy_ref is True:
+ assert type(array_like) is np.ndarray
+
+ np_args = tuple(a() if callable(a) else a for a in args)
+ np_arr = np_func(*np_args, **kwargs)
+
+ # Special-case np.empty to ensure values match
+ if function == "empty":
+ np_arr.fill(1)
+ array_like.fill(1)
+
+ assert_equal(array_like, np_arr)
+ else:
+ assert type(array_like) is TestArrayLike.MyArray
+ assert array_like.function is my_func
+
+ @pytest.mark.parametrize('function, args, kwargs', _array_tests)
+ @pytest.mark.parametrize('numpy_ref', [True, False])
+ @requires_array_function
+ def test_no_array_function_like(self, function, args, kwargs, numpy_ref):
+ TestArrayLike.add_method('array', TestArrayLike.MyNoArrayFunctionArray)
+ TestArrayLike.add_method(function, TestArrayLike.MyNoArrayFunctionArray)
+ np_func = getattr(np, function)
+ my_func = getattr(TestArrayLike.MyNoArrayFunctionArray, function)
+
+ if numpy_ref is True:
+ ref = np.array(1)
+ else:
+ ref = TestArrayLike.MyNoArrayFunctionArray.array()
+
+ like_args = tuple(a() if callable(a) else a for a in args)
+ array_like = np_func(*like_args, **kwargs, like=ref)
+
+ assert type(array_like) is np.ndarray
+ if numpy_ref is True:
+ np_args = tuple(a() if callable(a) else a for a in args)
+ np_arr = np_func(*np_args, **kwargs)
+
+ # Special-case np.empty to ensure values match
+ if function == "empty":
+ np_arr.fill(1)
+ array_like.fill(1)
+
+ assert_equal(array_like, np_arr)
+
+ @pytest.mark.parametrize('numpy_ref', [True, False])
+ def test_array_like_fromfile(self, numpy_ref):
+ TestArrayLike.add_method('array', TestArrayLike.MyArray)
+ TestArrayLike.add_method("fromfile", TestArrayLike.MyArray)
+
+ if numpy_ref is True:
+ ref = np.array(1)
+ else:
+ ref = TestArrayLike.MyArray.array()
+
+ data = np.random.random(5)
+
+ fname = tempfile.mkstemp()[1]
+ data.tofile(fname)
+
+ array_like = np.fromfile(fname, like=ref)
+ if numpy_ref is True:
+ assert type(array_like) is np.ndarray
+ np_res = np.fromfile(fname, like=ref)
+ assert_equal(np_res, data)
+ assert_equal(array_like, np_res)
+ else:
+ assert type(array_like) is TestArrayLike.MyArray
+ assert array_like.function is TestArrayLike.MyArray.fromfile
+
+ @requires_array_function
+ def test_exception_handling(self):
+ TestArrayLike.add_method(
+ 'array',
+ TestArrayLike.MyArray,
+ enable_value_error=True,
+ )
+
+ ref = TestArrayLike.MyArray.array()
+
+ with assert_raises(ValueError):
+ np.array(1, value_error=True, like=ref)
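
The `like=` protocol (NEP 35) these tests exercise, from the caller's side (a sketch):

    import numpy as np

    # With an ndarray reference, creation functions behave as usual:
    ref = np.array(0)
    arr = np.asarray([1, 2, 3], like=ref)
    print(type(arr))   # <class 'numpy.ndarray'>
    # With a reference implementing __array_function__, the call is
    # dispatched to that type's implementation instead.
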
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index 4350a3407..f28ad5ac9 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -1,5 +1,6 @@
import collections.abc
import textwrap
+from io import BytesIO
from os import path
from pathlib import Path
import pytest
@@ -79,8 +80,14 @@ class TestFromrecords:
r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
fd.seek(2880 * 2)
r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big')
+ fd.seek(2880 * 2)
+ bytes_array = BytesIO()
+ bytes_array.write(fd.read())
+ bytes_array.seek(0)
+ r3 = np.rec.fromfile(bytes_array, formats='f8,i4,a5', shape=3, byteorder='big')
fd.close()
assert_equal(r1, r2)
+ assert_equal(r2, r3)
def test_recarray_from_obj(self):
count = 10
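
The addition above shows that `np.rec.fromfile` now accepts file-like objects; a minimal round trip (a sketch; field values are illustrative):

    import numpy as np
    from io import BytesIO

    rec = np.rec.array([(1.0, 2, b'abcde')], formats='f8,i4,a5')
    buf = BytesIO(rec.tobytes())
    r = np.rec.fromfile(buf, formats='f8,i4,a5', shape=1)
    print(r[0])
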
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 51cf7039f..2e731d4fa 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -14,7 +14,7 @@ from numpy.testing import (
assert_raises_regex, assert_warns, suppress_warnings,
_assert_valid_refcount, HAS_REFCOUNT,
)
-from numpy.testing._private.utils import _no_tracing
+from numpy.testing._private.utils import _no_tracing, requires_memory
from numpy.compat import asbytes, asunicode, pickle
try:
@@ -2488,3 +2488,39 @@ class TestRegression:
assert arr.size * arr.itemsize > 2 ** 31
c_arr = np.ctypeslib.as_ctypes(arr)
assert_equal(c_arr._length_, arr.size)
+
+ def test_complex_conversion_error(self):
+ # gh-17068
+ with pytest.raises(TypeError, match=r"Unable to convert dtype.*"):
+ complex(np.array("now", np.datetime64))
+
+ def test__array_interface__descr(self):
+ # gh-17068
+ dt = np.dtype(dict(names=['a', 'b'],
+ offsets=[0, 0],
+ formats=[np.int64, np.int64]))
+ descr = np.array((1, 1), dtype=dt).__array_interface__['descr']
+ assert descr == [('', '|V8')] # instead of [(b'', '|V8')]
+
+ @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
+ @requires_memory(free_bytes=9e9)
+ def test_dot_big_stride(self):
+ # gh-17111
+ # blas stride = stride//itemsize > int32 max
+ int32_max = np.iinfo(np.int32).max
+ n = int32_max + 3
+ a = np.empty([n], dtype=np.float32)
+ b = a[::n-1]
+ b[...] = 1
+ assert b.strides[0] > int32_max * b.dtype.itemsize
+ assert np.dot(b, b) == 2.0
+
+ def test_frompyfunc_name(self):
+ # name conversion was failing for python 3 strings
+ # resulting in the default '?' name. Also test utf-8
+ # encoding using non-ascii name.
+ def cassé(x):
+ return x
+
+ f = np.frompyfunc(cassé, 1, 1)
+ assert str(f) == "<ufunc 'cassé (vectorized)'>"
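
The `frompyfunc` name fix, seen from Python (a sketch):

    import numpy as np

    def cube(x):
        return x ** 3

    f = np.frompyfunc(cube, 1, 1)
    print(f)   # <ufunc 'cube (vectorized)'> -- no longer the fallback '?'
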
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index 94a916193..4e56ace90 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -342,19 +342,32 @@ class TestConcatenate:
assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
concatenate((a, b), out=np.empty(4))
- def test_out_dtype(self):
- out = np.empty(4, np.float32)
- res = concatenate((array([1, 2]), array([3, 4])), out=out)
- assert_(out is res)
-
- out = np.empty(4, np.complex64)
- res = concatenate((array([0.1, 0.2]), array([0.3, 0.4])), out=out)
- assert_(out is res)
-
- # invalid cast
- out = np.empty(4, np.int32)
- assert_raises(TypeError, concatenate,
- (array([0.1, 0.2]), array([0.3, 0.4])), out=out)
+ @pytest.mark.parametrize("axis", [None, 0])
+ @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8"])
+ @pytest.mark.parametrize("casting",
+ ['no', 'equiv', 'safe', 'same_kind', 'unsafe'])
+ def test_out_and_dtype(self, axis, out_dtype, casting):
+ # Compare usage of `out=out` with `dtype=out.dtype`
+ out = np.empty(4, dtype=out_dtype)
+ to_concat = (array([1.1, 2.2]), array([3.3, 4.4]))
+
+ if not np.can_cast(to_concat[0], out_dtype, casting=casting):
+ with assert_raises(TypeError):
+ concatenate(to_concat, out=out, axis=axis, casting=casting)
+ with assert_raises(TypeError):
+ concatenate(to_concat, dtype=out.dtype,
+ axis=axis, casting=casting)
+ else:
+ res_out = concatenate(to_concat, out=out,
+ axis=axis, casting=casting)
+ res_dtype = concatenate(to_concat, dtype=out.dtype,
+ axis=axis, casting=casting)
+ assert res_out is out
+ assert_array_equal(out, res_dtype)
+ assert res_dtype.dtype == out_dtype
+
+ with assert_raises(TypeError):
+ concatenate(to_concat, out=out, dtype=out_dtype, axis=axis)
def test_stack():
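
The new `dtype=`/`casting=` arguments to `concatenate` in brief (a sketch):

    import numpy as np

    parts = (np.array([1.1, 2.2]), np.array([3.3, 4.4]))
    print(np.concatenate(parts, dtype="f4"))          # same-kind cast is fine
    # A float -> int result requires opting in:
    np.concatenate(parts, dtype=np.int64, casting="unsafe")
    # Passing both out= and dtype= is rejected:
    # np.concatenate(parts, out=np.empty(4), dtype="f4")  -> TypeError
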
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 1305f4877..9eaa1a977 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -1,5 +1,6 @@
import warnings
import itertools
+import sys
import pytest
@@ -11,7 +12,7 @@ import numpy.core._rational_tests as _rational_tests
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_no_warnings,
- assert_allclose,
+ assert_allclose, HAS_REFCOUNT,
)
from numpy.compat import pickle
@@ -2074,3 +2075,60 @@ def test_ufunc_warn_with_nan(ufunc):
else:
raise ValueError('ufunc with more than 2 inputs')
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_ufunc_casterrors():
+ # Tests that casting errors are correctly reported and buffers are
+ # cleared.
+ # The following array can be added to itself as an object array, but
+ # the result cannot be cast to an integer output:
+ value = 123 # relies on python cache (leak-check will still find it)
+ arr = np.array([value] * int(np.BUFSIZE * 1.5) +
+ ["string"] +
+ [value] * int(1.5 * np.BUFSIZE), dtype=object)
+ out = np.ones(len(arr), dtype=np.intp)
+
+ count = sys.getrefcount(value)
+ with pytest.raises(ValueError):
+ # Output casting failure:
+ np.add(arr, arr, out=out, casting="unsafe")
+
+ assert count == sys.getrefcount(value)
+ # output is unchanged after the error; this shows that the iteration
+ # was aborted (this is not necessarily defined behaviour)
+ assert out[-1] == 1
+
+ with pytest.raises(ValueError):
+ # Input casting failure:
+ np.add(arr, arr, out=out, dtype=np.intp, casting="unsafe")
+
+ assert count == sys.getrefcount(value)
+ # output is unchanged after the error; this shows that the iteration
+ # was aborted (this is not necessarily defined behaviour)
+ assert out[-1] == 1
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+@pytest.mark.parametrize("offset",
+ [0, np.BUFSIZE//2, int(1.5*np.BUFSIZE)])
+def test_reduce_casterrors(offset):
+ # Test reporting of casting errors in reductions, we test various
+ # offsets to where the casting error will occur, since these may occur
+ # at different places during the reduction procedure. For example
+ # the first item may be special.
+ value = 123 # relies on python cache (leak-check will still find it)
+ arr = np.array([value] * offset +
+ ["string"] +
+ [value] * int(1.5 * np.BUFSIZE), dtype=object)
+ out = np.array(-1, dtype=np.intp)
+
+ count = sys.getrefcount(value)
+ with pytest.raises(ValueError):
+ # This is an unsafe cast, but we currently always allow that:
+ np.add.reduce(arr, dtype=np.intp, out=out)
+ assert count == sys.getrefcount(value)
+ # If an error occurs during casting, the operation runs at most up to
+ # the point of the error (which would give `value * offset`), and the
+ # output stays -1 if the error happened immediately.
+ # This does not define behaviour; the output is invalid and thus undefined.
+ assert out[()] < value * offset
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index ae72687ca..818b2ad6c 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -2457,7 +2457,7 @@ class TestSpecialMethods:
assert_raises(ValueError, inner1d, a, a, out=())
def test_ufunc_override_with_super(self):
- # NOTE: this class is given as an example in doc/subclassing.py;
+ # NOTE: this class is used in doc/source/user/basics.subclassing.rst
# if you make any changes here, do update it there too.
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):
diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py
index 76ba838b7..e8f7750fe 100644
--- a/numpy/ctypeslib.py
+++ b/numpy/ctypeslib.py
@@ -49,12 +49,11 @@ Then, we're ready to call ``foo_func``:
>>> _lib.foo_func(out, len(out)) #doctest: +SKIP
"""
-__all__ = ['load_library', 'ndpointer', 'ctypes_load_library',
- 'c_intp', 'as_ctypes', 'as_array']
+__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array']
import os
from numpy import (
- integer, ndarray, dtype as _dtype, deprecate, array, frombuffer
+ integer, ndarray, dtype as _dtype, array, frombuffer
)
from numpy.core.multiarray import _flagdict, flagsobj
@@ -75,7 +74,6 @@ if ctypes is None:
"""
raise ImportError("ctypes is not available.")
- ctypes_load_library = _dummy
load_library = _dummy
as_ctypes = _dummy
as_array = _dummy
@@ -154,8 +152,6 @@ else:
## if no successful return in the libname_ext loop:
raise OSError("no file with expected extension")
- ctypes_load_library = deprecate(load_library, 'ctypes_load_library',
- 'load_library')
def _num_fromflags(flaglist):
num = 0
diff --git a/numpy/ctypeslib.pyi b/numpy/ctypeslib.pyi
new file mode 100644
index 000000000..cacc97d68
--- /dev/null
+++ b/numpy/ctypeslib.pyi
@@ -0,0 +1,7 @@
+from typing import Any
+
+load_library: Any
+ndpointer: Any
+c_intp: Any
+as_ctypes: Any
+as_array: Any
diff --git a/numpy/distutils/__init__.pyi b/numpy/distutils/__init__.pyi
new file mode 100644
index 000000000..3938d68de
--- /dev/null
+++ b/numpy/distutils/__init__.pyi
@@ -0,0 +1,4 @@
+from typing import Any
+
+# TODO: remove when the full numpy namespace is defined
+def __getattr__(name: str) -> Any: ...
diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py
index 85dc2f1e8..9d6bfcbd4 100644
--- a/numpy/distutils/ccompiler_opt.py
+++ b/numpy/distutils/ccompiler_opt.py
@@ -152,6 +152,18 @@ class _Config:
By default(None), treated as True if the feature contains at
least one applicable flag. see `feature_can_autovec()`
+ "extra_checks": str or list, optional
+ Extra test case names for the CPU feature that need to be tested
+ against the compiler.
+
+ Each test case must have a C file named ``extra_xxxx.c``, where
+ ``xxxx`` is the case name in lower case, under 'conf_check_path'.
+ It should contain at least one intrinsic or function related to the test case.
+
+ If the compiler is able to compile the C file successfully, then
+ `CCompilerOpt` will add a C ``#define`` for it to the main dispatch
+ header, e.g. ``#define {conf_c_prefix}_XXXX``, where ``XXXX`` is the
+ case name in upper case.
+
**NOTES**:
* space can be used as separator with options that supports "str or list"
* case-sensitive for all values and feature name must be in upper-case.
@@ -230,7 +242,10 @@ class _Config:
F16C = dict(interest=11, implies="AVX"),
FMA3 = dict(interest=12, implies="F16C"),
AVX2 = dict(interest=13, implies="F16C"),
- AVX512F = dict(interest=20, implies="FMA3 AVX2", implies_detect=False),
+ AVX512F = dict(
+ interest=20, implies="FMA3 AVX2", implies_detect=False,
+ extra_checks="AVX512F_REDUCE"
+ ),
AVX512CD = dict(interest=21, implies="AVX512F"),
AVX512_KNL = dict(
interest=40, implies="AVX512CD", group="AVX512ER AVX512PF",
@@ -243,7 +258,8 @@ class _Config:
),
AVX512_SKX = dict(
interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ",
- detect="AVX512_SKX", implies_detect=False
+ detect="AVX512_SKX", implies_detect=False,
+ extra_checks="AVX512BW_MASK"
),
AVX512_CLX = dict(
interest=43, implies="AVX512_SKX", group="AVX512VNNI",
@@ -673,7 +689,7 @@ class _Distutils:
# intel and msvc compilers don't raise
# fatal errors when flags are wrong or unsupported
".*("
- "warning D9002|" # msvc, it should be work with any language.
+ "warning D9002|" # msvc, it should be work with any language.
"invalid argument for option" # intel
").*"
)
@@ -1137,7 +1153,7 @@ class _Feature:
continue
# list is used internally for these options
for option in (
- "implies", "group", "detect", "headers", "flags"
+ "implies", "group", "detect", "headers", "flags", "extra_checks"
) :
oval = feature.get(option)
if isinstance(oval, str):
@@ -1439,7 +1455,7 @@ class _Feature:
self.conf_check_path, "cpu_%s.c" % name.lower()
)
if not os.path.exists(test_path):
- self.dist_fatal("feature test file is not exist", path)
+ self.dist_fatal("feature test file is not exist", test_path)
test = self.dist_test(test_path, force_flags + self.cc_flags["werror"])
if not test:
@@ -1487,6 +1503,45 @@ class _Feature:
can = valid_flags and any(valid_flags)
return can
+ @_Cache.me
+ def feature_extra_checks(self, name):
+ """
+ Return a list of supported extra checks after testing them against
+ the compiler.
+
+ Parameters
+ ----------
+ name: str
+ CPU feature name in uppercase.
+ """
+ assert isinstance(name, str)
+ d = self.feature_supported[name]
+ extra_checks = d.get("extra_checks", [])
+ if not extra_checks:
+ return []
+
+ self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks)
+ flags = self.feature_flags(name)
+ available = []
+ not_available = []
+ for chk in extra_checks:
+ test_path = os.path.join(
+ self.conf_check_path, "extra_%s.c" % chk.lower()
+ )
+ if not os.path.exists(test_path):
+ self.dist_fatal("extra check file does not exist", test_path)
+
+ is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"])
+ if is_supported:
+ available.append(chk)
+ else:
+ not_available.append(chk)
+
+ if not_available:
+ self.dist_log("testing failed for checks", not_available, stderr=True)
+ return available
+
def feature_c_preprocessor(self, feature_name, tabs=0):
"""
Generate C preprocessor definitions and include headers of a CPU feature.
@@ -1520,14 +1575,18 @@ class _Feature:
prepr += [
"#include <%s>" % h for h in feature.get("headers", [])
]
- group = feature.get("group", [])
- for f in group:
- # Guard features in case of duplicate definitions
+
+ extra_defs = feature.get("group", [])
+ extra_defs += self.feature_extra_checks(feature_name)
+ for edef in extra_defs:
+ # Guard extra definitions in case of duplicate with
+ # another feature
prepr += [
- "#ifndef %sHAVE_%s" % (self.conf_c_prefix, f),
- "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, f),
+ "#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef),
+ "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef),
"#endif",
]
+
if tabs > 0:
prepr = [('\t'*tabs) + l for l in prepr]
return '\n'.join(prepr)
@@ -2269,6 +2328,12 @@ class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
baseline_rows.append((
"Flags", (' '.join(baseline_flags) if baseline_flags else "none")
))
+ extra_checks = []
+ for name in baseline_names:
+ extra_checks += self.feature_extra_checks(name)
+ baseline_rows.append((
+ "Extra checks", (' '.join(extra_checks) if extra_checks else "none")
+ ))
########## dispatch ##########
if self.cc_noopt:
@@ -2307,14 +2372,21 @@ class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
else:
dispatch_rows.append(("Generated", ''))
for tar in self.feature_sorted(target_sources):
+ tar_as_seq = [tar] if isinstance(tar, str) else tar
sources = target_sources[tar]
name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
flags = ' '.join(self.feature_flags(tar))
implies = ' '.join(self.feature_sorted(self.feature_implies(tar)))
detect = ' '.join(self.feature_detect(tar))
+ extra_checks = []
+ for name in tar_as_seq:
+ extra_checks += self.feature_extra_checks(name)
+ extra_checks = (' '.join(extra_checks) if extra_checks else "none")
+
dispatch_rows.append(('', ''))
dispatch_rows.append((name, implies))
dispatch_rows.append(("Flags", flags))
+ dispatch_rows.append(("Extra checks", extra_checks))
dispatch_rows.append(("Detect", detect))
for src in sources:
dispatch_rows.append(("", src))
diff --git a/numpy/distutils/checks/extra_avx512bw_mask.c b/numpy/distutils/checks/extra_avx512bw_mask.c
new file mode 100644
index 000000000..9cfd0c2a5
--- /dev/null
+++ b/numpy/distutils/checks/extra_avx512bw_mask.c
@@ -0,0 +1,18 @@
+#include <immintrin.h>
+/**
+ * Test BW mask operations due to:
+ * - MSVC has supported it since vs2019 see,
+ * https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
+ * - Clang >= v8.0
+ * - GCC >= v7.1
+ */
+int main(void)
+{
+ __mmask64 m64 = _mm512_cmpeq_epi8_mask(_mm512_set1_epi8((char)1), _mm512_set1_epi8((char)1));
+ m64 = _kor_mask64(m64, m64);
+ m64 = _kxor_mask64(m64, m64);
+ m64 = _cvtu64_mask64(_cvtmask64_u64(m64));
+ m64 = _mm512_kunpackd(m64, m64);
+ m64 = (__mmask64)_mm512_kunpackw((__mmask32)m64, (__mmask32)m64);
+ return (int)_cvtmask64_u64(m64);
+}
diff --git a/numpy/distutils/checks/extra_avx512f_reduce.c b/numpy/distutils/checks/extra_avx512f_reduce.c
new file mode 100644
index 000000000..f979d504e
--- /dev/null
+++ b/numpy/distutils/checks/extra_avx512f_reduce.c
@@ -0,0 +1,41 @@
+#include <immintrin.h>
+/**
+ * The following intrinsics don't have direct native support but compilers
+ * tend to emulate them.
+ * They're usually supported by gcc >= 7.1, clang >= 4 and icc >= 19
+ */
+int main(void)
+{
+ __m512 one_ps = _mm512_set1_ps(1.0f);
+ __m512d one_pd = _mm512_set1_pd(1.0);
+ __m512i one_i64 = _mm512_set1_epi64(1.0);
+ // add
+ float sum_ps = _mm512_reduce_add_ps(one_ps);
+ double sum_pd = _mm512_reduce_add_pd(one_pd);
+ int sum_int = (int)_mm512_reduce_add_epi64(one_i64);
+ sum_int += (int)_mm512_reduce_add_epi32(one_i64);
+ // mul
+ sum_ps += _mm512_reduce_mul_ps(one_ps);
+ sum_pd += _mm512_reduce_mul_pd(one_pd);
+ sum_int += (int)_mm512_reduce_mul_epi64(one_i64);
+ sum_int += (int)_mm512_reduce_mul_epi32(one_i64);
+ // min
+ sum_ps += _mm512_reduce_min_ps(one_ps);
+ sum_pd += _mm512_reduce_min_pd(one_pd);
+ sum_int += (int)_mm512_reduce_min_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_min_epu32(one_i64);
+ sum_int += (int)_mm512_reduce_min_epi64(one_i64);
+ // max
+ sum_ps += _mm512_reduce_max_ps(one_ps);
+ sum_pd += _mm512_reduce_max_pd(one_pd);
+ sum_int += (int)_mm512_reduce_max_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_max_epu32(one_i64);
+ sum_int += (int)_mm512_reduce_max_epi64(one_i64);
+ // and
+ sum_int += (int)_mm512_reduce_and_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_and_epi64(one_i64);
+ // or
+ sum_int += (int)_mm512_reduce_or_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_or_epi64(one_i64);
+ return (int)sum_ps + (int)sum_pd + sum_int;
+}
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index b6557fcf6..c7502d3e6 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -559,7 +559,7 @@ class build_ext (old_build_ext):
unlinkable_fobjects = list(unlinkable_fobjects)
# Expand possible fake static libraries to objects
- for lib in list(libraries):
+ for lib in libraries:
for libdir in library_dirs:
fake_lib = os.path.join(libdir, lib + '.fobjects')
if os.path.isfile(fake_lib):
diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py
index 1c3069363..a1c52412d 100644
--- a/numpy/distutils/fcompiler/__init__.py
+++ b/numpy/distutils/fcompiler/__init__.py
@@ -20,8 +20,6 @@ import os
import sys
import re
-from numpy.compat import open_latin1
-
from distutils.sysconfig import get_python_lib
from distutils.fancy_getopt import FancyGetopt
from distutils.errors import DistutilsModuleError, \
@@ -975,29 +973,27 @@ def is_free_format(file):
# f90 allows both fixed and free format, assuming fixed unless
# signs of free format are detected.
result = 0
- f = open_latin1(file, 'r')
- line = f.readline()
- n = 10000 # the number of non-comment lines to scan for hints
- if _has_f_header(line):
- n = 0
- elif _has_f90_header(line):
- n = 0
- result = 1
- while n>0 and line:
- line = line.rstrip()
- if line and line[0]!='!':
- n -= 1
- if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
- result = 1
- break
+ with open(file, encoding='latin1') as f:
line = f.readline()
- f.close()
+ n = 10000 # the number of non-comment lines to scan for hints
+ if _has_f_header(line):
+ n = 0
+ elif _has_f90_header(line):
+ n = 0
+ result = 1
+ while n>0 and line:
+ line = line.rstrip()
+ if line and line[0]!='!':
+ n -= 1
+ if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
+ result = 1
+ break
+ line = f.readline()
return result
def has_f90_header(src):
- f = open_latin1(src, 'r')
- line = f.readline()
- f.close()
+ with open(src, encoding='latin1') as f:
+ line = f.readline()
return _has_f90_header(line) or _has_fix_header(line)
_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I)
@@ -1008,17 +1004,16 @@ def get_f77flags(src):
Return a dictionary {<fcompiler type>:<f77 flags>}.
"""
flags = {}
- f = open_latin1(src, 'r')
- i = 0
- for line in f:
- i += 1
- if i>20: break
- m = _f77flags_re.match(line)
- if not m: continue
- fcname = m.group('fcname').strip()
- fflags = m.group('fflags').strip()
- flags[fcname] = split_quoted(fflags)
- f.close()
+ with open(src, encoding='latin1') as f:
+ i = 0
+ for line in f:
+ i += 1
+ if i>20: break
+ m = _f77flags_re.match(line)
+ if not m: continue
+ fcname = m.group('fcname').strip()
+ fflags = m.group('fflags').strip()
+ flags[fcname] = split_quoted(fflags)
return flags
# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index caa08549e..0d9d769c2 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -23,13 +23,6 @@ def is_win64():
return sys.platform == "win32" and platform.architecture()[0] == "64bit"
-if is_win64():
- #_EXTRAFLAGS = ["-fno-leading-underscore"]
- _EXTRAFLAGS = []
-else:
- _EXTRAFLAGS = []
-
-
class GnuFCompiler(FCompiler):
compiler_type = 'gnu'
compiler_aliases = ('g77', )
@@ -238,7 +231,7 @@ class GnuFCompiler(FCompiler):
def _c_arch_flags(self):
""" Return detected arch flags from CFLAGS """
- from distutils import sysconfig
+ import sysconfig
try:
cflags = sysconfig.get_config_vars()['CFLAGS']
except KeyError:
@@ -297,11 +290,11 @@ class Gnu95FCompiler(GnuFCompiler):
executables = {
'version_cmd' : ["<F90>", "-dumpversion"],
'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
- "-fno-second-underscore"] + _EXTRAFLAGS,
+ "-fno-second-underscore"],
'compiler_f90' : [None, "-Wall", "-g",
- "-fno-second-underscore"] + _EXTRAFLAGS,
+ "-fno-second-underscore"],
'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
- "-fno-second-underscore"] + _EXTRAFLAGS,
+ "-fno-second-underscore"],
'linker_so' : ["<F90>", "-Wall", "-g"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index d2a3149f7..aa649a23f 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -1900,15 +1900,16 @@ class Configuration:
revision0 = f.read().strip()
branch_map = {}
- for line in open(branch_cache_fn, 'r'):
- branch1, revision1 = line.split()[:2]
- if revision1==revision0:
- branch0 = branch1
- try:
- revision1 = int(revision1)
- except ValueError:
- continue
- branch_map[branch1] = revision1
+ with open(branch_cache_fn, 'r') as f:
+ for line in f:
+ branch1, revision1 = line.split()[:2]
+ if revision1==revision0:
+ branch0 = branch1
+ try:
+ revision1 = int(revision1)
+ except ValueError:
+ continue
+ branch_map[branch1] = revision1
return branch_map.get(branch0)
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index df82683dc..19f7482f2 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -171,7 +171,7 @@ from configparser import RawConfigParser as ConfigParser
from distutils.errors import DistutilsError
from distutils.dist import Distribution
-import distutils.sysconfig
+import sysconfig
from numpy.distutils import log
from distutils.util import get_platform
@@ -187,6 +187,7 @@ import distutils.ccompiler
import tempfile
import shutil
+__all__ = ['system_info']
# Determine number of bits
import platform
@@ -255,7 +256,7 @@ def libpaths(paths, bits):
if sys.platform == 'win32':
default_lib_dirs = ['C:\\',
- os.path.join(distutils.sysconfig.EXEC_PREFIX,
+ os.path.join(sysconfig.get_config_var('exec_prefix'),
'libs')]
default_runtime_dirs = []
default_include_dirs = []
@@ -715,8 +716,7 @@ class system_info:
AliasedOptionError :
in case more than one of the options are found
"""
- found = map(lambda opt: self.cp.has_option(self.section, opt), options)
- found = list(found)
+ found = [self.cp.has_option(self.section, opt) for opt in options]
if sum(found) == 1:
return options[found.index(True)]
elif sum(found) == 0:
@@ -2499,13 +2499,12 @@ class _numpy_info(system_info):
except AttributeError:
pass
- include_dirs.append(distutils.sysconfig.get_python_inc(
- prefix=os.sep.join(prefix)))
+ include_dirs.append(sysconfig.get_path('include'))
except ImportError:
pass
- py_incl_dir = distutils.sysconfig.get_python_inc()
+ py_incl_dir = sysconfig.get_path('include')
include_dirs.append(py_incl_dir)
- py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
+ py_pincl_dir = sysconfig.get_path('platinclude')
if py_pincl_dir not in include_dirs:
include_dirs.append(py_pincl_dir)
for d in default_include_dirs:
@@ -2632,8 +2631,8 @@ class boost_python_info(system_info):
break
if not src_dir:
return
- py_incl_dirs = [distutils.sysconfig.get_python_inc()]
- py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
+ py_incl_dirs = [sysconfig.get_path('include')]
+ py_pincl_dir = sysconfig.get_path('platinclude')
if py_pincl_dir not in py_incl_dirs:
py_incl_dirs.append(py_pincl_dir)
srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')
diff --git a/numpy/distutils/tests/test_ccompiler_opt_conf.py b/numpy/distutils/tests/test_ccompiler_opt_conf.py
index 2f83a59e0..244748e58 100644
--- a/numpy/distutils/tests/test_ccompiler_opt_conf.py
+++ b/numpy/distutils/tests/test_ccompiler_opt_conf.py
@@ -66,11 +66,12 @@ class _TestConfFeatures(FakeCCompilerOpt):
self.test_implies(error_msg, search_in, feature_name, feature_dict)
self.test_group(error_msg, search_in, feature_name, feature_dict)
+ self.test_extra_checks(error_msg, search_in, feature_name, feature_dict)
def test_option_types(self, error_msg, option, val):
for tp, available in (
((str, list), (
- "implies", "headers", "flags", "group", "detect"
+ "implies", "headers", "flags", "group", "detect", "extra_checks"
)),
((str,), ("disable",)),
((int,), ("interest",)),
@@ -83,29 +84,25 @@ class _TestConfFeatures(FakeCCompilerOpt):
if not isinstance(val, tp):
error_tp = [t.__name__ for t in (*tp,)]
error_tp = ' or '.join(error_tp)
- raise AssertionError(error_msg + \
+ raise AssertionError(error_msg +
"expected '%s' type for option '%s' not '%s'" % (
error_tp, option, type(val).__name__
))
break
if not found_it:
- raise AssertionError(error_msg + \
- "invalid option name '%s'" % option
- )
+ raise AssertionError(error_msg + "invalid option name '%s'" % option)
def test_duplicates(self, error_msg, option, val):
if option not in (
- "implies", "headers", "flags", "group", "detect"
+ "implies", "headers", "flags", "group", "detect", "extra_checks"
) : return
if isinstance(val, str):
val = val.split()
if len(val) != len(set(val)):
- raise AssertionError(error_msg + \
- "duplicated values in option '%s'" % option
- )
+ raise AssertionError(error_msg + "duplicated values in option '%s'" % option)
def test_implies(self, error_msg, search_in, feature_name, feature_dict):
if feature_dict.get("disabled") is not None:
@@ -117,21 +114,15 @@ class _TestConfFeatures(FakeCCompilerOpt):
implies = implies.split()
if feature_name in implies:
- raise AssertionError(error_msg + \
- "feature implies itself"
- )
+ raise AssertionError(error_msg + "feature implies itself")
for impl in implies:
impl_dict = search_in.get(impl)
if impl_dict is not None:
if "disable" in impl_dict:
- raise AssertionError(error_msg + \
- "implies disabled feature '%s'" % impl
- )
+ raise AssertionError(error_msg + "implies disabled feature '%s'" % impl)
continue
- raise AssertionError(error_msg + \
- "implies non-exist feature '%s'" % impl
- )
+ raise AssertionError(error_msg + "implies non-exist feature '%s'" % impl)
def test_group(self, error_msg, search_in, feature_name, feature_dict):
if feature_dict.get("disabled") is not None:
@@ -146,10 +137,26 @@ class _TestConfFeatures(FakeCCompilerOpt):
impl_dict = search_in.get(f)
if not impl_dict or "disable" in impl_dict:
continue
- raise AssertionError(error_msg + \
- "in option '%s', '%s' already exists as a feature name" % (
- option, f
- ))
+ raise AssertionError(error_msg +
+ "in option 'group', '%s' already exists as a feature name" % f
+ )
+
+ def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict):
+ if feature_dict.get("disabled") is not None:
+ return
+ extra_checks = feature_dict.get("extra_checks", "")
+ if not extra_checks:
+ return
+ if isinstance(extra_checks, str):
+ extra_checks = extra_checks.split()
+
+ for f in extra_checks:
+ impl_dict = search_in.get(f)
+ if not impl_dict or "disable" in impl_dict:
+ continue
+ raise AssertionError(error_msg +
+ "in option 'extra_checks', extra test case '%s' already exists as a feature name" % f
+ )
class TestConfFeatures(unittest.TestCase):
def __init__(self, methodName="runTest"):
diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py
index 5f36c439f..9bb7251d8 100644
--- a/numpy/distutils/unixccompiler.py
+++ b/numpy/distutils/unixccompiler.py
@@ -26,7 +26,8 @@ def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts
self.compiler_so = ccomp
# ensure OPT environment variable is read
if 'OPT' in os.environ:
- from distutils.sysconfig import get_config_vars
+ # XXX who uses this?
+ from sysconfig import get_config_vars
opt = " ".join(os.environ['OPT'].split())
gcv_opt = " ".join(get_config_vars('OPT')[0].split())
ccomp_s = " ".join(self.compiler_so)
diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py
deleted file mode 100644
index 635c1b1b8..000000000
--- a/numpy/doc/basics.py
+++ /dev/null
@@ -1,341 +0,0 @@
-"""
-============
-Array basics
-============
-
-Array types and conversions between types
-=========================================
-
-NumPy supports a much greater variety of numerical types than Python does.
-This section shows which are available, and how to modify an array's data-type.
-
-The primitive types supported are tied closely to those in C:
-
-.. list-table::
- :header-rows: 1
-
- * - Numpy type
- - C type
- - Description
-
- * - `np.bool_`
- - ``bool``
- - Boolean (True or False) stored as a byte
-
- * - `np.byte`
- - ``signed char``
- - Platform-defined
-
- * - `np.ubyte`
- - ``unsigned char``
- - Platform-defined
-
- * - `np.short`
- - ``short``
- - Platform-defined
-
- * - `np.ushort`
- - ``unsigned short``
- - Platform-defined
-
- * - `np.intc`
- - ``int``
- - Platform-defined
-
- * - `np.uintc`
- - ``unsigned int``
- - Platform-defined
-
- * - `np.int_`
- - ``long``
- - Platform-defined
-
- * - `np.uint`
- - ``unsigned long``
- - Platform-defined
-
- * - `np.longlong`
- - ``long long``
- - Platform-defined
-
- * - `np.ulonglong`
- - ``unsigned long long``
- - Platform-defined
-
- * - `np.half` / `np.float16`
- -
- - Half precision float:
- sign bit, 5 bits exponent, 10 bits mantissa
-
- * - `np.single`
- - ``float``
- - Platform-defined single precision float:
- typically sign bit, 8 bits exponent, 23 bits mantissa
-
- * - `np.double`
- - ``double``
- - Platform-defined double precision float:
- typically sign bit, 11 bits exponent, 52 bits mantissa.
-
- * - `np.longdouble`
- - ``long double``
- - Platform-defined extended-precision float
-
- * - `np.csingle`
- - ``float complex``
- - Complex number, represented by two single-precision floats (real and imaginary components)
-
- * - `np.cdouble`
- - ``double complex``
- - Complex number, represented by two double-precision floats (real and imaginary components).
-
- * - `np.clongdouble`
- - ``long double complex``
- - Complex number, represented by two extended-precision floats (real and imaginary components).
-
-
-Since many of these have platform-dependent definitions, a set of fixed-size
-aliases are provided:
-
-.. list-table::
- :header-rows: 1
-
- * - Numpy type
- - C type
- - Description
-
- * - `np.int8`
- - ``int8_t``
- - Byte (-128 to 127)
-
- * - `np.int16`
- - ``int16_t``
- - Integer (-32768 to 32767)
-
- * - `np.int32`
- - ``int32_t``
- - Integer (-2147483648 to 2147483647)
-
- * - `np.int64`
- - ``int64_t``
- - Integer (-9223372036854775808 to 9223372036854775807)
-
- * - `np.uint8`
- - ``uint8_t``
- - Unsigned integer (0 to 255)
-
- * - `np.uint16`
- - ``uint16_t``
- - Unsigned integer (0 to 65535)
-
- * - `np.uint32`
- - ``uint32_t``
- - Unsigned integer (0 to 4294967295)
-
- * - `np.uint64`
- - ``uint64_t``
- - Unsigned integer (0 to 18446744073709551615)
-
- * - `np.intp`
- - ``intptr_t``
- - Integer used for indexing, typically the same as ``ssize_t``
-
- * - `np.uintp`
- - ``uintptr_t``
- - Integer large enough to hold a pointer
-
- * - `np.float32`
- - ``float``
- -
-
- * - `np.float64` / `np.float_`
- - ``double``
- - Note that this matches the precision of the builtin python `float`.
-
- * - `np.complex64`
- - ``float complex``
- - Complex number, represented by two 32-bit floats (real and imaginary components)
-
- * - `np.complex128` / `np.complex_`
- - ``double complex``
- - Note that this matches the precision of the builtin python `complex`.
-
-
-NumPy numerical types are instances of ``dtype`` (data-type) objects, each
-having unique characteristics. Once you have imported NumPy using
-
- ::
-
- >>> import numpy as np
-
-the dtypes are available as ``np.bool_``, ``np.float32``, etc.
-
-Advanced types, not listed in the table above, are explored in
-section :ref:`structured_arrays`.
-
-There are 5 basic numerical types representing booleans (bool), integers (int),
-unsigned integers (uint) floating point (float) and complex. Those with numbers
-in their name indicate the bitsize of the type (i.e. how many bits are needed
-to represent a single value in memory). Some types, such as ``int`` and
-``intp``, have differing bitsizes, dependent on the platforms (e.g. 32-bit
-vs. 64-bit machines). This should be taken into account when interfacing
-with low-level code (such as C or Fortran) where the raw memory is addressed.
-
-Data-types can be used as functions to convert python numbers to array scalars
-(see the array scalar section for an explanation), python sequences of numbers
-to arrays of that type, or as arguments to the dtype keyword that many numpy
-functions or methods accept. Some examples::
-
- >>> import numpy as np
- >>> x = np.float32(1.0)
- >>> x
- 1.0
- >>> y = np.int_([1,2,4])
- >>> y
- array([1, 2, 4])
- >>> z = np.arange(3, dtype=np.uint8)
- >>> z
- array([0, 1, 2], dtype=uint8)
-
-Array types can also be referred to by character codes, mostly to retain
-backward compatibility with older packages such as Numeric. Some
-documentation may still refer to these, for example::
-
- >>> np.array([1, 2, 3], dtype='f')
- array([ 1., 2., 3.], dtype=float32)
-
-We recommend using dtype objects instead.
-
-To convert the type of an array, use the .astype() method (preferred) or
-the type itself as a function. For example: ::
-
- >>> z.astype(float) #doctest: +NORMALIZE_WHITESPACE
- array([ 0., 1., 2.])
- >>> np.int8(z)
- array([0, 1, 2], dtype=int8)
-
-Note that, above, we use the *Python* float object as a dtype. NumPy knows
-that ``int`` refers to ``np.int_``, ``bool`` means ``np.bool_``,
-that ``float`` is ``np.float_`` and ``complex`` is ``np.complex_``.
-The other data-types do not have Python equivalents.
-
-To determine the type of an array, look at the dtype attribute::
-
- >>> z.dtype
- dtype('uint8')
-
-dtype objects also contain information about the type, such as its bit-width
-and its byte-order. The data type can also be used indirectly to query
-properties of the type, such as whether it is an integer::
-
- >>> d = np.dtype(int)
- >>> d
- dtype('int32')
-
- >>> np.issubdtype(d, np.integer)
- True
-
- >>> np.issubdtype(d, np.floating)
- False
-
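-The dtype also exposes the item size and byte order directly; a quick
-illustration::
-
- >>> np.dtype('>i4').itemsize
- 4
- >>> np.dtype('>i4').byteorder
- '>'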
-
-Array Scalars
-=============
-
-NumPy generally returns elements of arrays as array scalars (a scalar
-with an associated dtype). Array scalars differ from Python scalars, but
-for the most part they can be used interchangeably (the primary
-exception is for versions of Python older than v2.5, where integer array
-scalars could not act as indices for lists and tuples). There are some
-exceptions, such as when code requires very specific attributes of a scalar
-or when it checks specifically whether a value is a Python scalar. Generally,
-problems are easily fixed by explicitly converting array scalars
-to Python scalars, using the corresponding Python type function
-(e.g., ``int``, ``float``, ``complex``, ``str``).
-
-The primary advantage of using array scalars is that
-they preserve the array type (Python may not have a matching scalar type
-available, e.g. ``int16``). Therefore, the use of array scalars ensures
-identical behaviour between arrays and scalars, irrespective of whether the
-value is inside an array or not. NumPy scalars also have many of the same
-methods arrays do.
-
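-For example, an array scalar converts back to a Python scalar with the
-matching type function::
-
- >>> int(np.int16(7))
- 7
-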
-Overflow Errors
-===============
-
-The fixed size of NumPy numeric types may cause overflow errors when a value
-requires more memory than available in the data type. For example,
-`numpy.power` evaluates ``100 * 10 ** 8`` correctly for 64-bit integers,
-but gives 1874919424 (incorrect) for a 32-bit integer.
-
- >>> np.power(100, 8, dtype=np.int64)
- 10000000000000000
- >>> np.power(100, 8, dtype=np.int32)
- 1874919424
-
-The behaviour of NumPy and Python integer types differs significantly for
-integer overflows and may confuse users expecting NumPy integers to behave
-similar to Python's ``int``. Unlike NumPy, the size of Python's ``int`` is
-flexible. This means Python integers may expand to accommodate any integer and
-will not overflow.
-
-NumPy provides `numpy.iinfo` and `numpy.finfo` to verify the
-minimum or maximum values of NumPy integer and floating point values
-respectively ::
-
- >>> np.iinfo(int) # Bounds of the default integer on this system.
- iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
- >>> np.iinfo(np.int32) # Bounds of a 32-bit integer
- iinfo(min=-2147483648, max=2147483647, dtype=int32)
- >>> np.iinfo(np.int64) # Bounds of a 64-bit integer
- iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
-
-If 64-bit integers are still too small the result may be cast to a
-floating point number. Floating point numbers offer a larger, but inexact,
-range of possible values.
-
- >>> np.power(100, 100, dtype=np.int64) # Incorrect even with 64-bit int
- 0
- >>> np.power(100, 100, dtype=np.float64)
- 1e+200
-
-Extended Precision
-==================
-
-Python's floating-point numbers are usually 64-bit floating-point numbers,
-nearly equivalent to ``np.float64``. In some unusual situations it may be
-useful to use floating-point numbers with more precision. Whether this
-is possible in numpy depends on the hardware and on the development
-environment: specifically, x86 machines provide hardware floating-point
-with 80-bit precision, and while most C compilers provide this as their
-``long double`` type, MSVC (standard for Windows builds) makes
-``long double`` identical to ``double`` (64 bits). NumPy makes the
-compiler's ``long double`` available as ``np.longdouble`` (and
-``np.clongdouble`` for the complex numbers). You can find out what your
-numpy provides with ``np.finfo(np.longdouble)``.
-
-NumPy does not provide a dtype with more precision than C's
-``long double``\\; in particular, the 128-bit IEEE quad precision
-data type (FORTRAN's ``REAL*16``\\) is not available.
-
-For efficient memory alignment, ``np.longdouble`` is usually stored
-padded with zero bits, either to 96 or 128 bits. Which is more efficient
-depends on hardware and development environment; typically on 32-bit
-systems they are padded to 96 bits, while on 64-bit systems they are
-typically padded to 128 bits. ``np.longdouble`` is padded to the system
-default; ``np.float96`` and ``np.float128`` are provided for users who
-want specific padding. In spite of the names, ``np.float96`` and
-``np.float128`` provide only as much precision as ``np.longdouble``,
-that is, 80 bits on most x86 machines and 64 bits in standard
-Windows builds.
-
-Be warned that even if ``np.longdouble`` offers more precision than
-python ``float``, it is easy to lose that extra precision, since
-python often forces values to pass through ``float``. For example,
-the ``%`` formatting operator requires its arguments to be converted
-to standard python types, and it is therefore impossible to preserve
-extended precision even if many decimal places are requested. It can
-be useful to test your code with the value
-``1 + np.finfo(np.longdouble).eps``.
-
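-A minimal, platform-dependent check (on builds where ``np.longdouble``
-is wider than ``double``)::
-
- >>> x = np.longdouble(1) + np.finfo(np.longdouble).eps
- >>> x == 1
- False
- >>> float(x) == 1.0 # the extra precision is lost via Python float
- True
-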
-"""
diff --git a/numpy/doc/broadcasting.py b/numpy/doc/broadcasting.py
deleted file mode 100644
index 4ac1fd129..000000000
--- a/numpy/doc/broadcasting.py
+++ /dev/null
@@ -1,180 +0,0 @@
-"""
-========================
-Broadcasting over arrays
-========================
-
-.. note::
- See `this article
- <https://numpy.org/devdocs/user/theory.broadcasting.html>`_
- for illustrations of broadcasting concepts.
-
-
-The term broadcasting describes how numpy treats arrays with different
-shapes during arithmetic operations. Subject to certain constraints,
-the smaller array is "broadcast" across the larger array so that they
-have compatible shapes. Broadcasting provides a means of vectorizing
-array operations so that looping occurs in C instead of Python. It does
-this without making needless copies of data and usually leads to
-efficient algorithm implementations. There are, however, cases where
-broadcasting is a bad idea because it leads to inefficient use of memory
-that slows computation.
-
-NumPy operations are usually done on pairs of arrays on an
-element-by-element basis. In the simplest case, the two arrays must
-have exactly the same shape, as in the following example:
-
- >>> a = np.array([1.0, 2.0, 3.0])
- >>> b = np.array([2.0, 2.0, 2.0])
- >>> a * b
- array([ 2., 4., 6.])
-
-NumPy's broadcasting rule relaxes this constraint when the arrays'
-shapes meet certain conditions. The simplest broadcasting example occurs
-when an array and a scalar value are combined in an operation:
-
->>> a = np.array([1.0, 2.0, 3.0])
->>> b = 2.0
->>> a * b
-array([ 2., 4., 6.])
-
-The result is equivalent to the previous example where ``b`` was an array.
-We can think of the scalar ``b`` being *stretched* during the arithmetic
-operation into an array with the same shape as ``a``. The new elements in
-``b`` are simply copies of the original scalar. The stretching analogy is
-only conceptual. NumPy is smart enough to use the original scalar value
-without actually making copies so that broadcasting operations are as
-memory and computationally efficient as possible.
-
-The code in the second example is more efficient than that in the first
-because broadcasting moves less memory around during the multiplication
-(``b`` is a scalar rather than an array).
-
-General Broadcasting Rules
-==========================
-When operating on two arrays, NumPy compares their shapes element-wise.
-It starts with the trailing (i.e. rightmost) dimensions and works its
-way left. Two dimensions are compatible when
-
-1) they are equal, or
-2) one of them is 1
-
-If these conditions are not met, a
-``ValueError: operands could not be broadcast together`` exception is
-raised, indicating that the arrays have incompatible shapes. The size of
-the resulting array is the size that is not 1 along each axis of the inputs.
-
-Arrays do not need to have the same *number* of dimensions. For example,
-if you have a ``256x256x3`` array of RGB values, and you want to scale
-each color in the image by a different value, you can multiply the image
-by a one-dimensional array with 3 values. Lining up the sizes of the
-trailing axes of these arrays according to the broadcast rules, shows that
-they are compatible::
-
- Image (3d array): 256 x 256 x 3
- Scale (1d array): 3
- Result (3d array): 256 x 256 x 3
-
-When either of the dimensions compared is one, the other is
-used. In other words, dimensions with size 1 are stretched or "copied"
-to match the other.
-
-In the following example, both the ``A`` and ``B`` arrays have axes with
-length one that are expanded to a larger size during the broadcast
-operation::
-
- A (4d array): 8 x 1 x 6 x 1
- B (3d array): 7 x 1 x 5
- Result (4d array): 8 x 7 x 6 x 5
-
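-Such shapes can also be checked programmatically, for instance with
-:func:`numpy.broadcast`::
-
- >>> np.broadcast(np.empty((8, 1, 6, 1)), np.empty((7, 1, 5))).shape
- (8, 7, 6, 5)
-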
-Here are some more examples::
-
- A (2d array): 5 x 4
- B (1d array): 1
- Result (2d array): 5 x 4
-
- A (2d array): 5 x 4
- B (1d array): 4
- Result (2d array): 5 x 4
-
- A (3d array): 15 x 3 x 5
- B (3d array): 15 x 1 x 5
- Result (3d array): 15 x 3 x 5
-
- A (3d array): 15 x 3 x 5
- B (2d array): 3 x 5
- Result (3d array): 15 x 3 x 5
-
- A (3d array): 15 x 3 x 5
- B (2d array): 3 x 1
- Result (3d array): 15 x 3 x 5
-
-Here are examples of shapes that do not broadcast::
-
- A (1d array): 3
- B (1d array): 4 # trailing dimensions do not match
-
- A (2d array): 2 x 1
- B (3d array): 8 x 4 x 3 # second from last dimensions mismatched
-
-An example of broadcasting in practice::
-
- >>> x = np.arange(4)
- >>> xx = x.reshape(4,1)
- >>> y = np.ones(5)
- >>> z = np.ones((3,4))
-
- >>> x.shape
- (4,)
-
- >>> y.shape
- (5,)
-
- >>> x + y
- ValueError: operands could not be broadcast together with shapes (4,) (5,)
-
- >>> xx.shape
- (4, 1)
-
- >>> y.shape
- (5,)
-
- >>> (xx + y).shape
- (4, 5)
-
- >>> xx + y
- array([[ 1., 1., 1., 1., 1.],
- [ 2., 2., 2., 2., 2.],
- [ 3., 3., 3., 3., 3.],
- [ 4., 4., 4., 4., 4.]])
-
- >>> x.shape
- (4,)
-
- >>> z.shape
- (3, 4)
-
- >>> (x + z).shape
- (3, 4)
-
- >>> x + z
- array([[ 1., 2., 3., 4.],
- [ 1., 2., 3., 4.],
- [ 1., 2., 3., 4.]])
-
-Broadcasting provides a convenient way of taking the outer product (or
-any other outer operation) of two arrays. The following example shows an
-outer addition operation of two 1-d arrays::
-
- >>> a = np.array([0.0, 10.0, 20.0, 30.0])
- >>> b = np.array([1.0, 2.0, 3.0])
- >>> a[:, np.newaxis] + b
- array([[ 1., 2., 3.],
- [ 11., 12., 13.],
- [ 21., 22., 23.],
- [ 31., 32., 33.]])
-
-Here the ``newaxis`` index operator inserts a new axis into ``a``,
-making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array
-with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
-
-"""
diff --git a/numpy/doc/byteswapping.py b/numpy/doc/byteswapping.py
deleted file mode 100644
index fe9461977..000000000
--- a/numpy/doc/byteswapping.py
+++ /dev/null
@@ -1,155 +0,0 @@
-"""
-
-=============================
- Byteswapping and byte order
-=============================
-
-Introduction to byte ordering and ndarrays
-==========================================
-
-The ``ndarray`` is an object that provides a Python array interface to data
-in memory.
-
-It often happens that the memory that you want to view with an array is
-not of the same byte ordering as the computer on which you are running
-Python.
-
-For example, I might be working on a computer with a little-endian CPU -
-such as an Intel Pentium, but I have loaded some data from a file
-written by a computer that is big-endian. Let's say I have loaded 4
-bytes from a file written by a Sun (big-endian) computer. I know that
-these 4 bytes represent two 16-bit integers. On a big-endian machine, a
-two-byte integer is stored with the Most Significant Byte (MSB) first,
-and then the Least Significant Byte (LSB). Thus the bytes are, in memory order:
-
-#. MSB integer 1
-#. LSB integer 1
-#. MSB integer 2
-#. LSB integer 2
-
-Let's say the two integers were in fact 1 and 770. Because 770 = 256 *
-3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2.
-The bytes I have loaded from the file would have these contents:
-
->>> big_end_buffer = bytearray([0,1,3,2])
->>> big_end_buffer
-bytearray(b'\\x00\\x01\\x03\\x02')
-
-We might want to use an ``ndarray`` to access these integers. In that
-case, we can create an array around this memory, and tell numpy that
-there are two integers, and that they are 16 bit and big-endian:
-
->>> import numpy as np
->>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_buffer)
->>> big_end_arr[0]
-1
->>> big_end_arr[1]
-770
-
-Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian'
-(``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For
-example, if our data represented a single unsigned 4-byte little-endian
-integer, the dtype string would be ``<u4``.
-
-In fact, why don't we try that?
-
->>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_buffer)
->>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3
-True
-
-Returning to our ``big_end_arr`` - in this case our underlying data is
-big-endian (data endianness) and we've set the dtype to match (the dtype
-is also big-endian). However, sometimes you need to flip these around.
-
-.. warning::
-
- Scalars currently do not include byte order information, so extracting
- a scalar from an array will return an integer in native byte order.
- Hence:
-
- >>> big_end_arr[0].dtype.byteorder == little_end_u4[0].dtype.byteorder
- True
-
-Changing byte ordering
-======================
-
-As you can imagine from the introduction, there are two ways you can
-affect the relationship between the byte ordering of the array and the
-underlying memory it is looking at:
-
-* Change the byte-ordering information in the array dtype so that it
- interprets the underlying data as being in a different byte order.
- This is the role of ``arr.newbyteorder()``
-* Change the byte-ordering of the underlying data, leaving the dtype
- interpretation as it was. This is what ``arr.byteswap()`` does.
-
-The common situations in which you need to change byte ordering are:
-
-#. Your data and dtype endianness don't match, and you want to change
- the dtype so that it matches the data.
-#. Your data and dtype endianness don't match, and you want to swap the
- data so that they match the dtype
-#. Your data and dtype endianness match, but you want the data swapped
- and the dtype to reflect this
-
-Data and dtype endianness don't match, change dtype to match data
------------------------------------------------------------------
-
-We make something where they don't match:
-
->>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_buffer)
->>> wrong_end_dtype_arr[0]
-256
-
-The obvious fix for this situation is to change the dtype so it gives
-the correct endianness:
-
->>> fixed_end_dtype_arr = wrong_end_dtype_arr.newbyteorder()
->>> fixed_end_dtype_arr[0]
-1
-
-Note the array has not changed in memory:
-
->>> fixed_end_dtype_arr.tobytes() == big_end_buffer
-True
-
-Data and type endianness don't match, change data to match dtype
-----------------------------------------------------------------
-
-You might want to do this if you need the data in memory to be a certain
-ordering. For example you might be writing the memory out to a file
-that needs a certain byte ordering.
-
->>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap()
->>> fixed_end_mem_arr[0]
-1
-
-Now the array *has* changed in memory:
-
->>> fixed_end_mem_arr.tobytes() == big_end_buffer
-False
-
-Data and dtype endianness match, swap data and dtype
-----------------------------------------------------
-
-You may have a correctly specified array dtype, but you need the array
-to have the opposite byte order in memory, and you want the dtype to
-match so the array values make sense. In this case you just do both of
-the previous operations:
-
->>> swapped_end_arr = big_end_arr.byteswap().newbyteorder()
->>> swapped_end_arr[0]
-1
->>> swapped_end_arr.tobytes() == big_end_buffer
-False
-
-An easier way to cast the data to a specific dtype and byte ordering is
-the ndarray astype method:
-
->>> swapped_end_arr = big_end_arr.astype('<i2')
->>> swapped_end_arr[0]
-1
->>> swapped_end_arr.tobytes() == big_end_buffer
-False
-
-"""
diff --git a/numpy/doc/creation.py b/numpy/doc/creation.py
deleted file mode 100644
index 067f8bb33..000000000
--- a/numpy/doc/creation.py
+++ /dev/null
@@ -1,143 +0,0 @@
-"""
-==============
-Array Creation
-==============
-
-Introduction
-============
-
-There are 5 general mechanisms for creating arrays:
-
-1) Conversion from other Python structures (e.g., lists, tuples)
-2) Intrinsic numpy array creation functions (e.g., arange, ones, zeros,
- etc.)
-3) Reading arrays from disk, either from standard or custom formats
-4) Creating arrays from raw bytes through the use of strings or buffers
-5) Use of special library functions (e.g., random)
-
-This section will not cover means of replicating, joining, or otherwise
-expanding or mutating existing arrays. Nor will it cover creating object
-arrays or structured arrays. Both of those are covered in their own sections.
-
-Converting Python array_like Objects to NumPy Arrays
-====================================================
-
-In general, numerical data arranged in an array-like structure in Python can
-be converted to arrays through the use of the array() function. The most
-obvious examples are lists and tuples. See the documentation for array() for
-details for its use. Some objects may support the array-protocol and allow
-conversion to arrays this way. A simple way to find out whether an object
-can be converted to a numpy array using array() is to try it interactively
-and see if it works! (The Python Way).
-
-Examples: ::
-
- >>> x = np.array([2, 3, 1, 0])
- >>> x = np.array([[1, 2.0], [0, 0], (1+1j, 3.)]) # mix of tuples, lists, and types
- >>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
-
-Intrinsic NumPy Array Creation
-==============================
-
-NumPy has built-in functions for creating arrays from scratch:
-
-zeros(shape) will create an array filled with 0 values with the specified
-shape. The default dtype is float64. ::
-
- >>> np.zeros((2, 3))
- array([[ 0., 0., 0.],
-        [ 0., 0., 0.]])
-
-ones(shape) will create an array filled with 1 values. It is identical to
-zeros in all other respects.
-
-arange() will create arrays with regularly incrementing values. Check the
-docstring for complete information on the various ways it can be used. A few
-examples will be given here: ::
-
- >>> np.arange(10)
- array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
- >>> np.arange(2, 10, dtype=float)
- array([ 2., 3., 4., 5., 6., 7., 8., 9.])
- >>> np.arange(2, 3, 0.1)
- array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
-
-Note that there are some subtleties regarding the last usage that the user
-should be aware of that are described in the arange docstring.
-
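-For instance, a non-integer step can cause the endpoint to be included
-unexpectedly because of floating-point rounding (the exact result depends
-on the inputs)::
-
- >>> np.arange(1, 1.3, 0.1)
- array([1. , 1.1, 1.2, 1.3])
-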
-linspace() will create arrays with a specified number of elements, and
-spaced equally between the specified beginning and end values. For
-example: ::
-
- >>> np.linspace(1., 4., 6)
- array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
-
-The advantage of this creation function is that one can guarantee the
-number of elements and the starting and end point, which arange()
-generally will not do for arbitrary start, stop, and step values.
-
-indices() will create a set of arrays (stacked as a one-higher dimensioned
-array), one per dimension with each representing variation in that dimension.
-An example illustrates much better than a verbal description: ::
-
- >>> np.indices((3,3))
- array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
-
-This is particularly useful for evaluating functions of multiple dimensions on
-a regular grid.
-
-Reading Arrays From Disk
-========================
-
-This is presumably the most common case of large array creation. The details,
-of course, depend greatly on the format of data on disk and so this section
-can only give general pointers on how to handle various formats.
-
-Standard Binary Formats
------------------------
-
-Various fields have standard formats for array data. The following lists the
-ones with known Python libraries that read them and return numpy arrays (there
-may be others for which reading and converting to numpy arrays is possible, so
-check the last section as well)
-::
-
- HDF5: h5py
- FITS: Astropy
-
-Examples of formats that cannot be read directly but are easy to convert
-are the image formats supported by libraries like PIL (which can read and
-write many image formats such as jpg, png, etc.).
-
-Common ASCII Formats
-------------------------
-
-Comma Separated Value files (CSV) are widely used (and an export and import
-option for programs like Excel). There are a number of ways of reading these
-files in Python: the ``csv`` module in the standard library, and functions
-in pylab (part of matplotlib).
-
-More generic ascii files can be read using the io package in scipy.
-
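-For example, a small comma-separated block can be parsed with
-``np.genfromtxt``; a quick illustration::
-
- >>> from io import StringIO
- >>> np.genfromtxt(StringIO("1,2\\n3,4"), delimiter=",")
- array([[1., 2.],
-        [3., 4.]])
-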
-Custom Binary Formats
----------------------
-
-There are a variety of approaches one can use. If the file has a relatively
-simple format then one can write a simple I/O library and use the numpy
-fromfile() function and .tofile() method to read and write numpy arrays
-directly (mind your byteorder though!). If a good C or C++ library exists
-that reads the data, one can wrap that library with a variety of techniques,
-though that is certainly much more work and requires significantly more
-advanced knowledge to interface with C or C++.
-
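-A minimal round-trip sketch (the file name here is just illustrative)::
-
- >>> a = np.arange(4, dtype=np.int32)
- >>> a.tofile('data.bin')
- >>> np.fromfile('data.bin', dtype=np.int32)
- array([0, 1, 2, 3], dtype=int32)
-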
-Use of Special Libraries
-------------------------
-
-There are libraries that can be used to generate arrays for special purposes
-and it isn't possible to enumerate all of them. The most common uses are use
-of the many array generation functions in random that can generate arrays of
-random values, and some utility functions to generate special matrices (e.g.
-diagonal).
-
-"""
diff --git a/numpy/doc/dispatch.py b/numpy/doc/dispatch.py
deleted file mode 100644
index af70ed836..000000000
--- a/numpy/doc/dispatch.py
+++ /dev/null
@@ -1,271 +0,0 @@
-""".. _dispatch_mechanism:
-
-Numpy's dispatch mechanism, introduced in numpy version v1.16, is the
-recommended approach for writing custom N-dimensional array containers that are
-compatible with the numpy API and provide custom implementations of numpy
-functionality. Applications include `dask <http://dask.pydata.org>`_ arrays, an
-N-dimensional array distributed across multiple nodes, and `cupy
-<https://docs-cupy.chainer.org/en/stable/>`_ arrays, an N-dimensional array on
-a GPU.
-
-To get a feel for writing custom array containers, we'll begin with a simple
-example that has rather narrow utility but illustrates the concepts involved.
-
->>> import numpy as np
->>> class DiagonalArray:
-... def __init__(self, N, value):
-... self._N = N
-... self._i = value
-... def __repr__(self):
-... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
-... def __array__(self):
-... return self._i * np.eye(self._N)
-...
-
-Our custom array can be instantiated like:
-
->>> arr = DiagonalArray(5, 1)
->>> arr
-DiagonalArray(N=5, value=1)
-
-We can convert to a numpy array using :func:`numpy.array` or
-:func:`numpy.asarray`, which will call its ``__array__`` method to obtain a
-standard ``numpy.ndarray``.
-
->>> np.asarray(arr)
-array([[1., 0., 0., 0., 0.],
- [0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 0.],
- [0., 0., 0., 1., 0.],
- [0., 0., 0., 0., 1.]])
-
-If we operate on ``arr`` with a numpy function, numpy will again use the
-``__array__`` interface to convert it to an array and then apply the function
-in the usual way.
-
->>> np.multiply(arr, 2)
-array([[2., 0., 0., 0., 0.],
- [0., 2., 0., 0., 0.],
- [0., 0., 2., 0., 0.],
- [0., 0., 0., 2., 0.],
- [0., 0., 0., 0., 2.]])
-
-
-Notice that the return type is a standard ``numpy.ndarray``.
-
->>> type(np.multiply(arr, 2))
-numpy.ndarray
-
-How can we pass our custom array type through this function? Numpy allows a
-class to indicate that it would like to handle computations in a custom-defined
-way through the interfaces ``__array_ufunc__`` and ``__array_function__``. Let's
-take one at a time, starting with ``__array_ufunc__``. This method covers
-:ref:`ufuncs`, a class of functions that includes, for example,
-:func:`numpy.multiply` and :func:`numpy.sin`.
-
-The ``__array_ufunc__`` method receives:
-
-- ``ufunc``, a function like ``numpy.multiply``
-- ``method``, a string, differentiating between ``numpy.multiply(...)`` and
- variants like ``numpy.multiply.outer``, ``numpy.multiply.accumulate``, and so
- on. For the common case, ``numpy.multiply(...)``, ``method == '__call__'``.
-- ``inputs``, which could be a mixture of different types
-- ``kwargs``, keyword arguments passed to the function
-
-For this example we will only handle the method ``__call__``.
-
->>> from numbers import Number
->>> class DiagonalArray:
-... def __init__(self, N, value):
-... self._N = N
-... self._i = value
-... def __repr__(self):
-... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
-... def __array__(self):
-... return self._i * np.eye(self._N)
-... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
-... if method == '__call__':
-... N = None
-... scalars = []
-... for input in inputs:
-... if isinstance(input, Number):
-... scalars.append(input)
-... elif isinstance(input, self.__class__):
-... scalars.append(input._i)
-... if N is not None:
-... if N != self._N:
-... raise TypeError("inconsistent sizes")
-... else:
-... N = self._N
-... else:
-... return NotImplemented
-... return self.__class__(N, ufunc(*scalars, **kwargs))
-... else:
-... return NotImplemented
-...
-
-Now our custom array type passes through numpy functions.
-
->>> arr = DiagonalArray(5, 1)
->>> np.multiply(arr, 3)
-DiagonalArray(N=5, value=3)
->>> np.add(arr, 3)
-DiagonalArray(N=5, value=4)
->>> np.sin(arr)
-DiagonalArray(N=5, value=0.8414709848078965)
-
-At this point ``arr + 3`` does not work.
-
->>> arr + 3
-TypeError: unsupported operand type(s) for +: 'DiagonalArray' and 'int'
-
-To support it, we need to define the Python interfaces ``__add__``, ``__lt__``,
-and so on to dispatch to the corresponding ufunc. We can achieve this
-conveniently by inheriting from the mixin
-:class:`~numpy.lib.mixins.NDArrayOperatorsMixin`.
-
->>> import numpy.lib.mixins
->>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
-... def __init__(self, N, value):
-... self._N = N
-... self._i = value
-... def __repr__(self):
-... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
-... def __array__(self):
-... return self._i * np.eye(self._N)
-... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
-... if method == '__call__':
-... N = None
-... scalars = []
-... for input in inputs:
-... if isinstance(input, Number):
-... scalars.append(input)
-... elif isinstance(input, self.__class__):
-... scalars.append(input._i)
-... if N is not None:
-... if N != self._N:
-... raise TypeError("inconsistent sizes")
-... else:
-... N = self._N
-... else:
-... return NotImplemented
-... return self.__class__(N, ufunc(*scalars, **kwargs))
-... else:
-... return NotImplemented
-...
-
->>> arr = DiagonalArray(5, 1)
->>> arr + 3
-DiagonalArray(N=5, value=4)
->>> arr > 0
-DiagonalArray(N=5, value=True)
-
-Now let's tackle ``__array_function__``. We'll create a dict that maps numpy
-functions to our custom variants.
-
->>> HANDLED_FUNCTIONS = {}
->>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
-... def __init__(self, N, value):
-... self._N = N
-... self._i = value
-... def __repr__(self):
-... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
-... def __array__(self):
-... return self._i * np.eye(self._N)
-... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
-... if method == '__call__':
-... N = None
-... scalars = []
-... for input in inputs:
-... # In this case we accept only scalar numbers or DiagonalArrays.
-... if isinstance(input, Number):
-... scalars.append(input)
-... elif isinstance(input, self.__class__):
-... scalars.append(input._i)
-... if N is not None:
-... if N != self._N:
-... raise TypeError("inconsistent sizes")
-... else:
-... N = self._N
-... else:
-... return NotImplemented
-... return self.__class__(N, ufunc(*scalars, **kwargs))
-... else:
-... return NotImplemented
-... def __array_function__(self, func, types, args, kwargs):
-... if func not in HANDLED_FUNCTIONS:
-... return NotImplemented
-... # Note: this allows subclasses that don't override
-... # __array_function__ to handle DiagonalArray objects.
-... if not all(issubclass(t, self.__class__) for t in types):
-... return NotImplemented
-... return HANDLED_FUNCTIONS[func](*args, **kwargs)
-...
-
-A convenient pattern is to define a decorator ``implements`` that can be used
-to add functions to ``HANDLED_FUNCTIONS``.
-
->>> def implements(np_function):
-... "Register an __array_function__ implementation for DiagonalArray objects."
-... def decorator(func):
-... HANDLED_FUNCTIONS[np_function] = func
-... return func
-... return decorator
-...
-
-Now we write implementations of numpy functions for ``DiagonalArray``.
-For completeness, to support the usage ``arr.sum()`` add a method ``sum`` that
-calls ``numpy.sum(self)``, and the same for ``mean``.
-
->>> @implements(np.sum)
-... def sum(arr):
-... "Implementation of np.sum for DiagonalArray objects"
-... return arr._i * arr._N
-...
->>> @implements(np.mean)
-... def mean(arr):
-... "Implementation of np.mean for DiagonalArray objects"
-... return arr._i / arr._N
-...
->>> arr = DiagonalArray(5, 1)
->>> np.sum(arr)
-5
->>> np.mean(arr)
-0.2
-
-If the user tries to use any numpy functions not included in
-``HANDLED_FUNCTIONS``, a ``TypeError`` will be raised by numpy, indicating that
-this operation is not supported. For example, concatenating two
-``DiagonalArrays`` does not produce another diagonal array, so it is not
-supported.
-
->>> np.concatenate([arr, arr])
-TypeError: no implementation found for 'numpy.concatenate' on types that implement __array_function__: [<class '__main__.DiagonalArray'>]
-
-Additionally, our implementations of ``sum`` and ``mean`` do not accept the
-optional arguments that numpy's implementation does.
-
->>> np.sum(arr, axis=0)
-TypeError: sum() got an unexpected keyword argument 'axis'
-
-The user always has the option of converting to a normal ``numpy.ndarray`` with
-:func:`numpy.asarray` and using standard numpy from there.
-
->>> np.concatenate([np.asarray(arr), np.asarray(arr)])
-array([[1., 0., 0., 0., 0.],
- [0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 0.],
- [0., 0., 0., 1., 0.],
- [0., 0., 0., 0., 1.],
- [1., 0., 0., 0., 0.],
- [0., 1., 0., 0., 0.],
- [0., 0., 1., 0., 0.],
- [0., 0., 0., 1., 0.],
- [0., 0., 0., 0., 1.]])
-
-Refer to the `dask source code <https://github.com/dask/dask>`_ and
-`cupy source code <https://github.com/cupy/cupy>`_ for more fully-worked
-examples of custom array containers.
-
-See also :doc:`NEP 18<neps:nep-0018-array-function-protocol>`.
-"""
diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py
deleted file mode 100644
index 31130559b..000000000
--- a/numpy/doc/glossary.py
+++ /dev/null
@@ -1,475 +0,0 @@
-"""
-========
-Glossary
-========
-
-.. glossary::
-
- along an axis
- Axes are defined for arrays with more than one dimension. A
- 2-dimensional array has two corresponding axes: the first running
- vertically downwards across rows (axis 0), and the second running
- horizontally across columns (axis 1).
-
- Many operations can take place along one of these axes. For example,
- we can sum each row of an array, in which case we operate along
- columns, or axis 1::
-
- >>> x = np.arange(12).reshape((3,4))
-
- >>> x
- array([[ 0, 1, 2, 3],
- [ 4, 5, 6, 7],
- [ 8, 9, 10, 11]])
-
- >>> x.sum(axis=1)
- array([ 6, 22, 38])
-
- array
- A homogeneous container of numerical elements. Each element in the
- array occupies a fixed amount of memory (hence homogeneous), and
- can be a numerical element of a single type (such as float, int
- or complex) or a combination (such as ``(float, int, float)``). Each
- array has an associated data-type (or ``dtype``), which describes
- the numerical type of its elements::
-
- >>> x = np.array([1, 2, 3], float)
-
- >>> x
- array([ 1., 2., 3.])
-
- >>> x.dtype # floating point number, 64 bits of memory per element
- dtype('float64')
-
-
- # More complicated data type: each array element is a combination of
- # an integer and a floating point number
- >>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
- array([(1, 2.0), (3, 4.0)],
- dtype=[('x', '<i4'), ('y', '<f8')])
-
- Fast element-wise operations, called :term:`ufuncs <ufunc>`, operate on arrays.
-
- array_like
- Any sequence that can be interpreted as an ndarray. This includes
- nested lists, tuples, scalars and existing arrays.
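-
- For example, a nested list and a tuple are both array_like::
-
- >>> np.array([[1, 2], (3, 4)])
- array([[1, 2],
-        [3, 4]])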
-
- attribute
- A property of an object that can be accessed using ``obj.attribute``,
- e.g., ``shape`` is an attribute of an array::
-
- >>> x = np.array([1, 2, 3])
- >>> x.shape
- (3,)
-
- big-endian
- When storing a multi-byte value in memory as a sequence of bytes, the
- sequence addresses/sends/stores the most significant byte first (lowest
- address) and the least significant byte last (highest address). Common in
- micro-processors and used for transmission of data over network protocols.
-
- BLAS
- `Basic Linear Algebra Subprograms <https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms>`_
-
- broadcast
- NumPy can do operations on arrays whose shapes are mismatched::
-
- >>> x = np.array([1, 2])
- >>> y = np.array([[3], [4]])
-
- >>> x
- array([1, 2])
-
- >>> y
- array([[3],
- [4]])
-
- >>> x + y
- array([[4, 5],
- [5, 6]])
-
- See `numpy.doc.broadcasting` for more information.
-
- C order
- See `row-major`
-
- column-major
- A way to represent items in a N-dimensional array in the 1-dimensional
- computer memory. In column-major order, the leftmost index "varies the
- fastest": for example the array::
-
- [[1, 2, 3],
- [4, 5, 6]]
-
- is represented in the column-major order as::
-
- [1, 4, 2, 5, 3, 6]
-
- Column-major order is also known as the Fortran order, as the Fortran
- programming language uses it.
-
- decorator
- An operator that transforms a function. For example, a ``log``
- decorator may be defined to print debugging information upon
- function execution::
-
- >>> def log(f):
- ... def new_logging_func(*args, **kwargs):
- ... print("Logging call with parameters:", args, kwargs)
- ... return f(*args, **kwargs)
- ...
- ... return new_logging_func
-
- Now, when we define a function, we can "decorate" it using ``log``::
-
- >>> @log
- ... def add(a, b):
- ... return a + b
-
- Calling ``add`` then yields:
-
- >>> add(1, 2)
- Logging call with parameters: (1, 2) {}
- 3
-
- dictionary
- Resembling a language dictionary, which provides a mapping between
- words and descriptions thereof, a Python dictionary is a mapping
- between two objects::
-
- >>> x = {1: 'one', 'two': [1, 2]}
-
- Here, `x` is a dictionary mapping keys to values, in this case
- the integer 1 to the string "one", and the string "two" to
- the list ``[1, 2]``. The values may be accessed using their
- corresponding keys::
-
- >>> x[1]
- 'one'
-
- >>> x['two']
- [1, 2]
-
- Note that dictionaries are indexed by key rather than by position (and,
- since Python 3.7, preserve insertion order). Also, most mutable (see
- *immutable* below) objects, such as lists, may not be used as keys.
-
- For more information on dictionaries, read the
- `Python tutorial <https://docs.python.org/tutorial/>`_.
-
- field
- In a :term:`structured data type`, each sub-type is called a `field`.
- The `field` has a name (a string), a type (any valid dtype), and
- an optional `title`. See :ref:`arrays.dtypes`
-
- Fortran order
- See `column-major`
-
- flattened
- Collapsed to a one-dimensional array. See `numpy.ndarray.flatten`
- for details.
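-
- For example::
-
- >>> np.array([[1, 2], [3, 4]]).flatten()
- array([1, 2, 3, 4])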
-
- homogeneous
- Describes a block of memory comprised of blocks, each block comprised
- of items of the same size, with all blocks interpreted in exactly the
- same way. In the simplest case each block contains a single item, for
- instance an int32 or float64.
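-
- For example, every element of the float64 array ``np.zeros(3)`` occupies
- the same 8 bytes and is interpreted the same way::
-
- >>> np.zeros(3).dtype.itemsize
- 8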
-
- immutable
- An object that cannot be modified after it is created is called
- immutable. Two common examples are strings and tuples.
-
- instance
- A class definition gives the blueprint for constructing an object::
-
- >>> class House:
- ... wall_colour = 'white'
-
- Yet, we have to *build* a house before it exists::
-
- >>> h = House() # build a house
-
- Now, ``h`` is called a ``House`` instance. An instance is therefore
- a specific realisation of a class.
-
- iterable
- A sequence that allows "walking" (iterating) over items, typically
- using a loop such as::
-
- >>> x = [1, 2, 3]
- >>> [item**2 for item in x]
- [1, 4, 9]
-
- It is often used in combination with ``enumerate``::
-
- >>> keys = ['a','b','c']
- >>> for n, k in enumerate(keys):
- ... print("Key %d: %s" % (n, k))
- ...
- Key 0: a
- Key 1: b
- Key 2: c
-
- itemsize
- The size of the dtype element in bytes.
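-
- For example::
-
- >>> np.dtype(np.int16).itemsize
- 2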
-
- list
- A Python container that can hold any number of objects or items.
- The items do not have to be of the same type, and can even be
- lists themselves::
-
- >>> x = [2, 2.0, "two", [2, 2.0]]
-
- The list `x` contains 4 items, each of which can be accessed individually::
-
- >>> x[2] # the string 'two'
- 'two'
-
- >>> x[3] # a list, containing an integer 2 and a float 2.0
- [2, 2.0]
-
- It is also possible to select more than one item at a time,
- using *slicing*::
-
- >>> x[0:2] # or, equivalently, x[:2]
- [2, 2.0]
-
- In code, arrays are often conveniently expressed as nested lists::
-
-
- >>> np.array([[1, 2], [3, 4]])
- array([[1, 2],
- [3, 4]])
-
- For more information, read the section on lists in the `Python
- tutorial <https://docs.python.org/tutorial/>`_. For a mapping
- type (key-value), see *dictionary*.
-
- little-endian
- When storing a multi-byte value in memory as a sequence of bytes, the
- sequence addresses/sends/stores the least significant byte first (lowest
- address) and the most significant byte last (highest address). Common in
- x86 processors.
-
- mask
- A boolean array, used to select only certain elements for an operation::
-
- >>> x = np.arange(5)
- >>> x
- array([0, 1, 2, 3, 4])
-
- >>> mask = (x > 2)
- >>> mask
- array([False, False, False, True, True])
-
- >>> x[mask] = -1
- >>> x
- array([ 0, 1, 2, -1, -1])
-
- masked array
-     Array that suppresses values indicated by a mask::
-
- >>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True])
- >>> x
-       masked_array(data=[--, 2.0, --],
-                    mask=[ True, False,  True],
-              fill_value=1e+20)
-
- >>> x + [1, 2, 3]
-       masked_array(data=[--, 4.0, --],
-                    mask=[ True, False,  True],
-              fill_value=1e+20)
-
- Masked arrays are often used when operating on arrays containing
- missing or invalid entries.
-
- matrix
- A 2-dimensional ndarray that preserves its two-dimensional nature
- throughout operations. It has certain special operations, such as ``*``
- (matrix multiplication) and ``**`` (matrix power), defined::
-
- >>> x = np.mat([[1, 2], [3, 4]])
- >>> x
- matrix([[1, 2],
- [3, 4]])
-
- >>> x**2
- matrix([[ 7, 10],
- [15, 22]])
-
- method
- A function associated with an object. For example, each ndarray has a
- method called ``repeat``::
-
- >>> x = np.array([1, 2, 3])
- >>> x.repeat(2)
- array([1, 1, 2, 2, 3, 3])
-
- ndarray
- See *array*.
-
- record array
- An :term:`ndarray` with :term:`structured data type` which has been
- subclassed as ``np.recarray`` and whose dtype is of type ``np.record``,
-     making the fields of its data type accessible by attribute.
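-
-     For example (a small sketch; the field names are arbitrary)::
-
-       >>> rec = np.rec.array([(1, 2.0)], dtype=[('a', 'i4'), ('b', 'f4')])
-       >>> rec.a
-       array([1], dtype=int32)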
-
- reference
- If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore,
- ``a`` and ``b`` are different names for the same Python object.
-
- row-major
-     A way to represent items in an N-dimensional array in the 1-dimensional
- computer memory. In row-major order, the rightmost index "varies
- the fastest": for example the array::
-
- [[1, 2, 3],
- [4, 5, 6]]
-
- is represented in the row-major order as::
-
- [1, 2, 3, 4, 5, 6]
-
- Row-major order is also known as the C order, as the C programming
- language uses it. New NumPy arrays are by default in row-major order.
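-
-     For example, ``ravel`` returns the elements in row-major order by
-     default::
-
-       >>> np.array([[1, 2, 3], [4, 5, 6]]).ravel()
-       array([1, 2, 3, 4, 5, 6])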
-
- self
- Often seen in method signatures, ``self`` refers to the instance
- of the associated class. For example:
-
- >>> class Paintbrush:
- ... color = 'blue'
- ...
- ... def paint(self):
- ... print("Painting the city %s!" % self.color)
- ...
- >>> p = Paintbrush()
- >>> p.color = 'red'
- >>> p.paint() # self refers to 'p'
- Painting the city red!
-
- slice
- Used to select only certain elements from a sequence:
-
-       >>> x = list(range(5))
-       >>> x
-       [0, 1, 2, 3, 4]
-
- >>> x[1:3] # slice from 1 to 3 (excluding 3 itself)
- [1, 2]
-
-       >>> x[1:5:2] # slice from 1 to 5, taking every second element
- [1, 3]
-
- >>> x[::-1] # slice a sequence in reverse
- [4, 3, 2, 1, 0]
-
-     Arrays may have more than one dimension, each of which can be sliced
-     individually:
-
- >>> x = np.array([[1, 2], [3, 4]])
- >>> x
- array([[1, 2],
- [3, 4]])
-
- >>> x[:, 1]
- array([2, 4])
-
- structure
- See :term:`structured data type`
-
- structured data type
-     A data type composed of other datatypes.
-
- subarray data type
-     A :term:`structured data type` may contain an :term:`ndarray` with its
-     own dtype and shape:
-
- >>> dt = np.dtype([('a', np.int32), ('b', np.float32, (3,))])
- >>> np.zeros(3, dtype=dt)
- array([(0, [0., 0., 0.]), (0, [0., 0., 0.]), (0, [0., 0., 0.])],
- dtype=[('a', '<i4'), ('b', '<f4', (3,))])
-
- title
- In addition to field names, structured array fields may have an
- associated :ref:`title <titles>` which is an alias to the name and is
- commonly used for plotting.
-
- tuple
-     A sequence that may contain any number of values, of any
-     type. A tuple is immutable, i.e., once constructed it cannot be
-     changed. Similar to a list, it can be indexed and sliced::
-
- >>> x = (1, 'one', [1, 2])
- >>> x
- (1, 'one', [1, 2])
-
- >>> x[0]
- 1
-
- >>> x[:2]
- (1, 'one')
-
- A useful concept is "tuple unpacking", which allows variables to
- be assigned to the contents of a tuple::
-
- >>> x, y = (1, 2)
- >>> x, y = 1, 2
-
- This is often used when a function returns multiple values:
-
- >>> def return_many():
- ... return 1, 'alpha', None
-
- >>> a, b, c = return_many()
- >>> a, b, c
- (1, 'alpha', None)
-
- >>> a
- 1
- >>> b
- 'alpha'
-
- ufunc
- Universal function. A fast element-wise, :term:`vectorized
- <vectorization>` array operation. Examples include ``add``, ``sin`` and
- ``logical_or``.
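-
-     For example::
-
-       >>> np.add([1, 2, 3], 10)
-       array([11, 12, 13])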
-
- vectorization
- Optimizing a looping block by specialized code. In a traditional sense,
- vectorization performs the same operation on multiple elements with
- fixed strides between them via specialized hardware. Compilers know how
- to take advantage of well-constructed loops to implement such
- optimizations. NumPy uses :ref:`vectorization <whatis-vectorization>`
- to mean any optimization via specialized code performing the same
- operations on multiple elements, typically achieving speedups by
- avoiding some of the overhead in looking up and converting the elements.
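-
-     For example, the loop below and the vectorized expression compute the
-     same result, but the latter runs in compiled code::
-
-       >>> x = np.arange(5)
-       >>> [item * 2 for item in x]
-       [0, 2, 4, 6, 8]
-       >>> x * 2
-       array([0, 2, 4, 6, 8])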
-
- view
- An array that does not own its data, but refers to another array's
- data instead. For example, we may create a view that only shows
- every second element of another array::
-
- >>> x = np.arange(5)
- >>> x
- array([0, 1, 2, 3, 4])
-
- >>> y = x[::2]
- >>> y
- array([0, 2, 4])
-
- >>> x[0] = 3 # changing x changes y as well, since y is a view on x
- >>> y
- array([3, 2, 4])
-
- wrapper
- Python is a high-level (highly abstracted, or English-like) language.
- This abstraction comes at a price in execution speed, and sometimes
- it becomes necessary to use lower level languages to do fast
-     computations. A wrapper is code that provides a bridge between
-     high- and low-level languages, allowing, e.g., Python to execute
- code written in C or Fortran.
-
- Examples include ctypes, SWIG and Cython (which wraps C and C++)
- and f2py (which wraps Fortran).
-
-"""
diff --git a/numpy/doc/indexing.py b/numpy/doc/indexing.py
deleted file mode 100644
index c7dda2790..000000000
--- a/numpy/doc/indexing.py
+++ /dev/null
@@ -1,456 +0,0 @@
-"""
-==============
-Array indexing
-==============
-
-Array indexing refers to any use of the square brackets ([]) to index
-array values. There are many options to indexing, which give numpy
-indexing great power, but with power comes some complexity and the
-potential for confusion. This section is just an overview of the
-various options and issues related to indexing. Aside from single
-element indexing, the details on most of these options are to be
-found in related sections.
-
-Assignment vs referencing
-=========================
-
-Most of the following examples show the use of indexing when
-referencing data in an array. The examples work just as well
-when assigning to an array. See the section at the end for
-specific examples and explanations on how assignments work.
-
-Single element indexing
-=======================
-
-Single element indexing for a 1-D array is what one expects. It works
-exactly like indexing other standard Python sequences. It is 0-based,
-and accepts negative indices for indexing from the end of the array. ::
-
- >>> x = np.arange(10)
- >>> x[2]
- 2
- >>> x[-2]
- 8
-
-Unlike lists and tuples, numpy arrays support multidimensional indexing
-for multidimensional arrays. That means that it is not necessary to
-separate each dimension's index into its own set of square brackets. ::
-
- >>> x.shape = (2,5) # now x is 2-dimensional
- >>> x[1,3]
- 8
- >>> x[1,-1]
- 9
-
-Note that if one indexes a multidimensional array with fewer indices
-than dimensions, one gets a subdimensional array. For example: ::
-
- >>> x[0]
- array([0, 1, 2, 3, 4])
-
-That is, each index specified selects the array corresponding to the
-rest of the dimensions selected. In the above example, choosing 0
-means that the remaining dimension of length 5 is being left unspecified,
-and that what is returned is an array of that dimensionality and size.
-It must be noted that the returned array is not a copy of the original,
-but points to the same values in memory as does the original array.
-In this case, the 1-D array at the first position (0) is returned.
-So using a single index on the returned array results in a single
-element being returned. That is: ::
-
- >>> x[0][2]
- 2
-
-So note that ``x[0, 2]`` and ``x[0][2]`` give the same result, though the
-second case is less efficient because a new temporary array is created
-after the first index, and that temporary is then indexed by 2.
-
-Note to those used to IDL or Fortran memory order as it relates to
-indexing. NumPy uses C-order indexing. That means that the last
-index usually represents the most rapidly changing memory location,
-unlike Fortran or IDL, where the first index represents the most
-rapidly changing location in memory. This difference represents a
-great potential for confusion.
-
-Other indexing options
-======================
-
-It is possible to slice and stride arrays to extract arrays of the
-same number of dimensions, but of different sizes than the original.
-The slicing and striding works exactly the same way it does for lists
-and tuples except that they can be applied to multiple dimensions as
-well. A few examples illustrate this best: ::
-
- >>> x = np.arange(10)
- >>> x[2:5]
- array([2, 3, 4])
- >>> x[:-7]
- array([0, 1, 2])
- >>> x[1:7:2]
- array([1, 3, 5])
- >>> y = np.arange(35).reshape(5,7)
- >>> y[1:5:2,::3]
- array([[ 7, 10, 13],
- [21, 24, 27]])
-
-Note that slices of arrays do not copy the internal array data but
-only produce new views of the original data. This is different from
-list or tuple slicing and an explicit ``copy()`` is recommended if
-the original data is not required anymore.
-
-It is possible to index arrays with other arrays for the purposes of
-selecting lists of values out of arrays into new arrays. There are
-two different ways of accomplishing this. One uses one or more arrays
-of index values. The other involves giving a boolean array of the proper
-shape to indicate the values to be selected. Index arrays are a very
-powerful tool that allows one to avoid looping over individual elements in
-arrays and thus greatly improves performance.
-
-It is possible to use special features to effectively increase the
-number of dimensions in an array through indexing so the resulting
-array acquires the shape needed for use in an expression or with a
-specific function.
-
-Index arrays
-============
-
-NumPy arrays may be indexed with other arrays (or any other sequence-
-like object that can be converted to an array, such as lists, with the
-exception of tuples; see the end of this document for why this is). The
-use of index arrays ranges from simple, straightforward cases to
-complex, hard-to-understand cases. For all cases of index arrays, what
-is returned is a copy of the original data, not a view as one gets for
-slices.
-
-Index arrays must be of integer type. Each value in the index array indicates
-which value in the indexed array to use in place of the index. To illustrate: ::
-
- >>> x = np.arange(10,1,-1)
- >>> x
- array([10, 9, 8, 7, 6, 5, 4, 3, 2])
- >>> x[np.array([3, 3, 1, 8])]
- array([7, 7, 9, 2])
-
-
-The index array consisting of the values 3, 3, 1 and 8 produces an
-array of length 4 (the same length as the index array), where each index
-is replaced by the corresponding value from the array being indexed.
-
-Negative values are permitted and work as they do with single indices
-or slices: ::
-
- >>> x[np.array([3,3,-3,8])]
- array([7, 7, 4, 2])
-
-It is an error to have index values out of bounds: ::
-
- >>> x[np.array([3, 3, 20, 8])]
-  IndexError: index 20 is out of bounds for axis 0 with size 9
-
-Generally speaking, what is returned when index arrays are used is
-an array with the same shape as the index array, but with the type
-and values of the array being indexed. As an example, we can use a
-multidimensional index array instead: ::
-
- >>> x[np.array([[1,1],[2,3]])]
- array([[9, 9],
- [8, 7]])
-
-Indexing Multi-dimensional arrays
-=================================
-
-Things become more complex when multidimensional arrays are indexed,
-particularly with multidimensional index arrays. These tend to be
-more unusual uses, but they are permitted, and they are useful for some
-problems. We'll start with the simplest multidimensional case (using
-the array y from the previous examples): ::
-
- >>> y[np.array([0,2,4]), np.array([0,1,2])]
- array([ 0, 15, 30])
-
-In this case, if the index arrays have a matching shape, and there is
-an index array for each dimension of the array being indexed, the
-resultant array has the same shape as the index arrays, and the values
-correspond to the index set for each position in the index arrays. In
-this example, the first index value is 0 for both index arrays, and
-thus the first value of the resultant array is y[0,0]. The next value
-is y[2,1], and the last is y[4,2].
-
-If the index arrays do not have the same shape, there is an attempt to
-broadcast them to the same shape. If they cannot be broadcast to the
-same shape, an exception is raised: ::
-
- >>> y[np.array([0,2,4]), np.array([0,1])]
-  IndexError: shape mismatch: indexing arrays could not be broadcast
-  together with shapes (3,) (2,)
-
-The broadcasting mechanism permits index arrays to be combined with
-scalars for other indices. The effect is that the scalar value is used
-for all the corresponding values of the index arrays: ::
-
- >>> y[np.array([0,2,4]), 1]
- array([ 1, 15, 29])
-
-Jumping to the next level of complexity, it is possible to only
-partially index an array with index arrays. It takes a bit of thought
-to understand what happens in such cases. For example if we just use
-one index array with y: ::
-
- >>> y[np.array([0,2,4])]
- array([[ 0, 1, 2, 3, 4, 5, 6],
- [14, 15, 16, 17, 18, 19, 20],
- [28, 29, 30, 31, 32, 33, 34]])
-
-What results is the construction of a new array where each value of
-the index array selects one row from the array being indexed and the
-resultant array has the resulting shape (number of index elements,
-size of row).
-
-An example of where this may be useful is for a color lookup table
-where we want to map the values of an image into RGB triples for
-display. The lookup table could have a shape (nlookup, 3). Indexing
-such an array with an image with shape (ny, nx) with dtype=np.uint8
-(or any integer type so long as values are within the bounds of the
-lookup table) will result in an array of shape (ny, nx, 3) where a
-triple of RGB values is associated with each pixel location.
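-
-A minimal sketch of this idea (the ``lut`` and ``image`` arrays here are
-hypothetical placeholders, not part of the original text): ::
-
- >>> lut = np.zeros((256, 3), dtype=np.uint8)  # one RGB triple per value
- >>> image = np.zeros((4, 5), dtype=np.uint8)  # a tiny image of indices
- >>> lut[image].shape                          # an RGB triple per pixel
- (4, 5, 3)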
-
-In general, the shape of the resultant array will be the concatenation
-of the shape of the index array (or the shape that all the index arrays
-were broadcast to) with the shape of any unused dimensions (those not
-indexed) in the array being indexed.
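-
-For instance, indexing the (5, 7) array ``y`` above with a (2, 2) index
-array along the first dimension leaves the second dimension unindexed: ::
-
- >>> y[np.array([[0, 2], [4, 4]])].shape
- (2, 2, 7)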
-
-Boolean or "mask" index arrays
-==============================
-
-Boolean arrays used as indices are treated in a different manner
-entirely than index arrays. Boolean arrays must be of the same shape
-as the initial dimensions of the array being indexed. In the
-most straightforward case, the boolean array has the same shape: ::
-
- >>> b = y>20
- >>> y[b]
- array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
-
-Unlike in the case of integer index arrays, in the boolean case, the
-result is a 1-D array containing all the elements in the indexed array
-corresponding to all the true elements in the boolean array. The
-elements in the indexed array are always iterated and returned in
-:term:`row-major` (C-style) order. The result is also identical to
-``y[np.nonzero(b)]``. As with index arrays, what is returned is a copy
-of the data, not a view as one gets with slices.
-
-The result will be multidimensional if y has more dimensions than b.
-For example: ::
-
- >>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y
- array([False, False, False, True, True])
- >>> y[b[:,5]]
- array([[21, 22, 23, 24, 25, 26, 27],
- [28, 29, 30, 31, 32, 33, 34]])
-
-Here the 4th and 5th rows are selected from the indexed array and
-combined to make a 2-D array.
-
-In general, when the boolean array has fewer dimensions than the array
-being indexed, this is equivalent to ``y[b, ...]``, which means y is
-indexed by b followed by as many ``:`` as are needed to fill out the
-rank of y.
-Thus the shape of the result is one dimension containing the number
-of True elements of the boolean array, followed by the remaining
-dimensions of the array being indexed.
-
-For example, using a 2-D boolean array of shape (2,3)
-with four True elements to select rows from a 3-D array of shape
-(2,3,5) results in a 2-D result of shape (4,5): ::
-
- >>> x = np.arange(30).reshape(2,3,5)
- >>> x
- array([[[ 0, 1, 2, 3, 4],
- [ 5, 6, 7, 8, 9],
- [10, 11, 12, 13, 14]],
- [[15, 16, 17, 18, 19],
- [20, 21, 22, 23, 24],
- [25, 26, 27, 28, 29]]])
- >>> b = np.array([[True, True, False], [False, True, True]])
- >>> x[b]
- array([[ 0, 1, 2, 3, 4],
- [ 5, 6, 7, 8, 9],
- [20, 21, 22, 23, 24],
- [25, 26, 27, 28, 29]])
-
-For further details, consult the numpy reference documentation on array indexing.
-
-Combining index arrays with slices
-==================================
-
-Index arrays may be combined with slices. For example: ::
-
- >>> y[np.array([0, 2, 4]), 1:3]
- array([[ 1, 2],
- [15, 16],
- [29, 30]])
-
-In effect, the slice and index array operations are independent.
-The slice operation extracts columns with index 1 and 2
-(i.e. the 2nd and 3rd columns),
-followed by the index array operation which extracts rows with
-index 0, 2 and 4 (i.e. the first, third and fifth rows).
-
-This is equivalent to::
-
- >>> y[:, 1:3][np.array([0, 2, 4]), :]
- array([[ 1, 2],
- [15, 16],
- [29, 30]])
-
-Likewise, slicing can be combined with broadcasted boolean indices: ::
-
- >>> b = y > 20
- >>> b
- array([[False, False, False, False, False, False, False],
- [False, False, False, False, False, False, False],
- [False, False, False, False, False, False, False],
- [ True, True, True, True, True, True, True],
- [ True, True, True, True, True, True, True]])
- >>> y[b[:,5],1:3]
- array([[22, 23],
- [29, 30]])
-
-Structural indexing tools
-=========================
-
-To facilitate easy matching of array shapes with expressions and in
-assignments, the np.newaxis object can be used within array indices
-to add new dimensions with a size of 1. For example: ::
-
- >>> y.shape
- (5, 7)
- >>> y[:,np.newaxis,:].shape
- (5, 1, 7)
-
-Note that there are no new elements in the array, just that the
-dimensionality is increased. This can be handy to combine two
-arrays in a way that otherwise would require explicit reshape
-operations. For example: ::
-
- >>> x = np.arange(5)
- >>> x[:,np.newaxis] + x[np.newaxis,:]
- array([[0, 1, 2, 3, 4],
- [1, 2, 3, 4, 5],
- [2, 3, 4, 5, 6],
- [3, 4, 5, 6, 7],
- [4, 5, 6, 7, 8]])
-
-The ellipsis syntax may be used to indicate selecting in full any
-remaining unspecified dimensions. For example: ::
-
- >>> z = np.arange(81).reshape(3,3,3,3)
- >>> z[1,...,2]
- array([[29, 32, 35],
- [38, 41, 44],
- [47, 50, 53]])
-
-This is equivalent to: ::
-
- >>> z[1,:,:,2]
- array([[29, 32, 35],
- [38, 41, 44],
- [47, 50, 53]])
-
-Assigning values to indexed arrays
-==================================
-
-As mentioned, one can select a subset of an array to assign to using
-a single index, slices, and index and mask arrays. The value being
-assigned to the indexed array must be shape consistent (the same shape
-or broadcastable to the shape the index produces). For example, it is
-permitted to assign a constant to a slice: ::
-
- >>> x = np.arange(10)
- >>> x[2:7] = 1
-
-or an array of the right size: ::
-
- >>> x[2:7] = np.arange(5)
-
-Note that assignments may result in truncation when assigning
-higher types to lower types (like floats to ints), or even
-exceptions (assigning complex to floats or ints): ::
-
- >>> x[1] = 1.2
- >>> x[1]
- 1
- >>> x[1] = 1.2j
- TypeError: can't convert complex to int
-
-
-Unlike some of the references (such as array and mask indices)
-assignments are always made to the original data in the array
-(indeed, nothing else would make sense!). Note though, that some
-actions may not work as one may naively expect. This particular
-example is often surprising to people: ::
-
- >>> x = np.arange(0, 50, 10)
- >>> x
- array([ 0, 10, 20, 30, 40])
- >>> x[np.array([1, 1, 3, 1])] += 1
- >>> x
- array([ 0, 11, 20, 31, 40])
-
-People may expect the 1st location to be incremented by 3.
-In fact, it will only be incremented by 1. The reason is that
-a new array is extracted from the original (as a temporary) containing
-the values at 1, 1, 3, 1, then the value 1 is added to the temporary,
-and then the temporary is assigned back to the original array. Thus
-the value of the array at x[1]+1 is assigned to x[1] three times,
-rather than being incremented 3 times.
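-
-If the accumulating behavior is what is wanted, the unbuffered ufunc
-method ``np.add.at`` can be used instead (a brief sketch, not part of the
-original text): ::
-
- >>> x = np.arange(0, 50, 10)
- >>> np.add.at(x, np.array([1, 1, 3, 1]), 1)
- >>> x
- array([ 0, 13, 20, 31, 40])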
-
-Dealing with variable numbers of indices within programs
-========================================================
-
-The index syntax is very powerful but limiting when dealing with
-a variable number of indices. For example, if you want to write
-a function that can handle arguments with various numbers of
-dimensions without having to write special case code for each
-number of possible dimensions, how can that be done? If one
-supplies to the index a tuple, the tuple will be interpreted
-as a list of indices. For example (using the previous definition
-for the array z): ::
-
- >>> indices = (1,1,1,1)
- >>> z[indices]
- 40
-
-So one can use code to construct tuples of any number of indices
-and then use these within an index.
-
-Slices can be specified within programs by using the slice() function
-in Python. For example: ::
-
- >>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2]
- >>> z[indices]
- array([39, 40])
-
-Likewise, ellipsis can be specified by code by using the Ellipsis
-object: ::
-
- >>> indices = (1, Ellipsis, 1) # same as [1,...,1]
- >>> z[indices]
- array([[28, 31, 34],
- [37, 40, 43],
- [46, 49, 52]])
-
-For this reason it is possible to use the output from the np.nonzero()
-function directly as an index since it always returns a tuple of index
-arrays.
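-
-For instance (using the array ``y`` defined earlier): ::
-
- >>> b = y > 20
- >>> np.array_equal(y[np.nonzero(b)], y[b])
- True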
-
-Because of the special treatment of tuples, they are not automatically
-converted to an array as a list would be. As an example: ::
-
- >>> z[[1,1,1,1]] # produces a large array
- array([[[[27, 28, 29],
- [30, 31, 32], ...
- >>> z[(1,1,1,1)] # returns a single value
- 40
-
-"""
diff --git a/numpy/doc/internals.py b/numpy/doc/internals.py
deleted file mode 100644
index 6718f1108..000000000
--- a/numpy/doc/internals.py
+++ /dev/null
@@ -1,162 +0,0 @@
-"""
-===============
-Array Internals
-===============
-
-Internal organization of numpy arrays
-=====================================
-
-It helps to understand a bit about how numpy arrays are handled under the
-covers in order to understand numpy better. This section will not go into
-great detail. Those wishing to understand the full details are referred to
-Travis Oliphant's book "Guide to NumPy".
-
-NumPy arrays consist of two major components, the raw array data (from now on,
-referred to as the data buffer), and the information about the raw array data.
-The data buffer is typically what people think of as arrays in C or Fortran,
-a contiguous (and fixed) block of memory containing fixed sized data items.
-NumPy also contains a significant set of data that describes how to interpret
-the data in the data buffer. This extra information contains (among other things):
-
- 1) The basic data element's size in bytes
- 2) The start of the data within the data buffer (an offset relative to the
- beginning of the data buffer).
- 3) The number of dimensions and the size of each dimension
- 4) The separation between elements for each dimension (the 'stride'). This
- does not have to be a multiple of the element size
- 5) The byte order of the data (which may not be the native byte order)
- 6) Whether the buffer is read-only
- 7) Information (via the dtype object) about the interpretation of the basic
-    data element. The basic data element may be as simple as an int or a float,
-    or it may be a compound object (e.g., struct-like), a fixed character field,
-    or Python object pointers.
- 8) Whether the array is to be interpreted as C-order or Fortran-order.
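-
-Much of this metadata is visible from Python; for instance (an illustrative
-sketch, not part of the original text): ::
-
-  >>> x = np.arange(6, dtype=np.int32).reshape(2, 3)
-  >>> x.itemsize              # (1) element size in bytes
-  4
-  >>> x.strides               # (4) separation between elements, per dimension
-  (12, 4)
-  >>> x.flags.writeable       # (6) buffer is writeable, i.e. not read-only
-  True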
-
-This arrangement allows for very flexible use of arrays. One thing that it
-allows is simple changes to the metadata to change the interpretation of the
-array buffer. Changing the byteorder of the array is a simple change involving
-no rearrangement of the data. The shape of the array can be changed very easily
-without changing anything in the data buffer or any data copying at all.
-
-Among other things that are made possible is the creation of a new array
-metadata object that uses the same data buffer, producing a new view of that
-data buffer with a different interpretation (e.g., different shape, offset,
-byte order, strides, etc.) but sharing the same data bytes. Many operations
-in numpy do just this, such as slicing. Other operations, such as transpose,
-don't move data elements around in the array, but rather change the
-information about the shape and strides so that the indexing of the array
-changes, but the data in the buffer doesn't move.
-
-Typically these arrays with new metadata but the same data buffer are
-new 'views' into the data buffer. There is a different ndarray object, but it
-uses the same data buffer. This is why it is necessary to force copies through
-use of the ``.copy()`` method if one really wants to make a new and independent
-copy of the data buffer.
-
-New views into arrays mean the object reference counts for the data buffer
-increase. Simply doing away with the original array object will not remove the
-data buffer if other views of it still exist.
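-
-For instance (a short sketch, not part of the original text): ::
-
-  >>> x = np.arange(5)
-  >>> y = x[::2]               # a view: same buffer, new metadata
-  >>> y.base is x              # the view keeps a reference to x's buffer
-  True
-  >>> np.shares_memory(x, y)
-  True
-  >>> np.shares_memory(x, x.copy())
-  False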
-
-Multidimensional Array Indexing Order Issues
-============================================
-
-What is the right way to index
-multi-dimensional arrays? Before you jump to conclusions about the one and
-true way to index multi-dimensional arrays, it pays to understand why this is
-a confusing issue. This section will try to explain in detail how numpy
-indexing works and why we adopt the convention we do for images, and when it
-may be appropriate to adopt other conventions.
-
-The first thing to understand is
-that there are two conflicting conventions for indexing 2-dimensional arrays.
-Matrix notation uses the first index to indicate which row is being selected and
-the second index to indicate which column is selected. This is opposite the
-geometrically-oriented convention for images where people generally think the
-first index represents x position (i.e., column) and the second represents y
-position (i.e., row). This alone is the source of much confusion;
-matrix-oriented users and image-oriented users expect two different things with
-regard to indexing.
-
-The second issue to understand is how indices correspond
-to the order the array is stored in memory. In Fortran the first index is the
-most rapidly varying index when moving through the elements of a two
-dimensional array as it is stored in memory. If you adopt the matrix
-convention for indexing, then this means the matrix is stored one column at a
-time (since the first index moves to the next row as it changes). Thus Fortran
-is considered a Column-major language. C has just the opposite convention. In
-C, the last index changes most rapidly as one moves through the array as
-stored in memory. Thus C is a Row-major language. The matrix is stored by
-rows. Note that in both cases it presumes that the matrix convention for
-indexing is being used, i.e., for both Fortran and C, the first index is the
-row. Note this convention implies that the indexing convention is invariant
-and that the data order changes to keep that so.
-
-But that's not the only way
-to look at it. Suppose one has large two-dimensional arrays (images or
-matrices) stored in data files. Suppose the data are stored by rows rather than
-by columns. If we are to preserve our index convention (whether matrix or
-image) that means that depending on the language we use, we may be forced to
-reorder the data if it is read into memory to preserve our indexing
-convention. For example if we read row-ordered data into memory without
-reordering, it will match the matrix indexing convention for C, but not for
-Fortran. Conversely, it will match the image indexing convention for Fortran,
-but not for C. For C, if one is using data stored in row order, and one wants
-to preserve the image index convention, the data must be reordered when
-reading into memory.
-
-In the end, which you do for Fortran or C depends on
-which is more important, not reordering data or preserving the indexing
-convention. For large images, reordering data is potentially expensive, and
-often the indexing convention is inverted to avoid that.
-
-The situation with
-numpy makes this issue yet more complicated. The internal machinery of numpy
-arrays is flexible enough to accept any ordering of indices. One can simply
-reorder indices by manipulating the internal stride information for arrays
-without reordering the data at all. NumPy will know how to map the new index
-order to the data without moving the data.
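-
-For instance, a transpose swaps the strides rather than moving any data
-(an illustrative sketch, not part of the original text): ::
-
-  >>> x = np.arange(6, dtype=np.int32).reshape(2, 3)
-  >>> x.strides
-  (12, 4)
-  >>> x.T.strides
-  (4, 12)
-  >>> np.shares_memory(x, x.T)
-  True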
-
-So if this is true, why not choose
-the index order that matches what you most expect? In particular, why not define
-row-ordered images to use the image convention? (This is sometimes referred
-to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN'
-order options for array ordering in numpy.) The drawback of doing this is
-potential performance penalties. It's common to access the data sequentially,
-either implicitly in array operations or explicitly by looping over rows of an
-image. When that is done, then the data will be accessed in non-optimal order.
-As the first index is incremented, what is actually happening is that elements
-spaced far apart in memory are being sequentially accessed, with usually poor
-memory access speeds. For example, consider a two-dimensional image 'im'
-defined so that im[0, 10] represents the value at x=0, y=10. To be consistent
-with usual Python behavior, im[0] would then represent a column at x=0. Yet
-that data would be spread over the whole array since the data are stored in
-row order. Despite the flexibility of numpy's indexing, it can't really paper
-over the fact that basic operations are rendered inefficient because of data
-order, or that getting contiguous subarrays is still awkward (e.g., im[:,0]
-for the first row, vs im[0]). Thus one can't use an idiom such as
-``for row in im``; ``for col in im`` does work, but doesn't yield contiguous
-column data.
-
-As it turns out, numpy is
-smart enough when dealing with ufuncs to determine which index is the most
-rapidly varying one in memory and uses that for the innermost loop. Thus for
-ufuncs there is no large intrinsic advantage to either approach in most cases.
-On the other hand, use of .flat with a FORTRAN-ordered array will lead to
-non-optimal memory access as adjacent elements in the flattened array
-(iterator, actually) are not contiguous in memory.
-
-Indeed, the fact is that Python
-indexing on lists and other sequences naturally leads to an outside-to-inside
-ordering (the first index gets the largest grouping, the next the next largest,
-and the last gets the smallest element). Since image data are normally stored
-by rows, this corresponds to position within rows being the last item indexed.
-
-If you do want to use Fortran ordering, realize that
-there are two approaches to consider: 1) accept that the first index is just not
-the most rapidly changing in memory and have all your I/O routines reorder
-your data when going from memory to disk or vice versa, or 2) use numpy's
-mechanism for mapping the first index to the most rapidly varying data. We
-recommend the former if possible. The disadvantage of the latter is that many
-of numpy's functions will yield arrays without Fortran ordering unless you are
-careful to use the 'order' keyword. Doing this would be highly inconvenient.
-
-Otherwise we recommend simply learning to reverse the usual order of indices
-when accessing elements of an array. Granted, it goes against the grain, but
-it is more in line with Python semantics and the natural order of the data.
-
-"""
diff --git a/numpy/doc/misc.py b/numpy/doc/misc.py
deleted file mode 100644
index fc1c4cd01..000000000
--- a/numpy/doc/misc.py
+++ /dev/null
@@ -1,226 +0,0 @@
-"""
-=============
-Miscellaneous
-=============
-
-IEEE 754 Floating Point Special Values
---------------------------------------
-
-Special values defined in numpy: nan, inf.
-
-NaNs can be used as a poor-man's mask (if you don't care what the
-original value was).
-
-Note: you cannot use equality to test for NaNs. E.g.: ::
-
- >>> myarr = np.array([1., 0., np.nan, 3.])
- >>> np.nonzero(myarr == np.nan)
- (array([], dtype=int64),)
- >>> np.nan == np.nan # is always False! Use special numpy functions instead.
- False
-  >>> myarr[myarr == np.nan] = 0. # doesn't work
-  >>> myarr
-  array([ 1.,  0., nan,  3.])
-  >>> myarr[np.isnan(myarr)] = 0. # use this instead to find and fix
-  >>> myarr
-  array([ 1.,  0.,  0.,  3.])
-
-Other related special value functions: ::
-
- isinf(): True if value is inf
- isfinite(): True if not nan or inf
- nan_to_num(): Map nan to 0, inf to max float, -inf to min float
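-
-For example (a short illustration): ::
-
-  >>> np.isfinite(np.array([1., np.inf, np.nan]))
-  array([ True, False, False])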
-
-The following corresponds to the usual functions except that nans are excluded
-from the results: ::
-
- nansum()
- nanmax()
- nanmin()
- nanargmax()
- nanargmin()
-
- >>> x = np.arange(10.)
- >>> x[3] = np.nan
- >>> x.sum()
- nan
- >>> np.nansum(x)
- 42.0
-
-How numpy handles numerical exceptions
---------------------------------------
-
-The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow``
-and ``'ignore'`` for ``underflow``. But this can be changed, and it can be
-set individually for different kinds of exceptions. The different behaviors
-are:
-
- - 'ignore' : Take no action when the exception occurs.
- - 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module).
- - 'raise' : Raise a `FloatingPointError`.
- - 'call' : Call a function specified using the `seterrcall` function.
- - 'print' : Print a warning directly to ``stdout``.
- - 'log' : Record error in a Log object specified by `seterrcall`.
-
-These behaviors can be set for all kinds of errors or specific ones:
-
- - all : apply to all numeric exceptions
- - invalid : when NaNs are generated
- - divide : divide by zero (for integers as well!)
- - overflow : floating point overflows
- - underflow : floating point underflows
-
-Note that integer divide-by-zero is handled by the same machinery.
-These behaviors are set on a per-thread basis.
-
-Examples
---------
-
-::
-
- >>> oldsettings = np.seterr(all='warn')
- >>> np.zeros(5,dtype=np.float32)/0.
- invalid value encountered in divide
- >>> j = np.seterr(under='ignore')
- >>> np.array([1.e-100])**10
- >>> j = np.seterr(invalid='raise')
- >>> np.sqrt(np.array([-1.]))
- FloatingPointError: invalid value encountered in sqrt
-  >>> def errorhandler(errstr, errflag):
-  ...     print("saw stupid error!")
-  >>> np.seterrcall(errorhandler)
-  <function errorhandler at 0x...>
-  >>> j = np.seterr(all='call')
-  >>> np.zeros(5, dtype=np.int32)/0
-  saw stupid error!
-  array([nan, nan, nan, nan, nan])
- >>> j = np.seterr(**oldsettings) # restore previous
- ... # error-handling settings
-
-Interfacing to C
-----------------
-Only a survey of the choices. Little detail on how each works.
-
-1) Bare metal, wrap your own C-code manually.
-
- - Plusses:
-
- - Efficient
- - No dependencies on other tools
-
- - Minuses:
-
- - Lots of learning overhead:
-
- - need to learn basics of Python C API
- - need to learn basics of numpy C API
- - need to learn how to handle reference counting and love it.
-
- - Reference counting often difficult to get right.
-
- - getting it wrong leads to memory leaks, and worse, segfaults
-
- - API will change for Python 3.0!
-
-2) Cython
-
- - Plusses:
-
- - avoid learning C API's
- - no dealing with reference counting
- - can code in pseudo python and generate C code
- - can also interface to existing C code
- - should shield you from changes to Python C api
- - has become the de-facto standard within the scientific Python community
- - fast indexing support for arrays
-
- - Minuses:
-
- - Can write code in non-standard form which may become obsolete
- - Not as flexible as manual wrapping
-
-3) ctypes
-
- - Plusses:
-
- - part of Python standard library
-    - good for interfacing to existing shared libraries, particularly
-      Windows DLLs
- - avoids API/reference counting issues
- - good numpy support: arrays have all these in their ctypes
- attribute: ::
-
- a.ctypes.data a.ctypes.get_strides
- a.ctypes.data_as a.ctypes.shape
- a.ctypes.get_as_parameter a.ctypes.shape_as
- a.ctypes.get_data a.ctypes.strides
- a.ctypes.get_shape a.ctypes.strides_as
-
- - Minuses:
-
- - can't use for writing code to be turned into C extensions, only a wrapper
- tool.
-
-4) SWIG (automatic wrapper generator)
-
- - Plusses:
-
- - around a long time
- - multiple scripting language support
- - C++ support
- - Good for wrapping large (many functions) existing C libraries
-
- - Minuses:
-
- - generates lots of code between Python and the C code
- - can cause performance problems that are nearly impossible to optimize
- out
- - interface files can be hard to write
- - doesn't necessarily avoid reference counting issues or needing to know
- API's
-
-5) scipy.weave
-
- - Plusses:
-
- - can turn many numpy expressions into C code
- - dynamic compiling and loading of generated C code
- - can embed pure C code in Python module and have weave extract, generate
- interfaces and compile, etc.
-
- - Minuses:
-
- - Future very uncertain: it's the only part of Scipy not ported to Python 3
- and is effectively deprecated in favor of Cython.
-
-6) Psyco
-
- - Plusses:
-
- - Turns pure python into efficient machine code through jit-like
- optimizations
- - very fast when it optimizes well
-
- - Minuses:
-
- - Only on intel (windows?)
- - Doesn't do much for numpy?
-
-Interfacing to Fortran:
------------------------
-The clear choice to wrap Fortran code is
-`f2py <https://docs.scipy.org/doc/numpy/f2py/>`_.
-
-Pyfort is an older alternative, but is no longer supported.
-Fwrap is a newer project that looked promising but is no longer being
-developed.
-
-Interfacing to C++:
--------------------
- 1) Cython
- 2) CXX
- 3) Boost.python
- 4) SWIG
- 5) SIP (used mainly in PyQT)
-
-"""
diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py
deleted file mode 100644
index 359d4f7f4..000000000
--- a/numpy/doc/structured_arrays.py
+++ /dev/null
@@ -1,646 +0,0 @@
-"""
-=================
-Structured Arrays
-=================
-
-Introduction
-============
-
-Structured arrays are ndarrays whose datatype is a composition of simpler
-datatypes organized as a sequence of named :term:`fields <field>`. For example,
-::
-
- >>> x = np.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
- ... dtype=[('name', 'U10'), ('age', 'i4'), ('weight', 'f4')])
- >>> x
- array([('Rex', 9, 81.), ('Fido', 3, 27.)],
- dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
-
-Here ``x`` is a one-dimensional array of length two whose datatype is a
-structure with three fields: 1. A string of length 10 or less named 'name', 2.
-a 32-bit integer named 'age', and 3. a 32-bit float named 'weight'.
-
-If you index ``x`` at position 1 you get a structure::
-
- >>> x[1]
- ('Fido', 3, 27.0)
-
-You can access and modify individual fields of a structured array by indexing
-with the field name::
-
- >>> x['age']
- array([9, 3], dtype=int32)
- >>> x['age'] = 5
- >>> x
- array([('Rex', 5, 81.), ('Fido', 5, 27.)],
- dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
-
-Structured datatypes are designed to be able to mimic 'structs' in the C
-language, and share a similar memory layout. They are meant for interfacing with
-C code and for low-level manipulation of structured buffers, for example for
-interpreting binary blobs. For these purposes they support specialized features
-such as subarrays, nested datatypes, and unions, and allow control over the
-memory layout of the structure.
-
-Users looking to manipulate tabular data, such as stored in csv files, may find
-other pydata projects more suitable, such as xarray or pandas.
-These provide a high-level interface for tabular data analysis and are better
-optimized for that use. For instance, the C-struct-like memory layout of
-structured arrays in numpy can lead to poor cache behavior in comparison.
-
-.. _defining-structured-types:
-
-Structured Datatypes
-====================
-
-A structured datatype can be thought of as a sequence of bytes of a certain
-length (the structure's :term:`itemsize`) which is interpreted as a collection
-of fields. Each field has a name, a datatype, and a byte offset within the
-structure. The datatype of a field may be any numpy datatype including other
-structured datatypes, and it may also be a :term:`subarray data type` which
-behaves like an ndarray of a specified shape. The offsets of the fields are
-arbitrary, and fields may even overlap. These offsets are usually determined
-automatically by numpy, but can also be specified.
-
-Structured Datatype Creation
-----------------------------
-
-Structured datatypes may be created using the function :func:`numpy.dtype`.
-There are 4 alternative forms of specification which vary in flexibility and
-conciseness. These are further documented in the
-:ref:`Data Type Objects <arrays.dtypes.constructing>` reference page, and in
-summary they are:
-
-1. A list of tuples, one tuple per field
-
- Each tuple has the form ``(fieldname, datatype, shape)`` where shape is
- optional. ``fieldname`` is a string (or tuple if titles are used, see
- :ref:`Field Titles <titles>` below), ``datatype`` may be any object
- convertible to a datatype, and ``shape`` is a tuple of integers specifying
- subarray shape.
-
- >>> np.dtype([('x', 'f4'), ('y', np.float32), ('z', 'f4', (2, 2))])
- dtype([('x', '<f4'), ('y', '<f4'), ('z', '<f4', (2, 2))])
-
- If ``fieldname`` is the empty string ``''``, the field will be given a
- default name of the form ``f#``, where ``#`` is the integer index of the
- field, counting from 0 from the left::
-
- >>> np.dtype([('x', 'f4'), ('', 'i4'), ('z', 'i8')])
- dtype([('x', '<f4'), ('f1', '<i4'), ('z', '<i8')])
-
- The byte offsets of the fields within the structure and the total
- structure itemsize are determined automatically.
-
-2. A string of comma-separated dtype specifications
-
- In this shorthand notation any of the :ref:`string dtype specifications
- <arrays.dtypes.constructing>` may be used in a string and separated by
- commas. The itemsize and byte offsets of the fields are determined
- automatically, and the field names are given the default names ``f0``,
- ``f1``, etc. ::
-
- >>> np.dtype('i8, f4, S3')
- dtype([('f0', '<i8'), ('f1', '<f4'), ('f2', 'S3')])
- >>> np.dtype('3int8, float32, (2, 3)float64')
- dtype([('f0', 'i1', (3,)), ('f1', '<f4'), ('f2', '<f8', (2, 3))])
-
-3. A dictionary of field parameter arrays
-
- This is the most flexible form of specification since it allows control
- over the byte-offsets of the fields and the itemsize of the structure.
-
- The dictionary has two required keys, 'names' and 'formats', and four
- optional keys, 'offsets', 'itemsize', 'aligned' and 'titles'. The values
- for 'names' and 'formats' should respectively be a list of field names and
- a list of dtype specifications, of the same length. The optional 'offsets'
- value should be a list of integer byte-offsets, one for each field within
- the structure. If 'offsets' is not given the offsets are determined
- automatically. The optional 'itemsize' value should be an integer
- describing the total size in bytes of the dtype, which must be large
- enough to contain all the fields.
- ::
-
- >>> np.dtype({'names': ['col1', 'col2'], 'formats': ['i4', 'f4']})
- dtype([('col1', '<i4'), ('col2', '<f4')])
- >>> np.dtype({'names': ['col1', 'col2'],
- ... 'formats': ['i4', 'f4'],
- ... 'offsets': [0, 4],
- ... 'itemsize': 12})
- dtype({'names':['col1','col2'], 'formats':['<i4','<f4'], 'offsets':[0,4], 'itemsize':12})
-
-   Offsets may be chosen such that the fields overlap, though this will mean
-   that assigning to one field may clobber any overlapping field's data (see
-   the sketch after this list). As an exception, fields of
-   :class:`numpy.object` type cannot overlap with other fields, because of
-   the risk of clobbering the internal object pointer and then
-   dereferencing it.
-
- The optional 'aligned' value can be set to ``True`` to make the automatic
- offset computation use aligned offsets (see :ref:`offsets-and-alignment`),
- as if the 'align' keyword argument of :func:`numpy.dtype` had been set to
- True.
-
- The optional 'titles' value should be a list of titles of the same length
- as 'names', see :ref:`Field Titles <titles>` below.
-
-4. A dictionary of field names
-
- The use of this form of specification is discouraged, but documented here
- because older numpy code may use it. The keys of the dictionary are the
- field names and the values are tuples specifying type and offset::
-
- >>> np.dtype({'col1': ('i1', 0), 'col2': ('f4', 1)})
- dtype([('col1', 'i1'), ('col2', '<f4')])
-
- This form is discouraged because Python dictionaries do not preserve order
- in Python versions before Python 3.6, and the order of the fields in a
- structured dtype has meaning. :ref:`Field Titles <titles>` may be
- specified by using a 3-tuple, see below.
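-
-As noted in the dictionary form (3) above, field offsets may overlap. A brief
-sketch (the field names here are arbitrary): assigning to the wider field
-clobbers the narrower field that shares its bytes::
-
- >>> dt = np.dtype({'names': ['a', 'b'],
- ...                'formats': ['i4', 'i2'],
- ...                'offsets': [0, 2]})
- >>> x = np.zeros(1, dtype=dt)
- >>> x['a'] = -1        # bytes 2-3 of 'a' are also 'b'
- >>> x['b']
- array([-1], dtype=int16)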
-
-Manipulating and Displaying Structured Datatypes
-------------------------------------------------
-
-The list of field names of a structured datatype can be found in the ``names``
-attribute of the dtype object::
-
- >>> d = np.dtype([('x', 'i8'), ('y', 'f4')])
- >>> d.names
- ('x', 'y')
-
-The field names may be modified by assigning to the ``names`` attribute using a
-sequence of strings of the same length.
-
-The dtype object also has a dictionary-like attribute, ``fields``, whose keys
-are the field names (and :ref:`Field Titles <titles>`, see below) and whose
-values are tuples containing the dtype and byte offset of each field. ::
-
- >>> d.fields
- mappingproxy({'x': (dtype('int64'), 0), 'y': (dtype('float32'), 8)})
-
-Both the ``names`` and ``fields`` attributes will equal ``None`` for
-unstructured arrays. The recommended way to test if a dtype is structured is
-with ``if dt.names is not None`` rather than ``if dt.names``, to account for
-dtypes with 0 fields.
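-
-For example::
-
- >>> np.dtype('f8').names is None
- True
- >>> np.dtype([('a', 'f8')]).names is None
- False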
-
-The string representation of a structured datatype is shown in the "list of
-tuples" form if possible, otherwise numpy falls back to using the more general
-dictionary form.
-
-.. _offsets-and-alignment:
-
-Automatic Byte Offsets and Alignment
-------------------------------------
-
-Numpy uses one of two methods to automatically determine the field byte offsets
-and the overall itemsize of a structured datatype, depending on whether
-``align=True`` was specified as a keyword argument to :func:`numpy.dtype`.
-
-By default (``align=False``), numpy will pack the fields together such that
-each field starts at the byte offset where the previous field ended, and the
-fields are contiguous in memory. ::
-
- >>> def print_offsets(d):
- ... print("offsets:", [d.fields[name][1] for name in d.names])
- ... print("itemsize:", d.itemsize)
- >>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2'))
- offsets: [0, 1, 2, 6, 7, 15]
- itemsize: 17
-
-If ``align=True`` is set, numpy will pad the structure in the same way many C
-compilers would pad a C-struct. Aligned structures can give a performance
-improvement in some cases, at the cost of increased datatype size. Padding
-bytes are inserted between fields such that each field's byte offset will be a
-multiple of that field's alignment, which is usually equal to the field's size
-in bytes for simple datatypes, see :c:member:`PyArray_Descr.alignment`. The
-structure will also have trailing padding added so that its itemsize is a
-multiple of the largest field's alignment. ::
-
- >>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2', align=True))
- offsets: [0, 1, 4, 8, 16, 24]
- itemsize: 32
-
-Note that although almost all modern C compilers pad in this way by default,
-padding in C structs is C-implementation-dependent so this memory layout is not
-guaranteed to exactly match that of a corresponding struct in a C program. Some
-work may be needed, either on the numpy side or the C side, to obtain exact
-correspondence.
-
-If offsets were specified using the optional ``offsets`` key in the
-dictionary-based dtype specification, setting ``align=True`` will check that
-each field's offset is a multiple of its size and that the itemsize is a
-multiple of the largest field size, and raise an exception if not.
-
-If the offsets of the fields and itemsize of a structured array satisfy the
-alignment conditions, the array will have the ``ALIGNED`` :attr:`flag
-<numpy.ndarray.flags>` set.
-
-A convenience function :func:`numpy.lib.recfunctions.repack_fields` converts an
-aligned dtype or array to a packed one and vice versa. It takes either a dtype
-or structured ndarray as an argument, and returns a copy with fields re-packed,
-with or without padding bytes.
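-
-For example (a brief sketch)::
-
- >>> from numpy.lib.recfunctions import repack_fields
- >>> dt = np.dtype('u1, i4', align=True)
- >>> dt.itemsize
- 8
- >>> repack_fields(dt).itemsize
- 5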
-
-.. _titles:
-
-Field Titles
-------------
-
-In addition to field names, fields may also have an associated :term:`title`,
-an alternate name, which is sometimes used as an additional description or
-alias for the field. The title may be used to index an array, just like a
-field name.
-
-To add titles when using the list-of-tuples form of dtype specification, the
-field name may be specified as a tuple of two strings instead of a single
-string, which will be the field's title and field name respectively. For
-example::
-
- >>> np.dtype([(('my title', 'name'), 'f4')])
- dtype([(('my title', 'name'), '<f4')])
-
-When using the first form of dictionary-based specification, the titles may be
-supplied as an extra ``'titles'`` key as described above. When using the second
-(discouraged) dictionary-based specification, the title can be supplied by
-providing a 3-element tuple ``(datatype, offset, title)`` instead of the usual
-2-element tuple::
-
- >>> np.dtype({'name': ('i4', 0, 'my title')})
- dtype([(('my title', 'name'), '<i4')])
-
-The ``dtype.fields`` dictionary will contain titles as keys, if any
-titles are used. This means effectively that a field with a title will be
-represented twice in the fields dictionary. The tuple values for these fields
-will also have a third element, the field title. Because of this, and because
-the ``names`` attribute preserves the field order while the ``fields``
-attribute may not, it is recommended to iterate through the fields of a dtype
-using the ``names`` attribute of the dtype, which will not list titles, as
-in::
-
- >>> for name in d.names:
- ... print(d.fields[name][:2])
- (dtype('int64'), 0)
- (dtype('float32'), 8)
-
-Union types
------------
-
-Structured datatypes are implemented in numpy to have base type
-:class:`numpy.void` by default, but it is possible to interpret other numpy
-types as structured types using the ``(base_dtype, dtype)`` form of dtype
-specification described in
-:ref:`Data Type Objects <arrays.dtypes.constructing>`. Here, ``base_dtype`` is
-the desired underlying dtype, and fields and flags will be copied from
-``dtype``. This dtype is similar to a 'union' in C.
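-
-For example, a 32-bit integer whose two halves can also be read as two
-16-bit integers (a brief sketch; the field names are arbitrary)::
-
- >>> dt = np.dtype((np.int32, {'real': (np.int16, 0), 'imag': (np.int16, 2)}))
- >>> dt.itemsize
- 4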
-
-Indexing and Assignment to Structured arrays
-============================================
-
-Assigning data to a Structured Array
-------------------------------------
-
-There are a number of ways to assign values to a structured array: Using python
-tuples, using scalar values, or using other structured arrays.
-
-Assignment from Python Native Types (Tuples)
-````````````````````````````````````````````
-
-The simplest way to assign values to a structured array is using python tuples.
-Each assigned value should be a tuple of length equal to the number of fields
-in the array, and not a list or array as these will trigger numpy's
-broadcasting rules. The tuple's elements are assigned to the successive fields
-of the array, from left to right::
-
- >>> x = np.array([(1, 2, 3), (4, 5, 6)], dtype='i8, f4, f8')
- >>> x[1] = (7, 8, 9)
- >>> x
- array([(1, 2., 3.), (7, 8., 9.)],
- dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '<f8')])
-
-Assignment from Scalars
-```````````````````````
-
-A scalar assigned to a structured element will be assigned to all fields. This
-happens when a scalar is assigned to a structured array, or when an
-unstructured array is assigned to a structured array::
-
- >>> x = np.zeros(2, dtype='i8, f4, ?, S1')
- >>> x[:] = 3
- >>> x
- array([(3, 3., True, b'3'), (3, 3., True, b'3')],
- dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
- >>> x[:] = np.arange(2)
- >>> x
- array([(0, 0., False, b'0'), (1, 1., True, b'1')],
- dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
-
-Structured arrays can also be assigned to unstructured arrays, but only if the
-structured datatype has just a single field::
-
- >>> twofield = np.zeros(2, dtype=[('A', 'i4'), ('B', 'i4')])
- >>> onefield = np.zeros(2, dtype=[('A', 'i4')])
- >>> nostruct = np.zeros(2, dtype='i4')
- >>> nostruct[:] = twofield
- Traceback (most recent call last):
- ...
- TypeError: Cannot cast array data from dtype([('A', '<i4'), ('B', '<i4')]) to dtype('int32') according to the rule 'unsafe'
-
-Assignment from other Structured Arrays
-```````````````````````````````````````
-
-Assignment between two structured arrays occurs as if the source elements had
-been converted to tuples and then assigned to the destination elements. That
-is, the first field of the source array is assigned to the first field of the
-destination array, and the second field likewise, and so on, regardless of
-field names. Structured arrays with a different number of fields cannot be
-assigned to each other. Bytes of the destination structure which are not
-included in any of the fields are unaffected. ::
-
- >>> a = np.zeros(3, dtype=[('a', 'i8'), ('b', 'f4'), ('c', 'S3')])
- >>> b = np.ones(3, dtype=[('x', 'f4'), ('y', 'S3'), ('z', 'O')])
- >>> b[:] = a
- >>> b
- array([(0., b'0.0', b''), (0., b'0.0', b''), (0., b'0.0', b'')],
- dtype=[('x', '<f4'), ('y', 'S3'), ('z', 'O')])
-
-
-Assignment involving subarrays
-``````````````````````````````
-
-When assigning to fields which are subarrays, the assigned value will first be
-broadcast to the shape of the subarray.
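-
-For example (a brief sketch)::
-
- >>> x = np.zeros(2, dtype=[('a', 'f4', (2, 2))])
- >>> x['a'] = 1.0       # the scalar is broadcast to each (2, 2) subarray
- >>> x['a'][0]
- array([[1., 1.],
-        [1., 1.]], dtype=float32)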
-
-Indexing Structured Arrays
---------------------------
-
-Accessing Individual Fields
-```````````````````````````
-
-Individual fields of a structured array may be accessed and modified by indexing
-the array with the field name. ::
-
- >>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
- >>> x['foo']
- array([1, 3])
- >>> x['foo'] = 10
- >>> x
- array([(10, 2.), (10, 4.)],
- dtype=[('foo', '<i8'), ('bar', '<f4')])
-
-The resulting array is a view into the original array. It shares the same
-memory locations and writing to the view will modify the original array. ::
-
- >>> y = x['bar']
- >>> y[:] = 11
- >>> x
- array([(10, 11.), (10, 11.)],
- dtype=[('foo', '<i8'), ('bar', '<f4')])
-
-This view has the same dtype and itemsize as the indexed field, so it is
-typically a non-structured array, except in the case of nested structures.
-
- >>> y.dtype, y.shape, y.strides
- (dtype('float32'), (2,), (12,))
-
-If the accessed field is a subarray, the dimensions of the subarray
-are appended to the shape of the result::
-
- >>> x = np.zeros((2, 2), dtype=[('a', np.int32), ('b', np.float64, (3, 3))])
- >>> x['a'].shape
- (2, 2)
- >>> x['b'].shape
- (2, 2, 3, 3)
-
-Accessing Multiple Fields
-```````````````````````````
-
-One can index and assign to a structured array with a multi-field index, where
-the index is a list of field names.
-
-.. warning::
- The behavior of multi-field indexes changed from Numpy 1.15 to Numpy 1.16.
-
-The result of indexing with a multi-field index is a view into the original
-array, as follows::
-
- >>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')])
- >>> a[['a', 'c']]
- array([(0, 0.), (0, 0.), (0, 0.)],
- dtype={'names':['a','c'], 'formats':['<i4','<f4'], 'offsets':[0,8], 'itemsize':12})
-
-Assignment to the view modifies the original array. The view's fields will be
-in the order they were indexed. Note that unlike for single-field indexing, the
-dtype of the view has the same itemsize as the original array, and has fields
-at the same offsets as in the original array, and unindexed fields are merely
-missing.
-
-.. warning::
- In Numpy 1.15, indexing an array with a multi-field index returned a copy of
- the result above, but with fields packed together in memory as if
- passed through :func:`numpy.lib.recfunctions.repack_fields`.
-
- The new behavior as of Numpy 1.16 leads to extra "padding" bytes at the
- location of unindexed fields compared to 1.15. You will need to update any
- code which depends on the data having a "packed" layout. For instance code
- such as::
-
- >>> a[['a', 'c']].view('i8') # Fails in Numpy 1.16
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype
-
- will need to be changed. This code has raised a ``FutureWarning`` since
- Numpy 1.12, and similar code has raised ``FutureWarning`` since 1.7.
-
- In 1.16 a number of functions have been introduced in the
- :mod:`numpy.lib.recfunctions` module to help users account for this
- change. These are
-   :func:`numpy.lib.recfunctions.repack_fields`,
- :func:`numpy.lib.recfunctions.structured_to_unstructured`,
- :func:`numpy.lib.recfunctions.unstructured_to_structured`,
- :func:`numpy.lib.recfunctions.apply_along_fields`,
- :func:`numpy.lib.recfunctions.assign_fields_by_name`, and
- :func:`numpy.lib.recfunctions.require_fields`.
-
- The function :func:`numpy.lib.recfunctions.repack_fields` can always be
- used to reproduce the old behavior, as it will return a packed copy of the
- structured array. The code above, for example, can be replaced with:
-
- >>> from numpy.lib.recfunctions import repack_fields
- >>> repack_fields(a[['a', 'c']]).view('i8') # supported in 1.16
- array([0, 0, 0])
-
- Furthermore, numpy now provides a new function
- :func:`numpy.lib.recfunctions.structured_to_unstructured` which is a safer
- and more efficient alternative for users who wish to convert structured
-   arrays to unstructured arrays, as the view above is often intended to do.
- This function allows safe conversion to an unstructured type taking into
- account padding, often avoids a copy, and also casts the datatypes
- as needed, unlike the view. Code such as:
-
- >>> b = np.zeros(3, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
- >>> b[['x', 'z']].view('f4')
- array([0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)
-
- can be made safer by replacing with:
-
- >>> from numpy.lib.recfunctions import structured_to_unstructured
- >>> structured_to_unstructured(b[['x', 'z']])
-   array([[0., 0.],
-          [0., 0.],
-          [0., 0.]], dtype=float32)
-
-
-Assignment to an array with a multi-field index modifies the original array::
-
- >>> a[['a', 'c']] = (2, 3)
- >>> a
- array([(2, 0, 3.), (2, 0, 3.), (2, 0, 3.)],
- dtype=[('a', '<i4'), ('b', '<i4'), ('c', '<f4')])
-
-This obeys the structured array assignment rules described above. For example,
-this means that one can swap the values of two fields using appropriate
-multi-field indexes::
-
- >>> a[['a', 'c']] = a[['c', 'a']]
-
-Indexing with an Integer to get a Structured Scalar
-```````````````````````````````````````````````````
-
-Indexing a single element of a structured array (with an integer index) returns
-a structured scalar::
-
- >>> x = np.array([(1, 2., 3.)], dtype='i, f, f')
- >>> scalar = x[0]
- >>> scalar
- (1, 2., 3.)
- >>> type(scalar)
- <class 'numpy.void'>
-
-Unlike other numpy scalars, structured scalars are mutable and act like views
-into the original array, such that modifying the scalar will modify the
-original array. Structured scalars also support access and assignment by field
-name::
-
- >>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
- >>> s = x[0]
- >>> s['bar'] = 100
- >>> x
- array([(1, 100.), (3, 4.)],
- dtype=[('foo', '<i8'), ('bar', '<f4')])
-
-Similarly to tuples, structured scalars can also be indexed with an integer::
-
- >>> scalar = np.array([(1, 2., 3.)], dtype='i, f, f')[0]
- >>> scalar[0]
- 1
- >>> scalar[1] = 4
-
-Thus, tuples might be thought of as the native Python equivalent to numpy's
-structured types, much like native Python integers are the equivalent of
-numpy's integer types. Structured scalars may be converted to a tuple by
-calling :func:`ndarray.item`::
-
- >>> scalar.item(), type(scalar.item())
- ((1, 4.0, 3.0), <class 'tuple'>)
-
-Viewing Structured Arrays Containing Objects
---------------------------------------------
-
-In order to prevent clobbering object pointers in fields of
-:class:`numpy.object` type, numpy currently does not allow views of structured
-arrays containing objects.
-
-Structure Comparison
---------------------
-
-If the dtypes of two void structured arrays are equal, testing the equality of
-the arrays will result in a boolean array with the dimensions of the original
-arrays, with elements set to ``True`` where all fields of the corresponding
-structures are equal. Structured dtypes are equal if the field names,
-dtypes and titles are the same, ignoring endianness, and the fields are in
-the same order::
-
- >>> a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')])
- >>> b = np.ones(2, dtype=[('a', 'i4'), ('b', 'i4')])
- >>> a == b
- array([False, False])
-
-Currently, if the dtypes of two void structured arrays are not equivalent the
-comparison fails, returning the scalar value ``False``. This behavior is
-deprecated as of numpy 1.10 and will raise an error or perform elementwise
-comparison in the future.
-
-The ``<`` and ``>`` operators always return ``False`` when comparing void
-structured arrays, and arithmetic and bitwise operations are not supported.
-
-Record Arrays
-=============
-
-As an optional convenience numpy provides an ndarray subclass,
-:class:`numpy.recarray`, and associated helper functions in the
-:mod:`numpy.rec` submodule, that allow access to fields of structured arrays
-by attribute instead of only by index. Record arrays also use a special
-datatype, :class:`numpy.record`, that allows field access by attribute on the
-structured scalars obtained from the array.
-
-The simplest way to create a record array is with :func:`numpy.rec.array`::
-
- >>> recordarr = np.rec.array([(1, 2., 'Hello'), (2, 3., "World")],
- ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
- >>> recordarr.bar
- array([ 2., 3.], dtype=float32)
- >>> recordarr[1:2]
- rec.array([(2, 3., b'World')],
- dtype=[('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')])
- >>> recordarr[1:2].foo
- array([2], dtype=int32)
- >>> recordarr.foo[1:2]
- array([2], dtype=int32)
- >>> recordarr[1].baz
- b'World'
-
-:func:`numpy.rec.array` can convert a wide variety of arguments into record
-arrays, including structured arrays::
-
- >>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")],
- ... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])
- >>> recordarr = np.rec.array(arr)
-
-The :mod:`numpy.rec` module provides a number of other convenience functions for
-creating record arrays; see :ref:`record array creation routines
-<routines.array-creation.rec>`.
-
-A record array representation of a structured array can be obtained using the
-appropriate `view <numpy-ndarray-view>`_::
-
- >>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")],
- ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')])
- >>> recordarr = arr.view(dtype=np.dtype((np.record, arr.dtype)),
- ... type=np.recarray)
-
-For convenience, viewing an ndarray as type :class:`np.recarray` will
-automatically convert its dtype to :class:`np.record`, so the dtype can be left
-out of the view::
-
- >>> recordarr = arr.view(np.recarray)
- >>> recordarr.dtype
- dtype((numpy.record, [('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')]))
-
-To get back to a plain ndarray both the dtype and type must be reset. The
-following view does so, taking into account the unusual case that the
-recordarr was not a structured type::
-
- >>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray)
-
-Record array fields accessed by index or by attribute are returned as a record
-array if the field has a structured type but as a plain ndarray otherwise. ::
-
- >>> recordarr = np.rec.array([('Hello', (1, 2)), ("World", (3, 4))],
- ... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])])
- >>> type(recordarr.foo)
- <class 'numpy.ndarray'>
- >>> type(recordarr.bar)
- <class 'numpy.recarray'>
-
-Note that if a field has the same name as an ndarray attribute, the ndarray
-attribute takes precedence. Such fields will be inaccessible by attribute but
-will still be accessible by index.
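-
-For instance, with a field named ``shape`` (chosen here purely for
-illustration), the ndarray attribute wins::
-
- >>> rec = np.rec.array([(1, 2.)], dtype=[('shape', 'i4'), ('bar', 'f4')])
- >>> rec.shape
- (1,)
- >>> rec['shape']
- array([1], dtype=int32)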
-
-"""
diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py
deleted file mode 100644
index 7dc10e1c8..000000000
--- a/numpy/doc/subclassing.py
+++ /dev/null
@@ -1,752 +0,0 @@
-"""=============================
-Subclassing ndarray in Python
-=============================
-
-Introduction
-------------
-
-Subclassing ndarray is relatively simple, but it has some complications
-compared to other Python objects. On this page we explain the machinery
-that allows you to subclass ndarray, and the implications for
-implementing a subclass.
-
-ndarrays and object creation
-============================
-
-Subclassing ndarray is complicated by the fact that new instances of
-ndarray classes can come about in three different ways. These are:
-
-#. Explicit constructor call - as in ``MySubClass(params)``. This is
- the usual route to Python instance creation.
-#. View casting - casting an existing ndarray as a given subclass
-#. New from template - creating a new instance from a template
- instance. Examples include returning slices from a subclassed array,
- creating return types from ufuncs, and copying arrays. See
- :ref:`new-from-template` for more details
-
-The last two are features of ndarrays, needed to support
-things like array slicing. The complications of subclassing ndarray are
-due to the mechanisms numpy has for supporting these latter two routes of
-instance creation.
-
-.. _view-casting:
-
-View casting
-------------
-
-*View casting* is the standard ndarray mechanism by which you take an
-ndarray of any subclass, and return a view of the array as another
-(specified) subclass:
-
->>> import numpy as np
->>> # create a completely useless ndarray subclass
->>> class C(np.ndarray): pass
->>> # create a standard ndarray
->>> arr = np.zeros((3,))
->>> # take a view of it, as our useless subclass
->>> c_arr = arr.view(C)
->>> type(c_arr)
-<class 'C'>
-
-.. _new-from-template:
-
-Creating new from template
---------------------------
-
-New instances of an ndarray subclass can also come about by a very
-similar mechanism to :ref:`view-casting`, when numpy finds it needs to
-create a new instance from a template instance. The most obvious place
-this has to happen is when you are taking slices of subclassed arrays.
-For example:
-
->>> v = c_arr[1:]
->>> type(v) # the view is of type 'C'
-<class 'C'>
->>> v is c_arr # but it's a new instance
-False
-
-The slice is a *view* onto the original ``c_arr`` data. So, when we
-take a view from the ndarray, we return a new ndarray, of the same
-class, that points to the data in the original.
-
-There are other points in the use of ndarrays where we need such views,
-such as copying arrays (``c_arr.copy()``), creating ufunc output arrays
-(see also :ref:`array-wrap`), and reducing methods (like
-``c_arr.mean()``).
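-
-For example, copying takes the new-from-template route as well, so the
-subclass is preserved:
-
->>> c2 = c_arr.copy()
->>> type(c2)
-<class 'C'>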
-
-Relationship of view casting and new-from-template
---------------------------------------------------
-
-These paths both use the same machinery. We make the distinction here,
-because they result in different input to your methods. Specifically,
-:ref:`view-casting` means you have created a new instance of your array
-type from any potential subclass of ndarray. :ref:`new-from-template`
-means you have created a new instance of your class from a pre-existing
-instance, allowing you - for example - to copy across attributes that
-are particular to your subclass.
-
-Implications for subclassing
-----------------------------
-
-If we subclass ndarray, we need to deal not only with explicit
-construction of our array type, but also :ref:`view-casting` or
-:ref:`new-from-template`. NumPy has the machinery to do this, and it is
-this machinery that makes subclassing slightly non-standard.
-
-There are two aspects to the machinery that ndarray uses to support
-views and new-from-template in subclasses.
-
-The first is the use of the ``ndarray.__new__`` method for the main work
-of object initialization, rather than the more usual ``__init__``
-method. The second is the use of the ``__array_finalize__`` method to
-allow subclasses to clean up after the creation of views and new
-instances from templates.
-
-A brief Python primer on ``__new__`` and ``__init__``
-=====================================================
-
-``__new__`` is a standard Python method, and, if present, is called
-before ``__init__`` when we create a class instance. See the `python
-__new__ documentation
-<https://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail.
-
-For example, consider the following Python code:
-
-.. testcode::
-
- class C:
- def __new__(cls, *args):
- print('Cls in __new__:', cls)
- print('Args in __new__:', args)
- # The `object` type __new__ method takes a single argument.
- return object.__new__(cls)
-
- def __init__(self, *args):
- print('type(self) in __init__:', type(self))
- print('Args in __init__:', args)
-
-meaning that we get:
-
->>> c = C('hello')
-Cls in __new__: <class 'C'>
-Args in __new__: ('hello',)
-type(self) in __init__: <class 'C'>
-Args in __init__: ('hello',)
-
-When we call ``C('hello')``, the ``__new__`` method gets its own class
-as its first argument, and the passed argument, which is the string
-``'hello'``. After Python calls ``__new__``, it usually (see below)
-calls our ``__init__`` method, with the output of ``__new__`` as the
-first argument (now a class instance), and the passed arguments
-following.
-
-As you can see, the object can be initialized in the ``__new__``
-method or the ``__init__`` method, or both, and in fact ndarray does
-not have an ``__init__`` method, because all the initialization is
-done in the ``__new__`` method.
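-
-A quick check confirms this - ndarray inherits ``__init__`` unchanged from
-``object``:
-
->>> np.ndarray.__init__ is object.__init__
-True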
-
-Why use ``__new__`` rather than just the usual ``__init__``? Because
-in some cases, as for ndarray, we want to be able to return an object
-of some other class. Consider the following:
-
-.. testcode::
-
- class D(C):
- def __new__(cls, *args):
- print('D cls is:', cls)
- print('D args in __new__:', args)
- return C.__new__(C, *args)
-
- def __init__(self, *args):
- # we never get here
- print('In D __init__')
-
-meaning that:
-
->>> obj = D('hello')
-D cls is: <class 'D'>
-D args in __new__: ('hello',)
-Cls in __new__: <class 'C'>
-Args in __new__: ('hello',)
->>> type(obj)
-<class 'C'>
-
-The definition of ``C`` is the same as before, but for ``D``, the
-``__new__`` method returns an instance of class ``C`` rather than
-``D``. Note that the ``__init__`` method of ``D`` does not get
-called. In general, when the ``__new__`` method returns an object of
-class other than the class in which it is defined, the ``__init__``
-method of that class is not called.
-
-This is how subclasses of the ndarray class are able to return views
-that preserve the class type. When taking a view, the standard
-ndarray machinery creates the new ndarray object with something
-like::
-
- obj = ndarray.__new__(subtype, shape, ...
-
-where ``subtype`` is the subclass. Thus the returned view is of the
-same class as the subclass, rather than being of class ``ndarray``.
-
-That solves the problem of returning views of the same type, but now
-we have a new problem. The machinery of ndarray can set the class
-this way, in its standard methods for taking views, but the ndarray
-``__new__`` method knows nothing of what we have done in our own
-``__new__`` method in order to set attributes, and so on. (Aside -
-why not call ``obj = subtype.__new__(...`` then? Because we may not
-have a ``__new__`` method with the same call signature).
-
-The role of ``__array_finalize__``
-==================================
-
-``__array_finalize__`` is the mechanism that numpy provides to allow
-subclasses to handle the various ways that new instances get created.
-
-Remember that subclass instances can come about in these three ways:
-
-#. explicit constructor call (``obj = MySubClass(params)``). This will
- call the usual sequence of ``MySubClass.__new__`` then (if it exists)
- ``MySubClass.__init__``.
-#. :ref:`view-casting`
-#. :ref:`new-from-template`
-
-Our ``MySubClass.__new__`` method only gets called in the case of the
-explicit constructor call, so we can't rely on ``MySubClass.__new__`` or
-``MySubClass.__init__`` to deal with the view casting and
-new-from-template. It turns out that ``MySubClass.__array_finalize__``
-*does* get called for all three methods of object creation, so this is
-where our object creation housekeeping usually goes.
-
-* For the explicit constructor call, our subclass will need to create a
- new ndarray instance of its own class. In practice this means that
- we, the authors of the code, will need to make a call to
- ``ndarray.__new__(MySubClass,...)``, a class-hierarchy prepared call to
- ``super(MySubClass, cls).__new__(cls, ...)``, or do view casting of an
- existing array (see below)
-* For view casting and new-from-template, the equivalent of
- ``ndarray.__new__(MySubClass,...`` is called, at the C level.
-
-The arguments that ``__array_finalize__`` receives differ for the three
-methods of instance creation above.
-
-The following code allows us to look at the call sequences and arguments:
-
-.. testcode::
-
- import numpy as np
-
- class C(np.ndarray):
- def __new__(cls, *args, **kwargs):
- print('In __new__ with class %s' % cls)
- return super(C, cls).__new__(cls, *args, **kwargs)
-
- def __init__(self, *args, **kwargs):
- # in practice you probably will not need or want an __init__
- # method for your subclass
- print('In __init__ with class %s' % self.__class__)
-
- def __array_finalize__(self, obj):
- print('In array_finalize:')
- print(' self type is %s' % type(self))
- print(' obj type is %s' % type(obj))
-
-
-Now:
-
->>> # Explicit constructor
->>> c = C((10,))
-In __new__ with class <class 'C'>
-In array_finalize:
- self type is <class 'C'>
-   obj type is <class 'NoneType'>
-In __init__ with class <class 'C'>
->>> # View casting
->>> a = np.arange(10)
->>> cast_a = a.view(C)
-In array_finalize:
- self type is <class 'C'>
-   obj type is <class 'numpy.ndarray'>
->>> # Slicing (example of new-from-template)
->>> cv = c[:1]
-In array_finalize:
- self type is <class 'C'>
- obj type is <class 'C'>
-
-The signature of ``__array_finalize__`` is::
-
- def __array_finalize__(self, obj):
-
-One sees that the ``super`` call, which goes to
-``ndarray.__new__``, passes ``__array_finalize__`` the new object of our
-own class (``self``), as well as the object from which the view has been
-taken (``obj``). As you can see from the output above, ``self`` is
-always a newly created instance of our subclass, and the type of ``obj``
-differs for the three instance creation methods:
-
-* When called from the explicit constructor, ``obj`` is ``None``
-* When called from view casting, ``obj`` can be an instance of any
- subclass of ndarray, including our own.
-* When called in new-from-template, ``obj`` is another instance of our
- own subclass, that we might use to update the new ``self`` instance.
-
-Because ``__array_finalize__`` is the only method that always sees new
-instances being created, it is the sensible place to fill in instance
-defaults for new object attributes, among other tasks.
-
-This may be clearer with an example.
-
-Simple example - adding an extra attribute to ndarray
------------------------------------------------------
-
-.. testcode::
-
- import numpy as np
-
- class InfoArray(np.ndarray):
-
- def __new__(subtype, shape, dtype=float, buffer=None, offset=0,
- strides=None, order=None, info=None):
- # Create the ndarray instance of our type, given the usual
- # ndarray input arguments. This will call the standard
- # ndarray constructor, but return an object of our type.
- # It also triggers a call to InfoArray.__array_finalize__
- obj = super(InfoArray, subtype).__new__(subtype, shape, dtype,
- buffer, offset, strides,
- order)
- # set the new 'info' attribute to the value passed
- obj.info = info
- # Finally, we must return the newly created object:
- return obj
-
- def __array_finalize__(self, obj):
- # ``self`` is a new object resulting from
- # ndarray.__new__(InfoArray, ...), therefore it only has
- # attributes that the ndarray.__new__ constructor gave it -
- # i.e. those of a standard ndarray.
- #
- # We could have got to the ndarray.__new__ call in 3 ways:
- # From an explicit constructor - e.g. InfoArray():
- # obj is None
- # (we're in the middle of the InfoArray.__new__
- # constructor, and self.info will be set when we return to
- # InfoArray.__new__)
- if obj is None: return
- # From view casting - e.g arr.view(InfoArray):
- # obj is arr
- # (type(obj) can be InfoArray)
- # From new-from-template - e.g infoarr[:3]
- # type(obj) is InfoArray
- #
- # Note that it is here, rather than in the __new__ method,
- # that we set the default value for 'info', because this
- # method sees all creation of default objects - with the
- # InfoArray.__new__ constructor, but also with
- # arr.view(InfoArray).
- self.info = getattr(obj, 'info', None)
- # We do not need to return anything
-
-
-Using the object looks like this:
-
- >>> obj = InfoArray(shape=(3,)) # explicit constructor
- >>> type(obj)
- <class 'InfoArray'>
- >>> obj.info is None
- True
- >>> obj = InfoArray(shape=(3,), info='information')
- >>> obj.info
- 'information'
- >>> v = obj[1:] # new-from-template - here - slicing
- >>> type(v)
- <class 'InfoArray'>
- >>> v.info
- 'information'
- >>> arr = np.arange(10)
- >>> cast_arr = arr.view(InfoArray) # view casting
- >>> type(cast_arr)
- <class 'InfoArray'>
- >>> cast_arr.info is None
- True
-
-This class isn't very useful, because it has the same constructor as the
-bare ndarray object, including passing in buffers and shapes and so on.
-We would probably prefer the constructor to be able to take an already
-formed ndarray from the usual numpy calls to ``np.array`` and return an
-object.
-
-Slightly more realistic example - attribute added to existing array
--------------------------------------------------------------------
-
-Here is a class that takes a standard ndarray that already exists, casts
-it as our type, and adds an extra attribute.
-
-.. testcode::
-
- import numpy as np
-
- class RealisticInfoArray(np.ndarray):
-
- def __new__(cls, input_array, info=None):
- # Input array is an already formed ndarray instance
- # We first cast to be our class type
- obj = np.asarray(input_array).view(cls)
- # add the new attribute to the created instance
- obj.info = info
- # Finally, we must return the newly created object:
- return obj
-
- def __array_finalize__(self, obj):
- # see InfoArray.__array_finalize__ for comments
- if obj is None: return
- self.info = getattr(obj, 'info', None)
-
-
-So:
-
- >>> arr = np.arange(5)
- >>> obj = RealisticInfoArray(arr, info='information')
- >>> type(obj)
- <class 'RealisticInfoArray'>
- >>> obj.info
- 'information'
- >>> v = obj[1:]
- >>> type(v)
- <class 'RealisticInfoArray'>
- >>> v.info
- 'information'
-
-.. _array-ufunc:
-
-``__array_ufunc__`` for ufuncs
-------------------------------
-
- .. versionadded:: 1.13
-
-A subclass can change what happens when numpy ufuncs are executed on it by
-overriding the default ``ndarray.__array_ufunc__`` method. This method is
-executed *instead* of the ufunc and should return either the result of the
-operation, or :obj:`NotImplemented` if the operation requested is not
-implemented.
-
-The signature of ``__array_ufunc__`` is::
-
- def __array_ufunc__(ufunc, method, *inputs, **kwargs):
-
- - *ufunc* is the ufunc object that was called.
- - *method* is a string indicating how the Ufunc was called, either
- ``"__call__"`` to indicate it was called directly, or one of its
- :ref:`methods<ufuncs.methods>`: ``"reduce"``, ``"accumulate"``,
- ``"reduceat"``, ``"outer"``, or ``"at"``.
- - *inputs* is a tuple of the input arguments to the ``ufunc``
- - *kwargs* contains any optional or keyword arguments passed to the
- function. This includes any ``out`` arguments, which are always
- contained in a tuple.
-
-A typical implementation would convert any inputs or outputs that are
-instances of one's own class, pass everything on to a superclass using
-``super()``, and finally return the results after possible
-back-conversion. An example, taken from the test case
-``test_ufunc_override_with_super`` in ``core/tests/test_umath.py``, is the
-following.
-
-.. testcode::
-
-    import numpy as np
-
- class A(np.ndarray):
- def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):
- args = []
- in_no = []
- for i, input_ in enumerate(inputs):
- if isinstance(input_, A):
- in_no.append(i)
- args.append(input_.view(np.ndarray))
- else:
- args.append(input_)
-
- outputs = out
- out_no = []
- if outputs:
- out_args = []
- for j, output in enumerate(outputs):
- if isinstance(output, A):
- out_no.append(j)
- out_args.append(output.view(np.ndarray))
- else:
- out_args.append(output)
- kwargs['out'] = tuple(out_args)
- else:
- outputs = (None,) * ufunc.nout
-
- info = {}
- if in_no:
- info['inputs'] = in_no
- if out_no:
- info['outputs'] = out_no
-
- results = super(A, self).__array_ufunc__(ufunc, method,
- *args, **kwargs)
- if results is NotImplemented:
- return NotImplemented
-
- if method == 'at':
- if isinstance(inputs[0], A):
- inputs[0].info = info
- return
-
- if ufunc.nout == 1:
- results = (results,)
-
- results = tuple((np.asarray(result).view(A)
- if output is None else output)
- for result, output in zip(results, outputs))
- if results and isinstance(results[0], A):
- results[0].info = info
-
- return results[0] if len(results) == 1 else results
-
-So, this class does not actually do anything interesting: it just
-converts any instances of its own class to regular ndarrays (otherwise, we'd
-get infinite recursion!), and adds an ``info`` dictionary that records
-which inputs and outputs it converted. Hence, e.g.,
-
->>> a = np.arange(5.).view(A)
->>> b = np.sin(a)
->>> b.info
-{'inputs': [0]}
->>> b = np.sin(np.arange(5.), out=(a,))
->>> b.info
-{'outputs': [0]}
->>> a = np.arange(5.).view(A)
->>> b = np.ones(1).view(A)
->>> c = a + b
->>> c.info
-{'inputs': [0, 1]}
->>> a += b
->>> a.info
-{'inputs': [0, 1], 'outputs': [0]}
-
-Note that another approach would be to use ``getattr(ufunc,
-method)(*inputs, **kwargs)`` instead of the ``super`` call. For this example,
-the result would be identical, but there is a difference if another operand
-also defines ``__array_ufunc__``. E.g., let's assume that we evaluate
-``np.add(a, b)``, where ``b`` is an instance of another class ``B`` that has
-an override. If you use ``super`` as in the example,
-``ndarray.__array_ufunc__`` will notice that ``b`` has an override, which
-means it cannot evaluate the result itself. Thus, it will return
-`NotImplemented` and so will our class ``A``. Then, control will be passed
-over to ``b``, which either knows how to deal with us and produces a result,
-or does not and returns `NotImplemented`, in which case a ``TypeError`` is
-raised.
-
-If instead, we replace our ``super`` call with ``getattr(ufunc, method)``, we
-effectively do ``np.add(a.view(np.ndarray), b)``. Again, ``B.__array_ufunc__``
-will be called, but now it sees an ``ndarray`` as the other argument. Likely,
-it will know how to handle this, and return a new instance of the ``B`` class
-to us. Our example class is not set up to handle this, but it might well be
-the best approach if, e.g., one were to re-implement ``MaskedArray`` using
-``__array_ufunc__``.
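-
-In terms of code, the alternative is a one-line change to the example above
-(a sketch, not a drop-in replacement)::
-
-    # instead of:
-    #   results = super(A, self).__array_ufunc__(ufunc, method, *args, **kwargs)
-    # one would write:
-    results = getattr(ufunc, method)(*args, **kwargs)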
-
-As a final note: if the ``super`` route is suited to a given class, an
-advantage of using it is that it helps in constructing class hierarchies.
-E.g., suppose that our other class ``B`` also used ``super`` in its
-``__array_ufunc__`` implementation, and we created a class ``C`` that depended
-on both, i.e., ``class C(A, B)`` (which, for simplicity, does not define
-its own ``__array_ufunc__`` override). Then any ufunc on an instance of ``C`` would
-pass on to ``A.__array_ufunc__``, the ``super`` call in ``A`` would go to
-``B.__array_ufunc__``, and the ``super`` call in ``B`` would go to
-``ndarray.__array_ufunc__``, thus allowing ``A`` and ``B`` to collaborate.
-
-.. _array-wrap:
-
-``__array_wrap__`` for ufuncs and other functions
--------------------------------------------------
-
-Prior to numpy 1.13, the behaviour of ufuncs could only be tuned using
-``__array_wrap__`` and ``__array_prepare__``. These two allowed one to
-change the output type of a ufunc, but, in contrast to
-``__array_ufunc__``, did not allow one to make any changes to the inputs.
-The intention is to eventually deprecate these, but ``__array_wrap__`` is also
-used by other numpy functions and methods, such as ``squeeze``, so at the
-present time it is still needed for full functionality.
-
-Conceptually, ``__array_wrap__`` "wraps up the action" in the sense of
-allowing a subclass to set the type of the return value and update
-attributes and metadata. Let's show how this works with an example. First
-we return to the simpler example subclass, but with a different name and
-some print statements:
-
-.. testcode::
-
- import numpy as np
-
- class MySubClass(np.ndarray):
-
- def __new__(cls, input_array, info=None):
- obj = np.asarray(input_array).view(cls)
- obj.info = info
- return obj
-
- def __array_finalize__(self, obj):
- print('In __array_finalize__:')
- print(' self is %s' % repr(self))
- print(' obj is %s' % repr(obj))
- if obj is None: return
- self.info = getattr(obj, 'info', None)
-
- def __array_wrap__(self, out_arr, context=None):
- print('In __array_wrap__:')
- print(' self is %s' % repr(self))
- print(' arr is %s' % repr(out_arr))
- # then just call the parent
-        return super(MySubClass, self).__array_wrap__(out_arr, context)
-
-We run a ufunc on an instance of our new array:
-
->>> obj = MySubClass(np.arange(5), info='spam')
-In __array_finalize__:
- self is MySubClass([0, 1, 2, 3, 4])
- obj is array([0, 1, 2, 3, 4])
->>> arr2 = np.arange(5)+1
->>> ret = np.add(arr2, obj)
-In __array_wrap__:
- self is MySubClass([0, 1, 2, 3, 4])
- arr is array([1, 3, 5, 7, 9])
-In __array_finalize__:
- self is MySubClass([1, 3, 5, 7, 9])
- obj is MySubClass([0, 1, 2, 3, 4])
->>> ret
-MySubClass([1, 3, 5, 7, 9])
->>> ret.info
-'spam'
-
-Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method
-with arguments ``self`` as ``obj``, and ``out_arr`` as the (ndarray) result
-of the addition. In turn, the default ``__array_wrap__``
-(``ndarray.__array_wrap__``) has cast the result to class ``MySubClass``,
-and called ``__array_finalize__`` - hence the copying of the ``info``
-attribute. This has all happened at the C level.
-
-But, we could do anything we wanted:
-
-.. testcode::
-
- class SillySubClass(np.ndarray):
-
- def __array_wrap__(self, arr, context=None):
- return 'I lost your data'
-
->>> arr1 = np.arange(5)
->>> obj = arr1.view(SillySubClass)
->>> arr2 = np.arange(5)
->>> ret = np.multiply(obj, arr2)
->>> ret
-'I lost your data'
-
-So, by defining a specific ``__array_wrap__`` method for our subclass,
-we can tweak the output from ufuncs. The ``__array_wrap__`` method
-requires ``self``, then an argument - which is the result of the ufunc -
-and an optional parameter *context*. This parameter is passed in by
-ufuncs as a 3-element tuple: (name of the ufunc, arguments of the ufunc,
-domain of the ufunc), but is not set by other numpy functions. Though
-it is possible to do otherwise, as seen above, ``__array_wrap__`` should
-return an instance of its containing class. See the masked array
-subclass for an implementation.
-
-In addition to ``__array_wrap__``, which is called on the way out of the
-ufunc, there is also an ``__array_prepare__`` method which is called on
-the way into the ufunc, after the output arrays are created but before any
-computation has been performed. The default implementation does nothing
-but pass through the array. ``__array_prepare__`` should not attempt to
-access the array data or resize the array; it is intended for setting the
-output array type, updating attributes and metadata, and performing any
-checks based on the input that may be desired before computation begins.
-Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or
-subclass thereof or raise an error.
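-
-A minimal sketch of such a check (``CheckedArray`` is a made-up class, shown
-only for illustration):
-
-.. testcode::
-
-    class CheckedArray(np.ndarray):
-
-        def __array_prepare__(self, out_arr, context=None):
-            # called before computation; out_arr is the (empty) output array
-            if out_arr.ndim > 2:
-                raise ValueError("CheckedArray supports at most 2 dimensions")
-            # set the output type without touching the data
-            return out_arr.view(CheckedArray)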
-
-Extra gotchas - custom ``__del__`` methods and ndarray.base
------------------------------------------------------------
-
-One of the problems that ndarray solves is keeping track of memory
-ownership of ndarrays and their views. Consider the case where we have
-created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``.
-The two objects are looking at the same memory. NumPy keeps track of
-where the data came from for a particular array or view, with the
-``base`` attribute:
-
->>> # A normal ndarray, that owns its own data
->>> arr = np.zeros((4,))
->>> # In this case, base is None
->>> arr.base is None
-True
->>> # We take a view
->>> v1 = arr[1:]
->>> # base now points to the array that it derived from
->>> v1.base is arr
-True
->>> # Take a view of a view
->>> v2 = v1[1:]
->>> # base points to the view it derived from
->>> v2.base is v1
-True
-
-In general, if the array owns its own memory, as for ``arr`` in this
-case, then ``arr.base`` will be None - there are some exceptions to this
-- see the numpy book for more details.
-
-The ``base`` attribute is useful in being able to tell whether we have
-a view or the original array. This in turn can be useful if we need
-to know whether or not to do some specific cleanup when the subclassed
-array is deleted. For example, we may only want to do the cleanup if
-the original array is deleted, but not the views. For an example of
-how this can work, have a look at the ``memmap`` class in
-``numpy.core``.
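-
-A minimal sketch of the pattern (the ``_cleanup`` method is hypothetical,
-shown only to illustrate the ``base`` check):
-
-.. testcode::
-
-    class OwnedArray(np.ndarray):
-
-        def _cleanup(self):
-            # hypothetical hook, e.g. called from __del__ or an explicit
-            # close() method
-            if self.base is None:
-                # this array owns its memory: do the real cleanup here
-                pass
-            # views (base is not None) skip the cleanup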
-
-Subclassing and Downstream Compatibility
-----------------------------------------
-
-When sub-classing ``ndarray`` or creating duck-types that mimic the ``ndarray``
-interface, it is your responsibility to decide how aligned your APIs will be
-with those of numpy. For convenience, many numpy functions that have a corresponding
-``ndarray`` method (e.g., ``sum``, ``mean``, ``take``, ``reshape``) work by checking
-if the first argument to a function has a method of the same name. If it exists, the
-method is called instead of coercing the arguments to a numpy array.
-
-For example, if you want your sub-class or duck-type to be compatible with
-numpy's ``sum`` function, the method signature for this object's ``sum`` method
-should be the following:
-
-.. testcode::
-
- def sum(self, axis=None, dtype=None, out=None, keepdims=False):
- ...
-
-This is exactly the method signature of ``np.sum``, so now if a user calls
-``np.sum`` on this object, numpy will call the object's own ``sum`` method and
-pass in the arguments enumerated above, and no errors will be raised
-because the signatures are completely compatible with each other.
-
-If, however, you decide to deviate from this signature and do something like this:
-
-.. testcode::
-
- def sum(self, axis=None, dtype=None):
- ...
-
-then the object is no longer compatible with ``np.sum``, because calling
-``np.sum`` will pass in the unexpected arguments ``out`` and ``keepdims``,
-causing a ``TypeError`` to be raised.
-
-If you wish to maintain compatibility with numpy and its subsequent versions (which
-might add new keyword arguments) but do not want to surface all of numpy's arguments,
-your function's signature should accept ``**kwargs``. For example:
-
-.. testcode::
-
- def sum(self, axis=None, dtype=None, **unused_kwargs):
- ...
-
-This object is now compatible with ``np.sum`` again because any extraneous arguments
-(i.e. keywords that are not ``axis`` or ``dtype``) will be hidden away in the
-``**unused_kwargs`` parameter.
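-
-As a quick check of this dispatch mechanism (``Duck`` is a made-up class,
-used here only for illustration):
-
-.. testcode::
-
-    class Duck:
-        def sum(self, axis=None, dtype=None, out=None, **unused_kwargs):
-            return 42
-
-With this definition, ``np.sum`` forwards to the duck-type's own method:
-
->>> np.sum(Duck())
-42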
-
-"""
diff --git a/numpy/emath.pyi b/numpy/emath.pyi
new file mode 100644
index 000000000..032ec9505
--- /dev/null
+++ b/numpy/emath.pyi
@@ -0,0 +1,11 @@
+from typing import Any
+
+sqrt: Any
+log: Any
+log2: Any
+logn: Any
+log10: Any
+power: Any
+arccos: Any
+arcsin: Any
+arctanh: Any
diff --git a/numpy/f2py/__init__.pyi b/numpy/f2py/__init__.pyi
new file mode 100644
index 000000000..602517957
--- /dev/null
+++ b/numpy/f2py/__init__.pyi
@@ -0,0 +1,5 @@
+from typing import Any
+
+run_main: Any
+compile: Any
+f2py_testing: Any
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index ccbc9b0fb..9f5c73a45 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -320,10 +320,10 @@ cppmacros[
'pyobj_from_complex_float1'] = '#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_string1'] = ['string']
cppmacros[
- 'pyobj_from_string1'] = '#define pyobj_from_string1(v) (PyString_FromString((char *)v))'
+ 'pyobj_from_string1'] = '#define pyobj_from_string1(v) (PyUnicode_FromString((char *)v))'
needs['pyobj_from_string1size'] = ['string']
cppmacros[
- 'pyobj_from_string1size'] = '#define pyobj_from_string1size(v,len) (PyUString_FromStringAndSize((char *)v, len))'
+ 'pyobj_from_string1size'] = '#define pyobj_from_string1size(v,len) (PyUnicode_FromStringAndSize((char *)v, len))'
needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR']
cppmacros['TRYPYARRAYTEMPLATE'] = """\
/* New SciPy */
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index 56f2033ff..a14f60194 100755
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -202,7 +202,7 @@ PyMODINIT_FUNC PyInit_#modulename#(void) {
\tif (PyErr_Occurred())
\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;}
\td = PyModule_GetDict(m);
-\ts = PyString_FromString(\"$R""" + """evision: $\");
+\ts = PyUnicode_FromString(\"$R""" + """evision: $\");
\tPyDict_SetItemString(d, \"__version__\", s);
\tPy_DECREF(s);
\ts = PyUnicode_FromString(
diff --git a/numpy/f2py/src/test/foomodule.c b/numpy/f2py/src/test/foomodule.c
index caf3590d4..88ec62440 100644
--- a/numpy/f2py/src/test/foomodule.c
+++ b/numpy/f2py/src/test/foomodule.c
@@ -121,7 +121,7 @@ void initfoo() {
m = Py_InitModule("foo", foo_module_methods);
d = PyModule_GetDict(m);
- s = PyString_FromString("This module 'foo' demonstrates the usage of fortranobject.");
+ s = PyUnicode_FromString("This module 'foo' demonstrates the usage of fortranobject.");
PyDict_SetItemString(d, "__doc__", s);
/* Fortran objects: */
diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
index 0db33e714..0411b62e0 100644
--- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
+++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
@@ -1,14 +1,9 @@
-/* File: wrapmodule.c
- * This file is auto-generated with f2py (version:2_1330).
- * Hand edited by Pearu.
- * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
- * written by Pearu Peterson <pearu@cens.ioc.ee>.
- * See http://cens.ioc.ee/projects/f2py2e/
- * Generation date: Fri Oct 21 22:41:12 2005
- * $Revision:$
- * $Date:$
- * Do not edit this file directly unless you know what you are doing!!!
+/*
+ * This file was auto-generated with f2py (version:2_1330) and hand edited by
+ * Pearu for testing purposes. Do not edit this file unless you know what you
+ * are doing!!!
*/
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -55,7 +50,7 @@ static PyObject *f2py_rout_wrap_call(PyObject *capi_self,
if (tmp == NULL) {
goto fail;
}
- dims[i] = (npy_intp)PyInt_AsLong(tmp);
+ dims[i] = (npy_intp)PyLong_AsLong(tmp);
Py_DECREF(tmp);
if (dims[i] == -1 && PyErr_Occurred()) {
goto fail;
@@ -107,8 +102,8 @@ static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self,
dimensions = PyTuple_New(PyArray_NDIM(arr));
strides = PyTuple_New(PyArray_NDIM(arr));
for (i=0;i<PyArray_NDIM(arr);++i) {
- PyTuple_SetItem(dimensions,i,PyInt_FromLong(PyArray_DIM(arr,i)));
- PyTuple_SetItem(strides,i,PyInt_FromLong(PyArray_STRIDE(arr,i)));
+ PyTuple_SetItem(dimensions,i,PyLong_FromLong(PyArray_DIM(arr,i)));
+ PyTuple_SetItem(strides,i,PyLong_FromLong(PyArray_STRIDE(arr,i)));
}
return Py_BuildValue("siNNO(cciii)ii",s,PyArray_NDIM(arr),
dimensions,strides,
@@ -149,15 +144,15 @@ PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) {
if (PyErr_Occurred())
Py_FatalError("can't initialize module wrap (failed to import numpy)");
d = PyModule_GetDict(m);
- s = PyString_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n"
-" arr = call(type_num,dims,intent,obj)\n"
-".");
+ s = PyUnicode_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n"
+ " arr = call(type_num,dims,intent,obj)\n"
+ ".");
PyDict_SetItemString(d, "__doc__", s);
wrap_error = PyErr_NewException ("wrap.error", NULL, NULL);
Py_DECREF(s);
#define ADDCONST(NAME, CONST) \
- s = PyInt_FromLong(CONST); \
+ s = PyLong_FromLong(CONST); \
PyDict_SetItemString(d, NAME, s); \
Py_DECREF(s)
diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi
new file mode 100644
index 000000000..45190517f
--- /dev/null
+++ b/numpy/fft/__init__.pyi
@@ -0,0 +1,20 @@
+from typing import Any
+
+fft: Any
+ifft: Any
+rfft: Any
+irfft: Any
+hfft: Any
+ihfft: Any
+rfftn: Any
+irfftn: Any
+rfft2: Any
+irfft2: Any
+fft2: Any
+ifft2: Any
+fftn: Any
+ifftn: Any
+fftshift: Any
+ifftshift: Any
+fftfreq: Any
+rfftfreq: Any
diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py
index 3dacd9ee1..927ee1af1 100644
--- a/numpy/fft/helper.py
+++ b/numpy/fft/helper.py
@@ -2,7 +2,6 @@
Discrete Fourier Transforms - helper.py
"""
-from numpy.compat import integer_types
from numpy.core import integer, empty, arange, asarray, roll
from numpy.core.overrides import array_function_dispatch, set_module
@@ -10,7 +9,7 @@ from numpy.core.overrides import array_function_dispatch, set_module
__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']
-integer_types = integer_types + (integer,)
+integer_types = (int, integer)
def _fftshift_dispatcher(x, axes=None):
diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py
index 68f5990af..3fb700bb3 100644
--- a/numpy/fft/tests/test_helper.py
+++ b/numpy/fft/tests/test_helper.py
@@ -85,7 +85,6 @@ class TestFFTShift:
def test_equal_to_original(self):
""" Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """
- from numpy.compat import integer_types
from numpy.core import asarray, concatenate, arange, take
def original_fftshift(x, axes=None):
@@ -94,7 +93,7 @@ class TestFFTShift:
ndim = tmp.ndim
if axes is None:
axes = list(range(ndim))
- elif isinstance(axes, integer_types):
+ elif isinstance(axes, int):
axes = (axes,)
y = tmp
for k in axes:
@@ -110,7 +109,7 @@ class TestFFTShift:
ndim = tmp.ndim
if axes is None:
axes = list(range(ndim))
- elif isinstance(axes, integer_types):
+ elif isinstance(axes, int):
axes = (axes,)
y = tmp
for k in axes:
diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py
index cb0de0d15..ad88ba347 100644
--- a/numpy/lib/__init__.py
+++ b/numpy/lib/__init__.py
@@ -35,7 +35,6 @@ from .polynomial import *
from .utils import *
from .arraysetops import *
from .npyio import *
-from .financial import *
from .arrayterator import Arrayterator
from .arraypad import *
from ._version import *
@@ -54,7 +53,6 @@ __all__ += polynomial.__all__
__all__ += utils.__all__
__all__ += arraysetops.__all__
__all__ += npyio.__all__
-__all__ += financial.__all__
__all__ += nanfunctions.__all__
__all__ += histograms.__all__
diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi
new file mode 100644
index 000000000..413e2ae1b
--- /dev/null
+++ b/numpy/lib/__init__.pyi
@@ -0,0 +1,177 @@
+from typing import Any
+
+emath: Any
+math: Any
+tracemalloc_domain: Any
+Arrayterator: Any
+iscomplexobj: Any
+isrealobj: Any
+imag: Any
+iscomplex: Any
+isreal: Any
+nan_to_num: Any
+real: Any
+real_if_close: Any
+typename: Any
+asfarray: Any
+mintypecode: Any
+asscalar: Any
+common_type: Any
+ravel_multi_index: Any
+unravel_index: Any
+mgrid: Any
+ogrid: Any
+r_: Any
+c_: Any
+s_: Any
+index_exp: Any
+ix_: Any
+ndenumerate: Any
+ndindex: Any
+fill_diagonal: Any
+diag_indices: Any
+diag_indices_from: Any
+select: Any
+piecewise: Any
+trim_zeros: Any
+copy: Any
+iterable: Any
+percentile: Any
+diff: Any
+gradient: Any
+angle: Any
+unwrap: Any
+sort_complex: Any
+disp: Any
+flip: Any
+rot90: Any
+extract: Any
+place: Any
+vectorize: Any
+asarray_chkfinite: Any
+average: Any
+bincount: Any
+digitize: Any
+cov: Any
+corrcoef: Any
+msort: Any
+median: Any
+sinc: Any
+hamming: Any
+hanning: Any
+bartlett: Any
+blackman: Any
+kaiser: Any
+trapz: Any
+i0: Any
+add_newdoc: Any
+add_docstring: Any
+meshgrid: Any
+delete: Any
+insert: Any
+append: Any
+interp: Any
+add_newdoc_ufunc: Any
+quantile: Any
+column_stack: Any
+row_stack: Any
+dstack: Any
+array_split: Any
+split: Any
+hsplit: Any
+vsplit: Any
+dsplit: Any
+apply_over_axes: Any
+expand_dims: Any
+apply_along_axis: Any
+kron: Any
+tile: Any
+get_array_wrap: Any
+take_along_axis: Any
+put_along_axis: Any
+broadcast_to: Any
+broadcast_arrays: Any
+diag: Any
+diagflat: Any
+eye: Any
+fliplr: Any
+flipud: Any
+tri: Any
+triu: Any
+tril: Any
+vander: Any
+histogram2d: Any
+mask_indices: Any
+tril_indices: Any
+tril_indices_from: Any
+triu_indices: Any
+triu_indices_from: Any
+fix: Any
+isneginf: Any
+isposinf: Any
+pad: Any
+poly: Any
+roots: Any
+polyint: Any
+polyder: Any
+polyadd: Any
+polysub: Any
+polymul: Any
+polydiv: Any
+polyval: Any
+poly1d: Any
+polyfit: Any
+RankWarning: Any
+issubclass_: Any
+issubsctype: Any
+issubdtype: Any
+deprecate: Any
+deprecate_with_doc: Any
+get_include: Any
+info: Any
+source: Any
+who: Any
+lookfor: Any
+byte_bounds: Any
+safe_eval: Any
+ediff1d: Any
+intersect1d: Any
+setxor1d: Any
+union1d: Any
+setdiff1d: Any
+unique: Any
+in1d: Any
+isin: Any
+savetxt: Any
+loadtxt: Any
+genfromtxt: Any
+ndfromtxt: Any
+mafromtxt: Any
+recfromtxt: Any
+recfromcsv: Any
+load: Any
+loads: Any
+save: Any
+savez: Any
+savez_compressed: Any
+packbits: Any
+unpackbits: Any
+fromregex: Any
+DataSource: Any
+nansum: Any
+nanmax: Any
+nanmin: Any
+nanargmax: Any
+nanargmin: Any
+nanmean: Any
+nanmedian: Any
+nanpercentile: Any
+nanvar: Any
+nanstd: Any
+nanprod: Any
+nancumsum: Any
+nancumprod: Any
+nanquantile: Any
+histogram: Any
+histogramdd: Any
+histogram_bin_edges: Any
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index 7560bf4da..f5368526d 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -5,7 +5,7 @@ __docformat__ = "restructuredtext en"
import numpy as np
import numpy.core.numeric as nx
-from numpy.compat import asbytes, asunicode, bytes
+from numpy.compat import asbytes, asunicode
def _decode_line(line, encoding=None):
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index df9a110c5..6a2ad004c 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -278,7 +278,7 @@ def unique(ar, return_index=False, return_inverse=False,
ar = np.moveaxis(ar, axis, 0)
except np.AxisError:
# this removes the "axis1" or "axis2" prefix from the error message
- raise np.AxisError(axis, ar.ndim)
+ raise np.AxisError(axis, ar.ndim) from None
# Must reshape to a contiguous 2D array for this to work...
orig_shape, orig_dtype = ar.shape, ar.dtype
@@ -300,10 +300,10 @@ def unique(ar, return_index=False, return_inverse=False,
# array with shape `(len(ar),)`. Because `dtype` in this case has
# itemsize 0, the total size of the result is still 0 bytes.
consolidated = np.empty(len(ar), dtype=dtype)
- except TypeError:
+ except TypeError as e:
# There's no good way to do this for object arrays, etc...
msg = 'The axis argument to unique is not supported for dtype {dt}'
- raise TypeError(msg.format(dt=ar.dtype))
+ raise TypeError(msg.format(dt=ar.dtype)) from e
def reshape_uniq(uniq):
n = len(uniq)
diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py
deleted file mode 100644
index 709a79dc0..000000000
--- a/numpy/lib/financial.py
+++ /dev/null
@@ -1,967 +0,0 @@
-"""Some simple financial calculations
-
-patterned after spreadsheet computations.
-
-There is some complexity in each function
-so that the functions behave like ufuncs, with
-broadcasting, and can be called with scalars
-or arrays (or other sequences).
-
-Functions support the :class:`decimal.Decimal` type unless
-otherwise stated.
-"""
-import warnings
-from decimal import Decimal
-import functools
-
-import numpy as np
-from numpy.core import overrides
-
-
-_depmsg = ("numpy.{name} is deprecated and will be removed from NumPy 1.20. "
- "Use numpy_financial.{name} instead "
- "(https://pypi.org/project/numpy-financial/).")
-
-array_function_dispatch = functools.partial(
- overrides.array_function_dispatch, module='numpy')
-
-
-__all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate',
- 'irr', 'npv', 'mirr']
-
-_when_to_num = {'end':0, 'begin':1,
- 'e':0, 'b':1,
- 0:0, 1:1,
- 'beginning':1,
- 'start':1,
- 'finish':0}
-
-def _convert_when(when):
- #Test to see if when has already been converted to ndarray
- #This will happen if one function calls another, for example ppmt
- if isinstance(when, np.ndarray):
- return when
- try:
- return _when_to_num[when]
- except (KeyError, TypeError):
- return [_when_to_num[x] for x in when]
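-
-# For illustration, given the mapping above:
-#   _convert_when('begin')           -> 1
-#   _convert_when(['end', 'begin'])  -> [0, 1]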
-
-
-def _fv_dispatcher(rate, nper, pmt, pv, when=None):
- warnings.warn(_depmsg.format(name='fv'),
- DeprecationWarning, stacklevel=3)
- return (rate, nper, pmt, pv)
-
-
-@array_function_dispatch(_fv_dispatcher)
-def fv(rate, nper, pmt, pv, when='end'):
- """
- Compute the future value.
-
- .. deprecated:: 1.18
-
- `fv` is deprecated; for details, see NEP 32 [1]_.
- Use the corresponding function in the numpy-financial library,
- https://pypi.org/project/numpy-financial.
-
- Given:
- * a present value, `pv`
- * an interest `rate` compounded once per period, of which
- there are
- * `nper` total
- * a (fixed) payment, `pmt`, paid either
- * at the beginning (`when` = {'begin', 1}) or the end
- (`when` = {'end', 0}) of each period
-
- Return:
- the value at the end of the `nper` periods
-
- Parameters
- ----------
- rate : scalar or array_like of shape(M, )
- Rate of interest as decimal (not per cent) per period
- nper : scalar or array_like of shape(M, )
- Number of compounding periods
- pmt : scalar or array_like of shape(M, )
- Payment
- pv : scalar or array_like of shape(M, )
- Present value
- when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
- When payments are due ('begin' (1) or 'end' (0)).
- Defaults to {'end', 0}.
-
- Returns
- -------
- out : ndarray
- Future values. If all input is scalar, returns a scalar float. If
- any input is array_like, returns future values for each input element.
- If multiple inputs are array_like, they all must have the same shape.
-
- Notes
- -----
- The future value is computed by solving the equation::
-
- fv +
- pv*(1+rate)**nper +
- pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0
-
- or, when ``rate == 0``::
-
- fv + pv + pmt * nper == 0
-
- References
- ----------
- .. [1] NumPy Enhancement Proposal (NEP) 32,
- https://numpy.org/neps/nep-0032-remove-financial-functions.html
- .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
- Open Document Format for Office Applications (OpenDocument)v1.2,
- Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
- Pre-Draft 12. Organization for the Advancement of Structured Information
- Standards (OASIS). Billerica, MA, USA. [ODT Document].
- Available:
- http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
- OpenDocument-formula-20090508.odt
-
-
- Examples
- --------
- What is the future value after 10 years of saving $100 now, with
- an additional monthly savings of $100. Assume the interest rate is
- 5% (annually) compounded monthly?
-
- >>> np.fv(0.05/12, 10*12, -100, -100)
- 15692.928894335748
-
- By convention, the negative sign represents cash flow out (i.e. money not
- available today). Thus, saving $100 a month at 5% annual interest leads
- to $15,692.93 available to spend in 10 years.
-
- If any input is array_like, returns an array of equal shape. Let's
- compare different interest rates from the example above.
-
- >>> a = np.array((0.05, 0.06, 0.07))/12
- >>> np.fv(a, 10*12, -100, -100)
- array([ 15692.92889434, 16569.87435405, 17509.44688102]) # may vary
-
- """
- when = _convert_when(when)
- (rate, nper, pmt, pv, when) = map(np.asarray, [rate, nper, pmt, pv, when])
- temp = (1+rate)**nper
- fact = np.where(rate == 0, nper,
- (1 + rate*when)*(temp - 1)/rate)
- return -(pv*temp + pmt*fact)
-
-
-def _pmt_dispatcher(rate, nper, pv, fv=None, when=None):
- warnings.warn(_depmsg.format(name='pmt'),
- DeprecationWarning, stacklevel=3)
- return (rate, nper, pv, fv)
-
-
-@array_function_dispatch(_pmt_dispatcher)
-def pmt(rate, nper, pv, fv=0, when='end'):
- """
- Compute the payment against loan principal plus interest.
-
- .. deprecated:: 1.18
-
- `pmt` is deprecated; for details, see NEP 32 [1]_.
- Use the corresponding function in the numpy-financial library,
- https://pypi.org/project/numpy-financial.
-
- Given:
- * a present value, `pv` (e.g., an amount borrowed)
- * a future value, `fv` (e.g., 0)
- * an interest `rate` compounded once per period, of which
- there are
- * `nper` total
- * and (optional) specification of whether payment is made
- at the beginning (`when` = {'begin', 1}) or the end
- (`when` = {'end', 0}) of each period
-
- Return:
- the (fixed) periodic payment.
-
- Parameters
- ----------
- rate : array_like
- Rate of interest (per period)
- nper : array_like
- Number of compounding periods
- pv : array_like
- Present value
- fv : array_like, optional
- Future value (default = 0)
- when : {{'begin', 1}, {'end', 0}}, {string, int}
- When payments are due ('begin' (1) or 'end' (0))
-
- Returns
- -------
- out : ndarray
- Payment against loan plus interest. If all input is scalar, returns a
- scalar float. If any input is array_like, returns payment for each
- input element. If multiple inputs are array_like, they all must have
- the same shape.
-
- Notes
- -----
- The payment is computed by solving the equation::
-
- fv +
- pv*(1 + rate)**nper +
- pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0
-
- or, when ``rate == 0``::
-
- fv + pv + pmt * nper == 0
-
- for ``pmt``.
-
- Note that computing a monthly mortgage payment is only
- one use for this function. For example, pmt returns the
- periodic deposit one must make to achieve a specified
- future balance given an initial deposit, a fixed,
- periodically compounded interest rate, and the total
- number of periods.
-
- References
- ----------
- .. [1] NumPy Enhancement Proposal (NEP) 32,
- https://numpy.org/neps/nep-0032-remove-financial-functions.html
- .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
- Open Document Format for Office Applications (OpenDocument)v1.2,
- Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
- Pre-Draft 12. Organization for the Advancement of Structured Information
- Standards (OASIS). Billerica, MA, USA. [ODT Document].
- Available:
- http://www.oasis-open.org/committees/documents.php
- ?wg_abbrev=office-formulaOpenDocument-formula-20090508.odt
-
- Examples
- --------
- What is the monthly payment needed to pay off a $200,000 loan in 15
- years at an annual interest rate of 7.5%?
-
- >>> np.pmt(0.075/12, 12*15, 200000)
- -1854.0247200054619
-
-    In order to pay off (i.e., have a future value of 0) the $200,000 obtained
- today, a monthly payment of $1,854.02 would be required. Note that this
- example illustrates usage of `fv` having a default value of 0.
-
- """
- when = _convert_when(when)
- (rate, nper, pv, fv, when) = map(np.array, [rate, nper, pv, fv, when])
- temp = (1 + rate)**nper
- mask = (rate == 0)
- masked_rate = np.where(mask, 1, rate)
- fact = np.where(mask != 0, nper,
- (1 + masked_rate*when)*(temp - 1)/masked_rate)
- return -(fv + pv*temp) / fact
-
-
-def _nper_dispatcher(rate, pmt, pv, fv=None, when=None):
- warnings.warn(_depmsg.format(name='nper'),
- DeprecationWarning, stacklevel=3)
- return (rate, pmt, pv, fv)
-
-
-@array_function_dispatch(_nper_dispatcher)
-def nper(rate, pmt, pv, fv=0, when='end'):
- """
- Compute the number of periodic payments.
-
- .. deprecated:: 1.18
-
- `nper` is deprecated; for details, see NEP 32 [1]_.
- Use the corresponding function in the numpy-financial library,
- https://pypi.org/project/numpy-financial.
-
- :class:`decimal.Decimal` type is not supported.
-
- Parameters
- ----------
- rate : array_like
- Rate of interest (per period)
- pmt : array_like
- Payment
- pv : array_like
- Present value
- fv : array_like, optional
- Future value
- when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
- When payments are due ('begin' (1) or 'end' (0))
-
- Notes
- -----
- The number of periods ``nper`` is computed by solving the equation::
-
- fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate*((1+rate)**nper-1) = 0
-
- but if ``rate = 0`` then::
-
- fv + pv + pmt*nper = 0
-
- References
- ----------
- .. [1] NumPy Enhancement Proposal (NEP) 32,
- https://numpy.org/neps/nep-0032-remove-financial-functions.html
-
- Examples
- --------
- If you only had $150/month to pay towards the loan, how long would it take
- to pay off a loan of $8,000 at 7% annual interest?
-
- >>> print(np.round(np.nper(0.07/12, -150, 8000), 5))
- 64.07335
-
- So, just over 64 months would be required to pay off the loan.
-
- The same analysis could be done with several different interest rates
- and/or payments and/or total amounts to produce an entire table.
-
- >>> np.nper(*(np.ogrid[0.07/12: 0.08/12: 0.01/12,
- ... -150 : -99 : 50 ,
- ... 8000 : 9001 : 1000]))
- array([[[ 64.07334877, 74.06368256],
- [108.07548412, 127.99022654]],
- [[ 66.12443902, 76.87897353],
- [114.70165583, 137.90124779]]])
-
- """
- when = _convert_when(when)
- (rate, pmt, pv, fv, when) = map(np.asarray, [rate, pmt, pv, fv, when])
-
- use_zero_rate = False
- with np.errstate(divide="raise"):
- try:
- z = pmt*(1+rate*when)/rate
- except FloatingPointError:
- use_zero_rate = True
-
- if use_zero_rate:
- return -(fv + pv) / pmt  # rate == 0: solve fv + pv + pmt*nper == 0
- else:
- A = -(fv + pv)/(pmt+0)
- B = np.log((-fv+z) / (pv+z))/np.log(1+rate)
- return np.where(rate == 0, A, B)
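As the deprecation notice above directs, the maintained replacement lives in the numpy-financial package; a migration sketch, assuming ``numpy-financial`` is installed (NEP 32 states it preserves these signatures)::

    import numpy_financial as npf  # pip install numpy-financial

    # Same arguments and semantics as the deprecated np.nper
    print(npf.nper(0.07 / 12, -150, 8000))  # ~ 64.07 months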
-
-
-def _ipmt_dispatcher(rate, per, nper, pv, fv=None, when=None):
- warnings.warn(_depmsg.format(name='ipmt'),
- DeprecationWarning, stacklevel=3)
- return (rate, per, nper, pv, fv)
-
-
-@array_function_dispatch(_ipmt_dispatcher)
-def ipmt(rate, per, nper, pv, fv=0, when='end'):
- """
- Compute the interest portion of a payment.
-
- .. deprecated:: 1.18
-
- `ipmt` is deprecated; for details, see NEP 32 [1]_.
- Use the corresponding function in the numpy-financial library,
- https://pypi.org/project/numpy-financial.
-
- Parameters
- ----------
- rate : scalar or array_like of shape(M, )
- Rate of interest as decimal (not per cent) per period
- per : scalar or array_like of shape(M, )
- The interest paid against the loan changes over the life of the loan.
- `per` is the payment period for which to calculate the interest amount.
- nper : scalar or array_like of shape(M, )
- Number of compounding periods
- pv : scalar or array_like of shape(M, )
- Present value
- fv : scalar or array_like of shape(M, ), optional
- Future value
- when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
- When payments are due ('begin' (1) or 'end' (0)).
- Defaults to {'end', 0}.
-
- Returns
- -------
- out : ndarray
- Interest portion of payment. If all input is scalar, returns a scalar
- float. If any input is array_like, returns interest payment for each
- input element. If multiple inputs are array_like, they all must have
- the same shape.
-
- See Also
- --------
- ppmt, pmt, pv
-
- Notes
- -----
- The total payment is made up of payment against principal plus interest.
-
- ``pmt = ppmt + ipmt``
-
- References
- ----------
- .. [1] NumPy Enhancement Proposal (NEP) 32,
- https://numpy.org/neps/nep-0032-remove-financial-functions.html
-
- Examples
- --------
- What is the amortization schedule for a 1 year loan of $2500 at
- 8.24% interest per year compounded monthly?
-
- >>> principal = 2500.00
-
- The 'per' variable represents the periods of the loan. Remember that
- financial equations start the period count at 1!
-
- >>> per = np.arange(1*12) + 1
- >>> ipmt = np.ipmt(0.0824/12, per, 1*12, principal)
- >>> ppmt = np.ppmt(0.0824/12, per, 1*12, principal)
-
- Each element of the sum of the 'ipmt' and 'ppmt' arrays should equal
- 'pmt'.
-
- >>> pmt = np.pmt(0.0824/12, 1*12, principal)
- >>> np.allclose(ipmt + ppmt, pmt)
- True
-
- >>> fmt = '{0:2d} {1:8.2f} {2:8.2f} {3:8.2f}'
- >>> for payment in per:
- ... index = payment - 1
- ... principal = principal + ppmt[index]
- ... print(fmt.format(payment, ppmt[index], ipmt[index], principal))
- 1 -200.58 -17.17 2299.42
- 2 -201.96 -15.79 2097.46
- 3 -203.35 -14.40 1894.11
- 4 -204.74 -13.01 1689.37
- 5 -206.15 -11.60 1483.22
- 6 -207.56 -10.18 1275.66
- 7 -208.99 -8.76 1066.67
- 8 -210.42 -7.32 856.25
- 9 -211.87 -5.88 644.38
- 10 -213.32 -4.42 431.05
- 11 -214.79 -2.96 216.26
- 12 -216.26 -1.49 -0.00
-
- >>> interestpd = np.sum(ipmt)
- >>> np.round(interestpd, 2)
- -112.98
-
- """
- when = _convert_when(when)
- rate, per, nper, pv, fv, when = np.broadcast_arrays(rate, per, nper,
- pv, fv, when)
- total_pmt = pmt(rate, nper, pv, fv, when)
- ipmt = _rbl(rate, per, total_pmt, pv, when)*rate
- try:
- ipmt = np.where(when == 1, ipmt/(1 + rate), ipmt)
- ipmt = np.where(np.logical_and(when == 1, per == 1), 0, ipmt)
- except IndexError:
- pass
- return ipmt
-
-
-def _rbl(rate, per, pmt, pv, when):
- """
- This function exists simply to give the 'fv' function a different name,
- so that it does not collide with the 'fv' keyword argument within the
- 'ipmt' function. It computes the 'remaining balance on loan', which might
- be useful as its own function but is easily obtained from the 'fv' function.
- """
- return fv(rate, (per - 1), pmt, pv, when)
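To make the 'remaining balance on loan' idea concrete: the balance just before payment number ``per`` is the future value of the original loan after ``per - 1`` payments. A small sketch under the same sign conventions (``remaining_balance`` is an illustrative name, assuming ``rate != 0``)::

    def remaining_balance(rate, per, pmt, pv, when=0):
        # fv(rate, per - 1, pmt, pv, when), written out explicitly
        n = per - 1
        temp = (1.0 + rate) ** n
        return -(pv * temp + pmt * (1.0 + rate * when) / rate * (temp - 1.0))

    # Balance entering period 2 of the ipmt amortization table above,
    # with pmt = ppmt[0] + ipmt[0] = -217.75; |result| ~ 2299.4
    print(remaining_balance(0.0824 / 12, 2, -217.75, 2500.0))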
-
-
-def _ppmt_dispatcher(rate, per, nper, pv, fv=None, when=None):
- warnings.warn(_depmsg.format(name='ppmt'),
- DeprecationWarning, stacklevel=3)
- return (rate, per, nper, pv, fv)
-
-
-@array_function_dispatch(_ppmt_dispatcher)
-def ppmt(rate, per, nper, pv, fv=0, when='end'):
- """
- Compute the payment against loan principal.
-
- .. deprecated:: 1.18
-
- `ppmt` is deprecated; for details, see NEP 32 [1]_.
- Use the corresponding function in the numpy-financial library,
- https://pypi.org/project/numpy-financial.
-
- Parameters
- ----------
- rate : array_like
- Rate of interest (per period)
- per : array_like, int
- The amount paid against the loan changes over its life. `per` is the
- payment period of interest.
- nper : array_like
- Number of compounding periods
- pv : array_like
- Present value
- fv : array_like, optional
- Future value
- when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
- When payments are due ('begin' (1) or 'end' (0))
-
- See Also
- --------
- pmt, pv, ipmt
-
- References
- ----------
- .. [1] NumPy Enhancement Proposal (NEP) 32,
- https://numpy.org/neps/nep-0032-remove-financial-functions.html
-
- """
- total = pmt(rate, nper, pv, fv, when)
- return total - ipmt(rate, per, nper, pv, fv, when)
-
-
-def _pv_dispatcher(rate, nper, pmt, fv=None, when=None):
- warnings.warn(_depmsg.format(name='pv'),
- DeprecationWarning, stacklevel=3)
- return (rate, nper, pmt, fv)
-
-
-@array_function_dispatch(_pv_dispatcher)
-def pv(rate, nper, pmt, fv=0, when='end'):
- """
- Compute the present value.
-
- .. deprecated:: 1.18
-
- `pv` is deprecated; for details, see NEP 32 [1]_.
- Use the corresponding function in the numpy-financial library,
- https://pypi.org/project/numpy-financial.
-
- Given:
- * a future value, `fv`
- * an interest `rate` compounded once per period, of which
- there are
- * `nper` total
- * a (fixed) payment, `pmt`, paid either
- * at the beginning (`when` = {'begin', 1}) or the end
- (`when` = {'end', 0}) of each period
-
- Return:
- the value now
-
- Parameters
- ----------
- rate : array_like
- Rate of interest (per period)
- nper : array_like
- Number of compounding periods
- pmt : array_like
- Payment
- fv : array_like, optional
- Future value
- when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
- When payments are due ('begin' (1) or 'end' (0))
-
- Returns
- -------
- out : ndarray, float
- Present value of a series of payments or investments.
-
- Notes
- -----
- The present value is computed by solving the equation::
-
- fv +
- pv*(1 + rate)**nper +
- pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) = 0
-
- or, when ``rate = 0``::
-
- fv + pv + pmt * nper = 0
-
- for `pv`, which is then returned.
-
- References
- ----------
- .. [1] NumPy Enhancement Proposal (NEP) 32,
- https://numpy.org/neps/nep-0032-remove-financial-functions.html
- .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
- Open Document Format for Office Applications (OpenDocument)v1.2,
- Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
- Pre-Draft 12. Organization for the Advancement of Structured Information
- Standards (OASIS). Billerica, MA, USA. [ODT Document].
- Available:
- http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
- OpenDocument-formula-20090508.odt
-
- Examples
- --------
- What is the present value (e.g., the initial investment)
- of an investment that needs to total $15692.93
- after 10 years of saving $100 every month? Assume the
- interest rate is 5% (annually) compounded monthly.
-
- >>> np.pv(0.05/12, 10*12, -100, 15692.93)
- -100.00067131625819
-
- By convention, the negative sign represents cash flow out
- (i.e., money not available today). Thus, to end up with
- $15,692.93 in 10 years saving $100 a month at 5% annual
- interest, one's initial deposit should also be $100.
-
- If any input is array_like, ``pv`` returns an array of equal shape.
- Let's compare different interest rates in the example above:
-
- >>> a = np.array((0.05, 0.04, 0.03))/12
- >>> np.pv(a, 10*12, -100, 15692.93)
- array([ -100.00067132, -649.26771385, -1273.78633713]) # may vary
-
- So, to end up with the same $15692.93 under the same $100 per month
- "savings plan," for annual interest rates of 4% and 3%, one would
- need initial investments of $649.27 and $1273.79, respectively.
-
- """
- when = _convert_when(when)
- (rate, nper, pmt, fv, when) = map(np.asarray, [rate, nper, pmt, fv, when])
- temp = (1+rate)**nper
- fact = np.where(rate == 0, nper, (1+rate*when)*(temp-1)/rate)
- return -(fv + pmt*fact)/temp
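As a sanity check on the first example, rolling the same cash flows forward period by period recovers the target future value (plain Python, end-of-period deposits)::

    balance = 100.00067131625819   # |pv| from the example above
    rate, payment = 0.05 / 12, 100.0
    for _ in range(10 * 12):
        balance = balance * (1 + rate) + payment
    print(round(balance, 2))       # 15692.93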
-
-# Computed with Sage
-# (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x -
-# p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r +
-# p*((r + 1)^n - 1)*w/r)
-
-def _g_div_gp(r, n, p, x, y, w):
- t1 = (r+1)**n
- t2 = (r+1)**(n-1)
- return ((y + t1*x + p*(t1 - 1)*(r*w + 1)/r) /
- (n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r +
- p*(t1 - 1)*w/r))
-
-
-def _rate_dispatcher(nper, pmt, pv, fv, when=None, guess=None, tol=None,
- maxiter=None):
- warnings.warn(_depmsg.format(name='rate'),
- DeprecationWarning, stacklevel=3)
- return (nper, pmt, pv, fv)
-
-
-# Use Newton's iteration until the change is less than 1e-6
-# for all values or a maximum of 100 iterations is reached.
-# Newton's rule is
-# r_{n+1} = r_{n} - g(r_n)/g'(r_n)
-# where
-# g(r) is the formula
-# g'(r) is the derivative with respect to r.
-@array_function_dispatch(_rate_dispatcher)
-def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100):
- """
- Compute the rate of interest per period.
-
- .. deprecated:: 1.18
-
- `rate` is deprecated; for details, see NEP 32 [1]_.
- Use the corresponding function in the numpy-financial library,
- https://pypi.org/project/numpy-financial.
-
- Parameters
- ----------
- nper : array_like
- Number of compounding periods
- pmt : array_like
- Payment
- pv : array_like
- Present value
- fv : array_like
- Future value
- when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
- When payments are due ('begin' (1) or 'end' (0))
- guess : Number, optional
- Starting guess for solving the rate of interest, default 0.1
- tol : Number, optional
- Required tolerance for the solution, default 1e-6
- maxiter : int, optional
- Maximum iterations in finding the solution
-
- Notes
- -----
- The rate of interest is computed by iteratively solving the
- (non-linear) equation::
-
- fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate * ((1+rate)**nper - 1) = 0
-
- for ``rate``.
-
- References
- ----------
- .. [1] NumPy Enhancement Proposal (NEP) 32,
- https://numpy.org/neps/nep-0032-remove-financial-functions.html
- .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
- Open Document Format for Office Applications (OpenDocument)v1.2,
- Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
- Pre-Draft 12. Organization for the Advancement of Structured Information
- Standards (OASIS). Billerica, MA, USA. [ODT Document].
- Available:
- http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
- OpenDocument-formula-20090508.odt
-
- """
- when = _convert_when(when)
- default_type = Decimal if isinstance(pmt, Decimal) else float
-
- # Handle casting defaults to Decimal if/when pmt is a Decimal and
- # guess and/or tol are not given default values
- if guess is None:
- guess = default_type('0.1')
-
- if tol is None:
- tol = default_type('1e-6')
-
- (nper, pmt, pv, fv, when) = map(np.asarray, [nper, pmt, pv, fv, when])
-
- rn = guess
- iterator = 0
- close = False
- while (iterator < maxiter) and not close:
- rnp1 = rn - _g_div_gp(rn, nper, pmt, pv, fv, when)
- diff = abs(rnp1-rn)
- close = np.all(diff < tol)
- iterator += 1
- rn = rnp1
- if not close:
- # Return NaNs in an array of the same shape as rn
- return np.nan + rn
- else:
- return rn
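The comment block above is ordinary Newton iteration; a minimal scalar sketch of the same scheme (a generic solver, distinct from the vectorized loop above)::

    def newton(g, gp, r0, tol=1e-6, maxiter=100):
        # r_{n+1} = r_n - g(r_n)/g'(r_n)
        r = r0
        for _ in range(maxiter):
            step = g(r) / gp(r)
            r -= step
            if abs(step) < tol:
                return r
        return float('nan')  # no convergence within maxiter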
-
-
-def _irr_dispatcher(values):
- warnings.warn(_depmsg.format(name='irr'),
- DeprecationWarning, stacklevel=3)
- return (values,)
-
-
-@array_function_dispatch(_irr_dispatcher)
-def irr(values):
- """
- Return the Internal Rate of Return (IRR).
-
- .. deprecated:: 1.18
-
- `irr` is deprecated; for details, see NEP 32 [1]_.
- Use the corresponding function in the numpy-financial library,
- https://pypi.org/project/numpy-financial.
-
- This is the "average" periodically compounded rate of return
- that gives a net present value of 0.0; for a more complete explanation,
- see Notes below.
-
- :class:`decimal.Decimal` type is not supported.
-
- Parameters
- ----------
- values : array_like, shape(N,)
- Input cash flows per time period. By convention, net "deposits"
- are negative and net "withdrawals" are positive. Thus, for
- example, at least the first element of `values`, which represents
- the initial investment, will typically be negative.
-
- Returns
- -------
- out : float
- Internal Rate of Return for periodic input values.
-
- Notes
- -----
- The IRR is perhaps best understood through an example (illustrated
- using np.irr in the Examples section below). Suppose one invests 100
- units and then makes the following withdrawals at regular (fixed)
- intervals: 39, 59, 55, 20. Assuming the ending value is 0, one's 100
- unit investment yields 173 units; however, due to the combination of
- compounding and the periodic withdrawals, the "average" rate of return
- is neither simply 0.73/4 nor (1.73)^0.25-1. Rather, it is the solution
- (for :math:`r`) of the equation:
-
- .. math:: -100 + \\frac{39}{1+r} + \\frac{59}{(1+r)^2}
- + \\frac{55}{(1+r)^3} + \\frac{20}{(1+r)^4} = 0
-
- In general, for `values` :math:`= [v_0, v_1, ... v_M]`,
- irr is the solution of the equation: [2]_
-
- .. math:: \\sum_{t=0}^M{\\frac{v_t}{(1+irr)^{t}}} = 0
-
- References
- ----------
- .. [1] NumPy Enhancement Proposal (NEP) 32,
- https://numpy.org/neps/nep-0032-remove-financial-functions.html
- .. [2] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed.,
- Addison-Wesley, 2003, pg. 348.
-
- Examples
- --------
- >>> round(np.irr([-100, 39, 59, 55, 20]), 5)
- 0.28095
- >>> round(np.irr([-100, 0, 0, 74]), 5)
- -0.0955
- >>> round(np.irr([-100, 100, 0, -7]), 5)
- -0.0833
- >>> round(np.irr([-100, 100, 0, 7]), 5)
- 0.06206
- >>> round(np.irr([-5, 10.5, 1, -8, 1]), 5)
- 0.0886
-
- """
- # `np.roots` call is why this function does not support Decimal type.
- #
- # Ultimately Decimal support needs to be added to np.roots, which has
- # greater implications on the entire linear algebra module and how it does
- # eigenvalue computations.
- res = np.roots(values[::-1])
- mask = (res.imag == 0) & (res.real > 0)
- if not mask.any():
- return np.nan
- res = res[mask].real
- # NPV(rate) = 0 can have more than one solution so we return
- # only the solution closest to zero.
- rate = 1/res - 1
- rate = rate.item(np.argmin(np.abs(rate)))
- return rate
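The ``values[::-1]`` reversal is the heart of the method: substituting ``x = 1/(1 + r)`` turns the NPV equation from the Notes into the ordinary polynomial ``sum(v_t * x**t) == 0``, whose coefficients in the highest-degree-first order expected by ``np.roots`` are exactly the reversed cash flows. A check against the first doctest::

    import numpy as np

    values = [-100, 39, 59, 55, 20]
    x = np.roots(values[::-1])                 # roots of sum(v_t * x**t)
    x = x[(x.imag == 0) & (x.real > 0)].real
    r = 1 / x - 1                              # undo x = 1/(1 + r)
    print(round(r[np.argmin(np.abs(r))], 5))   # 0.28095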
-
-
-def _npv_dispatcher(rate, values):
- warnings.warn(_depmsg.format(name='npv'),
- DeprecationWarning, stacklevel=3)
- return (values,)
-
-
-@array_function_dispatch(_npv_dispatcher)
-def npv(rate, values):
- """
- Returns the NPV (Net Present Value) of a cash flow series.
-
- .. deprecated:: 1.18
-
- `npv` is deprecated; for details, see NEP 32 [1]_.
- Use the corresponding function in the numpy-financial library,
- https://pypi.org/project/numpy-financial.
-
- Parameters
- ----------
- rate : scalar
- The discount rate.
- values : array_like, shape(M, )
- The values of the time series of cash flows. The (fixed) time
- interval between cash flow "events" must be the same as that for
- which `rate` is given (i.e., if `rate` is per year, then precisely
- a year is understood to elapse between each cash flow event). By
- convention, investments or "deposits" are negative, income or
- "withdrawals" are positive; `values` must begin with the initial
- investment, thus `values[0]` will typically be negative.
-
- Returns
- -------
- out : float
- The NPV of the input cash flow series `values` at the discount
- `rate`.
-
- Warnings
- --------
- ``npv`` considers a series of cashflows starting in the present (t = 0).
- NPV can also be defined with a series of future cashflows, paid at the
- end, rather than the start, of each period. If future cashflows are used,
- the first cashflow `values[0]` must be zeroed and added to the net
- present value of the future cashflows. This is demonstrated in the
- examples.
-
- Notes
- -----
- Returns the result of: [2]_
-
- .. math :: \\sum_{t=0}^{M-1}{\\frac{values_t}{(1+rate)^{t}}}
-
- References
- ----------
- .. [1] NumPy Enhancement Proposal (NEP) 32,
- https://numpy.org/neps/nep-0032-remove-financial-functions.html
- .. [2] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed.,
- Addison-Wesley, 2003, pg. 346.
-
- Examples
- --------
- Consider a potential project with an initial investment of $40 000 and
- projected cashflows of $5 000, $8 000, $12 000 and $30 000 at the end of
- each period discounted at a rate of 8% per period. To find the project's
- net present value:
-
- >>> rate, cashflows = 0.08, [-40_000, 5_000, 8_000, 12_000, 30_000]
- >>> np.npv(rate, cashflows).round(5)
- 3065.22267
-
- It may be preferable to split the projected cashflow into an initial
- investment and expected future cashflows. In this case, the value of
- the initial cashflow is zero and the initial investment is later added
- to the future cashflows net present value:
-
- >>> initial_cashflow = cashflows[0]
- >>> cashflows[0] = 0
- >>> np.round(np.npv(rate, cashflows) + initial_cashflow, 5)
- 3065.22267
-
- """
- values = np.asarray(values)
- return (values / (1+rate)**np.arange(0, len(values))).sum(axis=0)
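The shifting trick in the Warnings section is the identity ``npv(rate, values) == values[0] + npv(rate, values[1:])/(1 + rate)``: a cash flow at t = 0 is undiscounted, and each later flow picks up exactly one more discount factor. A quick numeric sketch, re-implementing the one-liner above to avoid the deprecated name::

    import numpy as np

    def npv(rate, values):
        values = np.asarray(values)
        return (values / (1 + rate) ** np.arange(len(values))).sum()

    rate, flows = 0.08, [-40_000, 5_000, 8_000, 12_000, 30_000]
    lhs = npv(rate, flows)
    rhs = flows[0] + npv(rate, flows[1:]) / (1 + rate)
    print(np.isclose(lhs, rhs))  # True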
-
-
-def _mirr_dispatcher(values, finance_rate, reinvest_rate):
- warnings.warn(_depmsg.format(name='mirr'),
- DeprecationWarning, stacklevel=3)
- return (values,)
-
-
-@array_function_dispatch(_mirr_dispatcher)
-def mirr(values, finance_rate, reinvest_rate):
- """
- Modified internal rate of return.
-
- .. deprecated:: 1.18
-
- `mirr` is deprecated; for details, see NEP 32 [1]_.
- Use the corresponding function in the numpy-financial library,
- https://pypi.org/project/numpy-financial.
-
- Parameters
- ----------
- values : array_like
- Cash flows, which must contain at least one positive and one
- negative value (otherwise nan is returned). The first value is
- considered a sunk cost at time zero.
- finance_rate : scalar
- Interest rate paid on the cash flows
- reinvest_rate : scalar
- Interest rate received on the cash flows upon reinvestment
-
- Returns
- -------
- out : float
- Modified internal rate of return
-
- References
- ----------
- .. [1] NumPy Enhancement Proposal (NEP) 32,
- https://numpy.org/neps/nep-0032-remove-financial-functions.html
- """
- values = np.asarray(values)
- n = values.size
-
- # Without this explicit cast the 1/(n - 1) computation below
- # becomes a float, which causes TypeError when using Decimal
- # values.
- if isinstance(finance_rate, Decimal):
- n = Decimal(n)
-
- pos = values > 0
- neg = values < 0
- if not (pos.any() and neg.any()):
- return np.nan
- numer = np.abs(npv(reinvest_rate, values*pos))
- denom = np.abs(npv(finance_rate, values*neg))
- return (numer/denom)**(1/(n - 1))*(1 + reinvest_rate) - 1
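In words: discount the negative flows at ``finance_rate``, compound the positive flows at ``reinvest_rate``, and back out the single per-period rate linking the two totals over ``n - 1`` periods. A self-contained sketch reproducing one of the removed test values (omitting the Decimal and all-same-sign handling above)::

    import numpy as np

    def modified_irr(values, finance_rate, reinvest_rate):
        values = np.asarray(values, dtype=float)
        n = values.size

        def _npv(r, v):
            return (v / (1 + r) ** np.arange(n)).sum()

        numer = abs(_npv(reinvest_rate, values * (values > 0)))
        denom = abs(_npv(finance_rate, values * (values < 0)))
        return (numer / denom) ** (1 / (n - 1)) * (1 + reinvest_rate) - 1

    val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000]
    print(round(modified_irr(val, 0.08, 0.055), 4))  # 0.0666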
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index cd8862c94..f1ec38c5c 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1625,57 +1625,7 @@ def trim_zeros(filt, trim='fb'):
[1, 2]
"""
- try:
- return _trim_zeros_new(filt, trim)
- except Exception as ex:
- # Numpy 1.20.0, 2020-07-31
- warning = DeprecationWarning(
- "in the future trim_zeros will require a 1-D array as input "
- "that is compatible with ndarray.astype(bool)"
- )
- warning.__cause__ = ex
- warnings.warn(warning, stacklevel=3)
-
- # Fall back to the old implementation if an exception is encountered
- # Note that the same exception may or may not be raised here as well
- return _trim_zeros_old(filt, trim)
-
-
-def _trim_zeros_new(filt, trim='fb'):
- """Newer optimized implementation of ``trim_zeros()``."""
- arr = np.asanyarray(filt).astype(bool, copy=False)
-
- if arr.ndim != 1:
- raise ValueError('trim_zeros requires an array of exactly one dimension')
- elif not len(arr):
- return filt
-
- trim_upper = trim.upper()
- first = last = None
-
- if 'F' in trim_upper:
- first = arr.argmax()
- # If `arr[first] is False` then so are all other elements
- if not arr[first]:
- return filt[:0]
-
- if 'B' in trim_upper:
- last = len(arr) - arr[::-1].argmax()
- # If `arr[last - 1] is False` then so are all other elements
- if not arr[last - 1]:
- return filt[:0]
-
- return filt[first:last]
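The removed fast path leans on a NumPy idiom worth noting: ``argmax`` on a boolean array returns the index of the first ``True``, so one forward and one reversed ``argmax`` bracket the nonzero span without a Python-level loop::

    import numpy as np

    arr = np.array([0, 0, 1, 0, 2, 3, 4, 0]).astype(bool)
    first = arr.argmax()                   # first True -> 2
    last = len(arr) - arr[::-1].argmax()   # one past last True -> 7
    print(first, last)                     # 2 7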
-
-def _trim_zeros_old(filt, trim='fb'):
- """
- Older unoptimized implementation of ``trim_zeros()``.
-
- Used as fallback in case an exception is encountered
- in ``_trim_zeros_new()``.
-
- """
first = 0
trim = trim.upper()
if 'F' in trim:
@@ -1985,8 +1935,8 @@ class vectorize:
.. versionadded:: 1.7.0
cache : bool, optional
- If `True`, then cache the first function call that determines the number
- of outputs if `otypes` is not provided.
+ If `True`, then cache the first function call that determines the number
+ of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
@@ -2783,8 +2733,8 @@ def blackman(M):
return array([])
if M == 1:
return ones(1, float)
- n = arange(0, M)
- return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
+ n = arange(1-M, M, 2)
+ return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1))
@set_module('numpy')
@@ -2892,8 +2842,8 @@ def bartlett(M):
return array([])
if M == 1:
return ones(1, float)
- n = arange(0, M)
- return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
+ n = arange(1-M, M, 2)
+ return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1))
@set_module('numpy')
@@ -2996,8 +2946,8 @@ def hanning(M):
return array([])
if M == 1:
return ones(1, float)
- n = arange(0, M)
- return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
+ n = arange(1-M, M, 2)
+ return 0.5 + 0.5*cos(pi*n/(M-1))
@set_module('numpy')
@@ -3096,8 +3046,8 @@ def hamming(M):
return array([])
if M == 1:
return ones(1, float)
- n = arange(0, M)
- return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
+ n = arange(1-M, M, 2)
+ return 0.54 + 0.46*cos(pi*n/(M-1))
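These four window rewrites are the same change: with ``n_new = 2*n_old - (M-1)`` running symmetrically over ``arange(1-M, M, 2)``, the identity ``cos(2*pi*n/(M-1)) == -cos(pi*n_new/(M-1))`` flips each minus sign to a plus, and the symmetric grid lets the window come out exactly palindromic (the reason the tests further down tighten ``assert_array_almost_equal`` to ``assert_equal``). A quick equivalence check for Hanning::

    import numpy as np

    M = 10
    n_old = np.arange(0, M)
    n_new = np.arange(1 - M, M, 2)   # equals 2*n_old - (M - 1)
    w_old = 0.5 - 0.5 * np.cos(2.0 * np.pi * n_old / (M - 1))
    w_new = 0.5 + 0.5 * np.cos(np.pi * n_new / (M - 1))
    print(np.allclose(w_old, w_new))           # True
    print(np.array_equal(w_new, w_new[::-1]))  # True: exactly symmetric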
## Code from cephes for i0
@@ -3193,25 +3143,18 @@ def i0(x):
"""
Modified Bessel function of the first kind, order 0.
- Usually denoted :math:`I_0`. This function does broadcast, but will *not*
- "up-cast" int dtype arguments unless accompanied by at least one float or
- complex dtype argument (see Raises below).
+ Usually denoted :math:`I_0`.
Parameters
----------
- x : array_like, dtype float or complex
+ x : array_like of float
Argument of the Bessel function.
Returns
-------
- out : ndarray, shape = x.shape, dtype = x.dtype
+ out : ndarray, shape = x.shape, dtype = float
The modified Bessel function evaluated at each of the elements of `x`.
- Raises
- ------
- TypeError: array cannot be safely cast to required type
- If argument consists exclusively of int dtypes.
-
See Also
--------
scipy.special.i0, scipy.special.iv, scipy.special.ive
@@ -3241,12 +3184,16 @@ def i0(x):
Examples
--------
>>> np.i0(0.)
- array(1.0) # may vary
- >>> np.i0([0., 1. + 2j])
- array([ 1.00000000+0.j , 0.18785373+0.64616944j]) # may vary
+ array(1.0)
+ >>> np.i0([0, 1, 2, 3])
+ array([1. , 1.26606588, 2.2795853 , 4.88079259])
"""
x = np.asanyarray(x)
+ if x.dtype.kind == 'c':
+ raise TypeError("i0 not supported for complex values")
+ if x.dtype.kind != 'f':
+ x = x.astype(float)
x = np.abs(x)
return piecewise(x, [x <= 8.0], [_i0_1, _i0_2])
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 6093f7e9d..9d3de69dd 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -1,6 +1,7 @@
import functools
import sys
import math
+import warnings
import numpy.core.numeric as _nx
from numpy.core.numeric import (
@@ -659,7 +660,15 @@ class ndindex:
Increment the multi-dimensional index by one.
This method is for backward compatibility only: do not use.
+
+ .. deprecated:: 1.20.0
+ This method has been advised against since numpy 1.8.0, but only
+ started emitting DeprecationWarning as of this version.
"""
+ # NumPy 1.20.0, 2020-09-08
+ warnings.warn(
+ "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead",
+ DeprecationWarning, stacklevel=2)
next(self)
def __next__(self):
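For callers of the newly deprecated method, the replacement is the ordinary iterator protocol::

    import numpy as np

    it = np.ndindex(2, 3)
    print(next(it))   # (0, 0) -- preferred spelling
    # it.ndincr()     # deprecated; advances the same way but returns None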
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 58affc2fc..c6a19fda9 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -14,7 +14,7 @@ from . import format
from ._datasource import DataSource
from numpy.core import overrides
from numpy.core.multiarray import packbits, unpackbits
-from numpy.core.overrides import set_module
+from numpy.core.overrides import set_array_function_like_doc, set_module
from numpy.core._internal import recursive
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
@@ -23,7 +23,7 @@ from ._iotools import (
)
from numpy.compat import (
- asbytes, asstr, asunicode, bytes, os_fspath, os_PathLike,
+ asbytes, asstr, asunicode, os_fspath, os_PathLike,
pickle, contextlib_nullcontext
)
@@ -712,44 +712,14 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
zipf = zipfile_factory(file, mode="w", compression=compression)
- if sys.version_info >= (3, 6):
- # Since Python 3.6 it is possible to write directly to a ZIP file.
- for key, val in namedict.items():
- fname = key + '.npy'
- val = np.asanyarray(val)
- # always force zip64, gh-10776
- with zipf.open(fname, 'w', force_zip64=True) as fid:
- format.write_array(fid, val,
- allow_pickle=allow_pickle,
- pickle_kwargs=pickle_kwargs)
- else:
- # Stage arrays in a temporary file on disk, before writing to zip.
-
- # Import deferred for startup time improvement
- import tempfile
- # Since target file might be big enough to exceed capacity of a global
- # temporary directory, create temp file side-by-side with the target file.
- file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
- fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
- os.close(fd)
- try:
- for key, val in namedict.items():
- fname = key + '.npy'
- fid = open(tmpfile, 'wb')
- try:
- format.write_array(fid, np.asanyarray(val),
- allow_pickle=allow_pickle,
- pickle_kwargs=pickle_kwargs)
- fid.close()
- fid = None
- zipf.write(tmpfile, arcname=fname)
- except IOError as exc:
- raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
- finally:
- if fid:
- fid.close()
- finally:
- os.remove(tmpfile)
+ for key, val in namedict.items():
+ fname = key + '.npy'
+ val = np.asanyarray(val)
+ # always force zip64, gh-10776
+ with zipf.open(fname, 'w', force_zip64=True) as fid:
+ format.write_array(fid, val,
+ allow_pickle=allow_pickle,
+ pickle_kwargs=pickle_kwargs)
zipf.close()
@@ -790,10 +760,17 @@ def _getconv(dtype):
_loadtxt_chunksize = 50000
+def _loadtxt_dispatcher(fname, dtype=None, comments=None, delimiter=None,
+ converters=None, skiprows=None, usecols=None, unpack=None,
+ ndmin=None, encoding=None, max_rows=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
- ndmin=0, encoding='bytes', max_rows=None):
+ ndmin=0, encoding='bytes', max_rows=None, *, like=None):
r"""
Load data from a text file.
@@ -860,6 +837,9 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
is to read all the lines.
.. versionadded:: 1.16.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -917,6 +897,14 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
[-17.57, 63.94]])
"""
+ if like is not None:
+ return _loadtxt_with_like(
+ fname, dtype=dtype, comments=comments, delimiter=delimiter,
+ converters=converters, skiprows=skiprows, usecols=usecols,
+ unpack=unpack, ndmin=ndmin, encoding=encoding,
+ max_rows=max_rows, like=like
+ )
+
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Nested functions used by loadtxt.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -1201,6 +1189,11 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
return X
+_loadtxt_with_like = array_function_dispatch(
+ _loadtxt_dispatcher
+)(loadtxt)
+
+
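The ``_loadtxt_with_like`` indirection is what lets the new ``like=`` argument reach ``__array_function__`` overrides: the undecorated ``loadtxt`` keeps zero dispatch overhead, while the decorated alias is called only when ``like`` is given. A minimal sketch of a class that would intercept it, assuming NumPy >= 1.20 (the class and its behavior are illustrative, not part of NumPy)::

    import numpy as np

    class MyArray:
        def __array_function__(self, func, types, args, kwargs):
            # A real duck array would dispatch to its own implementation;
            # here we just report which function was intercepted.
            return f"intercepted {func.__name__}"

    ref = MyArray()
    # like=ref routes the call through MyArray.__array_function__
    # before any file I/O, so "data.txt" need not exist.
    print(np.loadtxt("data.txt", like=ref))  # intercepted loadtxt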
def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
header=None, footer=None, comments=None,
encoding=None):
@@ -1497,7 +1490,7 @@ def fromregex(file, regexp, dtype, encoding=None):
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
- `doc.structured_arrays`.
+ `basics.rec`.
Examples
--------
@@ -1554,6 +1547,18 @@ def fromregex(file, regexp, dtype, encoding=None):
#####--------------------------------------------------------------------------
+def _genfromtxt_dispatcher(fname, dtype=None, comments=None, delimiter=None,
+ skip_header=None, skip_footer=None, converters=None,
+ missing_values=None, filling_values=None, usecols=None,
+ names=None, excludelist=None, deletechars=None,
+ replace_space=None, autostrip=None, case_sensitive=None,
+ defaultfmt=None, unpack=None, usemask=None, loose=None,
+ invalid_raise=None, max_rows=None, encoding=None, *,
+ like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
@@ -1562,7 +1567,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
- invalid_raise=True, max_rows=None, encoding='bytes'):
+ invalid_raise=True, max_rows=None, encoding='bytes', *,
+ like=None):
"""
Load data from a text file, with missing values handled as specified.
@@ -1659,6 +1665,9 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
to None the system default is used. The default value is 'bytes'.
.. versionadded:: 1.14.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -1737,6 +1746,21 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
dtype=[('f0', 'S12'), ('f1', 'S12')])
"""
+
+ if like is not None:
+ return _genfromtxt_with_like(
+ fname, dtype=dtype, comments=comments, delimiter=delimiter,
+ skip_header=skip_header, skip_footer=skip_footer,
+ converters=converters, missing_values=missing_values,
+ filling_values=filling_values, usecols=usecols, names=names,
+ excludelist=excludelist, deletechars=deletechars,
+ replace_space=replace_space, autostrip=autostrip,
+ case_sensitive=case_sensitive, defaultfmt=defaultfmt,
+ unpack=unpack, usemask=usemask, loose=loose,
+ invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
+ like=like
+ )
+
if max_rows is not None:
if skip_footer:
raise ValueError(
@@ -2250,6 +2274,11 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
return output.squeeze()
+_genfromtxt_with_like = array_function_dispatch(
+ _genfromtxt_dispatcher
+)(genfromtxt)
+
+
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 1c124cc0e..7b89eeb70 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -1017,7 +1017,7 @@ def polydiv(u, v):
(array([1.5 , 1.75]), array([0.25]))
"""
- truepoly = (isinstance(u, poly1d) or isinstance(u, poly1d))
+ truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 127338975..cbc4641d8 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -870,7 +870,7 @@ def split(ary, indices_or_sections, axis=0):
N = ary.shape[axis]
if N % sections:
raise ValueError(
- 'array split does not result in an equal division')
+ 'array split does not result in an equal division') from None
return array_split(ary, indices_or_sections, axis)
diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py
deleted file mode 100644
index 26e79bc06..000000000
--- a/numpy/lib/tests/test_financial.py
+++ /dev/null
@@ -1,380 +0,0 @@
-import warnings
-from decimal import Decimal
-
-import numpy as np
-from numpy.testing import (
- assert_, assert_almost_equal, assert_allclose, assert_equal, assert_raises
- )
-
-
-def filter_deprecation(func):
- def newfunc(*args, **kwargs):
- with warnings.catch_warnings(record=True) as ws:
- warnings.filterwarnings('always', category=DeprecationWarning)
- func(*args, **kwargs)
- assert_(all(w.category is DeprecationWarning for w in ws))
- return newfunc
-
-
-class TestFinancial:
- @filter_deprecation
- def test_npv_irr_congruence(self):
- # IRR is defined as the rate required for the present value of a
- # series of cashflows to be zero, i.e. NPV(IRR(x), x) = 0
- cashflows = np.array([-40000, 5000, 8000, 12000, 30000])
- assert_allclose(np.npv(np.irr(cashflows), cashflows), 0, atol=1e-10, rtol=0)
-
- @filter_deprecation
- def test_rate(self):
- assert_almost_equal(
- np.rate(10, 0, -3500, 10000),
- 0.1107, 4)
-
- @filter_deprecation
- def test_rate_decimal(self):
- rate = np.rate(Decimal('10'), Decimal('0'), Decimal('-3500'), Decimal('10000'))
- assert_equal(Decimal('0.1106908537142689284704528100'), rate)
-
- @filter_deprecation
- def test_irr(self):
- v = [-150000, 15000, 25000, 35000, 45000, 60000]
- assert_almost_equal(np.irr(v), 0.0524, 2)
- v = [-100, 0, 0, 74]
- assert_almost_equal(np.irr(v), -0.0955, 2)
- v = [-100, 39, 59, 55, 20]
- assert_almost_equal(np.irr(v), 0.28095, 2)
- v = [-100, 100, 0, -7]
- assert_almost_equal(np.irr(v), -0.0833, 2)
- v = [-100, 100, 0, 7]
- assert_almost_equal(np.irr(v), 0.06206, 2)
- v = [-5, 10.5, 1, -8, 1]
- assert_almost_equal(np.irr(v), 0.0886, 2)
-
- # Test that if there is no solution then np.irr returns nan
- # Fixes gh-6744
- v = [-1, -2, -3]
- assert_equal(np.irr(v), np.nan)
-
- @filter_deprecation
- def test_pv(self):
- assert_almost_equal(np.pv(0.07, 20, 12000, 0), -127128.17, 2)
-
- @filter_deprecation
- def test_pv_decimal(self):
- assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')),
- Decimal('-127128.1709461939327295222005'))
-
- @filter_deprecation
- def test_fv(self):
- assert_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.362673042924)
-
- @filter_deprecation
- def test_fv_decimal(self):
- assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), 0, 0),
- Decimal('86609.36267304300040536731624'))
-
- @filter_deprecation
- def test_pmt(self):
- res = np.pmt(0.08 / 12, 5 * 12, 15000)
- tgt = -304.145914
- assert_allclose(res, tgt)
- # Test the edge case where rate == 0.0
- res = np.pmt(0.0, 5 * 12, 15000)
- tgt = -250.0
- assert_allclose(res, tgt)
- # Test the case where we use broadcast and
- # the arguments passed in are arrays.
- res = np.pmt([[0.0, 0.8], [0.3, 0.8]], [12, 3], [2000, 20000])
- tgt = np.array([[-166.66667, -19311.258], [-626.90814, -19311.258]])
- assert_allclose(res, tgt)
-
- @filter_deprecation
- def test_pmt_decimal(self):
- res = np.pmt(Decimal('0.08') / Decimal('12'), 5 * 12, 15000)
- tgt = Decimal('-304.1459143262052370338701494')
- assert_equal(res, tgt)
- # Test the edge case where rate == 0.0
- res = np.pmt(Decimal('0'), Decimal('60'), Decimal('15000'))
- tgt = -250
- assert_equal(res, tgt)
- # Test the case where we use broadcast and
- # the arguments passed in are arrays.
- res = np.pmt([[Decimal('0'), Decimal('0.8')], [Decimal('0.3'), Decimal('0.8')]],
- [Decimal('12'), Decimal('3')], [Decimal('2000'), Decimal('20000')])
- tgt = np.array([[Decimal('-166.6666666666666666666666667'), Decimal('-19311.25827814569536423841060')],
- [Decimal('-626.9081401700757748402586600'), Decimal('-19311.25827814569536423841060')]])
-
- # Cannot use the `assert_allclose` because it uses isfinite under the covers
- # which does not support the Decimal type
- # See issue: https://github.com/numpy/numpy/issues/9954
- assert_equal(res[0][0], tgt[0][0])
- assert_equal(res[0][1], tgt[0][1])
- assert_equal(res[1][0], tgt[1][0])
- assert_equal(res[1][1], tgt[1][1])
-
- @filter_deprecation
- def test_ppmt(self):
- assert_equal(np.round(np.ppmt(0.1 / 12, 1, 60, 55000), 2), -710.25)
-
- @filter_deprecation
- def test_ppmt_decimal(self):
- assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000')),
- Decimal('-710.2541257864217612489830917'))
-
- # Two tests showing how Decimal is actually getting at a more exact result
- # .23 / 12 does not come out nicely as a float but does as a decimal
- @filter_deprecation
- def test_ppmt_special_rate(self):
- assert_equal(np.round(np.ppmt(0.23 / 12, 1, 60, 10000000000), 8), -90238044.232277036)
-
- @filter_deprecation
- def test_ppmt_special_rate_decimal(self):
- # When rounded to 8 decimal places like the float-based test, this should
- # not equal the same value as the float result substituted for the Decimal
- def raise_error_because_not_equal():
- assert_equal(
- round(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')), 8),
- Decimal('-90238044.232277036'))
-
- assert_raises(AssertionError, raise_error_because_not_equal)
- assert_equal(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')),
- Decimal('-90238044.2322778884413969909'))
-
- @filter_deprecation
- def test_ipmt(self):
- assert_almost_equal(np.round(np.ipmt(0.1 / 12, 1, 24, 2000), 2), -16.67)
-
- @filter_deprecation
- def test_ipmt_decimal(self):
- result = np.ipmt(Decimal('0.1') / Decimal('12'), 1, 24, 2000)
- assert_equal(result.flat[0], Decimal('-16.66666666666666666666666667'))
-
- @filter_deprecation
- def test_nper(self):
- assert_almost_equal(np.nper(0.075, -2000, 0, 100000.),
- 21.54, 2)
-
- @filter_deprecation
- def test_nper2(self):
- assert_almost_equal(np.nper(0.0, -2000, 0, 100000.),
- 50.0, 1)
-
- @filter_deprecation
- def test_npv(self):
- assert_almost_equal(
- np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]),
- 122.89, 2)
-
- @filter_deprecation
- def test_npv_decimal(self):
- assert_equal(
- np.npv(Decimal('0.05'), [-15000, 1500, 2500, 3500, 4500, 6000]),
- Decimal('122.894854950942692161628715'))
-
- @filter_deprecation
- def test_mirr(self):
- val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000]
- assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4)
-
- val = [-120000, 39000, 30000, 21000, 37000, 46000]
- assert_almost_equal(np.mirr(val, 0.10, 0.12), 0.126094, 6)
-
- val = [100, 200, -50, 300, -200]
- assert_almost_equal(np.mirr(val, 0.05, 0.06), 0.3428, 4)
-
- val = [39000, 30000, 21000, 37000, 46000]
- assert_(np.isnan(np.mirr(val, 0.10, 0.12)))
-
- @filter_deprecation
- def test_mirr_decimal(self):
- val = [Decimal('-4500'), Decimal('-800'), Decimal('800'), Decimal('800'),
- Decimal('600'), Decimal('600'), Decimal('800'), Decimal('800'),
- Decimal('700'), Decimal('3000')]
- assert_equal(np.mirr(val, Decimal('0.08'), Decimal('0.055')),
- Decimal('0.066597175031553548874239618'))
-
- val = [Decimal('-120000'), Decimal('39000'), Decimal('30000'),
- Decimal('21000'), Decimal('37000'), Decimal('46000')]
- assert_equal(np.mirr(val, Decimal('0.10'), Decimal('0.12')), Decimal('0.126094130365905145828421880'))
-
- val = [Decimal('100'), Decimal('200'), Decimal('-50'),
- Decimal('300'), Decimal('-200')]
- assert_equal(np.mirr(val, Decimal('0.05'), Decimal('0.06')), Decimal('0.342823387842176663647819868'))
-
- val = [Decimal('39000'), Decimal('30000'), Decimal('21000'), Decimal('37000'), Decimal('46000')]
- assert_(np.isnan(np.mirr(val, Decimal('0.10'), Decimal('0.12'))))
-
- @filter_deprecation
- def test_when(self):
- # begin
- assert_equal(np.rate(10, 20, -3500, 10000, 1),
- np.rate(10, 20, -3500, 10000, 'begin'))
- # end
- assert_equal(np.rate(10, 20, -3500, 10000),
- np.rate(10, 20, -3500, 10000, 'end'))
- assert_equal(np.rate(10, 20, -3500, 10000, 0),
- np.rate(10, 20, -3500, 10000, 'end'))
-
- # begin
- assert_equal(np.pv(0.07, 20, 12000, 0, 1),
- np.pv(0.07, 20, 12000, 0, 'begin'))
- # end
- assert_equal(np.pv(0.07, 20, 12000, 0),
- np.pv(0.07, 20, 12000, 0, 'end'))
- assert_equal(np.pv(0.07, 20, 12000, 0, 0),
- np.pv(0.07, 20, 12000, 0, 'end'))
-
- # begin
- assert_equal(np.fv(0.075, 20, -2000, 0, 1),
- np.fv(0.075, 20, -2000, 0, 'begin'))
- # end
- assert_equal(np.fv(0.075, 20, -2000, 0),
- np.fv(0.075, 20, -2000, 0, 'end'))
- assert_equal(np.fv(0.075, 20, -2000, 0, 0),
- np.fv(0.075, 20, -2000, 0, 'end'))
-
- # begin
- assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 1),
- np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'begin'))
- # end
- assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0),
- np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end'))
- assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 0),
- np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end'))
-
- # begin
- assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 1),
- np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'begin'))
- # end
- assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0),
- np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end'))
- assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 0),
- np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end'))
-
- # begin
- assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 1),
- np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'begin'))
- # end
- assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0),
- np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end'))
- assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 0),
- np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end'))
-
- # begin
- assert_equal(np.nper(0.075, -2000, 0, 100000., 1),
- np.nper(0.075, -2000, 0, 100000., 'begin'))
- # end
- assert_equal(np.nper(0.075, -2000, 0, 100000.),
- np.nper(0.075, -2000, 0, 100000., 'end'))
- assert_equal(np.nper(0.075, -2000, 0, 100000., 0),
- np.nper(0.075, -2000, 0, 100000., 'end'))
-
- @filter_deprecation
- def test_decimal_with_when(self):
- """Test that decimals are still supported if the when argument is passed"""
- # begin
- assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('1')),
- np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'begin'))
- # end
- assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000')),
- np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end'))
- assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('0')),
- np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end'))
-
- # begin
- assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('1')),
- np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'begin'))
- # end
- assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')),
- np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end'))
- assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('0')),
- np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end'))
-
- # begin
- assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), Decimal('1')),
- np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'begin'))
- # end
- assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0')),
- np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end'))
- assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), Decimal('0')),
- np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end'))
-
- # begin
- assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
- Decimal('0'), Decimal('1')),
- np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
- Decimal('0'), 'begin'))
- # end
- assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
- Decimal('0')),
- np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
- Decimal('0'), 'end'))
- assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
- Decimal('0'), Decimal('0')),
- np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
- Decimal('0'), 'end'))
-
- # begin
- assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
- Decimal('0'), Decimal('1')),
- np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
- Decimal('0'), 'begin'))
- # end
- assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
- Decimal('0')),
- np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
- Decimal('0'), 'end'))
- assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
- Decimal('0'), Decimal('0')),
- np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
- Decimal('0'), 'end'))
-
- # begin
- assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
- Decimal('0'), Decimal('1')).flat[0],
- np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
- Decimal('0'), 'begin').flat[0])
- # end
- assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
- Decimal('0')).flat[0],
- np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
- Decimal('0'), 'end').flat[0])
- assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
- Decimal('0'), Decimal('0')).flat[0],
- np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
- Decimal('0'), 'end').flat[0])
-
- @filter_deprecation
- def test_broadcast(self):
- assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]),
- [21.5449442, 20.76156441], 4)
-
- assert_almost_equal(np.ipmt(0.1 / 12, list(range(5)), 24, 2000),
- [-17.29165168, -16.66666667, -16.03647345,
- -15.40102862, -14.76028842], 4)
-
- assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000),
- [-74.998201, -75.62318601, -76.25337923,
- -76.88882405, -77.52956425], 4)
-
- assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000, 0,
- [0, 0, 1, 'end', 'begin']),
- [-74.998201, -75.62318601, -75.62318601,
- -76.88882405, -76.88882405], 4)
-
- @filter_deprecation
- def test_broadcast_decimal(self):
- # Use almost equal because precision is tested in the explicit tests;
- # this test ensures that broadcasting with Decimal is not broken.
- assert_almost_equal(np.ipmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')),
- [Decimal('-17.29165168'), Decimal('-16.66666667'), Decimal('-16.03647345'),
- Decimal('-15.40102862'), Decimal('-14.76028842')], 4)
-
- assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')),
- [Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-76.25337923'),
- Decimal('-76.88882405'), Decimal('-77.52956425')], 4)
-
- assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000'),
- Decimal('0'), [Decimal('0'), Decimal('0'), Decimal('1'), 'end', 'begin']),
- [Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-75.62318601'),
- Decimal('-76.88882405'), Decimal('-76.88882405')], 4)
diff --git a/numpy/lib/tests/test_financial_expired.py b/numpy/lib/tests/test_financial_expired.py
new file mode 100644
index 000000000..66bb08026
--- /dev/null
+++ b/numpy/lib/tests/test_financial_expired.py
@@ -0,0 +1,13 @@
+import sys
+import pytest
+import numpy as np
+
+
+@pytest.mark.skipif(sys.version_info[:2] < (3, 7),
+ reason="requires python 3.7 or higher")
+def test_financial_expired():
+ match = 'NEP 32'
+ with pytest.warns(RuntimeWarning, match=match):
+ func = np.fv
+ with pytest.raises(RuntimeError, match=match):
+ func(1, 2, 3)
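The test encodes the two-stage removal mechanism: attribute access on the module warns, and the returned stub raises when called. This is achievable with a module-level ``__getattr__`` (PEP 562, hence the Python >= 3.7 skip); a schematic sketch of the pattern, not NumPy's exact code::

    import warnings

    _expired = {'fv', 'pmt', 'nper'}  # removed under NEP 32

    def __getattr__(name):
        if name in _expired:
            warnings.warn(f"numpy.{name} is expired (NEP 32)", RuntimeWarning)
            def _stub(*args, **kwargs):
                raise RuntimeError(f"numpy.{name} is expired (NEP 32)")
            return _stub
        raise AttributeError(f"module has no attribute {name}")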
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 89c1a2d9b..7bddb941c 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -1169,7 +1169,7 @@ class TestTrimZeros:
a = np.array([0, 0, 1, 0, 2, 3, 4, 0])
b = a.astype(float)
c = a.astype(complex)
- d = np.array([None, [], 1, False, 'b', 3.0, range(4), b''], dtype=object)
+ d = a.astype(object)
def values(self):
attr_names = ('a', 'b', 'c', 'd')
@@ -1208,6 +1208,26 @@ class TestTrimZeros:
res = trim_zeros(arr)
assert_array_equal(arr, res)
+ @pytest.mark.parametrize(
+ 'arr',
+ [np.array([0, 2**62, 0]),
+ np.array([0, 2**63, 0]),
+ np.array([0, 2**64, 0])]
+ )
+ def test_overflow(self, arr):
+ slc = np.s_[1:2]
+ res = trim_zeros(arr)
+ assert_array_equal(res, arr[slc])
+
+ def test_no_trim(self):
+ arr = np.array([None, 1, None])
+ res = trim_zeros(arr)
+ assert_array_equal(arr, res)
+
+
+ def test_list_to_list(self):
+ res = trim_zeros(self.a.tolist())
+ assert isinstance(res, list)
class TestExtins:
@@ -1785,28 +1805,28 @@ class TestFilterwindows:
def test_hanning(self):
# check symmetry
w = hanning(10)
- assert_array_almost_equal(w, flipud(w), 7)
+ assert_equal(w, flipud(w))
# check known value
assert_almost_equal(np.sum(w, axis=0), 4.500, 4)
def test_hamming(self):
# check symmetry
w = hamming(10)
- assert_array_almost_equal(w, flipud(w), 7)
+ assert_equal(w, flipud(w))
# check known value
assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)
def test_bartlett(self):
# check symmetry
w = bartlett(10)
- assert_array_almost_equal(w, flipud(w), 7)
+ assert_equal(w, flipud(w))
# check known value
assert_almost_equal(np.sum(w, axis=0), 4.4444, 4)
def test_blackman(self):
# check symmetry
w = blackman(10)
- assert_array_almost_equal(w, flipud(w), 7)
+ assert_equal(w, flipud(w))
# check known value
assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
@@ -2111,8 +2131,9 @@ class Test_I0:
i0(0.5),
np.array(1.0634833707413234))
- A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549])
- expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049])
+ # need at least one test above 8, as the implementation is piecewise
+ A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549, 10.0])
+ expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049, 2815.71662847])
assert_almost_equal(i0(A), expected)
assert_almost_equal(i0(-A), expected)
@@ -2149,6 +2170,10 @@ class Test_I0:
assert_array_equal(exp, res)
+ def test_complex(self):
+ a = np.array([0, 1 + 2j])
+ with pytest.raises(TypeError, match="i0 not supported for complex values"):
+ res = i0(a)
class TestKaiser:
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index a23c6b007..38d698df4 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -19,7 +19,7 @@ from ctypes import c_bool
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConversionWarning
-from numpy.compat import asbytes, bytes
+from numpy.compat import asbytes
from numpy.ma.testutils import assert_equal
from numpy.testing import (
assert_warns, assert_, assert_raises_regex, assert_raises,
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index cd0b90dc4..ab6691b43 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -243,6 +243,15 @@ class TestPolynomial:
assert_equal(q.coeffs.dtype, np.complex128)
assert_equal(r.coeffs.dtype, np.complex128)
assert_equal(q*a + r, b)
+
+ c = [1, 2, 3]
+ d = np.poly1d([1, 2, 3])
+ s, t = np.polydiv(c, d)
+ assert isinstance(s, np.poly1d)
+ assert isinstance(t, np.poly1d)
+ u, v = np.polydiv(d, c)
+ assert isinstance(u, np.poly1d)
+ assert isinstance(v, np.poly1d)
def test_poly_coeffs_mutable(self):
""" Coefficients should be modifiable """
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index cd7484241..2b4cbdfbb 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -8,7 +8,7 @@ from numpy.core.numeric import (
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
-from numpy.core.overrides import set_module
+from numpy.core.overrides import set_array_function_like_doc, set_module
from numpy.core import overrides
from numpy.core import iinfo
@@ -149,8 +149,13 @@ def flipud(m):
return m[::-1, ...]
+def _eye_dispatcher(N, M=None, k=None, dtype=None, order=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def eye(N, M=None, k=0, dtype=float, order='C'):
+def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
@@ -171,6 +176,9 @@ def eye(N, M=None, k=0, dtype=float, order='C'):
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -194,6 +202,8 @@ def eye(N, M=None, k=0, dtype=float, order='C'):
[0., 0., 0.]])
"""
+ if like is not None:
+ return _eye_with_like(N, M=M, k=k, dtype=dtype, order=order, like=like)
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
@@ -207,6 +217,11 @@ def eye(N, M=None, k=0, dtype=float, order='C'):
return m
+_eye_with_like = array_function_dispatch(
+ _eye_dispatcher
+)(eye)
+
+
def _diag_dispatcher(v, k=None):
return (v,)
@@ -343,8 +358,13 @@ def diagflat(v, k=0):
return wrap(res)
+def _tri_dispatcher(N, M=None, k=None, dtype=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
@set_module('numpy')
-def tri(N, M=None, k=0, dtype=float):
+def tri(N, M=None, k=0, dtype=float, *, like=None):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
@@ -361,6 +381,9 @@ def tri(N, M=None, k=0, dtype=float):
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
Returns
-------
@@ -381,6 +404,9 @@ def tri(N, M=None, k=0, dtype=float):
[1., 1., 0., 0., 0.]])
"""
+ if like is not None:
+ return _tri_with_like(N, M=M, k=k, dtype=dtype, like=like)
+
if M is None:
M = N
@@ -393,6 +419,11 @@ def tri(N, M=None, k=0, dtype=float):
return m
+_tri_with_like = array_function_dispatch(
+ _tri_dispatcher
+)(tri)
+
+
def _trilu_dispatcher(m, k=None):
return (m,)
diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi
new file mode 100644
index 000000000..ffb05bb81
--- /dev/null
+++ b/numpy/linalg/__init__.pyi
@@ -0,0 +1,23 @@
+from typing import Any
+
+matrix_power: Any
+solve: Any
+tensorsolve: Any
+tensorinv: Any
+inv: Any
+cholesky: Any
+eigvals: Any
+eigvalsh: Any
+pinv: Any
+slogdet: Any
+det: Any
+svd: Any
+eig: Any
+eigh: Any
+lstsq: Any
+norm: Any
+qr: Any
+cond: Any
+matrix_rank: Any
+LinAlgError: Any
+multi_dot: Any
diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src
index 59647c67d..1807aadcf 100644
--- a/numpy/linalg/umath_linalg.c.src
+++ b/numpy/linalg/umath_linalg.c.src
@@ -3665,7 +3665,7 @@ PyObject *PyInit__umath_linalg(void)
return NULL;
}
- version = PyString_FromString(umath_linalg_version_string);
+ version = PyUnicode_FromString(umath_linalg_version_string);
if (version == NULL) {
return NULL;
}
diff --git a/numpy/ma/__init__.pyi b/numpy/ma/__init__.pyi
new file mode 100644
index 000000000..d1259abcc
--- /dev/null
+++ b/numpy/ma/__init__.pyi
@@ -0,0 +1,225 @@
+from typing import Any
+
+core: Any
+extras: Any
+MAError: Any
+MaskError: Any
+MaskType: Any
+MaskedArray: Any
+abs: Any
+absolute: Any
+add: Any
+all: Any
+allclose: Any
+allequal: Any
+alltrue: Any
+amax: Any
+amin: Any
+angle: Any
+anom: Any
+anomalies: Any
+any: Any
+append: Any
+arange: Any
+arccos: Any
+arccosh: Any
+arcsin: Any
+arcsinh: Any
+arctan: Any
+arctan2: Any
+arctanh: Any
+argmax: Any
+argmin: Any
+argsort: Any
+around: Any
+array: Any
+asanyarray: Any
+asarray: Any
+bitwise_and: Any
+bitwise_or: Any
+bitwise_xor: Any
+bool_: Any
+ceil: Any
+choose: Any
+clip: Any
+common_fill_value: Any
+compress: Any
+compressed: Any
+concatenate: Any
+conjugate: Any
+convolve: Any
+copy: Any
+correlate: Any
+cos: Any
+cosh: Any
+count: Any
+cumprod: Any
+cumsum: Any
+default_fill_value: Any
+diag: Any
+diagonal: Any
+diff: Any
+divide: Any
+empty: Any
+empty_like: Any
+equal: Any
+exp: Any
+expand_dims: Any
+fabs: Any
+filled: Any
+fix_invalid: Any
+flatten_mask: Any
+flatten_structured_array: Any
+floor: Any
+floor_divide: Any
+fmod: Any
+frombuffer: Any
+fromflex: Any
+fromfunction: Any
+getdata: Any
+getmask: Any
+getmaskarray: Any
+greater: Any
+greater_equal: Any
+harden_mask: Any
+hypot: Any
+identity: Any
+ids: Any
+indices: Any
+inner: Any
+innerproduct: Any
+isMA: Any
+isMaskedArray: Any
+is_mask: Any
+is_masked: Any
+isarray: Any
+left_shift: Any
+less: Any
+less_equal: Any
+log: Any
+log10: Any
+log2: Any
+logical_and: Any
+logical_not: Any
+logical_or: Any
+logical_xor: Any
+make_mask: Any
+make_mask_descr: Any
+make_mask_none: Any
+mask_or: Any
+masked: Any
+masked_array: Any
+masked_equal: Any
+masked_greater: Any
+masked_greater_equal: Any
+masked_inside: Any
+masked_invalid: Any
+masked_less: Any
+masked_less_equal: Any
+masked_not_equal: Any
+masked_object: Any
+masked_outside: Any
+masked_print_option: Any
+masked_singleton: Any
+masked_values: Any
+masked_where: Any
+max: Any
+maximum: Any
+maximum_fill_value: Any
+mean: Any
+min: Any
+minimum: Any
+minimum_fill_value: Any
+mod: Any
+multiply: Any
+mvoid: Any
+ndim: Any
+negative: Any
+nomask: Any
+nonzero: Any
+not_equal: Any
+ones: Any
+outer: Any
+outerproduct: Any
+power: Any
+prod: Any
+product: Any
+ptp: Any
+put: Any
+putmask: Any
+ravel: Any
+remainder: Any
+repeat: Any
+reshape: Any
+resize: Any
+right_shift: Any
+round: Any
+round_: Any
+set_fill_value: Any
+shape: Any
+sin: Any
+sinh: Any
+size: Any
+soften_mask: Any
+sometrue: Any
+sort: Any
+sqrt: Any
+squeeze: Any
+std: Any
+subtract: Any
+sum: Any
+swapaxes: Any
+take: Any
+tan: Any
+tanh: Any
+trace: Any
+transpose: Any
+true_divide: Any
+var: Any
+where: Any
+zeros: Any
+apply_along_axis: Any
+apply_over_axes: Any
+atleast_1d: Any
+atleast_2d: Any
+atleast_3d: Any
+average: Any
+clump_masked: Any
+clump_unmasked: Any
+column_stack: Any
+compress_cols: Any
+compress_nd: Any
+compress_rowcols: Any
+compress_rows: Any
+count_masked: Any
+corrcoef: Any
+cov: Any
+diagflat: Any
+dot: Any
+dstack: Any
+ediff1d: Any
+flatnotmasked_contiguous: Any
+flatnotmasked_edges: Any
+hsplit: Any
+hstack: Any
+isin: Any
+in1d: Any
+intersect1d: Any
+mask_cols: Any
+mask_rowcols: Any
+mask_rows: Any
+masked_all: Any
+masked_all_like: Any
+median: Any
+mr_: Any
+notmasked_contiguous: Any
+notmasked_edges: Any
+polyfit: Any
+row_stack: Any
+setdiff1d: Any
+setxor1d: Any
+stack: Any
+unique: Any
+union1d: Any
+vander: Any
+vstack: Any
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 27f14a5e7..0ed2971e6 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -3858,8 +3858,6 @@ class TestMaskedArrayMathMethods:
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
- @pytest.mark.skipif(sys.platform=='win32' and sys.version_info < (3, 6),
- reason='Fails on Python < 3.6 on Windows, gh-9671')
@suppress_copy_mask_on_assignment
def test_varstd_specialcases(self):
# Test a special case for var
diff --git a/numpy/ma/timer_comparison.py b/numpy/ma/timer_comparison.py
index 83bd7852e..f5855efcf 100644
--- a/numpy/ma/timer_comparison.py
+++ b/numpy/ma/timer_comparison.py
@@ -100,9 +100,9 @@ class ModuleTester:
header=header,
names=('x', 'y'))
assert cond, msg
- except ValueError:
+ except ValueError as e:
msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y'))
- raise ValueError(msg)
+ raise ValueError(msg) from e
def assert_array_equal(self, x, y, err_msg=''):
"""
diff --git a/numpy/matrixlib/__init__.pyi b/numpy/matrixlib/__init__.pyi
new file mode 100644
index 000000000..b240bb327
--- /dev/null
+++ b/numpy/matrixlib/__init__.pyi
@@ -0,0 +1,6 @@
+from typing import Any
+
+matrix: Any
+bmat: Any
+mat: Any
+asmatrix: Any
diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi
new file mode 100644
index 000000000..817ba22ac
--- /dev/null
+++ b/numpy/polynomial/__init__.pyi
@@ -0,0 +1,9 @@
+from typing import Any
+
+Polynomial: Any
+Chebyshev: Any
+Legendre: Any
+Hermite: Any
+HermiteE: Any
+Laguerre: Any
+set_default_printstyle: Any
diff --git a/numpy/random/__init__.pyi b/numpy/random/__init__.pyi
new file mode 100644
index 000000000..f7c3cfafe
--- /dev/null
+++ b/numpy/random/__init__.pyi
@@ -0,0 +1,61 @@
+from typing import Any
+
+beta: Any
+binomial: Any
+bytes: Any
+chisquare: Any
+choice: Any
+dirichlet: Any
+exponential: Any
+f: Any
+gamma: Any
+geometric: Any
+get_state: Any
+gumbel: Any
+hypergeometric: Any
+laplace: Any
+logistic: Any
+lognormal: Any
+logseries: Any
+multinomial: Any
+multivariate_normal: Any
+negative_binomial: Any
+noncentral_chisquare: Any
+noncentral_f: Any
+normal: Any
+pareto: Any
+permutation: Any
+poisson: Any
+power: Any
+rand: Any
+randint: Any
+randn: Any
+random: Any
+random_integers: Any
+random_sample: Any
+ranf: Any
+rayleigh: Any
+sample: Any
+seed: Any
+set_state: Any
+shuffle: Any
+standard_cauchy: Any
+standard_exponential: Any
+standard_gamma: Any
+standard_normal: Any
+standard_t: Any
+triangular: Any
+uniform: Any
+vonmises: Any
+wald: Any
+weibull: Any
+zipf: Any
+Generator: Any
+RandomState: Any
+SeedSequence: Any
+MT19937: Any
+Philox: Any
+PCG64: Any
+SFC64: Any
+default_rng: Any
+BitGenerator: Any
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index 66847043b..e40dcefe3 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -5,6 +5,7 @@ import warnings
from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
from cpython cimport (Py_INCREF, PyFloat_AsDouble)
+from cpython.mem cimport PyMem_Malloc, PyMem_Free
cimport cython
import numpy as np
@@ -28,6 +29,13 @@ from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
validate_output_shape
)
+cdef extern from "numpy/arrayobject.h":
+ int PyArray_ResolveWritebackIfCopy(np.ndarray)
+ object PyArray_FromArray(np.PyArrayObject *, np.PyArray_Descr *, int)
+
+ enum:
+ NPY_ARRAY_WRITEBACKIFCOPY
+
np.import_array()
cdef int64_t _safe_sum_nonneg_int64(size_t num_colors, int64_t *colors):
@@ -48,6 +56,77 @@ cdef int64_t _safe_sum_nonneg_int64(size_t num_colors, int64_t *colors):
return sum
+cdef inline void _shuffle_raw_wrap(bitgen_t *bitgen, np.npy_intp n,
+ np.npy_intp first, np.npy_intp itemsize,
+ np.npy_intp stride,
+ char* data, char* buf) nogil:
+ # We trick gcc into providing a specialized implementation for
+ # the most common case, yielding a ~33% performance improvement.
+ # Note that apparently, only one branch can ever be specialized.
+ if itemsize == sizeof(np.npy_intp):
+ _shuffle_raw(bitgen, n, first, sizeof(np.npy_intp), stride, data, buf)
+ else:
+ _shuffle_raw(bitgen, n, first, itemsize, stride, data, buf)
+
+
+cdef inline void _shuffle_raw(bitgen_t *bitgen, np.npy_intp n,
+ np.npy_intp first, np.npy_intp itemsize,
+ np.npy_intp stride,
+ char* data, char* buf) nogil:
+ """
+ Parameters
+ ----------
+ bitgen
+ Pointer to a bitgen_t instance.
+ n
+ Number of elements in data
+ first
+ First observation to shuffle. Shuffles n-1,
+ n-2, ..., first, so that when first=1 the entire
+ array is shuffled
+ itemsize
+ Size in bytes of item
+ stride
+ Array stride
+ data
+ Location of data
+ buf
+ Location of buffer (itemsize)
+ """
+ cdef np.npy_intp i, j
+
+ for i in reversed(range(first, n)):
+ j = random_interval(bitgen, i)
+ string.memcpy(buf, data + j * stride, itemsize)
+ string.memcpy(data + j * stride, data + i * stride, itemsize)
+ string.memcpy(data + i * stride, buf, itemsize)
+
+
+cdef inline void _shuffle_int(bitgen_t *bitgen, np.npy_intp n,
+ np.npy_intp first, int64_t* data) nogil:
+ """
+ Parameters
+ ----------
+ bitgen
+ Pointer to a bitgen_t instance.
+ n
+ Number of elements in data
+ first
+ First observation to shuffle. Shuffles n-1,
+ n-2, ..., first, so that when first=1 the entire
+ array is shuffled
+ data
+ Location of data
+ """
+ cdef np.npy_intp i, j
+ cdef int64_t temp
+ for i in reversed(range(first, n)):
+ j = random_bounded_uint64(bitgen, 0, i, 0, 0)
+ temp = data[j]
+ data[j] = data[i]
+ data[i] = temp
+
+
cdef bint _check_bit_generator(object bitgen):
"""Check if an object satisfies the BitGenerator interface.
"""
@@ -708,8 +787,8 @@ cdef class Generator:
idx = np.PyArray_Arange(0, pop_size_i, 1, np.NPY_INT64)
idx_data = <int64_t*>(<np.ndarray>idx).data
with self.lock, nogil:
- self._shuffle_int(pop_size_i, max(pop_size_i - size_i, 1),
- idx_data)
+ _shuffle_int(&self._bitgen, pop_size_i,
+ max(pop_size_i - size_i, 1), idx_data)
# Copy to allow potentially large array backing idx to be gc
idx = idx[(pop_size - size):].copy()
else:
@@ -737,7 +816,7 @@ cdef class Generator:
hash_set[loc] = j
idx_data[j - pop_size_i + size_i] = j
if shuffle:
- self._shuffle_int(size_i, 1, idx_data)
+ _shuffle_int(&self._bitgen, size_i, 1, idx_data)
idx.shape = shape
if is_scalar and isinstance(idx, np.ndarray):
@@ -4114,7 +4193,159 @@ cdef class Generator:
return diric
- # Shuffling and permutations:
+ def permuted(self, object x, *, axis=None, out=None):
+ """
+ permuted(x, axis=None, out=None)
+
+ Randomly permute `x` along axis `axis`.
+
+ Unlike `shuffle`, each slice along the given axis is shuffled
+ independently of the others.
+
+ Parameters
+ ----------
+ x : array_like, at least one-dimensional
+ Array to be shuffled.
+ axis : int, optional
+ Slices of `x` in this axis are shuffled. Each slice
+ is shuffled independently of the others. If `axis` is
+ None, the flattened array is shuffled.
+ out : ndarray, optional
+            If given, this is the destination of the shuffled array.
+ If `out` is None, a shuffled copy of the array is returned.
+
+ Returns
+ -------
+ ndarray
+ If `out` is None, a shuffled copy of `x` is returned.
+ Otherwise, the shuffled array is stored in `out`,
+            and `out` is returned.
+
+ See Also
+ --------
+ shuffle
+ permutation
+
+ Examples
+ --------
+ Create a `numpy.random.Generator` instance:
+
+ >>> rng = np.random.default_rng()
+
+ Create a test array:
+
+ >>> x = np.arange(24).reshape(3, 8)
+ >>> x
+ array([[ 0, 1, 2, 3, 4, 5, 6, 7],
+ [ 8, 9, 10, 11, 12, 13, 14, 15],
+ [16, 17, 18, 19, 20, 21, 22, 23]])
+
+        Shuffle the elements within each row of `x`:
+
+ >>> y = rng.permuted(x, axis=1)
+ >>> y
+ array([[ 4, 3, 6, 7, 1, 2, 5, 0], # random
+ [15, 10, 14, 9, 12, 11, 8, 13],
+ [17, 16, 20, 21, 18, 22, 23, 19]])
+
+ `x` has not been modified:
+
+ >>> x
+ array([[ 0, 1, 2, 3, 4, 5, 6, 7],
+ [ 8, 9, 10, 11, 12, 13, 14, 15],
+ [16, 17, 18, 19, 20, 21, 22, 23]])
+
+        To shuffle the elements within the rows of `x` in-place, pass
+        `x` as the `out` parameter:
+
+ >>> y = rng.permuted(x, axis=1, out=x)
+ >>> x
+ array([[ 3, 0, 4, 7, 1, 6, 2, 5], # random
+ [ 8, 14, 13, 9, 12, 11, 15, 10],
+ [17, 18, 16, 22, 19, 23, 20, 21]])
+
+ Note that when the ``out`` parameter is given, the return
+ value is ``out``:
+
+ >>> y is x
+ True
+ """
+
+ cdef int ax
+ cdef np.npy_intp axlen, axstride, itemsize
+ cdef void *buf
+ cdef np.flatiter it
+ cdef np.ndarray to_shuffle
+ cdef int status
+ cdef int flags
+
+ x = np.asarray(x)
+
+ if out is None:
+ out = x.copy(order='K')
+ else:
+ if type(out) is not np.ndarray:
+ raise TypeError('out must be a numpy array')
+ if out.shape != x.shape:
+ raise ValueError('out must have the same shape as x')
+ np.copyto(out, x, casting='safe')
+
+ if axis is None:
+ if x.ndim > 1:
+ if not (np.PyArray_FLAGS(out) & (np.NPY_ARRAY_C_CONTIGUOUS |
+ np.NPY_ARRAY_F_CONTIGUOUS)):
+ flags = (np.NPY_ARRAY_C_CONTIGUOUS |
+ NPY_ARRAY_WRITEBACKIFCOPY)
+ to_shuffle = PyArray_FromArray(<np.PyArrayObject *>out,
+ <np.PyArray_Descr *>NULL, flags)
+ self.shuffle(to_shuffle.ravel(order='K'))
+ # Because we only execute this block if out is not
+ # contiguous, we know this call will always result in a
+ # copy of to_shuffle back to out. I.e. status will be 1.
+ status = PyArray_ResolveWritebackIfCopy(to_shuffle)
+ assert status == 1
+ else:
+ # out is n-d with n > 1, but is either C- or F-contiguous,
+ # so we know out.ravel(order='A') is a view.
+ self.shuffle(out.ravel(order='A'))
+ else:
+ # out is 1-d
+ self.shuffle(out)
+ return out
+
+ ax = normalize_axis_index(axis, np.ndim(out))
+ itemsize = out.itemsize
+ axlen = out.shape[ax]
+ axstride = out.strides[ax]
+
+ it = np.PyArray_IterAllButAxis(out, &ax)
+
+ buf = PyMem_Malloc(itemsize)
+ if buf == NULL:
+ raise MemoryError('memory allocation failed in permuted')
+
+ if out.dtype.hasobject:
+ # Keep the GIL when shuffling an object array.
+ with self.lock:
+ while np.PyArray_ITER_NOTDONE(it):
+ _shuffle_raw_wrap(&self._bitgen, axlen, 0, itemsize,
+ axstride,
+ <char *>np.PyArray_ITER_DATA(it),
+ <char *>buf)
+ np.PyArray_ITER_NEXT(it)
+ else:
+ # out is not an object array, so we can release the GIL.
+ with self.lock, nogil:
+ while np.PyArray_ITER_NOTDONE(it):
+ _shuffle_raw_wrap(&self._bitgen, axlen, 0, itemsize,
+ axstride,
+ <char *>np.PyArray_ITER_DATA(it),
+ <char *>buf)
+ np.PyArray_ITER_NEXT(it)
+
+ PyMem_Free(buf)
+ return out
+
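
For contrast with `shuffle` below: the behavior `permuted` adds can be
emulated with an explicit per-slice loop. A rough semantic sketch, not
how the Cython path works (which shuffles raw strided memory in one
pass):

    import numpy as np

    rng = np.random.default_rng(1234)
    x = np.arange(12).reshape(3, 4)

    # permuted(x, axis=1): every row is shuffled independently.
    y = x.copy()
    for row in y:
        rng.shuffle(row)

    # shuffle(x, axis=0): a single permutation reorders whole rows.
    z = x.copy()
    rng.shuffle(z, axis=0)
    print(y)
    print(z)
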
def shuffle(self, object x, axis=0):
"""
shuffle(x, axis=0)
@@ -4177,14 +4408,15 @@ cdef class Generator:
# when the function exits.
buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit
buf_ptr = <char*><size_t>np.PyArray_DATA(buf)
- with self.lock:
- # We trick gcc into providing a specialized implementation for
- # the most common case, yielding a ~33% performance improvement.
- # Note that apparently, only one branch can ever be specialized.
- if itemsize == sizeof(np.npy_intp):
- self._shuffle_raw(n, 1, sizeof(np.npy_intp), stride, x_ptr, buf_ptr)
- else:
- self._shuffle_raw(n, 1, itemsize, stride, x_ptr, buf_ptr)
+ if x.dtype.hasobject:
+ with self.lock:
+ _shuffle_raw_wrap(&self._bitgen, n, 1, itemsize, stride,
+ x_ptr, buf_ptr)
+ else:
+ # Same as above, but the GIL is released.
+ with self.lock, nogil:
+ _shuffle_raw_wrap(&self._bitgen, n, 1, itemsize, stride,
+ x_ptr, buf_ptr)
elif isinstance(x, np.ndarray) and x.ndim and x.size:
x = np.swapaxes(x, 0, axis)
buf = np.empty_like(x[0, ...])
@@ -4207,56 +4439,6 @@ cdef class Generator:
j = random_interval(&self._bitgen, i)
x[i], x[j] = x[j], x[i]
- cdef inline _shuffle_raw(self, np.npy_intp n, np.npy_intp first,
- np.npy_intp itemsize, np.npy_intp stride,
- char* data, char* buf):
- """
- Parameters
- ----------
- n
- Number of elements in data
- first
- First observation to shuffle. Shuffles n-1,
- n-2, ..., first, so that when first=1 the entire
- array is shuffled
- itemsize
- Size in bytes of item
- stride
- Array stride
- data
- Location of data
- buf
- Location of buffer (itemsize)
- """
- cdef np.npy_intp i, j
- for i in reversed(range(first, n)):
- j = random_interval(&self._bitgen, i)
- string.memcpy(buf, data + j * stride, itemsize)
- string.memcpy(data + j * stride, data + i * stride, itemsize)
- string.memcpy(data + i * stride, buf, itemsize)
-
- cdef inline void _shuffle_int(self, np.npy_intp n, np.npy_intp first,
- int64_t* data) nogil:
- """
- Parameters
- ----------
- n
- Number of elements in data
- first
- First observation to shuffle. Shuffles n-1,
- n-2, ..., first, so that when first=1 the entire
- array is shuffled
- data
- Location of data
- """
- cdef np.npy_intp i, j
- cdef int64_t temp
- for i in reversed(range(first, n)):
- j = random_bounded_uint64(&self._bitgen, 0, i, 0, 0)
- temp = data[j]
- data[j] = data[i]
- data[i] = temp
-
def permutation(self, object x, axis=0):
"""
permutation(x, axis=0)
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index df305e689..d43e7f5aa 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -383,7 +383,7 @@ cdef class RandomState:
.. note::
New code should use the ``random`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -453,7 +453,7 @@ cdef class RandomState:
.. note::
New code should use the ``beta`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -503,7 +503,7 @@ cdef class RandomState:
.. note::
New code should use the ``exponential`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -552,7 +552,7 @@ cdef class RandomState:
.. note::
New code should use the ``standard_exponential`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -653,7 +653,7 @@ cdef class RandomState:
.. note::
New code should use the ``integers`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -774,7 +774,7 @@ cdef class RandomState:
.. note::
New code should use the ``bytes`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -812,7 +812,7 @@ cdef class RandomState:
.. note::
New code should use the ``choice`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1017,7 +1017,7 @@ cdef class RandomState:
.. note::
New code should use the ``uniform`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1185,7 +1185,7 @@ cdef class RandomState:
.. note::
New code should use the ``standard_normal`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
If positive int_like arguments are provided, `randn` generates an array
of shape ``(d0, d1, ..., dn)``, filled
@@ -1339,7 +1339,7 @@ cdef class RandomState:
.. note::
New code should use the ``standard_normal`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1414,7 +1414,7 @@ cdef class RandomState:
.. note::
New code should use the ``normal`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1514,7 +1514,7 @@ cdef class RandomState:
.. note::
New code should use the ``standard_gamma`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1595,7 +1595,7 @@ cdef class RandomState:
.. note::
New code should use the ``gamma`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1684,7 +1684,7 @@ cdef class RandomState:
.. note::
New code should use the ``f`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1772,7 +1772,7 @@ cdef class RandomState:
.. note::
New code should use the ``noncentral_f`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1857,7 +1857,7 @@ cdef class RandomState:
.. note::
New code should use the ``chisquare`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -1930,7 +1930,7 @@ cdef class RandomState:
.. note::
New code should use the ``noncentral_chisquare`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2016,7 +2016,7 @@ cdef class RandomState:
.. note::
New code should use the ``standard_cauchy`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2092,7 +2092,7 @@ cdef class RandomState:
.. note::
New code should use the ``standard_t`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2197,7 +2197,7 @@ cdef class RandomState:
.. note::
New code should use the ``vonmises`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2295,7 +2295,7 @@ cdef class RandomState:
.. note::
New code should use the ``pareto`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2389,7 +2389,7 @@ cdef class RandomState:
.. note::
New code should use the ``weibull`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2485,7 +2485,7 @@ cdef class RandomState:
.. note::
New code should use the ``power`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2596,7 +2596,7 @@ cdef class RandomState:
.. note::
New code should use the ``laplace`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2687,7 +2687,7 @@ cdef class RandomState:
.. note::
New code should use the ``gumbel`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2809,7 +2809,7 @@ cdef class RandomState:
.. note::
New code should use the ``logistic`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -2896,7 +2896,7 @@ cdef class RandomState:
.. note::
New code should use the ``lognormal`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3009,7 +3009,7 @@ cdef class RandomState:
.. note::
New code should use the ``rayleigh`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3091,7 +3091,7 @@ cdef class RandomState:
.. note::
New code should use the ``wald`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3164,7 +3164,7 @@ cdef class RandomState:
.. note::
New code should use the ``triangular`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3271,7 +3271,7 @@ cdef class RandomState:
.. note::
New code should use the ``binomial`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3421,7 +3421,7 @@ cdef class RandomState:
.. note::
New code should use the ``negative_binomial`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3506,7 +3506,7 @@ cdef class RandomState:
.. note::
New code should use the ``poisson`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3592,7 +3592,7 @@ cdef class RandomState:
.. note::
New code should use the ``zipf`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3682,7 +3682,7 @@ cdef class RandomState:
.. note::
New code should use the ``geometric`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3736,7 +3736,7 @@ cdef class RandomState:
.. note::
New code should use the ``hypergeometric`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3867,7 +3867,7 @@ cdef class RandomState:
.. note::
New code should use the ``logseries`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -3960,7 +3960,7 @@ cdef class RandomState:
.. note::
New code should use the ``multivariate_normal`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -4134,7 +4134,7 @@ cdef class RandomState:
.. note::
New code should use the ``multinomial`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -4252,7 +4252,7 @@ cdef class RandomState:
.. note::
New code should use the ``dirichlet`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -4398,7 +4398,7 @@ cdef class RandomState:
.. note::
New code should use the ``shuffle`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
@@ -4493,7 +4493,7 @@ cdef class RandomState:
.. note::
New code should use the ``permutation`` method of a ``default_rng()``
- instance instead; see `random-quick-start`.
+ instance instead; please see the :ref:`random-quick-start`.
Parameters
----------
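
All of the note updates above point the legacy `RandomState` methods at
the quick-start guide; the migration they recommend is mechanical. A
short before/after sketch:

    import numpy as np

    # Legacy interface (still supported, frozen stream guarantees):
    np.random.seed(42)
    legacy = np.random.standard_normal(3)

    # Recommended for new code:
    rng = np.random.default_rng(42)
    modern = rng.standard_normal(3)

    # The values differ: default_rng uses PCG64, not MT19937.
    print(legacy, modern)
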
diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py
index bb6d25ef1..6be7d852b 100644
--- a/numpy/random/tests/test_generator_mt19937.py
+++ b/numpy/random/tests/test_generator_mt19937.py
@@ -1039,6 +1039,56 @@ class TestRandomDist:
assert_raises(np.AxisError, random.permutation, arr, 3)
assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
+ @pytest.mark.parametrize("dtype", [int, object])
+ @pytest.mark.parametrize("axis, expected",
+ [(None, np.array([[3, 7, 0, 9, 10, 11],
+ [8, 4, 2, 5, 1, 6]])),
+ (0, np.array([[6, 1, 2, 9, 10, 11],
+ [0, 7, 8, 3, 4, 5]])),
+ (1, np.array([[ 5, 3, 4, 0, 2, 1],
+ [11, 9, 10, 6, 8, 7]]))])
+ def test_permuted(self, dtype, axis, expected):
+ random = Generator(MT19937(self.seed))
+ x = np.arange(12).reshape(2, 6).astype(dtype)
+ random.permuted(x, axis=axis, out=x)
+ assert_array_equal(x, expected)
+
+ random = Generator(MT19937(self.seed))
+ x = np.arange(12).reshape(2, 6).astype(dtype)
+ y = random.permuted(x, axis=axis)
+ assert y.dtype == dtype
+ assert_array_equal(y, expected)
+
+ def test_permuted_with_strides(self):
+ random = Generator(MT19937(self.seed))
+ x0 = np.arange(22).reshape(2, 11)
+ x1 = x0.copy()
+ x = x0[:, ::3]
+ y = random.permuted(x, axis=1, out=x)
+ expected = np.array([[0, 9, 3, 6],
+ [14, 20, 11, 17]])
+ assert_array_equal(y, expected)
+ x1[:, ::3] = expected
+ # Verify that the original x0 was modified in-place as expected.
+ assert_array_equal(x1, x0)
+
+ def test_permuted_empty(self):
+ y = random.permuted([])
+ assert_array_equal(y, [])
+
+ @pytest.mark.parametrize('outshape', [(2, 3), 5])
+ def test_permuted_out_with_wrong_shape(self, outshape):
+ a = np.array([1, 2, 3])
+ out = np.zeros(outshape, dtype=a.dtype)
+ with pytest.raises(ValueError, match='same shape'):
+ random.permuted(a, out=out)
+
+ def test_permuted_out_with_wrong_type(self):
+ out = np.zeros((3, 5), dtype=np.int32)
+ x = np.ones((3, 5))
+ with pytest.raises(TypeError, match='Cannot cast'):
+ random.permuted(x, axis=1, out=out)
+
def test_beta(self):
random = Generator(MT19937(self.seed))
actual = random.beta(.1, .9, size=(3, 2))
diff --git a/numpy/rec.pyi b/numpy/rec.pyi
new file mode 100644
index 000000000..c70ee5374
--- /dev/null
+++ b/numpy/rec.pyi
@@ -0,0 +1,5 @@
+from typing import Any
+
+record: Any
+recarray: Any
+format_parser: Any
diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi
new file mode 100644
index 000000000..c394a387d
--- /dev/null
+++ b/numpy/testing/__init__.pyi
@@ -0,0 +1,44 @@
+from typing import Any
+
+assert_equal: Any
+assert_almost_equal: Any
+assert_approx_equal: Any
+assert_array_equal: Any
+assert_array_less: Any
+assert_string_equal: Any
+assert_array_almost_equal: Any
+assert_raises: Any
+build_err_msg: Any
+decorate_methods: Any
+jiffies: Any
+memusage: Any
+print_assert_equal: Any
+raises: Any
+rundocs: Any
+runstring: Any
+verbose: Any
+measure: Any
+assert_: Any
+assert_array_almost_equal_nulp: Any
+assert_raises_regex: Any
+assert_array_max_ulp: Any
+assert_warns: Any
+assert_no_warnings: Any
+assert_allclose: Any
+IgnoreException: Any
+clear_and_catch_warnings: Any
+SkipTest: Any
+KnownFailureException: Any
+temppath: Any
+tempdir: Any
+IS_PYPY: Any
+HAS_REFCOUNT: Any
+suppress_warnings: Any
+assert_array_compare: Any
+_assert_valid_refcount: Any
+_gen_alignment_data: Any
+assert_no_gc_cycles: Any
+break_cycles: Any
+HAS_LAPACK64: Any
+TestCase: Any
+run_module_suite: Any
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 6a6cc664a..c3b9e04b6 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -1240,7 +1240,7 @@ def assert_warn_len_equal(mod, n_in_context, py34=None, py37=None):
if sys.version_info[:2] >= (3, 7):
if py37 is not None:
n_in_context = py37
- elif sys.version_info[:2] >= (3, 4):
+ else:
if py34 is not None:
n_in_context = py34
assert_equal(num_warns, n_in_context)
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
index a9d6da01c..21b8b838f 100644
--- a/numpy/tests/test_public_api.py
+++ b/numpy/tests/test_public_api.py
@@ -145,18 +145,7 @@ PUBLIC_MODULES = ['numpy.' + s for s in [
"distutils.log",
"distutils.system_info",
"doc",
- "doc.basics",
- "doc.broadcasting",
- "doc.byteswapping",
"doc.constants",
- "doc.creation",
- "doc.dispatch",
- "doc.glossary",
- "doc.indexing",
- "doc.internals",
- "doc.misc",
- "doc.structured_arrays",
- "doc.subclassing",
"doc.ufuncs",
"f2py",
"fft",
@@ -281,7 +270,6 @@ PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [
"lib.arraypad",
"lib.arraysetops",
"lib.arrayterator",
- "lib.financial",
"lib.function_base",
"lib.histograms",
"lib.index_tricks",
@@ -368,18 +356,6 @@ def test_all_modules_are_expected():
SKIP_LIST_2 = [
'numpy.math',
'numpy.distutils.log.sys',
- 'numpy.distutils.system_info.copy',
- 'numpy.distutils.system_info.distutils',
- 'numpy.distutils.system_info.log',
- 'numpy.distutils.system_info.os',
- 'numpy.distutils.system_info.platform',
- 'numpy.distutils.system_info.re',
- 'numpy.distutils.system_info.shutil',
- 'numpy.distutils.system_info.subprocess',
- 'numpy.distutils.system_info.sys',
- 'numpy.distutils.system_info.tempfile',
- 'numpy.distutils.system_info.textwrap',
- 'numpy.distutils.system_info.warnings',
'numpy.doc.constants.re',
'numpy.doc.constants.textwrap',
'numpy.lib.emath',
diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py
index 3a00162cb..86fd5e787 100644
--- a/numpy/typing/__init__.py
+++ b/numpy/typing/__init__.py
@@ -93,3 +93,8 @@ Please see : https://numpy.org/devdocs/reference/arrays.dtypes.html
from ._array_like import _SupportsArray, ArrayLike
from ._shape import _Shape, _ShapeLike
from ._dtype_like import DtypeLike
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
+
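
Attaching a `PytestTester` gives `numpy.typing` the same self-test
entry point as the other subpackages. Assuming pytest (and mypy, for
these particular tests) is installed, the suite can be run in-process:

    import numpy as np

    # Returns True on success, False otherwise (standard
    # PytestTester behavior).
    ok = np.typing.test(verbose=1)
    print("typing tests passed:", ok)
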
diff --git a/numpy/tests/setup.py b/numpy/typing/setup.py
index f034cdf95..c444e769f 100644
--- a/numpy/tests/setup.py
+++ b/numpy/typing/setup.py
@@ -1,7 +1,8 @@
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
- config = Configuration('tests', parent_package, top_path)
- config.add_data_dir('typing')
+ config = Configuration('typing', parent_package, top_path)
+ config.add_subpackage('tests')
+ config.add_data_dir('tests/data')
return config
diff --git a/numpy/typing/tests/__init__.py b/numpy/typing/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/typing/tests/__init__.py
diff --git a/numpy/tests/typing/fail/array_like.py b/numpy/typing/tests/data/fail/array_like.py
index a97e72dc7..a97e72dc7 100644
--- a/numpy/tests/typing/fail/array_like.py
+++ b/numpy/typing/tests/data/fail/array_like.py
diff --git a/numpy/tests/typing/fail/dtype.py b/numpy/typing/tests/data/fail/dtype.py
index 3dc027daf..3dc027daf 100644
--- a/numpy/tests/typing/fail/dtype.py
+++ b/numpy/typing/tests/data/fail/dtype.py
diff --git a/numpy/typing/tests/data/fail/flatiter.py b/numpy/typing/tests/data/fail/flatiter.py
new file mode 100644
index 000000000..e8a82344f
--- /dev/null
+++ b/numpy/typing/tests/data/fail/flatiter.py
@@ -0,0 +1,25 @@
+from typing import Any
+
+import numpy as np
+from numpy.typing import DtypeLike, _SupportsArray
+
+
+class Index:
+ def __index__(self) -> int:
+ ...
+
+
+a: "np.flatiter[np.ndarray]"
+supports_array: _SupportsArray
+
+a.base = Any # E: Property "base" defined in "flatiter" is read-only
+a.coords = Any # E: Property "coords" defined in "flatiter" is read-only
+a.index = Any # E: Property "index" defined in "flatiter" is read-only
+a.copy(order='C') # E: Unexpected keyword argument
+
+# NOTE: Contrary to `ndarray.__getitem__` its counterpart in `flatiter`
+# does not accept objects with the `__array__` or `__index__` protocols;
+# boolean indexing is just plain broken (gh-17175)
+a[np.bool_()] # E: No overload variant of "__getitem__"
+a[Index()] # E: No overload variant of "__getitem__"
+a[supports_array] # E: No overload variant of "__getitem__"
diff --git a/numpy/tests/typing/fail/fromnumeric.py b/numpy/typing/tests/data/fail/fromnumeric.py
index 66f8a89d0..c9156895d 100644
--- a/numpy/tests/typing/fail/fromnumeric.py
+++ b/numpy/typing/tests/data/fail/fromnumeric.py
@@ -124,3 +124,31 @@ np.amin(a, keepdims=1.0) # E: No overload variant of "amin" matches argument ty
np.amin(a, out=1.0) # E: No overload variant of "amin" matches argument type
np.amin(a, initial=[1.0]) # E: No overload variant of "amin" matches argument type
np.amin(a, where=[1.0]) # E: List item 0 has incompatible type
+
+np.prod(a, axis=1.0) # E: No overload variant of "prod" matches argument type
+np.prod(a, out=False) # E: No overload variant of "prod" matches argument type
+np.prod(a, keepdims=1.0) # E: No overload variant of "prod" matches argument type
+np.prod(a, initial=int) # E: No overload variant of "prod" matches argument type
+np.prod(a, where=1.0) # E: No overload variant of "prod" matches argument type
+
+np.cumprod(a, axis=1.0) # E: Argument "axis" to "cumprod" has incompatible type
+np.cumprod(a, out=False) # E: Argument "out" to "cumprod" has incompatible type
+
+np.size(a, axis=1.0) # E: Argument "axis" to "size" has incompatible type
+
+np.around(a, decimals=1.0) # E: No overload variant of "around" matches argument type
+np.around(a, out=type) # E: No overload variant of "around" matches argument type
+
+np.mean(a, axis=1.0) # E: No overload variant of "mean" matches argument type
+np.mean(a, out=False) # E: No overload variant of "mean" matches argument type
+np.mean(a, keepdims=1.0) # E: No overload variant of "mean" matches argument type
+
+np.std(a, axis=1.0) # E: No overload variant of "std" matches argument type
+np.std(a, out=False) # E: No overload variant of "std" matches argument type
+np.std(a, ddof='test') # E: No overload variant of "std" matches argument type
+np.std(a, keepdims=1.0) # E: No overload variant of "std" matches argument type
+
+np.var(a, axis=1.0) # E: No overload variant of "var" matches argument type
+np.var(a, out=False) # E: No overload variant of "var" matches argument type
+np.var(a, ddof='test') # E: No overload variant of "var" matches argument type
+np.var(a, keepdims=1.0) # E: No overload variant of "var" matches argument type
diff --git a/numpy/typing/tests/data/fail/linspace.py b/numpy/typing/tests/data/fail/linspace.py
new file mode 100644
index 000000000..a9769c5d6
--- /dev/null
+++ b/numpy/typing/tests/data/fail/linspace.py
@@ -0,0 +1,13 @@
+import numpy as np
+
+np.linspace(None, 'bob') # E: No overload variant
+np.linspace(0, 2, num=10.0) # E: No overload variant
+np.linspace(0, 2, endpoint='True') # E: No overload variant
+np.linspace(0, 2, retstep=b'False') # E: No overload variant
+np.linspace(0, 2, dtype=0) # E: No overload variant
+np.linspace(0, 2, axis=None) # E: No overload variant
+
+np.logspace(None, 'bob') # E: Argument 1
+np.logspace(0, 2, base=None) # E: Argument "base"
+
+np.geomspace(None, 'bob') # E: Argument 1
diff --git a/numpy/typing/tests/data/fail/modules.py b/numpy/typing/tests/data/fail/modules.py
new file mode 100644
index 000000000..e7ffe8920
--- /dev/null
+++ b/numpy/typing/tests/data/fail/modules.py
@@ -0,0 +1,3 @@
+import numpy as np
+
+np.testing.bob # E: Module has no attribute
diff --git a/numpy/tests/typing/fail/ndarray.py b/numpy/typing/tests/data/fail/ndarray.py
index 5a5130d40..5a5130d40 100644
--- a/numpy/tests/typing/fail/ndarray.py
+++ b/numpy/typing/tests/data/fail/ndarray.py
diff --git a/numpy/tests/typing/fail/numerictypes.py b/numpy/typing/tests/data/fail/numerictypes.py
index dd03eacc1..dd03eacc1 100644
--- a/numpy/tests/typing/fail/numerictypes.py
+++ b/numpy/typing/tests/data/fail/numerictypes.py
diff --git a/numpy/tests/typing/fail/scalars.py b/numpy/typing/tests/data/fail/scalars.py
index 5d7221895..47c031163 100644
--- a/numpy/tests/typing/fail/scalars.py
+++ b/numpy/typing/tests/data/fail/scalars.py
@@ -32,11 +32,16 @@ dt_64 = np.datetime64(0, "D")
td_64 = np.timedelta64(1, "h")
dt_64 + dt_64 # E: Unsupported operand types
-
td_64 - dt_64 # E: Unsupported operand types
-td_64 / dt_64 # E: No overload
td_64 % 1 # E: Unsupported operand types
-td_64 % dt_64 # E: Unsupported operand types
+
+# NOTE: The 2 tests below currently don't work due to the broad
+# (i.e. untyped) signature of `generic.__truediv__()` and `.__mod__()`.
+# TODO: Revisit this once annotations are added to the
+# `_ArrayOrScalarCommon` magic methods.
+
+# td_64 / dt_64 # E: No overload
+# td_64 % dt_64 # E: Unsupported operand types
class A:
diff --git a/numpy/tests/typing/fail/simple.py b/numpy/typing/tests/data/fail/simple.py
index 57c08fb7d..57c08fb7d 100644
--- a/numpy/tests/typing/fail/simple.py
+++ b/numpy/typing/tests/data/fail/simple.py
diff --git a/numpy/tests/typing/fail/ufuncs.py b/numpy/typing/tests/data/fail/ufuncs.py
index 4da9d08ba..4da9d08ba 100644
--- a/numpy/tests/typing/fail/ufuncs.py
+++ b/numpy/typing/tests/data/fail/ufuncs.py
diff --git a/numpy/tests/typing/fail/warnings_and_errors.py b/numpy/typing/tests/data/fail/warnings_and_errors.py
index 7390cc45f..7390cc45f 100644
--- a/numpy/tests/typing/fail/warnings_and_errors.py
+++ b/numpy/typing/tests/data/fail/warnings_and_errors.py
diff --git a/numpy/tests/typing/mypy.ini b/numpy/typing/tests/data/mypy.ini
index 91d93588a..91d93588a 100644
--- a/numpy/tests/typing/mypy.ini
+++ b/numpy/typing/tests/data/mypy.ini
diff --git a/numpy/tests/typing/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py
index 6b823ca7e..6b823ca7e 100644
--- a/numpy/tests/typing/pass/array_like.py
+++ b/numpy/typing/tests/data/pass/array_like.py
diff --git a/numpy/tests/typing/pass/dtype.py b/numpy/typing/tests/data/pass/dtype.py
index cbae8c078..cbae8c078 100644
--- a/numpy/tests/typing/pass/dtype.py
+++ b/numpy/typing/tests/data/pass/dtype.py
diff --git a/numpy/typing/tests/data/pass/flatiter.py b/numpy/typing/tests/data/pass/flatiter.py
new file mode 100644
index 000000000..c0219eb2b
--- /dev/null
+++ b/numpy/typing/tests/data/pass/flatiter.py
@@ -0,0 +1,14 @@
+import numpy as np
+
+a = np.empty((2, 2)).flat
+
+a.base
+a.copy()
+a.coords
+a.index
+iter(a)
+next(a)
+a[0]
+a[[0, 1, 2]]
+a[...]
+a[:]
diff --git a/numpy/tests/typing/pass/fromnumeric.py b/numpy/typing/tests/data/pass/fromnumeric.py
index d9dd45c54..9e936e684 100644
--- a/numpy/tests/typing/pass/fromnumeric.py
+++ b/numpy/typing/tests/data/pass/fromnumeric.py
@@ -10,6 +10,7 @@ B.setflags(write=False)
a = np.bool_(True)
b = np.float32(1.0)
c = 1.0
+d = np.array(1.0, dtype=np.float32) # writeable
np.take(a, 0)
np.take(b, 0)
@@ -183,3 +184,77 @@ np.amin(A, axis=0)
np.amin(B, axis=0)
np.amin(A, keepdims=True)
np.amin(B, keepdims=True)
+
+np.prod(a)
+np.prod(b)
+np.prod(c)
+np.prod(A)
+np.prod(B)
+np.prod(a, dtype=None)
+np.prod(A, dtype=None)
+np.prod(A, axis=0)
+np.prod(B, axis=0)
+np.prod(A, keepdims=True)
+np.prod(B, keepdims=True)
+np.prod(b, out=d)
+np.prod(B, out=d)
+
+np.cumprod(a)
+np.cumprod(b)
+np.cumprod(c)
+np.cumprod(A)
+np.cumprod(B)
+
+np.ndim(a)
+np.ndim(b)
+np.ndim(c)
+np.ndim(A)
+np.ndim(B)
+
+np.size(a)
+np.size(b)
+np.size(c)
+np.size(A)
+np.size(B)
+
+np.around(a)
+np.around(b)
+np.around(c)
+np.around(A)
+np.around(B)
+
+np.mean(a)
+np.mean(b)
+np.mean(c)
+np.mean(A)
+np.mean(B)
+np.mean(A, axis=0)
+np.mean(B, axis=0)
+np.mean(A, keepdims=True)
+np.mean(B, keepdims=True)
+np.mean(b, out=d)
+np.mean(B, out=d)
+
+np.std(a)
+np.std(b)
+np.std(c)
+np.std(A)
+np.std(B)
+np.std(A, axis=0)
+np.std(B, axis=0)
+np.std(A, keepdims=True)
+np.std(B, keepdims=True)
+np.std(b, out=d)
+np.std(B, out=d)
+
+np.var(a)
+np.var(b)
+np.var(c)
+np.var(A)
+np.var(B)
+np.var(A, axis=0)
+np.var(B, axis=0)
+np.var(A, keepdims=True)
+np.var(B, keepdims=True)
+np.var(b, out=d)
+np.var(B, out=d)
diff --git a/numpy/typing/tests/data/pass/linspace.py b/numpy/typing/tests/data/pass/linspace.py
new file mode 100644
index 000000000..8c6d0d56b
--- /dev/null
+++ b/numpy/typing/tests/data/pass/linspace.py
@@ -0,0 +1,22 @@
+import numpy as np
+
+class Index:
+ def __index__(self) -> int:
+ return 0
+
+np.linspace(0, 2)
+np.linspace(0.5, [0, 1, 2])
+np.linspace([0, 1, 2], 3)
+np.linspace(0j, 2)
+np.linspace(0, 2, num=10)
+np.linspace(0, 2, endpoint=True)
+np.linspace(0, 2, retstep=True)
+np.linspace(0j, 2j, retstep=True)
+np.linspace(0, 2, dtype=bool)
+np.linspace([0, 1], [2, 3], axis=Index())
+
+np.logspace(0, 2, base=2)
+np.logspace(0, 2, base=2)
+np.logspace(0, 2, base=[1j, 2j], num=2)
+
+np.geomspace(1, 2)
diff --git a/numpy/typing/tests/data/pass/literal.py b/numpy/typing/tests/data/pass/literal.py
new file mode 100644
index 000000000..321ce3c2b
--- /dev/null
+++ b/numpy/typing/tests/data/pass/literal.py
@@ -0,0 +1,43 @@
+from functools import partial
+from typing import Callable, List, Tuple
+
+import pytest # type: ignore
+import numpy as np
+
+AR = np.array(0)
+AR.setflags(write=False)
+
+KACF = frozenset({None, "K", "A", "C", "F"})
+ACF = frozenset({None, "A", "C", "F"})
+CF = frozenset({None, "C", "F"})
+
+order_list: List[Tuple[frozenset, Callable]] = [
+ (KACF, partial(np.ndarray, 1)),
+ (KACF, AR.tobytes),
+ (KACF, partial(AR.astype, int)),
+ (KACF, AR.copy),
+ (ACF, partial(AR.reshape, 1)),
+ (KACF, AR.flatten),
+ (KACF, AR.ravel),
+ (KACF, partial(np.array, 1)),
+ (CF, partial(np.zeros, 1)),
+ (CF, partial(np.ones, 1)),
+ (CF, partial(np.empty, 1)),
+ (CF, partial(np.full, 1, 1)),
+ (KACF, partial(np.zeros_like, AR)),
+ (KACF, partial(np.ones_like, AR)),
+ (KACF, partial(np.empty_like, AR)),
+ (KACF, partial(np.full_like, AR, 1)),
+ (KACF, partial(np.add, 1, 1)), # i.e. np.ufunc.__call__
+ (ACF, partial(np.reshape, AR, 1)),
+ (KACF, partial(np.ravel, AR)),
+]
+
+for order_set, func in order_list:
+ for order in order_set:
+ func(order=order)
+
+ invalid_orders = KACF - order_set
+ for order in invalid_orders:
+ with pytest.raises(ValueError):
+ func(order=order)
diff --git a/numpy/tests/typing/pass/ndarray_conversion.py b/numpy/typing/tests/data/pass/ndarray_conversion.py
index 303cf53e4..303cf53e4 100644
--- a/numpy/tests/typing/pass/ndarray_conversion.py
+++ b/numpy/typing/tests/data/pass/ndarray_conversion.py
diff --git a/numpy/tests/typing/pass/ndarray_shape_manipulation.py b/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py
index 0ca3dff39..0ca3dff39 100644
--- a/numpy/tests/typing/pass/ndarray_shape_manipulation.py
+++ b/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py
diff --git a/numpy/tests/typing/pass/numerictypes.py b/numpy/typing/tests/data/pass/numerictypes.py
index 4f205cabc..4f205cabc 100644
--- a/numpy/tests/typing/pass/numerictypes.py
+++ b/numpy/typing/tests/data/pass/numerictypes.py
diff --git a/numpy/tests/typing/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py
index 1c7ace282..c02e1ed36 100644
--- a/numpy/tests/typing/pass/scalars.py
+++ b/numpy/typing/tests/data/pass/scalars.py
@@ -1,27 +1,38 @@
+import sys
+import datetime as dt
+
import numpy as np
# Construction
+class D:
+ def __index__(self) -> int:
+ return 0
+
+
class C:
- def __complex__(self):
+ def __complex__(self) -> complex:
return 3j
class B:
- def __int__(self):
+ def __int__(self) -> int:
return 4
class A:
- def __float__(self):
+ def __float__(self) -> float:
return 4.0
np.complex64(3j)
+np.complex64(A())
np.complex64(C())
np.complex128(3j)
np.complex128(C())
np.complex128(None)
+np.complex64("1.2")
+np.complex128(b"2j")
np.int8(4)
np.int16(3.4)
@@ -29,11 +40,20 @@ np.int32(4)
np.int64(-1)
np.uint8(B())
np.uint32()
+np.int32("1")
+np.int64(b"2")
np.float16(A())
np.float32(16)
np.float64(3.0)
np.float64(None)
+np.float32("1")
+np.float16(b"2.5")
+
+if sys.version_info >= (3, 8):
+ np.uint64(D())
+ np.float32(D())
+ np.complex64(D())
np.bytes_(b"hello")
np.bytes_("hello", 'utf-8')
@@ -66,14 +86,25 @@ np.uint64().shape
# Time structures
np.datetime64()
np.datetime64(0, "D")
+np.datetime64(0, b"D")
+np.datetime64(0, ('ms', 3))
np.datetime64("2019")
+np.datetime64(b"2019")
np.datetime64("2019", "D")
+np.datetime64(np.datetime64())
+np.datetime64(dt.datetime(2000, 5, 3))
np.datetime64(None)
np.datetime64(None, "D")
np.timedelta64()
np.timedelta64(0)
np.timedelta64(0, "D")
+np.timedelta64(0, ('ms', 3))
+np.timedelta64(0, b"D")
+np.timedelta64("3")
+np.timedelta64(b"5")
+np.timedelta64(np.timedelta64(2))
+np.timedelta64(dt.timedelta(2))
np.timedelta64(None)
np.timedelta64(None, "D")
diff --git a/numpy/tests/typing/pass/simple.py b/numpy/typing/tests/data/pass/simple.py
index 527050557..527050557 100644
--- a/numpy/tests/typing/pass/simple.py
+++ b/numpy/typing/tests/data/pass/simple.py
diff --git a/numpy/tests/typing/pass/simple_py3.py b/numpy/typing/tests/data/pass/simple_py3.py
index c05a1ce61..c05a1ce61 100644
--- a/numpy/tests/typing/pass/simple_py3.py
+++ b/numpy/typing/tests/data/pass/simple_py3.py
diff --git a/numpy/tests/typing/pass/ufuncs.py b/numpy/typing/tests/data/pass/ufuncs.py
index 82172952a..82172952a 100644
--- a/numpy/tests/typing/pass/ufuncs.py
+++ b/numpy/typing/tests/data/pass/ufuncs.py
diff --git a/numpy/tests/typing/pass/warnings_and_errors.py b/numpy/typing/tests/data/pass/warnings_and_errors.py
index 5b6ec2626..5b6ec2626 100644
--- a/numpy/tests/typing/pass/warnings_and_errors.py
+++ b/numpy/typing/tests/data/pass/warnings_and_errors.py
diff --git a/numpy/tests/typing/reveal/constants.py b/numpy/typing/tests/data/reveal/constants.py
index 8e00810bd..8e00810bd 100644
--- a/numpy/tests/typing/reveal/constants.py
+++ b/numpy/typing/tests/data/reveal/constants.py
diff --git a/numpy/typing/tests/data/reveal/flatiter.py b/numpy/typing/tests/data/reveal/flatiter.py
new file mode 100644
index 000000000..56cdc7a0e
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/flatiter.py
@@ -0,0 +1,14 @@
+import numpy as np
+
+a: "np.flatiter[np.ndarray]"
+
+reveal_type(a.base) # E: numpy.ndarray*
+reveal_type(a.copy()) # E: numpy.ndarray*
+reveal_type(a.coords) # E: tuple[builtins.int]
+reveal_type(a.index) # E: int
+reveal_type(iter(a)) # E: Iterator[numpy.generic*]
+reveal_type(next(a)) # E: numpy.generic
+reveal_type(a[0]) # E: numpy.generic
+reveal_type(a[[0, 1, 2]]) # E: numpy.ndarray*
+reveal_type(a[...]) # E: numpy.ndarray*
+reveal_type(a[:]) # E: numpy.ndarray*
diff --git a/numpy/tests/typing/reveal/fromnumeric.py b/numpy/typing/tests/data/reveal/fromnumeric.py
index f5feb3f5f..06501f6e2 100644
--- a/numpy/tests/typing/reveal/fromnumeric.py
+++ b/numpy/typing/tests/data/reveal/fromnumeric.py
@@ -10,6 +10,7 @@ B.setflags(write=False)
a = np.bool_(True)
b = np.float32(1.0)
c = 1.0
+d = np.array(1.0, dtype=np.float32) # writeable
reveal_type(np.take(a, 0)) # E: numpy.bool_
reveal_type(np.take(b, 0)) # E: numpy.float32
@@ -203,3 +204,75 @@ reveal_type(np.amin(A, axis=0)) # E: Union[numpy.number, numpy.ndarray]
reveal_type(np.amin(B, axis=0)) # E: Union[numpy.number, numpy.ndarray]
reveal_type(np.amin(A, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
reveal_type(np.amin(B, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+
+reveal_type(np.prod(a)) # E: numpy.number
+reveal_type(np.prod(b)) # E: numpy.float32
+reveal_type(np.prod(c)) # E: numpy.number
+reveal_type(np.prod(A)) # E: numpy.number
+reveal_type(np.prod(B)) # E: numpy.number
+reveal_type(np.prod(A, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.prod(B, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.prod(A, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.prod(B, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.prod(b, out=d)) # E: numpy.ndarray
+reveal_type(np.prod(B, out=d)) # E: numpy.ndarray
+
+reveal_type(np.cumprod(a)) # E: numpy.ndarray
+reveal_type(np.cumprod(b)) # E: numpy.ndarray
+reveal_type(np.cumprod(c)) # E: numpy.ndarray
+reveal_type(np.cumprod(A)) # E: numpy.ndarray
+reveal_type(np.cumprod(B)) # E: numpy.ndarray
+
+reveal_type(np.ndim(a)) # E: int
+reveal_type(np.ndim(b)) # E: int
+reveal_type(np.ndim(c)) # E: int
+reveal_type(np.ndim(A)) # E: int
+reveal_type(np.ndim(B)) # E: int
+
+reveal_type(np.size(a)) # E: int
+reveal_type(np.size(b)) # E: int
+reveal_type(np.size(c)) # E: int
+reveal_type(np.size(A)) # E: int
+reveal_type(np.size(B)) # E: int
+
+reveal_type(np.around(a)) # E: numpy.number
+reveal_type(np.around(b)) # E: numpy.float32
+reveal_type(np.around(c)) # E: numpy.number
+reveal_type(np.around(A)) # E: numpy.ndarray
+reveal_type(np.around(B)) # E: numpy.ndarray
+
+reveal_type(np.mean(a)) # E: numpy.number
+reveal_type(np.mean(b)) # E: numpy.number
+reveal_type(np.mean(c)) # E: numpy.number
+reveal_type(np.mean(A)) # E: numpy.number
+reveal_type(np.mean(B)) # E: numpy.number
+reveal_type(np.mean(A, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.mean(B, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.mean(A, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.mean(B, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.mean(b, out=d)) # E: numpy.ndarray
+reveal_type(np.mean(B, out=d)) # E: numpy.ndarray
+
+reveal_type(np.std(a)) # E: numpy.number
+reveal_type(np.std(b)) # E: numpy.number
+reveal_type(np.std(c)) # E: numpy.number
+reveal_type(np.std(A)) # E: numpy.number
+reveal_type(np.std(B)) # E: numpy.number
+reveal_type(np.std(A, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.std(B, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.std(A, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.std(B, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.std(b, out=d)) # E: numpy.ndarray
+reveal_type(np.std(B, out=d)) # E: numpy.ndarray
+
+reveal_type(np.var(a)) # E: numpy.number
+reveal_type(np.var(b)) # E: numpy.number
+reveal_type(np.var(c)) # E: numpy.number
+reveal_type(np.var(A)) # E: numpy.number
+reveal_type(np.var(B)) # E: numpy.number
+reveal_type(np.var(A, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.var(B, axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.var(A, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.var(B, keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(np.var(b, out=d)) # E: numpy.ndarray
+reveal_type(np.var(B, out=d)) # E: numpy.ndarray
diff --git a/numpy/typing/tests/data/reveal/linspace.py b/numpy/typing/tests/data/reveal/linspace.py
new file mode 100644
index 000000000..cfbbdf390
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/linspace.py
@@ -0,0 +1,6 @@
+import numpy as np
+
+reveal_type(np.linspace(0, 10)) # E: numpy.ndarray
+reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[numpy.ndarray, numpy.inexact]
+reveal_type(np.logspace(0, 10)) # E: numpy.ndarray
+reveal_type(np.geomspace(1, 10)) # E: numpy.ndarray
diff --git a/numpy/typing/tests/data/reveal/modules.py b/numpy/typing/tests/data/reveal/modules.py
new file mode 100644
index 000000000..406463152
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/modules.py
@@ -0,0 +1,20 @@
+import numpy as np
+
+reveal_type(np) # E: ModuleType
+
+reveal_type(np.char) # E: ModuleType
+reveal_type(np.ctypeslib) # E: ModuleType
+reveal_type(np.emath) # E: ModuleType
+reveal_type(np.fft) # E: ModuleType
+reveal_type(np.lib) # E: ModuleType
+reveal_type(np.linalg) # E: ModuleType
+reveal_type(np.ma) # E: ModuleType
+reveal_type(np.matrixlib) # E: ModuleType
+reveal_type(np.polynomial) # E: ModuleType
+reveal_type(np.random) # E: ModuleType
+reveal_type(np.rec) # E: ModuleType
+reveal_type(np.testing) # E: ModuleType
+reveal_type(np.version) # E: ModuleType
+
+# TODO: Remove when annotations have been added to `np.testing.assert_equal`
+reveal_type(np.testing.assert_equal) # E: Any
diff --git a/numpy/tests/typing/reveal/ndarray_conversion.py b/numpy/typing/tests/data/reveal/ndarray_conversion.py
index 411adcf63..4ee637b75 100644
--- a/numpy/tests/typing/reveal/ndarray_conversion.py
+++ b/numpy/typing/tests/data/reveal/ndarray_conversion.py
@@ -9,7 +9,7 @@ reveal_type(nd.item(0, 1)) # E: Any
reveal_type(nd.item((0, 1))) # E: Any
# tolist
-reveal_type(nd.tolist()) # E: builtins.list[Any]
+reveal_type(nd.tolist()) # E: Any
# itemset does not return a value
# tostring is pretty simple
diff --git a/numpy/tests/typing/reveal/ndarray_shape_manipulation.py b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py
index a44e1cfa1..a44e1cfa1 100644
--- a/numpy/tests/typing/reveal/ndarray_shape_manipulation.py
+++ b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py
diff --git a/numpy/tests/typing/reveal/numerictypes.py b/numpy/typing/tests/data/reveal/numerictypes.py
index e026158cd..e026158cd 100644
--- a/numpy/tests/typing/reveal/numerictypes.py
+++ b/numpy/typing/tests/data/reveal/numerictypes.py
diff --git a/numpy/tests/typing/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.py
index 8a9555fc3..882fe9612 100644
--- a/numpy/tests/typing/reveal/scalars.py
+++ b/numpy/typing/tests/data/reveal/scalars.py
@@ -28,3 +28,6 @@ reveal_type(td - 1) # E: numpy.timedelta64
reveal_type(td / 1.0) # E: numpy.timedelta64
reveal_type(td / td) # E: float
reveal_type(td % td) # E: numpy.timedelta64
+
+reveal_type(np.complex64().real) # E: numpy.float32
+reveal_type(np.complex128().imag) # E: numpy.float64
diff --git a/numpy/tests/typing/reveal/warnings_and_errors.py b/numpy/typing/tests/data/reveal/warnings_and_errors.py
index c428deb7a..c428deb7a 100644
--- a/numpy/tests/typing/reveal/warnings_and_errors.py
+++ b/numpy/typing/tests/data/reveal/warnings_and_errors.py
diff --git a/numpy/tests/test_typing.py b/numpy/typing/tests/test_typing.py
index 04ea3c64d..beb53ddec 100644
--- a/numpy/tests/test_typing.py
+++ b/numpy/typing/tests/test_typing.py
@@ -12,15 +12,13 @@ except ImportError:
else:
NO_MYPY = False
-TESTS_DIR = os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
- "typing",
-)
-PASS_DIR = os.path.join(TESTS_DIR, "pass")
-FAIL_DIR = os.path.join(TESTS_DIR, "fail")
-REVEAL_DIR = os.path.join(TESTS_DIR, "reveal")
-MYPY_INI = os.path.join(TESTS_DIR, "mypy.ini")
-CACHE_DIR = os.path.join(TESTS_DIR, ".mypy_cache")
+
+DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
+PASS_DIR = os.path.join(DATA_DIR, "pass")
+FAIL_DIR = os.path.join(DATA_DIR, "fail")
+REVEAL_DIR = os.path.join(DATA_DIR, "reveal")
+MYPY_INI = os.path.join(DATA_DIR, "mypy.ini")
+CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
def get_test_cases(directory):
@@ -89,7 +87,7 @@ def test_fail(path):
for i, line in enumerate(lines):
lineno = i + 1
- if " E:" not in line and lineno not in errors:
+ if line.startswith('#') or (" E:" not in line and lineno not in errors):
continue
target_line = lines[lineno - 1]
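Note: the added `line.startswith('#')` guard keeps full-line comments that happen to
contain ` E:` (such as the `# TODO` note in modules.py above) from being treated as
expected-error markers. A small self-contained illustration of the filter:

    lines = [
        "# TODO: a full-line comment that mentions  E: something",
        "reveal_type(np.char)  # E: ModuleType",
    ]
    for i, line in enumerate(lines):
        if line.startswith('#') or " E:" not in line:
            continue
        print(i + 1, line)  # only the second line is checked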
diff --git a/numpy/version.pyi b/numpy/version.pyi
new file mode 100644
index 000000000..6f3659e43
--- /dev/null
+++ b/numpy/version.pyi
@@ -0,0 +1,7 @@
+from typing import Any
+
+short_version: Any
+version: Any
+full_version: Any
+git_revision: Any
+release: Any
diff --git a/runtests.py b/runtests.py
index 8aefab0db..2f07749f8 100755
--- a/runtests.py
+++ b/runtests.py
@@ -108,6 +108,8 @@ def main(argv):
help="Start IPython shell with PYTHONPATH set")
parser.add_argument("--shell", action="store_true",
help="Start Unix shell with PYTHONPATH set")
+ parser.add_argument("--mypy", action="store_true",
+ help="Run mypy on files with NumPy on the MYPYPATH")
parser.add_argument("--debug", "-g", action="store_true",
help="Debug build")
parser.add_argument("--parallel", "-j", type=int, default=0,
@@ -131,7 +133,7 @@ def main(argv):
"COMMIT. Note that you need to commit your "
"changes first!"))
parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
- help="Arguments to pass to Nose, asv, Python or shell")
+ help="Arguments to pass to pytest, asv, mypy, Python or shell")
args = parser.parse_args(argv)
if args.durations < 0:
@@ -211,6 +213,36 @@ def main(argv):
subprocess.call([shell] + extra_argv)
sys.exit(0)
+ if args.mypy:
+ try:
+ import mypy.api
+ except ImportError:
+ raise RuntimeError(
+ "Mypy not found. Please install it by running "
+ "pip install -r test_requirements.txt from the repo root"
+ )
+
+ os.environ['MYPYPATH'] = site_dir
+ # By default mypy won't color the output since it isn't being
+ # invoked from a tty.
+ os.environ['MYPY_FORCE_COLOR'] = '1'
+
+ config = os.path.join(
+ site_dir,
+ "numpy",
+ "typing",
+ "tests",
+ "data",
+ "mypy.ini",
+ )
+
+ report, errors, status = mypy.api.run(
+ ['--config-file', config] + args.args
+ )
+ print(report, end='')
+ print(errors, end='', file=sys.stderr)
+ sys.exit(status)
+
if args.coverage:
dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
fn = os.path.join(dst_dir, 'coverage_html.js')
@@ -341,7 +373,7 @@ def build_project(args):
"""
- import distutils.sysconfig
+ import sysconfig
root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
for fn in PROJECT_ROOT_FILES]
@@ -357,7 +389,7 @@ def build_project(args):
# Always use ccache, if installed
env['PATH'] = os.pathsep.join(EXTRA_PATH + env.get('PATH', '').split(os.pathsep))
- cvars = distutils.sysconfig.get_config_vars()
+ cvars = sysconfig.get_config_vars()
compiler = env.get('CC') or cvars.get('CC', '')
if 'gcc' in compiler:
# Check that this isn't clang masquerading as gcc.
@@ -414,7 +446,7 @@ def build_project(args):
os.makedirs(site_dir)
if not os.path.exists(site_dir_noarch):
os.makedirs(site_dir_noarch)
- env['PYTHONPATH'] = site_dir + ':' + site_dir_noarch
+ env['PYTHONPATH'] = site_dir + os.pathsep + site_dir_noarch
log_filename = os.path.join(ROOT_DIR, 'build.log')
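Note: with this hunk applied, `python runtests.py --mypy <files>` forwards the remaining
arguments to mypy. The same call can be reproduced directly through mypy's Python API;
a minimal standalone sketch (the target file name here is hypothetical):

    import sys
    import mypy.api

    report, errors, status = mypy.api.run(
        ['--config-file', 'numpy/typing/tests/data/mypy.ini', 'example.py']
    )
    print(report, end='')
    print(errors, end='', file=sys.stderr)
    sys.exit(status)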
diff --git a/setup.py b/setup.py
index 0016deac0..1f5212676 100755
--- a/setup.py
+++ b/setup.py
@@ -189,7 +189,6 @@ def check_submodules():
for line in status.splitlines():
if line.startswith('-') or line.startswith('+'):
raise ValueError('Submodule not clean: {}'.format(line))
-
class concat_license_files():
@@ -234,20 +233,27 @@ def get_build_overrides():
"""
from numpy.distutils.command.build_clib import build_clib
from numpy.distutils.command.build_ext import build_ext
+ from distutils.version import LooseVersion
- def _is_using_gcc(obj):
- is_gcc = False
- if obj.compiler.compiler_type == 'unix':
- cc = sysconfig.get_config_var("CC")
- if not cc:
- cc = ""
- compiler_name = os.path.basename(cc)
- is_gcc = "gcc" in compiler_name
- return is_gcc
+ def _needs_gcc_c99_flag(obj):
+ if obj.compiler.compiler_type != 'unix':
+ return False
+
+ cc = obj.compiler.compiler[0]
+ if "gcc" not in cc:
+ return False
+
+ # will print something like '4.2.1\n'
+ out = subprocess.run([cc, '-dumpversion'], stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, universal_newlines=True)
+ # -std=c99 is default from this version on
+ if LooseVersion(out.stdout) >= LooseVersion('5.0'):
+ return False
+ return True
class new_build_clib(build_clib):
def build_a_library(self, build_info, lib_name, libraries):
- if _is_using_gcc(self):
+ if _needs_gcc_c99_flag(self):
args = build_info.get('extra_compiler_args') or []
args.append('-std=c99')
build_info['extra_compiler_args'] = args
@@ -255,7 +261,7 @@ def get_build_overrides():
class new_build_ext(build_ext):
def build_extension(self, ext):
- if _is_using_gcc(self):
+ if _needs_gcc_c99_flag(self):
if '-std=c99' not in ext.extra_compile_args:
ext.extra_compile_args.append('-std=c99')
build_ext.build_extension(self, ext)
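Note: `LooseVersion` is used above rather than a plain string comparison because
version strings do not sort lexically; a quick self-contained illustration:

    from distutils.version import LooseVersion

    print('10.2' >= '5.0')                              # False: lexical string order
    print(LooseVersion('10.2') >= LooseVersion('5.0'))  # True: compared component-wise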
diff --git a/test_requirements.txt b/test_requirements.txt
index 51551d01f..7ef91125c 100644
--- a/test_requirements.txt
+++ b/test_requirements.txt
@@ -1,10 +1,10 @@
cython==0.29.21
wheel
setuptools<49.2.0
-hypothesis==5.23.9
+hypothesis==5.33.0
pytest==6.0.1
pytz==2020.1
-pytest-cov==2.10.0
+pytest-cov==2.10.1
pickle5; python_version == '3.7'
pickle5; python_version == '3.6' and platform_python_implementation != 'PyPy'
# for numpy.random.test.test_extending
diff --git a/tools/functions_missing_types.py b/tools/functions_missing_types.py
index 0fee97777..a32e72dad 100755
--- a/tools/functions_missing_types.py
+++ b/tools/functions_missing_types.py
@@ -50,17 +50,6 @@ EXCLUDE_LIST = {
"object",
"str",
"unicode",
- # Should use numpy_financial instead
- "fv",
- "ipmt",
- "irr",
- "mirr",
- "nper",
- "npv",
- "pmt",
- "ppmt",
- "pv",
- "rate",
# More standard names should be preferred
"alltrue", # all
"sometrue", # any
diff --git a/tools/refguide_check.py b/tools/refguide_check.py
index b3891c5cb..138e0ece7 100644
--- a/tools/refguide_check.py
+++ b/tools/refguide_check.py
@@ -71,7 +71,6 @@ BASE_MODULE = "numpy"
PUBLIC_SUBMODULES = [
'core',
- 'doc.structured_arrays',
'f2py',
'linalg',
'lib',
@@ -122,6 +121,15 @@ RST_SKIPLIST = [
'c-info.python-as-glue.rst',
'f2py.getting-started.rst',
'arrays.nditer.cython.rst',
+ # See PR 17222, these should be fixed
+ 'basics.broadcasting.rst',
+ 'basics.byteswapping.rst',
+ 'basics.creation.rst',
+ 'basics.dispatch.rst',
+ 'basics.indexing.rst',
+ 'basics.subclassing.rst',
+ 'basics.types.rst',
+ 'misc.rst',
]
# these names are not required to be present in ALL despite being in
@@ -260,7 +268,7 @@ def get_all_dict(module):
except ValueError:
pass
if not all_dict:
- # Must be a pure documentation module like doc.structured_arrays
+ # Must be a pure documentation module
all_dict.append('__doc__')
# Modules are almost always private; real submodules need a separate
@@ -388,8 +396,8 @@ def check_items(all_dict, names, deprecated, others, module_name, dots=True):
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
- dep_in_ref = set(only_ref).intersection(deprecated)
- only_ref = set(only_ref).difference(deprecated)
+ dep_in_ref = only_ref.intersection(deprecated)
+ only_ref = only_ref.difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
diff --git a/tools/travis-sorter.py b/tools/travis-sorter.py
new file mode 100755
index 000000000..c13204f7e
--- /dev/null
+++ b/tools/travis-sorter.py
@@ -0,0 +1,287 @@
+#!/usr/bin/env python3
+"""
+Run with a repo/build number or a list of Travis CI build times to show the
+optimal build order: the one that finishes fastest and makes full use of all
+available parallel build jobs.
+
+Requires the Travis Client CLI:
+
+https://github.com/travis-ci/travis.rb#installation
+
+# Example
+
+$ # Check build 22 of hugovk/numpy, and skip the first job (it's a single stage)
+$ travis-sorter.py hugovk/numpy 22 --skip 1
+travis show -r hugovk/numpy 22
+[8, 7, 8, 10, 9, 18, 8, 11, 8, 10, 8, 8, 17, 8, 26]
+[7, 8, 10, 9, 18, 8, 11, 8, 10, 8, 8, 17, 8, 26]
+Before:
+
+ID Duration in mins
+ 1 *******
+ 2 ********
+ 3 **********
+ 4 *********
+ 5 ******************
+ 6 ********
+ 7 ***********
+ 8 ********
+ 9 **********
+10 ********
+11 ********
+12 *****************
+13 ********
+14 **************************
+End: 46
+ ----------------------------------------------
+
+After:
+
+ID Duration in mins
+14 **************************
+ 5 ******************
+12 *****************
+ 7 ***********
+ 3 **********
+ 9 **********
+ 4 *********
+ 2 ********
+ 6 ********
+ 8 ********
+10 ********
+11 ********
+13 ********
+ 1 *******
+End: 34
+ ----------------------------------
+
+# Example
+
+$ python travis-sorter.py 4 4 4 4 4 12 19
+
+Before:
+
+****
+****
+****
+****
+****
+ ************
+ *******************
+12345678901234567890123 = 23 minutes
+
+After:
+
+*******************
+************
+****
+****
+****
+ ****
+ ****
+1234567890123456789 = 19 minutes
+"""
+import argparse
+import re
+import subprocess
+import sys
+
+count = 1  # module-level counter used to hand out sequential job IDs
+
+
+def summarise(jobs):
+    """Print an ASCII timeline of the jobs and the overall end time."""
+ end = 0
+ print("ID Duration in mins")
+ for job in jobs:
+ before = " " * job.started
+ active = "*" * job.length
+ print("{:2d} {}{}".format(job.id, before, active))
+ if job.started + job.length > end:
+ end = job.started + job.length
+ # for job in jobs:
+ # print(job)
+ print("End:", end)
+ print(" " + "-" * end)
+
+
+class Job:
+    """A single CI job: its duration plus simulated start/end bookkeeping."""
+    def __init__(self, length):
+ global count
+ self.id = count
+ count += 1
+ self.length = length
+ self.started = -1
+ self.status = "not started"
+ self.ended = False
+
+ def __str__(self):
+ return "{}\tLength: {}\tStarted: {}\tEnded: {}".format(
+ self.id, self.length, self.started, self.ended
+ )
+
+
+def count_status(jobs, status):
+    return sum(job.status == status for job in jobs)
+
+
+def simulate(jobs, limit):
+    """Run the jobs minute by minute, with at most `limit` active at once,
+    then print a summary timeline."""
+ time = 0
+
+ # summarise(jobs)
+
+ while True:
+ # Check if any have ended
+ for job in jobs:
+ if job.status == "active":
+ if time >= job.started + job.length:
+ # print("{}/{} Finished:".format(count_status(jobs, "active"), limit))
+ job.ended = time
+ job.status = "finished"
+ # print(job)
+
+ # Check if any can start
+ for job in jobs:
+ if job.status == "not started":
+ if count_status(jobs, "active") < limit:
+ # print("{}/{} Starting:".format(count_status(jobs, "active"), limit))
+ job.started = time
+ job.status = "active"
+ # print(job)
+
+ time += 1
+
+ # Exit loop?
+ if count_status(jobs, "finished") == len(jobs):
+ break
+
+ summarise(jobs)
+
+
+def get_job_times(repo, number):
+    """Fetch the job durations of a Travis build, in whole minutes, via the
+    travis show CLI."""
+ cmd = f"travis show -r {repo} {number or ''}"
+ # cmd = f"travis show --com -r {repo} {number or ''}"
+ print(cmd)
+
+ exitcode = 0
+    # Sample output kept for offline testing (overwritten by the real call below)
+ output = """Build #4: Upgrade Python syntax with pyupgrade https://github.com/asottile/pyupgrade
+State: passed
+Type: push
+Branch: add-3.7
+Compare URL: https://github.com/hugovk/diff-cover/compare/4ae7cf97c6fa...7eeddb300175
+Duration: 16 min 7 sec
+Started: 2018-10-17 19:03:01
+Finished: 2018-10-17 19:09:53
+
+#4.1 passed: 1 min os: linux, env: TOXENV=py27, python: 2.7
+#4.2 passed: 1 min 43 sec os: linux, env: TOXENV=py34, python: 3.4
+#4.3 passed: 1 min 52 sec os: linux, env: TOXENV=py35, python: 3.5
+#4.4 passed: 1 min 38 sec os: linux, env: TOXENV=py36, python: 3.6
+#4.5 passed: 1 min 47 sec os: linux, env: TOXENV=py37, python: 3.7
+#4.6 passed: 4 min 35 sec os: linux, env: TOXENV=pypy, python: pypy
+#4.7 passed: 3 min 17 sec os: linux, env: TOXENV=pypy3, python: pypy3"""
+
+    # A second offline-testing sample (also overwritten below)
+ output = """Build #9: :arrows_clockwise: [EngCom] Public Pull Requests - 2.3-develop
+State: errored
+Type: push
+Branch: 2.3-develop
+Compare URL: https://github.com/hugovk/magento2/compare/80469a61e061...77af5d65ef4f
+Duration: 4 hrs 12 min 13 sec
+Started: 2018-10-27 17:50:51
+Finished: 2018-10-27 18:54:14
+
+#9.1 passed: 3 min 30 sec os: linux, env: TEST_SUITE=unit, php: 7.1
+#9.2 passed: 3 min 35 sec os: linux, env: TEST_SUITE=unit, php: 7.2
+#9.3 passed: 3 min 41 sec os: linux, env: TEST_SUITE=static, php: 7.2
+#9.4 passed: 8 min 48 sec os: linux, env: TEST_SUITE=js GRUNT_COMMAND=spec, php: 7.2
+#9.5 passed: 3 min 24 sec os: linux, env: TEST_SUITE=js GRUNT_COMMAND=static, php: 7.2
+#9.6 errored: 50 min os: linux, env: TEST_SUITE=integration INTEGRATION_INDEX=1, php: 7.1
+#9.7 passed: 49 min 25 sec os: linux, env: TEST_SUITE=integration INTEGRATION_INDEX=1, php: 7.2
+#9.8 passed: 31 min 54 sec os: linux, env: TEST_SUITE=integration INTEGRATION_INDEX=2, php: 7.1
+#9.9 passed: 31 min 24 sec os: linux, env: TEST_SUITE=integration INTEGRATION_INDEX=2, php: 7.2
+#9.10 passed: 27 min 23 sec os: linux, env: TEST_SUITE=integration INTEGRATION_INDEX=3, php: 7.1
+#9.11 passed: 26 min 9 sec os: linux, env: TEST_SUITE=integration INTEGRATION_INDEX=3, php: 7.2
+#9.12 passed: 13 min os: linux, env: TEST_SUITE=functional, php: 7.2"""
+
+    # Real use: this overwrites the offline samples above
+ exitcode, output = subprocess.getstatusoutput(cmd)
+
+ # print(exitcode)
+ # print(output)
+ if exitcode != 0:
+ print(output)
+ sys.exit(exitcode)
+
+    minutes = []
+    # The seconds group is optional so that whole-minute jobs ("50 min os: ...")
+    # are matched as well as "49 min 25 sec" ones.
+    matches = re.findall(r"(pass|fail|error)ed.* (\d+) min(?: (\d+) sec)? ", output)
+    for match in matches:
+        status, m, s = match
+        s = 0 if s == "" else int(s)  # findall yields "" for a missing group
+        s += int(m) * 60
+        minutes.append(round(s / 60))
+
+ # print(minutes)
+ return minutes
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Either give minutes for --jobs (3 5 3 2 5), "
+ "or --repo slug (hugovk/test) and build --number (5)",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ )
+ parser.add_argument(
+ "input",
+ nargs="+",
+ help="Either: times for each build job (minutes), "
+ "or an org/repo slug and optionally build number",
+ )
+ parser.add_argument(
+ "-l", "--limit", type=int, default=5, help="Concurrent jobs limit"
+ )
+ parser.add_argument(
+ "-s", "--skip", type=int, default=0, help="Skip X jobs at the start"
+ )
+ args = parser.parse_args()
+
+    # If every argument is an integer, treat the arguments as job times (minutes)
+    try:
+        job_times = [int(x) for x in args.input]
+ except ValueError:
+ try:
+ number = args.input[1]
+ except IndexError:
+ number = None
+        job_times = get_job_times(args.input[0], number)
+
+ job_times = job_times[args.skip :]
+ # print(job_times)
+
+ print("Before:")
+ print()
+
+ jobs = []
+ for job_time in job_times:
+ job = Job(job_time)
+ jobs.append(job)
+
+ simulate(jobs, args.limit)
+
+ print()
+ print("After:")
+ print()
+
+ # Sort with longest first
+ jobs.sort(key=lambda job: job.length, reverse=True)
+ # Reset status
+ for job in jobs:
+ job.status = "not started"
+
+ simulate(jobs, args.limit)
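Note: the reordering this script performs is the classic longest-processing-time-first
greedy heuristic for makespan scheduling. A compact, self-contained sketch that
reproduces the docstring's second example (5 concurrent jobs is the script's
default --limit):

    import heapq

    def makespan(times, workers):
        finish = [0] * workers  # next-free time of each concurrent slot
        heapq.heapify(finish)
        for t in times:
            earliest = heapq.heappop(finish)
            heapq.heappush(finish, earliest + t)
        return max(finish)

    times = [4, 4, 4, 4, 4, 12, 19]
    print(makespan(times, 5))                        # 23, as in the docstring
    print(makespan(sorted(times, reverse=True), 5))  # 19, longest first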