summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNathaniel J. Smith <njs@pobox.com>2012-06-06 19:51:10 +0100
committerNathaniel J. Smith <njs@pobox.com>2012-06-06 19:51:10 +0100
commit85b682893f1d38cbb3b31f827889e1d54edbc95e (patch)
treea2998716efa9293a9b3ce4f2be4a7d9715adcd8f
parent51616c9265155f75e536ba19c32f9f91337243af (diff)
parentde8c536813472be29a6292721df83d73dbf7016e (diff)
downloadnumpy-85b682893f1d38cbb3b31f827889e1d54edbc95e.tar.gz
Merge branch 'master' into clean-up-diagonal
-rw-r--r--.gitignore2
-rw-r--r--bento.info30
-rw-r--r--bscript37
-rw-r--r--doc/HOWTO_DOCUMENT.rst.txt6
-rw-r--r--doc/release/1.6.1-notes.rst22
-rw-r--r--doc/release/1.6.2-notes.rst90
-rw-r--r--doc/source/release.rst5
-rw-r--r--numpy/core/__init__.py6
-rw-r--r--numpy/core/bento.info3
-rw-r--r--numpy/core/bscript58
-rw-r--r--numpy/core/src/multiarray/nditer_api.c314
-rw-r--r--numpy/core/src/multiarray/nditer_constr.c44
-rw-r--r--numpy/core/src/multiarray/nditer_impl.h26
-rw-r--r--numpy/core/tests/test_numeric.py1
-rw-r--r--numpy/distutils/fcompiler/intel.py2
-rw-r--r--numpy/distutils/system_info.py13
-rw-r--r--numpy/fft/bscript18
-rw-r--r--numpy/lib/bscript19
-rw-r--r--numpy/lib/financial.py28
-rw-r--r--numpy/lib/tests/test_financial.py80
-rw-r--r--numpy/random/bscript2
-rw-r--r--release.sh29
-rw-r--r--tools/test-installed-numpy.py46
-rw-r--r--tox.ini40
24 files changed, 631 insertions, 290 deletions
diff --git a/.gitignore b/.gitignore
index ef6427d69..f54e63b89 100644
--- a/.gitignore
+++ b/.gitignore
@@ -56,6 +56,7 @@ doc/cdoc/build
*.egg-info
# The shelf plugin uses this dir
./.shelf
+MANIFEST
# Logs and databases #
######################
@@ -88,3 +89,4 @@ numpy/core/include/numpy/__ufunc_api.h
numpy/core/include/numpy/_numpyconfig.h
numpy/version.py
site.cfg
+.tox
diff --git a/bento.info b/bento.info
index f8f0257ae..d4c7d3eee 100644
--- a/bento.info
+++ b/bento.info
@@ -56,18 +56,24 @@ DataFiles: tests
TargetDir: $sitedir/numpy
SourceDir: numpy
Files:
- core/tests/*.py,
- distutils/tests/*.py,
- f2py/tests/*.py,
- fft/tests/*.py,
- lib/tests/*.py,
- linalg/tests/*.py,
- ma/tests/*.py,
- matrixlib/tests/*.py,
- oldnumeric/tests/*.py,
- polynomial/tests/*.py,
- random/tests/*.py,
- testing/tests/*.py
+ **/tests/*.py,
+ core/tests/data/*.fits,
+ core/tests/data/*.pkl,
+ f2py/tests/src/array_from_pyobj/*.c,
+ f2py/src/test/*.c,
+ f2py/src/test/*.f,
+ f2py/src/test/*.f90
+
+DataFiles: f2py-data
+ TargetDir: $sitedir
+ Files:
+ numpy/f2py/src/fortranobject.*
+
+DataFiles: numpy-includes
+ TargetDir: $sitedir
+ Files:
+ numpy/core/include/numpy/*.h,
+ numpy/core/include/numpy/fenv/*.h
HookFile: bscript
Recurse: numpy
diff --git a/bscript b/bscript
index 5b0df20be..9517bcbd1 100644
--- a/bscript
+++ b/bscript
@@ -45,32 +45,42 @@ def check_blas_lapack(conf):
conf.check_cc(lib=mkl_libs, msg="Checking for MKL (CBLAS)",
uselib_store="CBLAS")
conf.env.HAS_CBLAS = True
+ except waflib.Errors.ConfigurationError:
+ conf.env.HAS_LAPACK = False
+ try:
conf.check_cc(lib=mkl_libs, msg="Checking for MKL (LAPACK)",
uselib_store="LAPACK")
conf.env.HAS_LAPACK = True
except waflib.Errors.ConfigurationError:
- pass
+ conf.env.HAS_LAPACK = False
+
elif sys.platform == "darwin":
try:
- conf.check(framework="Accelerate", msg="Checking for framework Accelerate", uselib_store="CBLAS")
+ conf.check(framework="Accelerate", msg="Checking for framework Accelerate (CBLAS)", uselib_store="CBLAS")
conf.env.HAS_CBLAS = True
+ except waflib.Errors.ConfigurationError:
+ conf.env.HAS_CBLAS = False
- conf.check(framework="Accelerate", msg="Checking for framework Accelerate", uselib_store="LAPACK")
+ try:
+ conf.check(framework="Accelerate", msg="Checking for framework Accelerate (LAPACK)", uselib_store="LAPACK")
conf.env.HAS_LAPACK = True
except waflib.Errors.ConfigurationError:
- pass
+ conf.env.HAS_LAPACK = False
else:
try:
conf.check_cc(lib=["cblas", "atlas"], uselib_store="CBLAS")
conf.env.HAS_CBLAS = True
+ except waflib.Errors.ConfigurationError:
+ conf.env.HAS_CBLAS = False
+ try:
conf.check_cc(lib=["lapack", "f77blas", "cblas", "atlas"],
uselib_store="LAPACK")
conf.env.HAS_LAPACK = True
except waflib.Errors.ConfigurationError:
- pass
+ conf.env.HAS_LAPACK = False
# You can manually set up blas/lapack as follows:
#conf.env.HAS_CBLAS = True
@@ -105,29 +115,20 @@ def make_git_commit_info(ctx):
commit_template = ctx.make_source_node(op.join("numpy", "version.py.in"))
return set_revision(commit_template, ctx.pkg.version)
-@hooks.pre_configure
-def pre_configure(context):
+@hooks.post_configure
+def post_configure(context):
conf = context.waf_context
-
- conf.load("compiler_c")
- conf.load("custom_python", tooldir=[waf_backend.WAF_TOOLDIR])
-
- conf.check_python_version((2, 4, 0))
- conf.check_python_headers()
-
if conf.env["CC_NAME"] == "gcc":
conf.env.CFLAGS_PYEXT.append("-Wfatal-errors")
check_blas_lapack(conf)
@hooks.pre_build
def pre_build(context):
- context.register_category("git_info")
commit_output = make_git_commit_info(context)
- context.register_outputs("git_info", "git_commit_info", [commit_output])
+ context.register_outputs_simple([commit_output])
# FIXME: we write a dummy show for now - the original show function is not
# super useful anyway.
- context.register_category("gen_config")
config_node = context.make_build_node("numpy/__config__.py")
config_node.safe_write("def show(): pass")
- context.register_outputs("gen_config", "top_config", [config_node])
+ context.register_outputs_simple([config_node])
diff --git a/doc/HOWTO_DOCUMENT.rst.txt b/doc/HOWTO_DOCUMENT.rst.txt
index 8186c472d..5081955a8 100644
--- a/doc/HOWTO_DOCUMENT.rst.txt
+++ b/doc/HOWTO_DOCUMENT.rst.txt
@@ -9,7 +9,7 @@ A Guide to NumPy/SciPy Documentation
For an accompanying example, see `example.py
<http://github.com/numpy/numpy/blob/master/doc/example.py>`_.
- When using `Sphinx <http://sphinx.pocoo.org/>`_ in combination with the
+ When using `Sphinx <http://sphinx.pocoo.org/>`__ in combination with the
numpy conventions, you should use the ``numpydoc`` extension so that your
docstrings will be handled correctly. For example, Sphinx will extract the
``Parameters`` section from your docstring and convert it into a field
@@ -25,9 +25,9 @@ A Guide to NumPy/SciPy Documentation
<https://github.com/numpy/numpy/blob/master/doc/sphinxext/numpydoc.py>`_
Details of how to use it can be found `here
- <https://github.com/numpy/numpy/blob/master/doc/sphinxext/README.txt>`_ and
+ <https://github.com/numpy/numpy/blob/master/doc/sphinxext/README.txt>`__ and
`here
- <https://github.com/numpy/numpy/blob/master/doc/HOWTO_BUILD_DOCS.rst.txt>`_
+ <https://github.com/numpy/numpy/blob/master/doc/HOWTO_BUILD_DOCS.rst.txt>`__
Overview
--------
diff --git a/doc/release/1.6.1-notes.rst b/doc/release/1.6.1-notes.rst
new file mode 100644
index 000000000..5f59cb743
--- /dev/null
+++ b/doc/release/1.6.1-notes.rst
@@ -0,0 +1,22 @@
+=========================
+NumPy 1.6.1 Release Notes
+=========================
+
+This is a bugfix only release in the 1.6.x series.
+
+
+Issues fixed
+------------
+
+#1834 einsum fails for specific shapes
+#1837 einsum throws nan or freezes python for specific array shapes
+#1838 object <-> structured type arrays regression
+#1851 regression for SWIG based code in 1.6.0
+#1863 Buggy results when operating on array copied with astype()
+#1870 Fix corner case of object array assignment
+#1843 Py3k: fix error with recarray
+#1885 nditer: Error in detecting double reduction loop
+#1874 f2py: fix --include_paths bug
+#1749 Fix ctypes.load_library()
+#1895/1896 iter: writeonly operands weren't always being buffered correctly
+
diff --git a/doc/release/1.6.2-notes.rst b/doc/release/1.6.2-notes.rst
new file mode 100644
index 000000000..7b62e6c93
--- /dev/null
+++ b/doc/release/1.6.2-notes.rst
@@ -0,0 +1,90 @@
+=========================
+NumPy 1.6.2 Release Notes
+=========================
+
+This is a bugfix release in the 1.6.x series. Due to the delay of the NumPy
+1.7.0 release, this release contains far more fixes than a regular NumPy bugfix
+release. It also includes a number of documentation and build improvements.
+
+
+``numpy.core`` issues fixed
+---------------------------
+
+#2063 make unique() return consistent index
+#1138 allow creating arrays from empty buffers or empty slices
+#1446 correct note about the correspondence between vstack and concatenate
+#1149 make argmin() work for datetime
+#1672 fix allclose() to work for scalar inf
+#1747 make np.median() work for 0-D arrays
+#1776 make complex division by zero yield inf properly
+#1675 add scalar support for the format() function
+#1905 explicitly check for NaNs in allclose()
+#1952 allow floating ddof in std() and var()
+#1948 fix regression for indexing chararrays with empty list
+#2017 fix type hashing
+#2046 deleting array attributes causes segfault
+#2033 a**2.0 has incorrect type
+#2045 make attribute/iterator_element deletions not segfault
+#2021 fix segfault in searchsorted()
+#2073 fix float16 __array_interface__ bug
+
+
+``numpy.lib`` issues fixed
+--------------------------
+
+#2048 break reference cycle in NpzFile
+#1573 savetxt() now handles complex arrays
+#1387 allow bincount() to accept empty arrays
+#1899 fixed histogramdd() bug with empty inputs
+#1793 fix failing npyio test under py3k
+#1936 fix extra nesting for subarray dtypes
+#1848 make tril/triu return the same dtype as the original array
+#1918 use Py_TYPE to access ob_type, so it works also on Py3
+
+
+``numpy.f2py`` changes
+----------------------
+
+ENH: Introduce new options extra_f77_compiler_args and extra_f90_compiler_args
+BLD: Improve reporting of fcompiler value
+BUG: Fix f2py test_kind.py test
+
+
+``numpy.poly`` changes
+----------------------
+
+ENH: Add some tests for polynomial printing
+ENH: Add companion matrix functions
+DOC: Rearrange the polynomial documents
+BUG: Fix up links to classes
+DOC: Add version added to some of the polynomial package modules
+DOC: Document xxxfit functions in the polynomial package modules
+BUG: The polynomial convenience classes let different types interact
+DOC: Document the use of the polynomial convenience classes
+DOC: Improve numpy reference documentation of polynomial classes
+ENH: Improve the computation of polynomials from roots
+STY: Code cleanup in polynomial [*]fromroots functions
+DOC: Remove references to cast and NA, which were added in 1.7
+
+
+``numpy.distutils`` issues fixed
+--------------------------------
+
+#1261 change compile flag on AIX from -O5 to -O3
+#1377 update HP compiler flags
+#1383 provide better support for C++ code on HPUX
+#1857 fix build for py3k + pip
+BLD: raise a clearer warning in case of building without cleaning up first
+BLD: follow build_ext coding convention in build_clib
+BLD: fix up detection of Intel CPU on OS X in system_info.py
+BLD: add support for the new X11 directory structure on Ubuntu & co.
+BLD: add ufsparse to the libraries search path.
+BLD: add 'pgfortran' as a valid compiler in the Portland Group
+BLD: update version match regexp for IBM AIX Fortran compilers.
+
+
+``numpy.random`` issues fixed
+-----------------------------
+
+BUG: Use npy_intp instead of long in mtrand
+
diff --git a/doc/source/release.rst b/doc/source/release.rst
index ce50cf290..bf9e95a14 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -3,3 +3,8 @@ Release Notes
*************
.. include:: ../release/1.3.0-notes.rst
+.. include:: ../release/1.4.0-notes.rst
+.. include:: ../release/1.5.0-notes.rst
+.. include:: ../release/1.6.0-notes.rst
+.. include:: ../release/1.6.1-notes.rst
+.. include:: ../release/1.6.2-notes.rst
diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py
index 273160b95..5d1599111 100644
--- a/numpy/core/__init__.py
+++ b/numpy/core/__init__.py
@@ -7,7 +7,9 @@ import umath
import _internal # for freeze programs
import numerictypes as nt
multiarray.set_typeDict(nt.sctypeDict)
+import numeric
from numeric import *
+import fromnumeric
from fromnumeric import *
import defchararray as char
import records as rec
@@ -15,9 +17,13 @@ from records import *
from memmap import *
from defchararray import chararray
import scalarmath
+import function_base
from function_base import *
+import machar
from machar import *
+import getlimits
from getlimits import *
+import shape_base
from shape_base import *
del nt
diff --git a/numpy/core/bento.info b/numpy/core/bento.info
index 292f9b5d9..04401f991 100644
--- a/numpy/core/bento.info
+++ b/numpy/core/bento.info
@@ -20,6 +20,9 @@ Library:
Extension: umath
Sources:
src/umath/umathmodule_onefile.c
+ Extension: umath_tests
+ Sources:
+ src/umath/umath_tests.c.src
Extension: scalarmath
Sources:
src/scalarmathmodule.c.src
diff --git a/numpy/core/bscript b/numpy/core/bscript
index c0438fcb8..3db659529 100644
--- a/numpy/core/bscript
+++ b/numpy/core/bscript
@@ -1,9 +1,8 @@
import os
import sys
-from bento.commands.hooks \
- import \
- pre_configure, pre_build, post_build
+from bento.commands import hooks
+
import waflib
import waflib.Errors
from waflib.Task \
@@ -263,8 +262,8 @@ def check_win32_specifics(conf):
if arch == "Intel" or arch == "AMD64":
conf.define('FORCE_NO_LONG_DOUBLE_FORMATTING', 1)
-@pre_configure
-def configure(context):
+@hooks.post_configure
+def post_configure(context):
conf = context.waf_context
try:
@@ -354,6 +353,10 @@ def process_multiarray_api_generator(self):
txt = self.pattern + ".txt"
files = [header, source, txt]
tsk.set_outputs([self.path.find_or_declare(f) for f in files])
+
+ self.bld.register_outputs("numpy_gen_headers", "multiarray",
+ [output for output in tsk.outputs if output.suffix() == ".h"],
+ target_dir="$sitedir/numpy/core/include/numpy")
return tsk
@waflib.TaskGen.feature("ufunc_api_gen")
@@ -369,6 +372,10 @@ def process_api_ufunc_generator(self):
txt = self.pattern + ".txt"
files = [header, source, txt]
tsk.set_outputs([self.path.find_or_declare(f) for f in files])
+
+ headers = [output for output in tsk.outputs if output.suffix() == ".h"]
+ self.bld.register_outputs("numpy_gen_headers", "ufunc", headers,
+ target_dir="$sitedir/numpy/core/include/numpy")
return tsk
class umath_generator(Task):
@@ -391,23 +398,21 @@ def process_umath_generator(self):
return tsk
from os.path import join as pjoin
-@pre_build
-def pbuild(context):
+@hooks.pre_build
+def pre_build(context):
bld = context.waf_context
- def builder(library):
- # FIXME: hack to build static library that can be linked into a dlopen-able
- # library
- return context.default_library_builder(library,
- includes=["src/private", "src/npymath", "include"],
- use="cshlib")
- context.register_compiled_library_builder("npymath", builder)
+ context.register_category("numpy_gen_headers")
+
+ numpyconfig_h = context.local_node.declare(os.path.join("include", "numpy", "_numpyconfig.h"))
+ context.register_outputs("numpy_gen_headers", "numpyconfig", [numpyconfig_h])
+
+ context.tweak_library("npymath",
+ includes=["src/private", "src/npymath", "include"])
- def builder_sort(library):
- return context.default_library_builder(library,
- includes=[".", "src/private", "src/npysort"],
- use="npymath")
- context.register_compiled_library_builder("npysort", builder_sort)
+ context.tweak_library("npysort",
+ includes=[".", "src/private", "src/npysort"],
+ use="npymath")
def builder_multiarray(extension):
bld(name="multiarray_api",
@@ -494,18 +499,11 @@ def pbuild(context):
use="npymath")
context.register_builder("umath", build_ufunc)
- def build_scalarmath(extension):
- return context.default_builder(extension,
- use="npymath")
- context.register_builder("scalarmath", build_scalarmath)
-
- def build_multiarray_tests(extension):
- return context.default_builder(extension,
- use="npymath")
- context.register_builder("multiarray_tests", build_multiarray_tests)
+ context.tweak_extension("scalarmath", use="npymath")
+ context.tweak_extension("multiarray_tests", use="npymath", includes=["src/private"])
+ context.tweak_extension("umath_tests", use="npymath", includes=["src/private"])
def build_dotblas(extension):
if bld.env.HAS_CBLAS:
- return context.default_builder(extension,
- use="CBLAS")
+ return context.default_builder(extension, use="CBLAS")
context.register_builder("_dotblas", build_dotblas)
diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c
index a433df406..cceec518b 100644
--- a/numpy/core/src/multiarray/nditer_api.c
+++ b/numpy/core/src/multiarray/nditer_api.c
@@ -771,7 +771,7 @@ NpyIter_RequiresBuffering(NpyIter *iter)
/*int ndim = NIT_NDIM(iter);*/
int iop, nop = NIT_NOP(iter);
- char *op_itflags;
+ npyiter_opitflags *op_itflags;
if (!(itflags&NPY_ITFLAG_BUFFER)) {
return 0;
@@ -1217,7 +1217,7 @@ NpyIter_GetReadFlags(NpyIter *iter, char *outreadflags)
/*int ndim = NIT_NDIM(iter);*/
int iop, nop = NIT_NOP(iter);
- char *op_itflags = NIT_OPITFLAGS(iter);
+ npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
for (iop = 0; iop < nop; ++iop) {
outreadflags[iop] = (op_itflags[iop]&NPY_OP_ITFLAG_READ) != 0;
@@ -1234,7 +1234,7 @@ NpyIter_GetWriteFlags(NpyIter *iter, char *outwriteflags)
/*int ndim = NIT_NDIM(iter);*/
int iop, nop = NIT_NOP(iter);
- char *op_itflags = NIT_OPITFLAGS(iter);
+ npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
for (iop = 0; iop < nop; ++iop) {
outwriteflags[iop] = (op_itflags[iop]&NPY_OP_ITFLAG_WRITE) != 0;
@@ -1330,7 +1330,7 @@ NpyIter_GetInnerFixedStrideArray(NpyIter *iter, npy_intp *out_strides)
if (itflags&NPY_ITFLAG_BUFFER) {
NpyIter_BufferData *data = NIT_BUFFERDATA(iter);
- char *op_itflags = NIT_OPITFLAGS(iter);
+ npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
npy_intp stride, *strides = NBF_STRIDES(data),
*ad_strides = NAD_STRIDES(axisdata0);
PyArray_Descr **dtypes = NIT_DTYPES(iter);
@@ -1751,14 +1751,14 @@ npyiter_allocate_buffers(NpyIter *iter, char **errmsg)
int iop = 0, nop = NIT_NOP(iter);
npy_intp i;
- char *op_itflags = NIT_OPITFLAGS(iter);
+ npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
PyArray_Descr **op_dtype = NIT_DTYPES(iter);
npy_intp buffersize = NBF_BUFFERSIZE(bufferdata);
char *buffer, **buffers = NBF_BUFFERS(bufferdata);
for (iop = 0; iop < nop; ++iop) {
- char flags = op_itflags[iop];
+ npyiter_opitflags flags = op_itflags[iop];
/*
* If we have determined that a buffer may be needed,
@@ -1889,7 +1889,7 @@ npyiter_copy_from_buffers(NpyIter *iter)
int maskop = NIT_MASKOP(iter);
int first_maskna_op = NIT_FIRST_MASKNA_OP(iter);
- char *op_itflags = NIT_OPITFLAGS(iter);
+ npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter),
*reduce_outeraxisdata = NULL;
@@ -1935,168 +1935,165 @@ npyiter_copy_from_buffers(NpyIter *iter)
/*
* Copy the data back to the arrays. If the type has refs,
* this function moves them so the buffer's refs are released.
+ *
+ * The flag USINGBUFFER is set when the buffer was used, so
+ * only copy back when this flag is on.
*/
- if ((stransfer != NULL) && (op_itflags[iop]&NPY_OP_ITFLAG_WRITE)) {
- /* Copy back only if the pointer was pointing to the buffer */
- npy_intp delta = (ptrs[iop] - buffer);
- if (0 <= delta && delta <= buffersize*dtypes[iop]->elsize) {
- npy_intp op_transfersize;
+ if ((stransfer != NULL) &&
+ (op_itflags[iop]&(NPY_OP_ITFLAG_WRITE|NPY_OP_ITFLAG_USINGBUFFER))
+ == (NPY_OP_ITFLAG_WRITE|NPY_OP_ITFLAG_USINGBUFFER)) {
+ npy_intp op_transfersize;
- npy_intp src_stride, *dst_strides, *dst_coords, *dst_shape;
- int ndim_transfer;
+ npy_intp src_stride, *dst_strides, *dst_coords, *dst_shape;
+ int ndim_transfer;
- NPY_IT_DBG_PRINT1("Iterator: Operand %d was buffered\n",
- (int)iop);
+ NPY_IT_DBG_PRINT1("Iterator: Operand %d was buffered\n",
+ (int)iop);
- /*
- * If this operand is being reduced in the inner loop,
- * its buffering stride was set to zero, and just
- * one element was copied.
- */
- if (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) {
- if (strides[iop] == 0) {
- if (reduce_outerstrides[iop] == 0) {
- op_transfersize = 1;
- src_stride = 0;
- dst_strides = &src_stride;
- dst_coords = &NAD_INDEX(reduce_outeraxisdata);
- dst_shape = &NAD_SHAPE(reduce_outeraxisdata);
- ndim_transfer = 1;
- }
- else {
- op_transfersize = NBF_REDUCE_OUTERSIZE(bufferdata);
- src_stride = reduce_outerstrides[iop];
- dst_strides =
- &NAD_STRIDES(reduce_outeraxisdata)[iop];
- dst_coords = &NAD_INDEX(reduce_outeraxisdata);
- dst_shape = &NAD_SHAPE(reduce_outeraxisdata);
- ndim_transfer = ndim - reduce_outerdim;
- }
+ /*
+ * If this operand is being reduced in the inner loop,
+ * its buffering stride was set to zero, and just
+ * one element was copied.
+ */
+ if (op_itflags[iop]&NPY_OP_ITFLAG_REDUCE) {
+ if (strides[iop] == 0) {
+ if (reduce_outerstrides[iop] == 0) {
+ op_transfersize = 1;
+ src_stride = 0;
+ dst_strides = &src_stride;
+ dst_coords = &NAD_INDEX(reduce_outeraxisdata);
+ dst_shape = &NAD_SHAPE(reduce_outeraxisdata);
+ ndim_transfer = 1;
}
else {
- if (reduce_outerstrides[iop] == 0) {
- op_transfersize = NBF_SIZE(bufferdata);
- src_stride = strides[iop];
- dst_strides = &ad_strides[iop];
- dst_coords = &NAD_INDEX(axisdata);
- dst_shape = &NAD_SHAPE(axisdata);
- ndim_transfer = reduce_outerdim ?
- reduce_outerdim : 1;
- }
- else {
- op_transfersize = transfersize;
- src_stride = strides[iop];
- dst_strides = &ad_strides[iop];
- dst_coords = &NAD_INDEX(axisdata);
- dst_shape = &NAD_SHAPE(axisdata);
- ndim_transfer = ndim;
- }
+ op_transfersize = NBF_REDUCE_OUTERSIZE(bufferdata);
+ src_stride = reduce_outerstrides[iop];
+ dst_strides =
+ &NAD_STRIDES(reduce_outeraxisdata)[iop];
+ dst_coords = &NAD_INDEX(reduce_outeraxisdata);
+ dst_shape = &NAD_SHAPE(reduce_outeraxisdata);
+ ndim_transfer = ndim - reduce_outerdim;
}
}
else {
- op_transfersize = transfersize;
- src_stride = strides[iop];
- dst_strides = &ad_strides[iop];
- dst_coords = &NAD_INDEX(axisdata);
- dst_shape = &NAD_SHAPE(axisdata);
- ndim_transfer = ndim;
- }
-
- NPY_IT_DBG_PRINT2("Iterator: Copying buffer to "
- "operand %d (%d items)\n",
- (int)iop, (int)op_transfersize);
-
- /* USE_MASKNA operand */
- if (iop < first_maskna_op && maskna_indices[iop] >= 0) {
- int iop_maskna = maskna_indices[iop];
- npy_mask *maskptr;
- /* TODO: support WRITEMASKED + USE_MASKNA together */
-
- /*
- * The mask pointer may be in the buffer or in
- * the array, detect which one.
- */
- delta = (ptrs[iop_maskna] - buffers[iop_maskna]);
- if (0 <= delta &&
- delta <= buffersize*dtypes[iop_maskna]->elsize) {
- maskptr = (npy_mask *)buffers[iop_maskna];
+ if (reduce_outerstrides[iop] == 0) {
+ op_transfersize = NBF_SIZE(bufferdata);
+ src_stride = strides[iop];
+ dst_strides = &ad_strides[iop];
+ dst_coords = &NAD_INDEX(axisdata);
+ dst_shape = &NAD_SHAPE(axisdata);
+ ndim_transfer = reduce_outerdim ?
+ reduce_outerdim : 1;
}
else {
- maskptr = (npy_mask *)ad_ptrs[iop_maskna];
+ op_transfersize = transfersize;
+ src_stride = strides[iop];
+ dst_strides = &ad_strides[iop];
+ dst_coords = &NAD_INDEX(axisdata);
+ dst_shape = &NAD_SHAPE(axisdata);
+ ndim_transfer = ndim;
}
+ }
+ }
+ else {
+ op_transfersize = transfersize;
+ src_stride = strides[iop];
+ dst_strides = &ad_strides[iop];
+ dst_coords = &NAD_INDEX(axisdata);
+ dst_shape = &NAD_SHAPE(axisdata);
+ ndim_transfer = ndim;
+ }
+
+ NPY_IT_DBG_PRINT2("Iterator: Copying buffer to "
+ "operand %d (%d items)\n",
+ (int)iop, (int)op_transfersize);
- PyArray_TransferMaskedStridedToNDim(ndim_transfer,
- ad_ptrs[iop], dst_strides, axisdata_incr,
- buffer, src_stride,
- maskptr, strides[iop_maskna],
- dst_coords, axisdata_incr,
- dst_shape, axisdata_incr,
- op_transfersize, dtypes[iop]->elsize,
- (PyArray_MaskedStridedUnaryOp *)stransfer,
- transferdata);
+ /* USE_MASKNA operand */
+ if (iop < first_maskna_op && maskna_indices[iop] >= 0) {
+ int iop_maskna = maskna_indices[iop];
+ npy_mask *maskptr;
+ /* TODO: support WRITEMASKED + USE_MASKNA together */
+
+ /*
+ * The mask pointer may be in the buffer or in
+ * the array, detect which one.
+ */
+ if ((op_itflags[iop_maskna]&NPY_OP_ITFLAG_USINGBUFFER) != 0) {
+ maskptr = (npy_mask *)buffers[iop_maskna];
}
- /* WRITEMASKED operand */
- else if (op_itflags[iop] & NPY_OP_ITFLAG_WRITEMASKED) {
- npy_mask *maskptr;
-
- /*
- * The mask pointer may be in the buffer or in
- * the array, detect which one.
- */
- delta = (ptrs[maskop] - buffers[maskop]);
- if (0 <= delta &&
- delta <= buffersize*dtypes[maskop]->elsize) {
- maskptr = (npy_mask *)buffers[maskop];
- }
- else {
- maskptr = (npy_mask *)ad_ptrs[maskop];
- }
+ else {
+ maskptr = (npy_mask *)ad_ptrs[iop_maskna];
+ }
+
+ PyArray_TransferMaskedStridedToNDim(ndim_transfer,
+ ad_ptrs[iop], dst_strides, axisdata_incr,
+ buffer, src_stride,
+ maskptr, strides[iop_maskna],
+ dst_coords, axisdata_incr,
+ dst_shape, axisdata_incr,
+ op_transfersize, dtypes[iop]->elsize,
+ (PyArray_MaskedStridedUnaryOp *)stransfer,
+ transferdata);
+ }
+ /* WRITEMASKED operand */
+ else if (op_itflags[iop] & NPY_OP_ITFLAG_WRITEMASKED) {
+ npy_mask *maskptr;
- PyArray_TransferMaskedStridedToNDim(ndim_transfer,
- ad_ptrs[iop], dst_strides, axisdata_incr,
- buffer, src_stride,
- maskptr, strides[maskop],
- dst_coords, axisdata_incr,
- dst_shape, axisdata_incr,
- op_transfersize, dtypes[iop]->elsize,
- (PyArray_MaskedStridedUnaryOp *)stransfer,
- transferdata);
+ /*
+ * The mask pointer may be in the buffer or in
+ * the array, detect which one.
+ */
+ if ((op_itflags[maskop]&NPY_OP_ITFLAG_USINGBUFFER) != 0) {
+ maskptr = (npy_mask *)buffers[maskop];
}
- /* Regular operand */
else {
- PyArray_TransferStridedToNDim(ndim_transfer,
- ad_ptrs[iop], dst_strides, axisdata_incr,
- buffer, src_stride,
- dst_coords, axisdata_incr,
- dst_shape, axisdata_incr,
- op_transfersize, dtypes[iop]->elsize,
- stransfer,
- transferdata);
+ maskptr = (npy_mask *)ad_ptrs[maskop];
}
+
+ PyArray_TransferMaskedStridedToNDim(ndim_transfer,
+ ad_ptrs[iop], dst_strides, axisdata_incr,
+ buffer, src_stride,
+ maskptr, strides[maskop],
+ dst_coords, axisdata_incr,
+ dst_shape, axisdata_incr,
+ op_transfersize, dtypes[iop]->elsize,
+ (PyArray_MaskedStridedUnaryOp *)stransfer,
+ transferdata);
+ }
+ /* Regular operand */
+ else {
+ PyArray_TransferStridedToNDim(ndim_transfer,
+ ad_ptrs[iop], dst_strides, axisdata_incr,
+ buffer, src_stride,
+ dst_coords, axisdata_incr,
+ dst_shape, axisdata_incr,
+ op_transfersize, dtypes[iop]->elsize,
+ stransfer,
+ transferdata);
}
}
/* If there's no copy back, we may have to decrement refs. In
* this case, the transfer function has a 'decsrcref' transfer
* function, so we can use it to do the decrement.
+ *
+ * The flag USINGBUFFER is set when the buffer was used, so
+ * only decrement refs when this flag is on.
*/
- else if (stransfer != NULL) {
- /* Decrement refs only if the pointer was pointing to the buffer */
- npy_intp delta = (ptrs[iop] - buffer);
- if (0 <= delta && delta <= transfersize*dtypes[iop]->elsize) {
- NPY_IT_DBG_PRINT1("Iterator: Freeing refs and zeroing buffer "
- "of operand %d\n", (int)iop);
- /* Decrement refs */
- stransfer(NULL, 0, buffer, dtypes[iop]->elsize,
- transfersize, dtypes[iop]->elsize,
- transferdata);
- /*
- * Zero out the memory for safety. For instance,
- * if during iteration some Python code copied an
- * array pointing into the buffer, it will get None
- * values for its references after this.
- */
- memset(buffer, 0, dtypes[iop]->elsize*transfersize);
- }
+ else if (stransfer != NULL &&
+ (op_itflags[iop]&NPY_OP_ITFLAG_USINGBUFFER) != 0) {
+ NPY_IT_DBG_PRINT1("Iterator: Freeing refs and zeroing buffer "
+ "of operand %d\n", (int)iop);
+ /* Decrement refs */
+ stransfer(NULL, 0, buffer, dtypes[iop]->elsize,
+ transfersize, dtypes[iop]->elsize,
+ transferdata);
+ /*
+ * Zero out the memory for safety. For instance,
+ * if during iteration some Python code copied an
+ * array pointing into the buffer, it will get None
+ * values for its references after this.
+ */
+ memset(buffer, 0, dtypes[iop]->elsize*transfersize);
}
}
@@ -2116,7 +2113,7 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
int iop, nop = NIT_NOP(iter);
int first_maskna_op = NIT_FIRST_MASKNA_OP(iter);
- char *op_itflags = NIT_OPITFLAGS(iter);
+ npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter),
*reduce_outeraxisdata = NULL;
@@ -2265,6 +2262,7 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
* to the first non-trivial stride.
*/
stransfer = NULL;
+ /* The flag NPY_OP_ITFLAG_USINGBUFFER can be ignored here */
break;
/* Never need to buffer this operand */
case NPY_OP_ITFLAG_BUFNEVER|NPY_OP_ITFLAG_REDUCE:
@@ -2277,6 +2275,7 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
* to the first non-trivial stride.
*/
stransfer = NULL;
+ /* The flag NPY_OP_ITFLAG_USINGBUFFER can be ignored here */
break;
/* Just a copy */
case 0:
@@ -2290,6 +2289,8 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
ptrs[iop] = ad_ptrs[iop];
strides[iop] = ad_strides[iop];
stransfer = NULL;
+ /* Signal that the buffer is not being used */
+ op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER);
}
/* If some other op is reduced, we have a double reduce loop */
else if ((itflags&NPY_ITFLAG_REDUCE) &&
@@ -2303,6 +2304,8 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
reduce_outerstrides[iop] =
NAD_STRIDES(reduce_outeraxisdata)[iop];
stransfer = NULL;
+ /* Signal that the buffer is not being used */
+ op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER);
}
else {
/* In this case, the buffer is being used */
@@ -2313,6 +2316,8 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
strides[iop];
reduce_outerptrs[iop] = ptrs[iop];
}
+ /* Signal that the buffer is being used */
+ op_itflags[iop] |= NPY_OP_ITFLAG_USINGBUFFER;
}
break;
/* Just a copy, but with a reduction */
@@ -2325,6 +2330,8 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
ptrs[iop] = ad_ptrs[iop];
reduce_outerstrides[iop] = 0;
stransfer = NULL;
+ /* Signal that the buffer is not being used */
+ op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER);
}
/* It's all in one stride in the reduce outer loop */
else if ((reduce_outerdim > 0) &&
@@ -2338,6 +2345,8 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
reduce_outerstrides[iop] =
NAD_STRIDES(reduce_outeraxisdata)[iop];
stransfer = NULL;
+ /* Signal that the buffer is not being used */
+ op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER);
}
/* In this case, the buffer is being used */
else {
@@ -2351,6 +2360,8 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
else {
reduce_outerstrides[iop] = dtypes[iop]->elsize;
}
+ /* Signal that the buffer is being used */
+ op_itflags[iop] |= NPY_OP_ITFLAG_USINGBUFFER;
}
}
@@ -2360,6 +2371,8 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
strides[iop] = ad_strides[iop];
reduce_outerstrides[iop] = 0;
stransfer = NULL;
+ /* Signal that the buffer is not being used */
+ op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER);
}
else {
/* It's all in one stride in the reduce outer loop */
@@ -2373,6 +2386,8 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
reduce_outerstrides[iop] =
NAD_STRIDES(reduce_outeraxisdata)[iop];
stransfer = NULL;
+ /* Signal that the buffer is not being used */
+ op_itflags[iop] &= (~NPY_OP_ITFLAG_USINGBUFFER);
}
/* In this case, the buffer is being used */
else {
@@ -2388,6 +2403,8 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
reduce_outerstrides[iop] = reduce_innersize *
dtypes[iop]->elsize;
}
+ /* Signal that the buffer is being used */
+ op_itflags[iop] |= NPY_OP_ITFLAG_USINGBUFFER;
}
}
reduce_outerptrs[iop] = ptrs[iop];
@@ -2396,6 +2413,9 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
/* In this case, the buffer is always being used */
any_buffered = 1;
+ /* Signal that the buffer is being used */
+ op_itflags[iop] |= NPY_OP_ITFLAG_USINGBUFFER;
+
if (!(op_itflags[iop]&NPY_OP_ITFLAG_REDUCE)) {
ptrs[iop] = buffers[iop];
strides[iop] = dtypes[iop]->elsize;
@@ -2636,7 +2656,7 @@ npyiter_checkreducesize(NpyIter *iter, npy_intp count,
npy_intp reducespace = 1, factor;
npy_bool nonzerocoord;
- char *op_itflags = NIT_OPITFLAGS(iter);
+ npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
char stride0op[NPY_MAXARGS];
/* Default to no outer axis */
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index af9780b83..180c55063 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -27,14 +27,14 @@ static int
npyiter_calculate_ndim(int nop, PyArrayObject **op_in,
int oa_ndim);
static int
-npyiter_check_per_op_flags(npy_uint32 flags, char *op_itflags);
+npyiter_check_per_op_flags(npy_uint32 flags, npyiter_opitflags *op_itflags);
static int
npyiter_prepare_one_operand(PyArrayObject **op,
char **op_dataptr,
PyArray_Descr *op_request_dtype,
PyArray_Descr** op_dtype,
npy_uint32 flags,
- npy_uint32 op_flags, char *op_itflags);
+ npy_uint32 op_flags, npyiter_opitflags *op_itflags);
static int
npyiter_prepare_operands(int nop, int first_maskna_op,
PyArrayObject **op_in,
@@ -43,16 +43,16 @@ npyiter_prepare_operands(int nop, int first_maskna_op,
PyArray_Descr **op_request_dtypes,
PyArray_Descr **op_dtype,
npy_uint32 flags,
- npy_uint32 *op_flags, char *op_itflags,
+ npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
npy_int8 *out_maskop,
npy_int8 *out_maskna_indices);
static int
npyiter_check_casting(int nop, PyArrayObject **op,
PyArray_Descr **op_dtype,
NPY_CASTING casting,
- char *op_itflags);
+ npyiter_opitflags *op_itflags);
static int
-npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, char *op_itflags,
+npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
npy_uint32 *op_flags, int **op_axes,
npy_intp *itershape,
@@ -74,25 +74,25 @@ static void
npyiter_find_best_axis_ordering(NpyIter *iter);
static PyArray_Descr *
npyiter_get_common_dtype(int first_maskna_op, PyArrayObject **op,
- char *op_itflags, PyArray_Descr **op_dtype,
+ npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
PyArray_Descr **op_request_dtypes,
int only_inputs, int output_scalars);
static PyArrayObject *
npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype,
- npy_uint32 flags, char *op_itflags,
+ npy_uint32 flags, npyiter_opitflags *op_itflags,
int op_ndim, npy_intp *shape,
PyArray_Descr *op_dtype, int *op_axes);
static int
npyiter_allocate_arrays(NpyIter *iter,
npy_uint32 flags,
PyArray_Descr **op_dtype, PyTypeObject *subtype,
- npy_uint32 *op_flags, char *op_itflags,
+ npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
int **op_axes, int output_scalars);
static int
npyiter_fill_maskna_axisdata(NpyIter *iter, int **op_axes);
static void
npyiter_get_priority_subtype(int first_maskna_op, PyArrayObject **op,
- char *op_itflags,
+ npyiter_opitflags *op_itflags,
double *subtype_priority, PyTypeObject **subtype);
static int
npyiter_allocate_transfer_functions(NpyIter *iter);
@@ -120,7 +120,7 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
/* Per-operand values */
PyArrayObject **op;
PyArray_Descr **op_dtype;
- char *op_itflags;
+ npyiter_opitflags *op_itflags;
char **op_dataptr;
npy_int8 *perm;
@@ -916,7 +916,7 @@ npyiter_calculate_ndim(int nop, PyArrayObject **op_in,
* Returns 1 on success, 0 on failure.
*/
static int
-npyiter_check_per_op_flags(npy_uint32 op_flags, char *op_itflags)
+npyiter_check_per_op_flags(npy_uint32 op_flags, npyiter_opitflags *op_itflags)
{
if ((op_flags & NPY_ITER_GLOBAL_FLAGS) != 0) {
PyErr_SetString(PyExc_ValueError,
@@ -1027,7 +1027,7 @@ npyiter_prepare_one_operand(PyArrayObject **op,
PyArray_Descr *op_request_dtype,
PyArray_Descr **op_dtype,
npy_uint32 flags,
- npy_uint32 op_flags, char *op_itflags)
+ npy_uint32 op_flags, npyiter_opitflags *op_itflags)
{
/* NULL operands must be automatically allocated outputs */
if (*op == NULL) {
@@ -1226,7 +1226,7 @@ npyiter_prepare_operands(int nop, int first_maskna_op, PyArrayObject **op_in,
PyArray_Descr **op_request_dtypes,
PyArray_Descr **op_dtype,
npy_uint32 flags,
- npy_uint32 *op_flags, char *op_itflags,
+ npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
npy_int8 *out_maskop,
npy_int8 *out_maskna_indices)
{
@@ -1419,7 +1419,7 @@ static int
npyiter_check_casting(int first_maskna_op, PyArrayObject **op,
PyArray_Descr **op_dtype,
NPY_CASTING casting,
- char *op_itflags)
+ npyiter_opitflags *op_itflags)
{
int iop;
@@ -1557,7 +1557,7 @@ check_mask_for_writemasked_reduction(NpyIter *iter, int iop)
* Returns 1 on success, 0 on failure.
*/
static int
-npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, char *op_itflags,
+npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
npy_uint32 *op_flags, int **op_axes,
npy_intp *itershape,
@@ -2520,7 +2520,7 @@ npyiter_find_best_axis_ordering(NpyIter *iter)
*/
static PyArray_Descr *
npyiter_get_common_dtype(int first_maskna_op, PyArrayObject **op,
- char *op_itflags, PyArray_Descr **op_dtype,
+ npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
PyArray_Descr **op_request_dtypes,
int only_inputs, int output_scalars)
{
@@ -2586,7 +2586,7 @@ npyiter_get_common_dtype(int first_maskna_op, PyArrayObject **op,
*/
static PyArrayObject *
npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype,
- npy_uint32 flags, char *op_itflags,
+ npy_uint32 flags, npyiter_opitflags *op_itflags,
int op_ndim, npy_intp *shape,
PyArray_Descr *op_dtype, int *op_axes)
{
@@ -2820,7 +2820,7 @@ static int
npyiter_allocate_arrays(NpyIter *iter,
npy_uint32 flags,
PyArray_Descr **op_dtype, PyTypeObject *subtype,
- npy_uint32 *op_flags, char *op_itflags,
+ npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
int **op_axes, int output_scalars)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
@@ -3151,7 +3151,7 @@ npyiter_fill_maskna_axisdata(NpyIter *iter, int **op_axes)
int first_maskna_op = NIT_FIRST_MASKNA_OP(iter);
npy_int8 *perm;
- char *op_itflags = NIT_OPITFLAGS(iter);
+ npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
npy_int8 *maskna_indices = NIT_MASKNA_INDICES(iter);
NpyIter_AxisData *axisdata;
npy_intp sizeof_axisdata;
@@ -3285,7 +3285,7 @@ npyiter_fill_maskna_axisdata(NpyIter *iter, int **op_axes)
*/
static void
npyiter_get_priority_subtype(int first_maskna_op, PyArrayObject **op,
- char *op_itflags,
+ npyiter_opitflags *op_itflags,
double *subtype_priority,
PyTypeObject **subtype)
{
@@ -3311,7 +3311,7 @@ npyiter_allocate_transfer_functions(NpyIter *iter)
int first_maskna_op = NIT_FIRST_MASKNA_OP(iter);
npy_intp i;
- char *op_itflags = NIT_OPITFLAGS(iter);
+ npyiter_opitflags *op_itflags = NIT_OPITFLAGS(iter);
NpyIter_BufferData *bufferdata = NIT_BUFFERDATA(iter);
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter);
PyArrayObject **op = NIT_OPERANDS(iter);
@@ -3328,7 +3328,7 @@ npyiter_allocate_transfer_functions(NpyIter *iter)
int needs_api = 0;
for (iop = 0; iop < nop; ++iop) {
- char flags = op_itflags[iop];
+ npyiter_opitflags flags = op_itflags[iop];
/*
* Reduction operands may be buffered with a different stride,
* so we must pass NPY_MAX_INTP to the transfer function factory.
diff --git a/numpy/core/src/multiarray/nditer_impl.h b/numpy/core/src/multiarray/nditer_impl.h
index ea02d5cc4..ef10308fb 100644
--- a/numpy/core/src/multiarray/nditer_impl.h
+++ b/numpy/core/src/multiarray/nditer_impl.h
@@ -107,21 +107,23 @@
/* Internal iterator per-operand iterator flags */
/* The operand will be written to */
-#define NPY_OP_ITFLAG_WRITE 0x01
+#define NPY_OP_ITFLAG_WRITE 0x0001
/* The operand will be read from */
-#define NPY_OP_ITFLAG_READ 0x02
+#define NPY_OP_ITFLAG_READ 0x0002
/* The operand needs type conversion/byte swapping/alignment */
-#define NPY_OP_ITFLAG_CAST 0x04
+#define NPY_OP_ITFLAG_CAST 0x0004
/* The operand never needs buffering */
-#define NPY_OP_ITFLAG_BUFNEVER 0x08
+#define NPY_OP_ITFLAG_BUFNEVER 0x0008
/* The operand is aligned */
-#define NPY_OP_ITFLAG_ALIGNED 0x10
+#define NPY_OP_ITFLAG_ALIGNED 0x0010
/* The operand is being reduced */
-#define NPY_OP_ITFLAG_REDUCE 0x20
+#define NPY_OP_ITFLAG_REDUCE 0x0020
/* The operand is for temporary use, does not have a backing array */
-#define NPY_OP_ITFLAG_VIRTUAL 0x40
+#define NPY_OP_ITFLAG_VIRTUAL 0x0040
/* The operand requires masking when copying buffer -> array */
-#define NPY_OP_ITFLAG_WRITEMASKED 0x80
+#define NPY_OP_ITFLAG_WRITEMASKED 0x0080
+/* The operand's data pointer is pointing into its buffer */
+#define NPY_OP_ITFLAG_USINGBUFFER 0x0100
/*
* The data layout of the iterator is fully specified by
@@ -147,6 +149,8 @@ struct NpyIter_InternalOnly {
typedef struct NpyIter_AD NpyIter_AxisData;
typedef struct NpyIter_BD NpyIter_BufferData;
+typedef npy_int16 npyiter_opitflags;
+
/* Byte sizes of the iterator members */
#define NIT_PERM_SIZEOF(itflags, ndim, nop) \
NPY_INTP_ALIGNED(NPY_MAXDIMS)
@@ -161,7 +165,7 @@ typedef struct NpyIter_BD NpyIter_BufferData;
#define NIT_OPERANDS_SIZEOF(itflags, ndim, nop) \
((NPY_SIZEOF_INTP)*(nop))
#define NIT_OPITFLAGS_SIZEOF(itflags, ndim, nop) \
- (NPY_INTP_ALIGNED(nop))
+ (NPY_INTP_ALIGNED(sizeof(npyiter_opitflags) * nop))
#define NIT_BUFFERDATA_SIZEOF(itflags, ndim, nop) \
((itflags&NPY_ITFLAG_BUFFER) ? ((NPY_SIZEOF_INTP)*(6 + 9*nop)) : 0)
@@ -224,8 +228,8 @@ typedef struct NpyIter_BD NpyIter_BufferData;
&(iter)->iter_flexdata + NIT_BASEOFFSETS_OFFSET(itflags, ndim, nop)))
#define NIT_OPERANDS(iter) ((PyArrayObject **)( \
&(iter)->iter_flexdata + NIT_OPERANDS_OFFSET(itflags, ndim, nop)))
-#define NIT_OPITFLAGS(iter) ( \
- &(iter)->iter_flexdata + NIT_OPITFLAGS_OFFSET(itflags, ndim, nop))
+#define NIT_OPITFLAGS(iter) ((npyiter_opitflags *)( \
+ &(iter)->iter_flexdata + NIT_OPITFLAGS_OFFSET(itflags, ndim, nop)))
#define NIT_BUFFERDATA(iter) ((NpyIter_BufferData *)( \
&(iter)->iter_flexdata + NIT_BUFFERDATA_OFFSET(itflags, ndim, nop)))
#define NIT_AXISDATA(iter) ((NpyIter_AxisData *)( \
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 5233d0f88..31d818778 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -285,6 +285,7 @@ class TestFloatExceptions(TestCase):
self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()]);
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()]);
+ @dec.knownfailureif(True, "See ticket 1755")
def test_floating_exceptions(self):
"""Test basic arithmetic function errors"""
oldsettings = np.seterr(all='raise')
diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
index 190584829..18ecc01e5 100644
--- a/numpy/distutils/fcompiler/intel.py
+++ b/numpy/distutils/fcompiler/intel.py
@@ -111,7 +111,7 @@ class IntelFCompiler(BaseIntelFCompiler):
opt.remove('-shared')
except ValueError:
idx = 0
- opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup', '-Wl,-framework,Python']
+ opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup']
return opt
class IntelItaniumFCompiler(IntelFCompiler):
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 5ad668ef3..9c3cee3cb 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -665,9 +665,16 @@ class system_info:
found_libs, found_dirs = [], []
for dir_ in lib_dirs:
found_libs1 = self._lib_list(dir_, libs, exts)
- if found_libs1:
- found_libs.extend(found_libs1)
- found_dirs.append(dir_)
+ # It's possible that we'll find the same library in multiple
+ # directories. It's also possible that we'll find some
+            # libraries in one directory, and some in another. So the
+ # obvious thing would be to use a set instead of a list, but I
+ # don't know if preserving order matters (does it?).
+ for found_lib in found_libs1:
+ if found_lib not in found_libs:
+ found_libs.append(found_lib)
+ if dir_ not in found_dirs:
+ found_dirs.append(dir_)
else:
found_libs = self._lib_list(lib_dirs, libs, exts)
found_dirs = [lib_dirs]
diff --git a/numpy/fft/bscript b/numpy/fft/bscript
index 165ff145d..ac1506496 100644
--- a/numpy/fft/bscript
+++ b/numpy/fft/bscript
@@ -1,15 +1,7 @@
-import os
+from bento.commands import hooks
-from bento.commands.hooks \
- import \
- pre_build
-
-@pre_build
+@hooks.pre_build
def build(context):
- bld = context.waf_context
-
- def build(extension):
- includes = ["../core/include", "../core/include/numpy", "../core",
- "../core/src/private"]
- return context.default_builder(extension, includes=includes)
- context.register_builder("fftpack_lite", build)
+ context.tweak_extension("fftpack_lite",
+ includes=["../core/include", "../core/include/numpy",
+ "../core", "../core/src/private"])
diff --git a/numpy/lib/bscript b/numpy/lib/bscript
index 8d47e1f34..a9200d043 100644
--- a/numpy/lib/bscript
+++ b/numpy/lib/bscript
@@ -1,14 +1,7 @@
-import os
+from bento.commands import hooks
-from bento.commands.hooks \
- import \
- pre_build
-
-@pre_build
-def pbuild(context):
- bld = context.waf_context
- def builder_compiled_base(extension):
- includes = ["../core/include", "../core/include/numpy", "../core",
- "../core/src/private"]
- return context.default_builder(extension, includes=includes)
- context.register_builder("_compiled_base", builder_compiled_base)
+@hooks.pre_build
+def build(context):
+ context.tweak_extension("_compiled_base",
+ includes=["../core/include", "../core/include/numpy", "../core",
+ "../core/src/private"])
diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py
index d686aeca2..599a36198 100644
--- a/numpy/lib/financial.py
+++ b/numpy/lib/financial.py
@@ -18,9 +18,13 @@ _when_to_num = {'end':0, 'begin':1,
'finish':0}
def _convert_when(when):
+    # Test to see if `when` has already been converted to an ndarray.
+    # This will happen when one function calls another, for example ppmt.
+ if isinstance(when, np.ndarray):
+ return when
try:
return _when_to_num[when]
- except KeyError:
+ except (KeyError, TypeError):
return [_when_to_num[x] for x in when]
@@ -236,8 +240,8 @@ def nper(rate, pmt, pv, fv=0, when='end'):
If you only had $150/month to pay towards the loan, how long would it take
to pay-off a loan of $8,000 at 7% annual interest?
- >>> np.nper(0.07/12, -150, 8000)
- 64.073348770661852
+ >>> print round(np.nper(0.07/12, -150, 8000), 5)
+ 64.07335
So, over 64 months would be required to pay off the loan.
@@ -354,17 +358,19 @@ def ipmt(rate, per, nper, pv, fv=0.0, when='end'):
12 -216.26 -1.49 -0.00
>>> interestpd = np.sum(ipmt)
- >>> interestpd
- -112.98308424136215
+ >>> np.round(interestpd, 2)
+ -112.98
"""
when = _convert_when(when)
- if when == 1 and per == 1:
- return 0.0
+ rate, per, nper, pv, fv, when = np.broadcast_arrays(rate, per, nper, pv, fv, when)
total_pmt = pmt(rate, nper, pv, fv, when)
ipmt = _rbl(rate, per, total_pmt, pv, when)*rate
- if when == 1:
- return ipmt/(1 + rate)
+ try:
+ ipmt = np.where(when == 1, ipmt/(1 + rate), ipmt)
+ ipmt = np.where(np.logical_and(when == 1, per == 1), 0.0, ipmt)
+ except IndexError:
+ pass
return ipmt
def _rbl(rate, per, pmt, pv, when):
@@ -620,8 +626,8 @@ def irr(values):
Examples
--------
- >>> np.irr([-100, 39, 59, 55, 20])
- 0.2809484211599611
+ >>> print round(np.irr([-100, 39, 59, 55, 20]), 5)
+ 0.28095
(Compare with the Example given for numpy.lib.financial.npv)
diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py
index ba6846bf0..5fe976143 100644
--- a/numpy/lib/tests/test_financial.py
+++ b/numpy/lib/tests/test_financial.py
@@ -54,6 +54,86 @@ class TestFinancial(TestCase):
val = [39000,30000,21000,37000,46000]
assert_(np.isnan(np.mirr(val, 0.10, 0.12)))
+ def test_when(self):
+ #begin
+ assert_almost_equal(np.rate(10,20,-3500,10000,1),
+ np.rate(10,20,-3500,10000,'begin'), 4)
+ #end
+ assert_almost_equal(np.rate(10,20,-3500,10000),
+ np.rate(10,20,-3500,10000,'end'), 4)
+ assert_almost_equal(np.rate(10,20,-3500,10000,0),
+ np.rate(10,20,-3500,10000,'end'), 4)
+
+ # begin
+ assert_almost_equal(np.pv(0.07,20,12000,0,1),
+ np.pv(0.07,20,12000,0,'begin'), 2)
+ # end
+ assert_almost_equal(np.pv(0.07,20,12000,0),
+ np.pv(0.07,20,12000,0,'end'), 2)
+ assert_almost_equal(np.pv(0.07,20,12000,0,0),
+ np.pv(0.07,20,12000,0,'end'), 2)
+
+ # begin
+ assert_almost_equal(np.fv(0.075, 20, -2000,0,1),
+ np.fv(0.075, 20, -2000,0,'begin'), 4)
+ # end
+ assert_almost_equal(np.fv(0.075, 20, -2000,0),
+ np.fv(0.075, 20, -2000,0,'end'), 4)
+ assert_almost_equal(np.fv(0.075, 20, -2000,0,0),
+ np.fv(0.075, 20, -2000,0,'end'), 4)
+
+ # begin
+ assert_almost_equal(np.pmt(0.08/12,5*12,15000.,0,1),
+ np.pmt(0.08/12,5*12,15000.,0,'begin'), 4)
+ # end
+ assert_almost_equal(np.pmt(0.08/12,5*12,15000.,0),
+ np.pmt(0.08/12,5*12,15000.,0,'end'), 4)
+ assert_almost_equal(np.pmt(0.08/12,5*12,15000.,0,0),
+ np.pmt(0.08/12,5*12,15000.,0,'end'), 4)
+
+ # begin
+ assert_almost_equal(np.ppmt(0.1/12,1,60,55000,0,1),
+ np.ppmt(0.1/12,1,60,55000,0,'begin'), 4)
+ # end
+ assert_almost_equal(np.ppmt(0.1/12,1,60,55000,0),
+ np.ppmt(0.1/12,1,60,55000,0,'end'), 4)
+ assert_almost_equal(np.ppmt(0.1/12,1,60,55000,0,0),
+ np.ppmt(0.1/12,1,60,55000,0,'end'), 4)
+
+ # begin
+ assert_almost_equal(np.ipmt(0.1/12,1,24,2000,0,1),
+ np.ipmt(0.1/12,1,24,2000,0,'begin'), 4)
+ # end
+ assert_almost_equal(np.ipmt(0.1/12,1,24,2000,0),
+ np.ipmt(0.1/12,1,24,2000,0,'end'), 4)
+ assert_almost_equal(np.ipmt(0.1/12,1,24,2000,0,0),
+ np.ipmt(0.1/12,1,24,2000,0,'end'), 4)
+
+ # begin
+ assert_almost_equal(np.nper(0.075,-2000,0,100000.,1),
+ np.nper(0.075,-2000,0,100000.,'begin'), 4)
+ # end
+ assert_almost_equal(np.nper(0.075,-2000,0,100000.),
+ np.nper(0.075,-2000,0,100000.,'end'), 4)
+ assert_almost_equal(np.nper(0.075,-2000,0,100000.,0),
+ np.nper(0.075,-2000,0,100000.,'end'), 4)
+
+ def test_broadcast(self):
+ assert_almost_equal(np.nper(0.075,-2000,0,100000.,[0,1]),
+ [ 21.5449442 , 20.76156441], 4)
+
+ assert_almost_equal(np.ipmt(0.1/12,range(5), 24, 2000),
+ [-17.29165168, -16.66666667, -16.03647345,
+ -15.40102862, -14.76028842], 4)
+
+ assert_almost_equal(np.ppmt(0.1/12,range(5), 24, 2000),
+ [-74.998201 , -75.62318601, -76.25337923,
+ -76.88882405, -77.52956425], 4)
+
+ assert_almost_equal(np.ppmt(0.1/12,range(5), 24, 2000, 0,
+ [0,0,1,'end','begin']),
+ [-74.998201 , -75.62318601, -75.62318601,
+ -76.88882405, -76.88882405], 4)
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/random/bscript b/numpy/random/bscript
index c548f29e8..714015049 100644
--- a/numpy/random/bscript
+++ b/numpy/random/bscript
@@ -4,7 +4,7 @@ import sys
from bento.commands import hooks
import waflib
-@hooks.pre_configure
+@hooks.post_configure
def configure(context):
conf = context.waf_context
diff --git a/release.sh b/release.sh
index 5f1f31ebb..6f46290eb 100644
--- a/release.sh
+++ b/release.sh
@@ -5,6 +5,15 @@
# downloads, i.e. two versions for Python 2.7. The Intel 32/64-bit version is
# for OS X 10.6+, the other dmg installers are for 10.3+ and are built on 10.5
+#---------------
+# Build tarballs
+#---------------
+paver sdist
+
+
+#--------------------
+# Build documentation
+#--------------------
# Check we're using the correct g++/c++ for the 32-bit 2.6 version we build for
# the docs and the 64-bit 2.7 dmg installer.
# We do this because for Python 2.6 we use a symlink on the PATH to select
@@ -21,14 +30,13 @@ paver bootstrap
source bootstrap/bin/activate
python setupsconsegg.py install
-# build docs
+# build pdf docs
paver pdf
-#------------------------------------------------------------------
-# Build tarballs, Windows and 64-bit OS X installers (on OS X 10.6)
-#------------------------------------------------------------------
-paver sdist
+#--------------------------------------------------------
+# Build Windows and 64-bit OS X installers (on OS X 10.6)
+#--------------------------------------------------------
export MACOSX_DEPLOYMENT_TARGET=10.6
# Use GCC 4.2 for 64-bit OS X installer for Python 2.7
export PATH=~/Code/tmp/gpp42temp/:$PATH
@@ -58,3 +66,14 @@ paver bdist_superpack -p 2.5
paver write_release_and_log
+
+
+#-------------------------------------------------------
+# Build basic (no SSE) Windows installers to put on PyPi
+#-------------------------------------------------------
+paver bdist_wininst_simple -p 2.5
+paver bdist_wininst_simple -p 2.6
+paver bdist_wininst_simple -p 2.7
+paver bdist_wininst_simple -p 3.1
+paver bdist_wininst_simple -p 3.2
+
diff --git a/tools/test-installed-numpy.py b/tools/test-installed-numpy.py
new file mode 100644
index 000000000..91e619e96
--- /dev/null
+++ b/tools/test-installed-numpy.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# A simple script to test the installed version of numpy by calling
+# 'numpy.test()'. Key features:
+# -- convenient command-line syntax
+# -- sets exit status appropriately, useful for automated test environments
+
+# It would be better to set this up as a module in the numpy namespace, so
+# that it could be run as:
+# python -m numpy.run_tests <args>
+# But, python2.4's -m switch only works with top-level modules, not modules
+# that are inside packages. So, once we drop 2.4 support, maybe...
+
+import sys
+# In case we are run from the source directory, we don't want to import numpy
+# from there, we want to import the installed version:
+sys.path.pop(0)
+
+from optparse import OptionParser
+parser = OptionParser("usage: %prog [options] -- [nosetests options]")
+parser.add_option("-v", "--verbose",
+ action="count", dest="verbose", default=1,
+ help="increase verbosity")
+parser.add_option("--doctests",
+ action="store_true", dest="doctests", default=False,
+ help="Run doctests in module")
+parser.add_option("--coverage",
+ action="store_true", dest="coverage", default=False,
+                  help="report coverage of NumPy code (requires 'coverage' module)")
+parser.add_option("-m", "--mode",
+ action="store", dest="mode", default="fast",
+ help="'fast', 'full', or something that could be "
+ "passed to nosetests -A [default: %default]")
+(options, args) = parser.parse_args()
+
+import numpy
+result = numpy.test(options.mode,
+ verbose=options.verbose,
+ extra_argv=args,
+ doctests=options.doctests,
+ coverage=options.coverage)
+
+if result.wasSuccessful():
+ sys.exit(0)
+else:
+ sys.exit(1)
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 000000000..3085741ac
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,40 @@
+# 'Tox' is a tool for automating sdist/build/test cycles against
+# multiple Python versions:
+# http://pypi.python.org/pypi/tox
+# http://tox.testrun.org/
+
+# Running the command 'tox' while in the root of the numpy source
+# directory will:
+# - Create a numpy source distribution (setup.py sdist)
+# - Then for every supported version of Python:
+# - Create a virtualenv in .tox/py$VERSION and install
+# dependencies. (These virtualenvs are cached across runs unless
+# you use --recreate.)
+# - Use pip to install the numpy sdist into the virtualenv
+# - Run the numpy tests
+# To run against a specific subset of Python versions, use:
+# tox -e py24,py27
+
+# Extra arguments will be passed to test-installed-numpy.py. To run
+# the full testsuite:
+# tox full
+# To run with extra verbosity:
+# tox -- -v
+
+# Tox assumes that you have appropriate Python interpreters already
+# installed and that they can be run as 'python2.4', 'python2.5', etc.
+
+[tox]
+envlist = py24,py25,py26,py27,py31,py32
+
+[testenv]
+deps=
+ nose
+changedir={envdir}
+commands=python {toxinidir}/tools/test-installed-numpy.py {posargs:}
+
+# Not run by default. Set up the way you want then use 'tox -e debug'
+# if you want it:
+[testenv:debug]
+basepython=PYTHON-WITH-DEBUG-INFO
+commands=gdb --args {envpython} {toxinidir}/tools/test-installed-numpy.py {posargs:}