Diffstat (limited to 'numpy/random')
-rw-r--r--  numpy/random/_bounded_integers.pyx.in | 88
-rw-r--r--  numpy/random/_common.pxd | 40
-rw-r--r--  numpy/random/_common.pyx | 2
-rw-r--r--  numpy/random/_examples/cffi/parse.py | 5
-rw-r--r--  numpy/random/_generator.pyi | 99
-rw-r--r--  numpy/random/_generator.pyx | 96
-rw-r--r--  numpy/random/_mt19937.pyx | 8
-rw-r--r--  numpy/random/_pcg64.pyx | 6
-rw-r--r--  numpy/random/_philox.pyx | 6
-rw-r--r--  numpy/random/_sfc64.pyx | 6
-rw-r--r--  numpy/random/bit_generator.pyi | 3
-rw-r--r--  numpy/random/bit_generator.pyx | 67
-rw-r--r--  numpy/random/c_distributions.pxd | 12
-rw-r--r--  numpy/random/include/aligned_malloc.h | 8
-rw-r--r--  numpy/random/meson.build | 164
-rw-r--r--  numpy/random/mtrand.pyx | 18
-rw-r--r--  numpy/random/src/distributions/distributions.c | 65
-rw-r--r--  numpy/random/src/legacy/legacy-distributions.c | 4
-rw-r--r--  numpy/random/src/mt19937/randomkit.c | 4
-rw-r--r--  numpy/random/src/philox/philox.c | 4
-rw-r--r--  numpy/random/src/philox/philox.h | 30
-rw-r--r--  numpy/random/src/sfc64/sfc64.h | 8
-rw-r--r--  numpy/random/tests/test_direct.py | 40
-rw-r--r--  numpy/random/tests/test_extending.py | 8
-rw-r--r--  numpy/random/tests/test_generator_mt19937.py | 6
-rw-r--r--  numpy/random/tests/test_generator_mt19937_regressions.py | 50
-rw-r--r--  numpy/random/tests/test_random.py | 3
-rw-r--r--  numpy/random/tests/test_randomstate.py | 7
28 files changed, 665 insertions, 192 deletions
diff --git a/numpy/random/_bounded_integers.pyx.in b/numpy/random/_bounded_integers.pyx.in
index 7eb6aff1e..6743001d6 100644
--- a/numpy/random/_bounded_integers.pyx.in
+++ b/numpy/random/_bounded_integers.pyx.in
@@ -99,8 +99,12 @@ cdef object _rand_{{nptype}}_broadcast(np.ndarray low, np.ndarray high, object s
is_open = not closed
low_arr = <np.ndarray>low
high_arr = <np.ndarray>high
- if np.any(np.less(low_arr, {{lb}})):
+
+ if np.can_cast(low_arr, np.{{otype}}):
+ pass # cannot be out-of-bounds
+ elif np.any(np.less(low_arr, np.{{otype}}({{lb}}))):
raise ValueError('low is out of bounds for {{nptype}}')
+
if closed:
high_comp = np.greater_equal
low_high_comp = np.greater
@@ -108,8 +112,11 @@ cdef object _rand_{{nptype}}_broadcast(np.ndarray low, np.ndarray high, object s
high_comp = np.greater
low_high_comp = np.greater_equal
- if np.any(high_comp(high_arr, {{ub}})):
+ if np.can_cast(high_arr, np.{{otype}}):
+ pass # cannot be out-of-bounds
+ elif np.any(high_comp(high_arr, np.{{nptype_up}}({{ub}}))):
raise ValueError('high is out of bounds for {{nptype}}')
+
if np.any(low_high_comp(low_arr, high_arr)):
raise ValueError(format_bounds_error(closed, low_arr))
@@ -165,50 +172,69 @@ cdef object _rand_{{nptype}}_broadcast(object low, object high, object size,
64 bit integer type.
"""
- cdef np.ndarray low_arr, high_arr, out_arr, highm1_arr
+ cdef np.ndarray low_arr, low_arr_orig, high_arr, high_arr_orig, out_arr
cdef np.npy_intp i, cnt, n
cdef np.broadcast it
cdef object closed_upper
cdef uint64_t *out_data
cdef {{nptype}}_t *highm1_data
cdef {{nptype}}_t low_v, high_v
- cdef uint64_t rng, last_rng, val, mask, off, out_val
+ cdef uint64_t rng, last_rng, val, mask, off, out_val, is_open
- low_arr = <np.ndarray>low
- high_arr = <np.ndarray>high
+ low_arr_orig = <np.ndarray>low
+ high_arr_orig = <np.ndarray>high
- if np.any(np.less(low_arr, {{lb}})):
- raise ValueError('low is out of bounds for {{nptype}}')
- dt = high_arr.dtype
- if closed or np.issubdtype(dt, np.integer):
- # Avoid object dtype path if already an integer
- high_lower_comp = np.less if closed else np.less_equal
- if np.any(high_lower_comp(high_arr, {{lb}})):
- raise ValueError(format_bounds_error(closed, low_arr))
- high_m1 = high_arr if closed else high_arr - dt.type(1)
- if np.any(np.greater(high_m1, {{ub}})):
- raise ValueError('high is out of bounds for {{nptype}}')
- highm1_arr = <np.ndarray>np.PyArray_FROM_OTF(high_m1, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
+ is_open = not closed
+
+ # The following code tries to cast safely, but failing that goes via
+ # Python `int()` because it is very difficult to cast integers in a
+ # truly safe way (i.e. so that out-of-bounds values raise).
+ # We correct if the interval is not closed in this step if we go the long
+ # route. (Not otherwise, since the -1 could overflow in theory.)
+ if np.can_cast(low_arr_orig, np.{{otype}}):
+ low_arr = <np.ndarray>np.PyArray_FROM_OTF(low_arr_orig, np.{{npctype}}, np.NPY_ALIGNED)
+ else:
+ low_arr = <np.ndarray>np.empty_like(low_arr_orig, dtype=np.{{otype}})
+ flat = low_arr_orig.flat
+ low_data = <{{nptype}}_t *>np.PyArray_DATA(low_arr)
+ cnt = np.PyArray_SIZE(low_arr)
+ for i in range(cnt):
+ lower = int(flat[i])
+ if lower < {{lb}} or lower > {{ub}}:
+ raise ValueError('low is out of bounds for {{nptype}}')
+ low_data[i] = lower
+
+ del low_arr_orig
+
+ if np.can_cast(high_arr_orig, np.{{otype}}):
+ high_arr = <np.ndarray>np.PyArray_FROM_OTF(high_arr_orig, np.{{npctype}}, np.NPY_ALIGNED)
else:
- # If input is object or a floating type
- highm1_arr = <np.ndarray>np.empty_like(high_arr, dtype=np.{{otype}})
- highm1_data = <{{nptype}}_t *>np.PyArray_DATA(highm1_arr)
+ high_arr = np.empty_like(high_arr_orig, dtype=np.{{otype}})
+ flat = high_arr_orig.flat
+ high_data = <{{nptype}}_t *>np.PyArray_DATA(high_arr)
cnt = np.PyArray_SIZE(high_arr)
- flat = high_arr.flat
for i in range(cnt):
- # Subtract 1 since generator produces values on the closed int [off, off+rng]
- closed_upper = int(flat[i]) - 1
+ closed_upper = int(flat[i]) - is_open
if closed_upper > {{ub}}:
raise ValueError('high is out of bounds for {{nptype}}')
if closed_upper < {{lb}}:
raise ValueError(format_bounds_error(closed, low_arr))
- highm1_data[i] = <{{nptype}}_t>closed_upper
+ high_data[i] = closed_upper
- if np.any(np.greater(low_arr, highm1_arr)):
- raise ValueError(format_bounds_error(closed, low_arr))
+ is_open = 0 # we applied is_open in this path already
- high_arr = highm1_arr
- low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
+ del high_arr_orig
+
+ # Since we have the largest supported integer dtypes, they must be within
+ # range at this point; otherwise conversion would have failed. Check that
+ # `high < low` never holds if the interval is closed and `high <= low` if not
+ if not is_open:
+ low_high_comp = np.greater
+ else:
+ low_high_comp = np.greater_equal
+
+ if np.any(low_high_comp(low_arr, high_arr)):
+ raise ValueError(format_bounds_error(closed, low_arr))
if size is not None:
out_arr = <np.ndarray>np.empty(size, np.{{otype}})
@@ -224,8 +250,8 @@ cdef object _rand_{{nptype}}_broadcast(object low, object high, object size,
for i in range(n):
low_v = (<{{nptype}}_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
high_v = (<{{nptype}}_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
- # Generator produces values on the closed int [off, off+rng], -1 subtracted above
- rng = <{{utype}}_t>(high_v - low_v)
+ # Generator produces values on the closed int [off, off+rng]
+ rng = <{{utype}}_t>((high_v - is_open) - low_v)
off = <{{utype}}_t>(<{{nptype}}_t>low_v)
if rng != last_rng:
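
A minimal sketch of what the reworked bounds handling above enables (assuming this diff is applied; the values are illustrative): array-valued bounds that cannot be safely cast to the output dtype are now converted element-wise through Python int() and range-checked, instead of overflowing or taking the object-dtype path.

    import numpy as np

    rng = np.random.default_rng(0)

    # Mixed-dtype bounds go through the element-wise int() path and are
    # validated against the uint64 range rather than silently wrapping.
    low = np.array([0, 10], dtype=np.int64)
    high = np.array([5, 2**64 - 1], dtype=np.uint64)
    print(rng.integers(low, high, dtype=np.uint64))

    # Out-of-range bounds still raise a clear error.
    try:
        rng.integers(-1, 10, dtype=np.uint64)
    except ValueError as exc:
        print(exc)  # low is out of bounds for uint64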
diff --git a/numpy/random/_common.pxd b/numpy/random/_common.pxd
index 3eaf39ddf..659da0d2d 100644
--- a/numpy/random/_common.pxd
+++ b/numpy/random/_common.pxd
@@ -39,32 +39,32 @@ cdef extern from "include/aligned_malloc.h":
cdef void *PyArray_calloc_aligned(size_t n, size_t s)
cdef void PyArray_free_aligned(void *p)
-ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) nogil
-ctypedef double (*random_double_0)(void *state) nogil
-ctypedef double (*random_double_1)(void *state, double a) nogil
-ctypedef double (*random_double_2)(void *state, double a, double b) nogil
-ctypedef double (*random_double_3)(void *state, double a, double b, double c) nogil
+ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) noexcept nogil
+ctypedef double (*random_double_0)(void *state) noexcept nogil
+ctypedef double (*random_double_1)(void *state, double a) noexcept nogil
+ctypedef double (*random_double_2)(void *state, double a, double b) noexcept nogil
+ctypedef double (*random_double_3)(void *state, double a, double b, double c) noexcept nogil
-ctypedef void (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) nogil
-ctypedef float (*random_float_0)(bitgen_t *state) nogil
-ctypedef float (*random_float_1)(bitgen_t *state, float a) nogil
+ctypedef void (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) noexcept nogil
+ctypedef float (*random_float_0)(bitgen_t *state) noexcept nogil
+ctypedef float (*random_float_1)(bitgen_t *state, float a) noexcept nogil
-ctypedef int64_t (*random_uint_0)(void *state) nogil
-ctypedef int64_t (*random_uint_d)(void *state, double a) nogil
-ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) nogil
-ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) nogil
-ctypedef int64_t (*random_uint_i)(void *state, int64_t a) nogil
-ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) nogil
+ctypedef int64_t (*random_uint_0)(void *state) noexcept nogil
+ctypedef int64_t (*random_uint_d)(void *state, double a) noexcept nogil
+ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) noexcept nogil
+ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) noexcept nogil
+ctypedef int64_t (*random_uint_i)(void *state, int64_t a) noexcept nogil
+ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) noexcept nogil
-ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) nogil
-ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) nogil
+ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) noexcept nogil
+ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) noexcept nogil
-ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) nogil
-ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) nogil
+ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) noexcept nogil
+ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) noexcept nogil
-cdef double kahan_sum(double *darr, np.npy_intp n)
+cdef double kahan_sum(double *darr, np.npy_intp n) noexcept
-cdef inline double uint64_to_double(uint64_t rnd) nogil:
+cdef inline double uint64_to_double(uint64_t rnd) noexcept nogil:
return (rnd >> 11) * (1.0 / 9007199254740992.0)
cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out)
diff --git a/numpy/random/_common.pyx b/numpy/random/_common.pyx
index 7b6f69303..c5e4e3297 100644
--- a/numpy/random/_common.pyx
+++ b/numpy/random/_common.pyx
@@ -171,7 +171,7 @@ cdef object prepare_ctypes(bitgen_t *bitgen):
ctypes.c_void_p(<uintptr_t>bitgen))
return _ctypes
-cdef double kahan_sum(double *darr, np.npy_intp n):
+cdef double kahan_sum(double *darr, np.npy_intp n) noexcept:
"""
Parameters
----------
diff --git a/numpy/random/_examples/cffi/parse.py b/numpy/random/_examples/cffi/parse.py
index daff6bdec..d41c4c2db 100644
--- a/numpy/random/_examples/cffi/parse.py
+++ b/numpy/random/_examples/cffi/parse.py
@@ -36,9 +36,9 @@ def parse_distributions_h(ffi, inc_dir):
continue
# skip any inlined function definition
- # which starts with 'static NPY_INLINE xxx(...) {'
+ # which starts with 'static inline xxx(...) {'
# and ends with a closing '}'
- if line.strip().startswith('static NPY_INLINE'):
+ if line.strip().startswith('static inline'):
in_skip += line.count('{')
continue
elif in_skip > 0:
@@ -48,7 +48,6 @@ def parse_distributions_h(ffi, inc_dir):
# replace defines with their value or remove them
line = line.replace('DECLDIR', '')
- line = line.replace('NPY_INLINE', '')
line = line.replace('RAND_INT_TYPE', 'int64_t')
s.append(line)
ffi.cdef('\n'.join(s))
diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi
index f0d814fef..e1cdefb15 100644
--- a/numpy/random/_generator.pyi
+++ b/numpy/random/_generator.pyi
@@ -29,6 +29,7 @@ from numpy._typing import (
_DTypeLikeUInt,
_Float32Codes,
_Float64Codes,
+ _FloatLike_co,
_Int8Codes,
_Int16Codes,
_Int32Codes,
@@ -72,6 +73,7 @@ class Generator:
def __reduce__(self) -> tuple[Callable[[str], Generator], tuple[str], dict[str, Any]]: ...
@property
def bit_generator(self) -> BitGenerator: ...
+ def spawn(self, n_children: int) -> list[Generator]: ...
def bytes(self, length: int) -> bytes: ...
@overload
def standard_normal( # type: ignore[misc]
@@ -187,13 +189,18 @@ class Generator:
out: None | ndarray[Any, dtype[float64]] = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def beta(
+ self,
+ a: _FloatLike_co,
+ b: _FloatLike_co,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def beta(
self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def exponential(
self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
@@ -370,7 +377,12 @@ class Generator:
shuffle: bool = ...,
) -> ndarray[Any, Any]: ...
@overload
- def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def uniform(
+ self,
+ low: _FloatLike_co = ...,
+ high: _FloatLike_co = ...,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def uniform(
self,
@@ -379,7 +391,12 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def normal(
+ self,
+ loc: _FloatLike_co = ...,
+ scale: _FloatLike_co = ...,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def normal(
self,
@@ -390,7 +407,7 @@ class Generator:
@overload
def standard_gamma( # type: ignore[misc]
self,
- shape: float,
+ shape: _FloatLike_co,
size: None = ...,
dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
out: None = ...,
@@ -425,7 +442,7 @@ class Generator:
out: None | ndarray[Any, dtype[float64]] = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def gamma(self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def gamma(
self,
@@ -434,13 +451,13 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def f(
self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def noncentral_f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def noncentral_f(
self,
@@ -450,19 +467,19 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def chisquare(
self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def noncentral_chisquare(self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def noncentral_chisquare(
self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def standard_t(
self, df: _ArrayLikeFloat_co, size: None = ...
@@ -472,25 +489,25 @@ class Generator:
self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def vonmises(self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def vonmises(
self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def pareto(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def weibull(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def power(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def power(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
@@ -500,7 +517,12 @@ class Generator:
@overload
def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
@overload
- def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def laplace(
+ self,
+ loc: _FloatLike_co = ...,
+ scale: _FloatLike_co = ...,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def laplace(
self,
@@ -509,7 +531,12 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def gumbel(
+ self,
+ loc: _FloatLike_co = ...,
+ scale: _FloatLike_co = ...,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def gumbel(
self,
@@ -518,7 +545,12 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def logistic(
+ self,
+ loc: _FloatLike_co = ...,
+ scale: _FloatLike_co = ...,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def logistic(
self,
@@ -527,7 +559,12 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def lognormal(
+ self,
+ mean: _FloatLike_co = ...,
+ sigma: _FloatLike_co = ...,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def lognormal(
self,
@@ -536,19 +573,25 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def rayleigh(
self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def wald(self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def wald(
self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc]
+ def triangular(
+ self,
+ left: _FloatLike_co,
+ mode: _FloatLike_co,
+ right: _FloatLike_co,
+ size: None = ...,
+ ) -> float: ... # type: ignore[misc]
@overload
def triangular(
self,
@@ -558,31 +601,31 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[float64]]: ...
@overload
- def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def binomial(
self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[int64]]: ...
@overload
- def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ def negative_binomial(self, n: _FloatLike_co, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def negative_binomial(
self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[int64]]: ...
@overload
- def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc]
+ def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc]
@overload
def poisson(
self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[int64]]: ...
@overload
- def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc]
+ def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def zipf(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> ndarray[Any, dtype[int64]]: ...
@overload
- def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def geometric(
self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
@@ -598,7 +641,7 @@ class Generator:
size: None | _ShapeLike = ...,
) -> ndarray[Any, dtype[int64]]: ...
@overload
- def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def logseries(
self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
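
The stub changes above widen scalar parameters from float to _FloatLike_co, so NumPy floating-point scalars now type-check against the scalar overloads; a small illustrative snippet (runtime behaviour is unchanged):

    import numpy as np

    rng = np.random.default_rng()
    # np.float32/np.float64 scalars now match the scalar overloads in the stubs,
    # and the calls still return a plain Python float at runtime.
    x = rng.normal(np.float32(0.0), np.float32(1.0))
    y = rng.uniform(np.float64(0.0), 2.0)
    print(type(x), type(y))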
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index 266a05387..a30d116c2 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -238,6 +238,64 @@ cdef class Generator:
"""
return self._bit_generator
+ def spawn(self, int n_children):
+ """
+ spawn(n_children)
+
+ Create new independent child generators.
+
+ See :ref:`seedsequence-spawn` for additional notes on spawning
+ children.
+
+ .. versionadded:: 1.25.0
+
+ Parameters
+ ----------
+ n_children : int
+
+ Returns
+ -------
+ child_generators : list of Generators
+
+ Raises
+ ------
+ TypeError
+ When the underlying SeedSequence does not implement spawning.
+
+ See Also
+ --------
+ random.BitGenerator.spawn, random.SeedSequence.spawn :
+ Equivalent method on the bit generator and seed sequence.
+ bit_generator :
+ The bit generator instance used by the generator.
+
+ Examples
+ --------
+ Starting from a seeded default generator:
+
+ >>> # High quality entropy created with: f"0x{secrets.randbits(128):x}"
+ >>> entropy = 0x3034c61a9ae04ff8cb62ab8ec2c4b501
+ >>> rng = np.random.default_rng(entropy)
+
+ Create two new generators, for example for parallel execution:
+
+ >>> child_rng1, child_rng2 = rng.spawn(2)
+
+ Numbers drawn from each are independent but derived from the initial
+ seeding entropy:
+
+ >>> rng.uniform(), child_rng1.uniform(), child_rng2.uniform()
+ (0.19029263503854454, 0.9475673279178444, 0.4702687338396767)
+
+ It is safe to spawn additional children from the original ``rng`` or
+ the children:
+
+ >>> more_child_rngs = rng.spawn(20)
+ >>> nested_spawn = child_rng1.spawn(20)
+
+ """
+ return [type(self)(g) for g in self._bit_generator.spawn(n_children)]
+
def random(self, size=None, dtype=np.float64, out=None):
"""
random(size=None, dtype=np.float64, out=None)
@@ -380,6 +438,22 @@ cdef class Generator:
out : ndarray or scalar
Drawn samples from the parameterized exponential distribution.
+ Examples
+ --------
+ A real-world example: Assume a company has 10000 customer support
+ agents and the average time between customer calls is 4 minutes.
+
+ >>> n = 10000
+ >>> time_between_calls = np.random.default_rng().exponential(scale=4, size=n)
+
+ What is the probability that a customer will call in the next
+ 4 to 5 minutes?
+
+ >>> x = ((time_between_calls < 5).sum())/n
+ >>> y = ((time_between_calls < 4).sum())/n
+ >>> x-y
+ 0.08 # may vary
+
References
----------
.. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
@@ -2560,7 +2634,7 @@ cdef class Generator:
>>> b = []
>>> for i in range(1000):
... a = 10. + rng.standard_normal(100)
- ... b.append(np.product(a))
+ ... b.append(np.prod(a))
>>> b = np.array(b) / np.min(b) # scale values to be positive
>>> count, bins, ignored = plt.hist(b, 100, density=True, align='mid')
@@ -3533,8 +3607,8 @@ cdef class Generator:
generalization of the one-dimensional normal distribution to higher
dimensions. Such a distribution is specified by its mean and
covariance matrix. These parameters are analogous to the mean
- (average or "center") and variance (standard deviation, or "width,"
- squared) of the one-dimensional normal distribution.
+ (average or "center") and variance (the squared standard deviation,
+ or "width") of the one-dimensional normal distribution.
Parameters
----------
@@ -3611,6 +3685,12 @@ cdef class Generator:
nonnegative-definite). Otherwise, the behavior of this method is
undefined and backwards compatibility is not guaranteed.
+ This function internally uses linear algebra routines, and thus results
+ may not be identical (even up to precision) across architectures, OSes,
+ or even builds. For example, this is likely if ``cov`` has multiple equal
+ singular values and ``method`` is ``'svd'`` (default). In this case,
+ ``method='cholesky'`` may be more robust.
+
References
----------
.. [1] Papoulis, A., "Probability, Random Variables, and Stochastic
@@ -4247,7 +4327,7 @@ cdef class Generator:
Raises
------
ValueError
- If any value in ``alpha`` is less than or equal to zero
+ If any value in ``alpha`` is less than zero
Notes
-----
@@ -4326,8 +4406,8 @@ cdef class Generator:
alpha_arr = <np.ndarray>np.PyArray_FROMANY(
alpha, np.NPY_DOUBLE, 1, 1,
np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
- if np.any(np.less_equal(alpha_arr, 0)):
- raise ValueError('alpha <= 0')
+ if np.any(np.less(alpha_arr, 0)):
+ raise ValueError('alpha < 0')
alpha_data = <double*>np.PyArray_DATA(alpha_arr)
if size is None:
@@ -4745,7 +4825,7 @@ cdef class Generator:
>>> rng.permutation("abc")
Traceback (most recent call last):
...
- numpy.AxisError: axis 0 is out of bounds for array of dimension 0
+ numpy.exceptions.AxisError: axis 0 is out of bounds for array of dimension 0
>>> arr = np.arange(9).reshape((3, 3))
>>> rng.permutation(arr, axis=1)
@@ -4803,6 +4883,8 @@ def default_rng(seed=None):
-----
If ``seed`` is not a `BitGenerator` or a `Generator`, a new `BitGenerator`
is instantiated. This function does not manage a default global instance.
+
+ See :ref:`seeding_and_entropy` for more information about seeding.
Examples
--------
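
The note added to multivariate_normal above recommends method='cholesky' when reproducibility across builds matters; a short sketch of that usage (the mean and covariance values are illustrative only):

    import numpy as np

    rng = np.random.default_rng(0)
    mean = np.zeros(3)
    cov = np.eye(3)  # repeated singular values: the default SVD factor is not unique
    # Cholesky factorization is deterministic, so the draws do not depend on
    # which of the equivalent SVD factorizations the local LAPACK returns.
    samples = rng.multivariate_normal(mean, cov, size=5, method='cholesky')
    print(samples.shape)  # (5, 3)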
diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx
index 5a8d52e6b..8b991254a 100644
--- a/numpy/random/_mt19937.pyx
+++ b/numpy/random/_mt19937.pyx
@@ -28,16 +28,16 @@ cdef extern from "src/mt19937/mt19937.h":
enum:
RK_STATE_LEN
-cdef uint64_t mt19937_uint64(void *st) nogil:
+cdef uint64_t mt19937_uint64(void *st) noexcept nogil:
return mt19937_next64(<mt19937_state *> st)
-cdef uint32_t mt19937_uint32(void *st) nogil:
+cdef uint32_t mt19937_uint32(void *st) noexcept nogil:
return mt19937_next32(<mt19937_state *> st)
-cdef double mt19937_double(void *st) nogil:
+cdef double mt19937_double(void *st) noexcept nogil:
return mt19937_next_double(<mt19937_state *> st)
-cdef uint64_t mt19937_raw(void *st) nogil:
+cdef uint64_t mt19937_raw(void *st) noexcept nogil:
return <uint64_t>mt19937_next32(<mt19937_state *> st)
cdef class MT19937(BitGenerator):
diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx
index c0a10a812..dee38c039 100644
--- a/numpy/random/_pcg64.pyx
+++ b/numpy/random/_pcg64.pyx
@@ -30,13 +30,13 @@ cdef extern from "src/pcg64/pcg64.h":
uint32_t pcg64_cm_next32(pcg64_state *state) nogil
void pcg64_cm_advance(pcg64_state *state, uint64_t *step)
-cdef uint64_t pcg64_uint64(void* st) nogil:
+cdef uint64_t pcg64_uint64(void* st) noexcept nogil:
return pcg64_next64(<pcg64_state *>st)
-cdef uint32_t pcg64_uint32(void *st) nogil:
+cdef uint32_t pcg64_uint32(void *st) noexcept nogil:
return pcg64_next32(<pcg64_state *> st)
-cdef double pcg64_double(void* st) nogil:
+cdef double pcg64_double(void* st) noexcept nogil:
return uint64_to_double(pcg64_next64(<pcg64_state *>st))
cdef uint64_t pcg64_cm_uint64(void* st) nogil:
diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx
index d9a366e86..e0c0504f6 100644
--- a/numpy/random/_philox.pyx
+++ b/numpy/random/_philox.pyx
@@ -42,13 +42,13 @@ cdef extern from 'src/philox/philox.h':
void philox_advance(uint64_t *step, philox_state *state)
-cdef uint64_t philox_uint64(void*st) nogil:
+cdef uint64_t philox_uint64(void*st) noexcept nogil:
return philox_next64(<philox_state *> st)
-cdef uint32_t philox_uint32(void *st) nogil:
+cdef uint32_t philox_uint32(void *st) noexcept nogil:
return philox_next32(<philox_state *> st)
-cdef double philox_double(void*st) nogil:
+cdef double philox_double(void*st) noexcept nogil:
return uint64_to_double(philox_next64(<philox_state *> st))
cdef class Philox(BitGenerator):
diff --git a/numpy/random/_sfc64.pyx b/numpy/random/_sfc64.pyx
index 1daee34f8..419045c1d 100644
--- a/numpy/random/_sfc64.pyx
+++ b/numpy/random/_sfc64.pyx
@@ -21,13 +21,13 @@ cdef extern from "src/sfc64/sfc64.h":
void sfc64_set_state(sfc64_state *state, uint64_t *state_arr, int has_uint32, uint32_t uinteger)
-cdef uint64_t sfc64_uint64(void* st) nogil:
+cdef uint64_t sfc64_uint64(void* st) noexcept nogil:
return sfc64_next64(<sfc64_state *>st)
-cdef uint32_t sfc64_uint32(void *st) nogil:
+cdef uint32_t sfc64_uint32(void *st) noexcept nogil:
return sfc64_next32(<sfc64_state *> st)
-cdef double sfc64_double(void* st) nogil:
+cdef double sfc64_double(void* st) noexcept nogil:
return uint64_to_double(sfc64_next64(<sfc64_state *>st))
diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi
index e6e3b10cd..8b9779cad 100644
--- a/numpy/random/bit_generator.pyi
+++ b/numpy/random/bit_generator.pyi
@@ -96,6 +96,9 @@ class BitGenerator(abc.ABC):
def state(self) -> Mapping[str, Any]: ...
@state.setter
def state(self, value: Mapping[str, Any]) -> None: ...
+ @property
+ def seed_seq(self) -> ISeedSequence: ...
+ def spawn(self, n_children: int) -> list[BitGenerator]: ...
@overload
def random_raw(self, size: None = ..., output: Literal[True] = ...) -> int: ... # type: ignore[misc]
@overload
diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx
index 3da4fabce..83441747a 100644
--- a/numpy/random/bit_generator.pyx
+++ b/numpy/random/bit_generator.pyx
@@ -212,6 +212,9 @@ class ISpawnableSeedSequence(ISeedSequence):
Spawn a number of child `SeedSequence` s by extending the
`spawn_key`.
+ See :ref:`seedsequence-spawn` for additional notes on spawning
+ children.
+
Parameters
----------
n_children : int
@@ -260,6 +263,7 @@ cdef class SeedSequence():
----------
entropy : {None, int, sequence[int]}, optional
The entropy for creating a `SeedSequence`.
+ All integer values must be non-negative.
spawn_key : {(), sequence[int]}, optional
An additional source of entropy based on the position of this
`SeedSequence` in the tree of such objects created with the
@@ -450,6 +454,9 @@ cdef class SeedSequence():
Spawn a number of child `SeedSequence` s by extending the
`spawn_key`.
+ See :ref:`seedsequence-spawn` for additional notes on spawning
+ children.
+
Parameters
----------
n_children : int
@@ -457,6 +464,12 @@ cdef class SeedSequence():
Returns
-------
seqs : list of `SeedSequence` s
+
+ See Also
+ --------
+ random.Generator.spawn, random.BitGenerator.spawn :
+ Equivalent method on the generator and bit generator.
+
"""
cdef uint32_t i
@@ -490,6 +503,7 @@ cdef class BitGenerator():
``array_like[ints]`` is passed, then it will be passed to
`~numpy.random.SeedSequence` to derive the initial `BitGenerator` state.
One may also pass in a `SeedSequence` instance.
+ All integer values must be non-negative.
Attributes
----------
@@ -549,6 +563,59 @@ cdef class BitGenerator():
def state(self, value):
raise NotImplementedError('Not implemented in base BitGenerator')
+ @property
+ def seed_seq(self):
+ """
+ Get the seed sequence used to initialize the bit generator.
+
+ .. versionadded:: 1.25.0
+
+ Returns
+ -------
+ seed_seq : ISeedSequence
+ The SeedSequence object used to initialize the BitGenerator.
+ This is normally a `np.random.SeedSequence` instance.
+
+ """
+ return self._seed_seq
+
+ def spawn(self, int n_children):
+ """
+ spawn(n_children)
+
+ Create new independent child bit generators.
+
+ See :ref:`seedsequence-spawn` for additional notes on spawning
+ children. Some bit generators also implement ``jumped``
+ as a different approach for creating independent streams.
+
+ .. versionadded:: 1.25.0
+
+ Parameters
+ ----------
+ n_children : int
+
+ Returns
+ -------
+ child_bit_generators : list of BitGenerators
+
+ Raises
+ ------
+ TypeError
+ When the underlying SeedSequence does not implement spawning.
+
+ See Also
+ --------
+ random.Generator.spawn, random.SeedSequence.spawn :
+ Equivalent method on the generator and seed sequence.
+
+ """
+ if not isinstance(self._seed_seq, ISpawnableSeedSequence):
+ raise TypeError(
+ "The underlying SeedSequence does not implement spawning.")
+
+ return [type(self)(seed=s) for s in self._seed_seq.spawn(n_children)]
+
def random_raw(self, size=None, output=True):
"""
random_raw(self, size=None)
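
A small sketch of the new seed_seq property and BitGenerator.spawn added above (assuming this diff is applied; the seed value is arbitrary). The spawn keys mirror what test_generator_spawning in test_direct.py checks further down:

    import numpy as np

    bg = np.random.PCG64(seed=42)
    print(bg.seed_seq.spawn_key)  # () for a freshly seeded bit generator

    children = bg.spawn(2)  # independent child bit generators
    print([c.seed_seq.spawn_key for c in children])  # [(0,), (1,)]

    # Wrap a child in a Generator to draw from its stream.
    rng = np.random.Generator(children[0])
    print(rng.standard_normal(3))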
diff --git a/numpy/random/c_distributions.pxd b/numpy/random/c_distributions.pxd
index 6f905edc1..b978d1350 100644
--- a/numpy/random/c_distributions.pxd
+++ b/numpy/random/c_distributions.pxd
@@ -28,18 +28,24 @@ cdef extern from "numpy/random/distributions.h":
ctypedef s_binomial_t binomial_t
+ float random_standard_uniform_f(bitgen_t *bitgen_state) nogil
double random_standard_uniform(bitgen_t *bitgen_state) nogil
void random_standard_uniform_fill(bitgen_t* bitgen_state, npy_intp cnt, double *out) nogil
+ void random_standard_uniform_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil
+
double random_standard_exponential(bitgen_t *bitgen_state) nogil
- double random_standard_exponential_f(bitgen_t *bitgen_state) nogil
+ float random_standard_exponential_f(bitgen_t *bitgen_state) nogil
void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
- void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
+ void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil
void random_standard_exponential_inv_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
- void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
+ void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil
+
double random_standard_normal(bitgen_t* bitgen_state) nogil
+ float random_standard_normal_f(bitgen_t *bitgen_state) nogil
void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp count, double *out) nogil
void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp count, float *out) nogil
double random_standard_gamma(bitgen_t *bitgen_state, double shape) nogil
+ float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
float random_standard_uniform_f(bitgen_t *bitgen_state) nogil
void random_standard_uniform_fill_f(bitgen_t* bitgen_state, npy_intp cnt, float *out) nogil
diff --git a/numpy/random/include/aligned_malloc.h b/numpy/random/include/aligned_malloc.h
index 43f68253d..0fff57b6c 100644
--- a/numpy/random/include/aligned_malloc.h
+++ b/numpy/random/include/aligned_malloc.h
@@ -6,7 +6,7 @@
#define NPY_MEMALIGN 16 /* 16 for SSE2, 32 for AVX, 64 for Xeon Phi */
-static NPY_INLINE void *PyArray_realloc_aligned(void *p, size_t n)
+static inline void *PyArray_realloc_aligned(void *p, size_t n)
{
void *p1, **p2, *base;
size_t old_offs, offs = NPY_MEMALIGN - 1 + sizeof(void *);
@@ -31,12 +31,12 @@ static NPY_INLINE void *PyArray_realloc_aligned(void *p, size_t n)
return (void *)p2;
}
-static NPY_INLINE void *PyArray_malloc_aligned(size_t n)
+static inline void *PyArray_malloc_aligned(size_t n)
{
return PyArray_realloc_aligned(NULL, n);
}
-static NPY_INLINE void *PyArray_calloc_aligned(size_t n, size_t s)
+static inline void *PyArray_calloc_aligned(size_t n, size_t s)
{
void *p;
if (NPY_UNLIKELY((p = PyArray_realloc_aligned(NULL, n * s)) == NULL))
@@ -45,7 +45,7 @@ static NPY_INLINE void *PyArray_calloc_aligned(size_t n, size_t s)
return p;
}
-static NPY_INLINE void PyArray_free_aligned(void *p)
+static inline void PyArray_free_aligned(void *p)
{
void *base = *(((void **)p) - 1);
PyMem_Free(base);
diff --git a/numpy/random/meson.build b/numpy/random/meson.build
new file mode 100644
index 000000000..036cd81b9
--- /dev/null
+++ b/numpy/random/meson.build
@@ -0,0 +1,164 @@
+# Build npyrandom library
+# -----------------------
+npyrandom_sources = [
+ 'src/distributions/logfactorial.c',
+ 'src/distributions/distributions.c',
+ 'src/distributions/random_mvhg_count.c',
+ 'src/distributions/random_mvhg_marginals.c',
+ 'src/distributions/random_hypergeometric.c',
+]
+
+npyrandom_lib = static_library('npyrandom',
+ npyrandom_sources,
+ c_args: staticlib_cflags,
+ # include_directories: '../core/include',
+ dependencies: [py_dep, np_core_dep],
+ install: true,
+ install_dir: np_dir / 'random/lib',
+)
+
+# Build Cython extensions for numpy.random
+# ----------------------------------------
+# pyx -> c transpile output depends on copied __init__.py and pxd files
+_cython_tree_random = [
+ fs.copyfile('__init__.py'),
+ fs.copyfile('__init__.pxd'),
+ fs.copyfile('_common.pxd'),
+ fs.copyfile('bit_generator.pxd'),
+ fs.copyfile('c_distributions.pxd'),
+]
+# Need to use `custom_target` because we need to install this .pxd file
+_cython_tree_random += custom_target('_bounded_integer_pxd',
+ output: '_bounded_integers.pxd',
+ input: '_bounded_integers.pxd.in',
+ command: [tempita_cli, '@INPUT@', '-o', '@OUTPUT@'],
+ install: true,
+ install_dir: np_dir / 'random'
+)
+
+_bounded_integers_pyx = custom_target('_bounded_integer_pyx',
+ output: '_bounded_integers.pyx',
+ input: '_bounded_integers.pyx.in',
+ command: [tempita_cli, '@INPUT@', '-o', '@OUTPUT@'],
+)
+
+c_args_random = [
+ cflags_large_file_support,
+ '-DNPY_NO_DEPRECATED_API=0', # Cython still uses old NumPy C API
+]
+if host_machine.system() == 'cygwin'
+ c_args_random += ['-Wl,--export-all-symbols']
+endif
+
+# name, sources, extra c_args, extra link libs
+random_pyx_sources = [
+ ['_bounded_integers', _bounded_integers_pyx, [], npymath_lib],
+ ['_common', '_common.pyx', [], []],
+ ['_mt19937', ['_mt19937.pyx', 'src/mt19937/mt19937.c', 'src/mt19937/mt19937-jump.c'], [], []],
+ ['_philox', ['_philox.pyx', 'src/philox/philox.c'], [], []],
+ ['_pcg64', ['_pcg64.pyx', 'src/pcg64/pcg64.c'], ['-U__GNUC_GNU_INLINE__'], []],
+ ['_sfc64', ['_sfc64.pyx', 'src/sfc64/sfc64.c'], [], []],
+ ['bit_generator', 'bit_generator.pyx', [], []],
+ # The `fs.copyfile` usage here is needed because these two .pyx files import
+ # from _bounded_integers, and its pxd file is only present in the build directory
+ ['_generator', fs.copyfile('_generator.pyx'), [], npymath_lib],
+ ['mtrand', [
+ fs.copyfile('mtrand.pyx'),
+ 'src/distributions/distributions.c',
+ 'src/legacy/legacy-distributions.c'
+ ], ['-DNP_RANDOM_LEGACY=1'], npymath_lib,
+ ],
+]
+foreach gen: random_pyx_sources
+ py.extension_module(gen[0],
+ [gen[1], _cython_tree, _cython_tree_random],
+ c_args: [c_args_random, gen[2]],
+ include_directories: 'src',
+ dependencies: np_core_dep,
+ link_with: [npyrandom_lib, gen[3]],
+ install: true,
+ subdir: 'numpy/random',
+ )
+endforeach
+
+# Install Python sources, stub files, tests, examples and license
+# ---------------------------------------------------------------
+py.install_sources(
+ [
+ '__init__.pxd',
+ '__init__.py',
+ '__init__.pyi',
+ '_common.pxd',
+ '_generator.pyi',
+ '_mt19937.pyi',
+ '_pcg64.pyi',
+ '_pickle.py',
+ '_philox.pyi',
+ '_sfc64.pyi',
+ 'bit_generator.pxd',
+ 'bit_generator.pyi',
+ 'c_distributions.pxd',
+ 'LICENSE.md',
+ 'mtrand.pyi',
+ ],
+ subdir: 'numpy/random'
+)
+
+py.install_sources(
+ [
+ 'tests/__init__.py',
+ 'tests/test_direct.py',
+ 'tests/test_extending.py',
+ 'tests/test_generator_mt19937.py',
+ 'tests/test_generator_mt19937_regressions.py',
+ 'tests/test_random.py',
+ 'tests/test_randomstate.py',
+ 'tests/test_randomstate_regression.py',
+ 'tests/test_regression.py',
+ 'tests/test_seed_sequence.py',
+ 'tests/test_smoke.py',
+ ],
+ subdir: 'numpy/random/tests'
+)
+
+py.install_sources(
+ [
+ 'tests/data/__init__.py',
+ 'tests/data/mt19937-testset-1.csv',
+ 'tests/data/mt19937-testset-2.csv',
+ 'tests/data/pcg64-testset-1.csv',
+ 'tests/data/pcg64-testset-2.csv',
+ 'tests/data/pcg64dxsm-testset-1.csv',
+ 'tests/data/pcg64dxsm-testset-2.csv',
+ 'tests/data/philox-testset-1.csv',
+ 'tests/data/philox-testset-2.csv',
+ 'tests/data/sfc64-testset-1.csv',
+ 'tests/data/sfc64-testset-2.csv',
+ ],
+ subdir: 'numpy/random/tests/data'
+)
+
+py.install_sources(
+ [
+ '_examples/cffi/extending.py',
+ '_examples/cffi/parse.py',
+ ],
+ subdir: 'numpy/random/_examples/cffi'
+)
+
+py.install_sources(
+ [
+ '_examples/cython/extending.pyx',
+ '_examples/cython/extending_distributions.pyx',
+ '_examples/cython/setup.py',
+ ],
+ subdir: 'numpy/random/_examples/cython'
+)
+
+py.install_sources(
+ [
+ '_examples/numba/extending.py',
+ '_examples/numba/extending_distributions.py',
+ ],
+ subdir: 'numpy/random/_examples/numba'
+)
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index edf812a4d..dfa553ee4 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -537,6 +537,22 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized exponential distribution.
+ Examples
+ --------
+ A real-world example: Assume a company has 10000 customer support
+ agents and the average time between customer calls is 4 minutes.
+
+ >>> n = 10000
+ >>> time_between_calls = np.random.default_rng().exponential(scale=4, size=n)
+
+ What is the probability that a customer will call in the next
+ 4 to 5 minutes?
+
+ >>> x = ((time_between_calls < 5).sum())/n
+ >>> y = ((time_between_calls < 4).sum())/n
+ >>> x-y
+ 0.08 # may vary
+
See Also
--------
random.Generator.exponential: which should be used for new code.
@@ -3050,7 +3066,7 @@ cdef class RandomState:
>>> b = []
>>> for i in range(1000):
... a = 10. + np.random.standard_normal(100)
- ... b.append(np.product(a))
+ ... b.append(np.prod(a))
>>> b = np.array(b) / np.min(b) # scale values to be positive
>>> count, bins, ignored = plt.hist(b, 100, density=True, align='mid')
diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c
index bd1e1faa4..cebeb07cf 100644
--- a/numpy/random/src/distributions/distributions.c
+++ b/numpy/random/src/distributions/distributions.c
@@ -9,14 +9,14 @@
#include <assert.h>
/* Inline generators for internal use */
-static NPY_INLINE uint32_t next_uint32(bitgen_t *bitgen_state) {
+static inline uint32_t next_uint32(bitgen_t *bitgen_state) {
return bitgen_state->next_uint32(bitgen_state->state);
}
-static NPY_INLINE uint64_t next_uint64(bitgen_t *bitgen_state) {
+static inline uint64_t next_uint64(bitgen_t *bitgen_state) {
return bitgen_state->next_uint64(bitgen_state->state);
}
-static NPY_INLINE float next_float(bitgen_t *bitgen_state) {
+static inline float next_float(bitgen_t *bitgen_state) {
return (next_uint32(bitgen_state) >> 8) * (1.0f / 16777216.0f);
}
@@ -960,7 +960,15 @@ RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p) {
}
int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) {
- return (int64_t)ceil(-random_standard_exponential(bitgen_state) / npy_log1p(-p));
+ double z = ceil(-random_standard_exponential(bitgen_state) / npy_log1p(-p));
+ /*
+ * The constant 9.223372036854776e+18 is the smallest double that is
+ * larger than INT64_MAX.
+ */
+ if (z >= 9.223372036854776e+18) {
+ return INT64_MAX;
+ }
+ return (int64_t) z;
}
int64_t random_geometric(bitgen_t *bitgen_state, double p) {
@@ -1047,7 +1055,7 @@ uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) {
}
/* Bounded generators */
-static NPY_INLINE uint64_t gen_mask(uint64_t max) {
+static inline uint64_t gen_mask(uint64_t max) {
uint64_t mask = max;
mask |= mask >> 1;
mask |= mask >> 2;
@@ -1059,8 +1067,8 @@ static NPY_INLINE uint64_t gen_mask(uint64_t max) {
}
/* Generate 16 bit random numbers using a 32 bit buffer. */
-static NPY_INLINE uint16_t buffered_uint16(bitgen_t *bitgen_state, int *bcnt,
- uint32_t *buf) {
+static inline uint16_t buffered_uint16(bitgen_t *bitgen_state, int *bcnt,
+ uint32_t *buf) {
if (!(bcnt[0])) {
buf[0] = next_uint32(bitgen_state);
bcnt[0] = 1;
@@ -1073,8 +1081,8 @@ static NPY_INLINE uint16_t buffered_uint16(bitgen_t *bitgen_state, int *bcnt,
}
/* Generate 8 bit random numbers using a 32 bit buffer. */
-static NPY_INLINE uint8_t buffered_uint8(bitgen_t *bitgen_state, int *bcnt,
- uint32_t *buf) {
+static inline uint8_t buffered_uint8(bitgen_t *bitgen_state, int *bcnt,
+ uint32_t *buf) {
if (!(bcnt[0])) {
buf[0] = next_uint32(bitgen_state);
bcnt[0] = 3;
@@ -1087,8 +1095,8 @@ static NPY_INLINE uint8_t buffered_uint8(bitgen_t *bitgen_state, int *bcnt,
}
/* Static `masked rejection` function called by random_bounded_uint64(...) */
-static NPY_INLINE uint64_t bounded_masked_uint64(bitgen_t *bitgen_state,
- uint64_t rng, uint64_t mask) {
+static inline uint64_t bounded_masked_uint64(bitgen_t *bitgen_state,
+ uint64_t rng, uint64_t mask) {
uint64_t val;
while ((val = (next_uint64(bitgen_state) & mask)) > rng)
@@ -1099,7 +1107,7 @@ static NPY_INLINE uint64_t bounded_masked_uint64(bitgen_t *bitgen_state,
/* Static `masked rejection` function called by
* random_buffered_bounded_uint32(...) */
-static NPY_INLINE uint32_t
+static inline uint32_t
buffered_bounded_masked_uint32(bitgen_t *bitgen_state, uint32_t rng,
uint32_t mask, int *bcnt, uint32_t *buf) {
/*
@@ -1118,7 +1126,7 @@ buffered_bounded_masked_uint32(bitgen_t *bitgen_state, uint32_t rng,
/* Static `masked rejection` function called by
* random_buffered_bounded_uint16(...) */
-static NPY_INLINE uint16_t
+static inline uint16_t
buffered_bounded_masked_uint16(bitgen_t *bitgen_state, uint16_t rng,
uint16_t mask, int *bcnt, uint32_t *buf) {
uint16_t val;
@@ -1131,10 +1139,11 @@ buffered_bounded_masked_uint16(bitgen_t *bitgen_state, uint16_t rng,
/* Static `masked rejection` function called by
* random_buffered_bounded_uint8(...) */
-static NPY_INLINE uint8_t buffered_bounded_masked_uint8(bitgen_t *bitgen_state,
- uint8_t rng,
- uint8_t mask, int *bcnt,
- uint32_t *buf) {
+static inline uint8_t buffered_bounded_masked_uint8(bitgen_t *bitgen_state,
+ uint8_t rng,
+ uint8_t mask,
+ int *bcnt,
+ uint32_t *buf) {
uint8_t val;
while ((val = (buffered_uint8(bitgen_state, bcnt, buf) & mask)) > rng)
@@ -1143,10 +1152,10 @@ static NPY_INLINE uint8_t buffered_bounded_masked_uint8(bitgen_t *bitgen_state,
return val;
}
-static NPY_INLINE npy_bool buffered_bounded_bool(bitgen_t *bitgen_state,
- npy_bool off, npy_bool rng,
- npy_bool mask, int *bcnt,
- uint32_t *buf) {
+static inline npy_bool buffered_bounded_bool(bitgen_t *bitgen_state,
+ npy_bool off, npy_bool rng,
+ npy_bool mask, int *bcnt,
+ uint32_t *buf) {
if (rng == 0)
return off;
if (!(bcnt[0])) {
@@ -1160,8 +1169,8 @@ static NPY_INLINE npy_bool buffered_bounded_bool(bitgen_t *bitgen_state,
}
/* Static `Lemire rejection` function called by random_bounded_uint64(...) */
-static NPY_INLINE uint64_t bounded_lemire_uint64(bitgen_t *bitgen_state,
- uint64_t rng) {
+static inline uint64_t bounded_lemire_uint64(bitgen_t *bitgen_state,
+ uint64_t rng) {
/*
* Uses Lemire's algorithm - https://arxiv.org/abs/1805.10941
*
@@ -1245,7 +1254,7 @@ static NPY_INLINE uint64_t bounded_lemire_uint64(bitgen_t *bitgen_state,
/* Static `Lemire rejection` function called by
* random_buffered_bounded_uint32(...) */
-static NPY_INLINE uint32_t buffered_bounded_lemire_uint32(
+static inline uint32_t buffered_bounded_lemire_uint32(
bitgen_t *bitgen_state, uint32_t rng, int *bcnt, uint32_t *buf) {
/*
* Uses Lemire's algorithm - https://arxiv.org/abs/1805.10941
@@ -1285,7 +1294,7 @@ static NPY_INLINE uint32_t buffered_bounded_lemire_uint32(
/* Static `Lemire rejection` function called by
* random_buffered_bounded_uint16(...) */
-static NPY_INLINE uint16_t buffered_bounded_lemire_uint16(
+static inline uint16_t buffered_bounded_lemire_uint16(
bitgen_t *bitgen_state, uint16_t rng, int *bcnt, uint32_t *buf) {
/*
* Uses Lemire's algorithm - https://arxiv.org/abs/1805.10941
@@ -1321,9 +1330,9 @@ static NPY_INLINE uint16_t buffered_bounded_lemire_uint16(
/* Static `Lemire rejection` function called by
* random_buffered_bounded_uint8(...) */
-static NPY_INLINE uint8_t buffered_bounded_lemire_uint8(bitgen_t *bitgen_state,
- uint8_t rng, int *bcnt,
- uint32_t *buf) {
+static inline uint8_t buffered_bounded_lemire_uint8(bitgen_t *bitgen_state,
+ uint8_t rng, int *bcnt,
+ uint32_t *buf) {
/*
* Uses Lemire's algorithm - https://arxiv.org/abs/1805.10941
*
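
The guard added to random_geometric_inversion above clamps the result at INT64_MAX; a rough numerical sketch of why that is needed (E stands in for one standard-exponential draw and p is deliberately tiny):

    import numpy as np

    p = 1e-20
    E = 1.0  # a typical standard-exponential draw
    z = np.ceil(-E / np.log1p(-p))  # the quantity computed in the C code above
    print(z)  # ~1e20, far beyond what int64 can represent
    print(z >= 9.223372036854776e+18)  # True: such draws are now clamped to INT64_MAX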
diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c
index 443c1a4bf..b518b8a03 100644
--- a/numpy/random/src/legacy/legacy-distributions.c
+++ b/numpy/random/src/legacy/legacy-distributions.c
@@ -11,7 +11,7 @@
#include "include/legacy-distributions.h"
-static NPY_INLINE double legacy_double(aug_bitgen_t *aug_state) {
+static inline double legacy_double(aug_bitgen_t *aug_state) {
return aug_state->bit_generator->next_double(aug_state->bit_generator->state);
}
@@ -494,4 +494,4 @@ int64_t legacy_logseries(bitgen_t *bitgen_state, double p) {
}
return 2;
}
-}
\ No newline at end of file
+}
diff --git a/numpy/random/src/mt19937/randomkit.c b/numpy/random/src/mt19937/randomkit.c
index f8ed4b49e..e718c2d06 100644
--- a/numpy/random/src/mt19937/randomkit.c
+++ b/numpy/random/src/mt19937/randomkit.c
@@ -247,7 +247,7 @@ unsigned long rk_random(rk_state *state) {
/*
* Returns an unsigned 64 bit random integer.
*/
-NPY_INLINE static npy_uint64 rk_uint64(rk_state *state) {
+static inline npy_uint64 rk_uint64(rk_state *state) {
npy_uint64 upper = (npy_uint64)rk_random(state) << 32;
npy_uint64 lower = (npy_uint64)rk_random(state);
return upper | lower;
@@ -256,7 +256,7 @@ NPY_INLINE static npy_uint64 rk_uint64(rk_state *state) {
/*
* Returns an unsigned 32 bit random integer.
*/
-NPY_INLINE static npy_uint32 rk_uint32(rk_state *state) {
+static inline npy_uint32 rk_uint32(rk_state *state) {
return (npy_uint32)rk_random(state);
}
diff --git a/numpy/random/src/philox/philox.c b/numpy/random/src/philox/philox.c
index 6f2fad5a4..7909383e7 100644
--- a/numpy/random/src/philox/philox.c
+++ b/numpy/random/src/philox/philox.c
@@ -1,8 +1,8 @@
#include "philox.h"
-extern NPY_INLINE uint64_t philox_next64(philox_state *state);
+extern inline uint64_t philox_next64(philox_state *state);
-extern NPY_INLINE uint32_t philox_next32(philox_state *state);
+extern inline uint32_t philox_next32(philox_state *state);
extern void philox_jump(philox_state *state) {
/* Advances state as-if 2^128 draws were made */
diff --git a/numpy/random/src/philox/philox.h b/numpy/random/src/philox/philox.h
index 81e034a47..ba7a67470 100644
--- a/numpy/random/src/philox/philox.h
+++ b/numpy/random/src/philox/philox.h
@@ -18,7 +18,7 @@ typedef struct r123array4x64 philox4x64_ctr_t;
typedef struct r123array2x64 philox4x64_key_t;
typedef struct r123array2x64 philox4x64_ukey_t;
-static NPY_INLINE struct r123array2x64
+static inline struct r123array2x64
_philox4x64bumpkey(struct r123array2x64 key) {
key.v[0] += (0x9E3779B97F4A7C15ULL);
key.v[1] += (0xBB67AE8584CAA73BULL);
@@ -27,7 +27,7 @@ _philox4x64bumpkey(struct r123array2x64 key) {
/* Prefer uint128 if available: GCC, clang, ICC */
#ifdef __SIZEOF_INT128__
-static NPY_INLINE uint64_t mulhilo64(uint64_t a, uint64_t b, uint64_t *hip) {
+static inline uint64_t mulhilo64(uint64_t a, uint64_t b, uint64_t *hip) {
__uint128_t product = ((__uint128_t)a) * ((__uint128_t)b);
*hip = product >> 64;
return (uint64_t)product;
@@ -39,13 +39,13 @@ static NPY_INLINE uint64_t mulhilo64(uint64_t a, uint64_t b, uint64_t *hip) {
#pragma intrinsic(_umul128)
#elif defined(_WIN64) && defined(_M_ARM64)
#pragma intrinsic(__umulh)
-static NPY_INLINE uint64_t _umul128(uint64_t a, uint64_t b, uint64_t *high) {
+static inline uint64_t _umul128(uint64_t a, uint64_t b, uint64_t *high) {
*high = __umulh(a, b);
return a * b;
}
#else
#pragma intrinsic(__emulu)
-static NPY_INLINE uint64_t _umul128(uint64_t a, uint64_t b, uint64_t *high) {
+static inline uint64_t _umul128(uint64_t a, uint64_t b, uint64_t *high) {
uint64_t a_lo, a_hi, b_lo, b_hi, a_x_b_hi, a_x_b_mid, a_x_b_lo, b_x_a_mid,
carry_bit;
@@ -68,11 +68,11 @@ static NPY_INLINE uint64_t _umul128(uint64_t a, uint64_t b, uint64_t *high) {
return a_x_b_lo + ((a_x_b_mid + b_x_a_mid) << 32);
}
#endif
-static NPY_INLINE uint64_t mulhilo64(uint64_t a, uint64_t b, uint64_t *hip) {
+static inline uint64_t mulhilo64(uint64_t a, uint64_t b, uint64_t *hip) {
return _umul128(a, b, hip);
}
#else
-static NPY_INLINE uint64_t _umul128(uint64_t a, uint64_t b, uint64_t *high) {
+static inline uint64_t _umul128(uint64_t a, uint64_t b, uint64_t *high) {
uint64_t a_lo, a_hi, b_lo, b_hi, a_x_b_hi, a_x_b_mid, a_x_b_lo, b_x_a_mid,
carry_bit;
@@ -94,16 +94,16 @@ static NPY_INLINE uint64_t _umul128(uint64_t a, uint64_t b, uint64_t *high) {
return a_x_b_lo + ((a_x_b_mid + b_x_a_mid) << 32);
}
-static NPY_INLINE uint64_t mulhilo64(uint64_t a, uint64_t b, uint64_t *hip) {
+static inline uint64_t mulhilo64(uint64_t a, uint64_t b, uint64_t *hip) {
return _umul128(a, b, hip);
}
#endif
#endif
-static NPY_INLINE struct r123array4x64 _philox4x64round(struct r123array4x64 ctr,
+static inline struct r123array4x64 _philox4x64round(struct r123array4x64 ctr,
struct r123array2x64 key);
-static NPY_INLINE struct r123array4x64 _philox4x64round(struct r123array4x64 ctr,
+static inline struct r123array4x64 _philox4x64round(struct r123array4x64 ctr,
struct r123array2x64 key) {
uint64_t hi0;
uint64_t hi1;
@@ -114,14 +114,14 @@ static NPY_INLINE struct r123array4x64 _philox4x64round(struct r123array4x64 ctr
return out;
}
-static NPY_INLINE philox4x64_key_t philox4x64keyinit(philox4x64_ukey_t uk) {
+static inline philox4x64_key_t philox4x64keyinit(philox4x64_ukey_t uk) {
return uk;
}
-static NPY_INLINE philox4x64_ctr_t philox4x64_R(unsigned int R,
+static inline philox4x64_ctr_t philox4x64_R(unsigned int R,
philox4x64_ctr_t ctr,
philox4x64_key_t key);
-static NPY_INLINE philox4x64_ctr_t philox4x64_R(unsigned int R,
+static inline philox4x64_ctr_t philox4x64_R(unsigned int R,
philox4x64_ctr_t ctr,
philox4x64_key_t key) {
if (R > 0) {
@@ -199,7 +199,7 @@ typedef struct s_philox_state {
uint32_t uinteger;
} philox_state;
-static NPY_INLINE uint64_t philox_next(philox_state *state) {
+static inline uint64_t philox_next(philox_state *state) {
uint64_t out;
int i;
philox4x64_ctr_t ct;
@@ -229,11 +229,11 @@ static NPY_INLINE uint64_t philox_next(philox_state *state) {
return state->buffer[0];
}
-static NPY_INLINE uint64_t philox_next64(philox_state *state) {
+static inline uint64_t philox_next64(philox_state *state) {
return philox_next(state);
}
-static NPY_INLINE uint32_t philox_next32(philox_state *state) {
+static inline uint32_t philox_next32(philox_state *state) {
uint64_t next;
if (state->has_uint32) {
diff --git a/numpy/random/src/sfc64/sfc64.h b/numpy/random/src/sfc64/sfc64.h
index 75c4118d3..f6526063e 100644
--- a/numpy/random/src/sfc64/sfc64.h
+++ b/numpy/random/src/sfc64/sfc64.h
@@ -14,7 +14,7 @@ typedef struct s_sfc64_state {
} sfc64_state;
-static NPY_INLINE uint64_t rotl(const uint64_t value, unsigned int rot) {
+static inline uint64_t rotl(const uint64_t value, unsigned int rot) {
#ifdef _WIN32
return _rotl64(value, rot);
#else
@@ -22,7 +22,7 @@ static NPY_INLINE uint64_t rotl(const uint64_t value, unsigned int rot) {
#endif
}
-static NPY_INLINE uint64_t sfc64_next(uint64_t *s) {
+static inline uint64_t sfc64_next(uint64_t *s) {
const uint64_t tmp = s[0] + s[1] + s[3]++;
s[0] = s[1] ^ (s[1] >> 11);
@@ -33,11 +33,11 @@ static NPY_INLINE uint64_t sfc64_next(uint64_t *s) {
}
-static NPY_INLINE uint64_t sfc64_next64(sfc64_state *state) {
+static inline uint64_t sfc64_next64(sfc64_state *state) {
return sfc64_next(&state->s[0]);
}
-static NPY_INLINE uint32_t sfc64_next32(sfc64_state *state) {
+static inline uint32_t sfc64_next32(sfc64_state *state) {
uint64_t next;
if (state->has_uint32) {
state->has_uint32 = 0;
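Note: only the first lines of sfc64_next are visible in this hunk. As a reading aid, here is a hedged Python sketch of the full SFC64 update; the shift/rotate constants (11, 3, 24) for the part not shown are taken from the upstream SFC64 description, not from this patch:

    MASK64 = (1 << 64) - 1

    def rotl64(value, rot):
        # 64-bit rotate left, matching the rotl() helper above.
        return ((value << rot) | (value >> (64 - rot))) & MASK64

    def sfc64_next(s):
        # s is a 4-element list of uint64 words: [a, b, c, counter].
        tmp = (s[0] + s[1] + s[3]) & MASK64
        s[3] = (s[3] + 1) & MASK64           # the "s[3]++" counter increment
        s[0] = s[1] ^ (s[1] >> 11)
        s[1] = (s[2] + (s[2] << 3)) & MASK64
        s[2] = (rotl64(s[2], 24) + tmp) & MASK64
        return tmp

The sfc64_next32 wrapper below it follows the same buffering pattern as philox_next32: one 64-bit draw is split into two 32-bit outputs, with the upper half cached behind the has_uint32 flag.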
diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py
index 58d966adf..fa2ae866b 100644
--- a/numpy/random/tests/test_direct.py
+++ b/numpy/random/tests/test_direct.py
@@ -148,6 +148,46 @@ def test_seedsequence():
assert len(dummy.spawn(10)) == 10
+def test_generator_spawning():
+ """ Test spawning new generators and bit_generators directly.
+ """
+ rng = np.random.default_rng()
+ seq = rng.bit_generator.seed_seq
+ new_ss = seq.spawn(5)
+ expected_keys = [seq.spawn_key + (i,) for i in range(5)]
+ assert [c.spawn_key for c in new_ss] == expected_keys
+
+ new_bgs = rng.bit_generator.spawn(5)
+ expected_keys = [seq.spawn_key + (i,) for i in range(5, 10)]
+ assert [bg.seed_seq.spawn_key for bg in new_bgs] == expected_keys
+
+ new_rngs = rng.spawn(5)
+ expected_keys = [seq.spawn_key + (i,) for i in range(10, 15)]
+ found_keys = [rng.bit_generator.seed_seq.spawn_key for rng in new_rngs]
+ assert found_keys == expected_keys
+
+ # Sanity check that streams are actually different:
+ assert new_rngs[0].uniform() != new_rngs[1].uniform()
+
+
+def test_non_spawnable():
+ from numpy.random.bit_generator import ISeedSequence
+
+ class FakeSeedSequence:
+ def generate_state(self, n_words, dtype=np.uint32):
+ return np.zeros(n_words, dtype=dtype)
+
+ ISeedSequence.register(FakeSeedSequence)
+
+ rng = np.random.default_rng(FakeSeedSequence())
+
+ with pytest.raises(TypeError, match="The underlying SeedSequence"):
+ rng.spawn(5)
+
+ with pytest.raises(TypeError, match="The underlying SeedSequence"):
+ rng.bit_generator.spawn(5)
+
+
class Base:
dtype = np.uint64
data2 = data1 = {}
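Note: test_generator_spawning above exercises the spawning API on Generator and BitGenerator objects, while test_non_spawnable checks that a seed sequence registered against ISeedSequence but lacking spawn support raises TypeError. A brief usage sketch of the API under test (the seed value is arbitrary):

    import numpy as np

    parent = np.random.default_rng(12345)
    children = parent.spawn(3)   # three independent child Generators
    # Each child's SeedSequence extends the parent's spawn_key by one element,
    # so the derived streams are reproducible yet statistically independent.
    print([c.bit_generator.seed_seq.spawn_key for c in children])
    print(parent.bit_generator.spawn(2))   # spawning at the BitGenerator level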
diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py
index 04b13cb8c..67a84d57d 100644
--- a/numpy/random/tests/test_extending.py
+++ b/numpy/random/tests/test_extending.py
@@ -6,6 +6,7 @@ import sys
import warnings
import numpy as np
from numpy.distutils.misc_util import exec_mod_from_location
+from numpy.testing import IS_WASM
try:
import cffi
@@ -22,7 +23,8 @@ try:
# numba issue gh-4733
warnings.filterwarnings('always', '', DeprecationWarning)
import numba
-except ImportError:
+except (ImportError, SystemError):
+ # Certain numpy/numba versions trigger a SystemError due to a numba bug
numba = None
try:
@@ -31,7 +33,7 @@ try:
except ImportError:
cython = None
else:
- from numpy.compat import _pep440
+ from numpy._utils import _pep440
# Cython 0.29.30 is required for Python 3.11 and there are
# other fixes in the 0.29 series that are needed even for earlier
# Python versions.
@@ -41,6 +43,8 @@ else:
# too old or wrong cython, skip the test
cython = None
+
+@pytest.mark.skipif(IS_WASM, reason="Can't start subprocess")
@pytest.mark.skipif(cython is None, reason="requires cython")
@pytest.mark.slow
def test_cython(tmp_path):
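Note: with _pep440 now imported from numpy._utils, the Cython version gate reads roughly as sketched below; the exact surrounding code is elided in the hunk, so the attribute used to obtain the installed version here is illustrative only:

    import cython
    from numpy._utils import _pep440

    # Skip the cython-dependent test when the installed Cython is older than
    # the minimum noted in the comment above.
    if _pep440.parse(cython.__version__) < _pep440.Version('0.29.30'):
        cython = None   # too old or wrong cython, skip the test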
diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py
index 1710df8ca..5c4c2cbf9 100644
--- a/numpy/random/tests/test_generator_mt19937.py
+++ b/numpy/random/tests/test_generator_mt19937.py
@@ -8,7 +8,7 @@ from numpy.linalg import LinAlgError
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_allclose,
assert_warns, assert_no_warnings, assert_array_equal,
- assert_array_almost_equal, suppress_warnings)
+ assert_array_almost_equal, suppress_warnings, IS_WASM)
from numpy.random import Generator, MT19937, SeedSequence, RandomState
@@ -345,6 +345,8 @@ class TestIntegers:
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, [0],
endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, [ubnd+1], [ubnd],
+ endpoint=endpoint, dtype=dt)
def test_bounds_checking_array(self, endpoint):
for dt in self.itype:
@@ -1391,6 +1393,7 @@ class TestRandomDist:
[5, 5, 3, 1, 2, 4]]])
assert_array_equal(actual, desired)
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal(self, method):
random = Generator(MT19937(self.seed))
@@ -2452,6 +2455,7 @@ class TestBroadcast:
assert actual.shape == (3, 0, 7, 4)
+@pytest.mark.skipif(IS_WASM, reason="can't start thread")
class TestThread:
# make sure each state produces the same sequence even in threads
def setup_method(self):
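Note: the extra assert_raises added to the bounds-checking test above covers array-valued bounds where low exceeds both high and the dtype's range. A hedged illustration of the behavior being asserted (dtype and values are examples only):

    import numpy as np

    rng = np.random.default_rng()
    try:
        # low=[256] is out of range for uint8 and also above high=[255]
        rng.integers([256], [255], endpoint=True, dtype=np.uint8)
    except ValueError as exc:
        print(exc)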
diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py
index 0227d6502..7c2b6867c 100644
--- a/numpy/random/tests/test_generator_mt19937_regressions.py
+++ b/numpy/random/tests/test_generator_mt19937_regressions.py
@@ -3,32 +3,32 @@ import numpy as np
import pytest
from numpy.random import Generator, MT19937
-mt19937 = Generator(MT19937())
-
class TestRegression:
+ def setup_method(self):
+ self.mt19937 = Generator(MT19937(121263137472525314065))
+
def test_vonmises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
- r = mt19937.vonmises(mu, 1, 50)
+ r = self.mt19937.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
- assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4))
- assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0))
+ assert_(np.all(self.mt19937.hypergeometric(3, 18, 11, size=10) < 4))
+ assert_(np.all(self.mt19937.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems
- assert_(mt19937.hypergeometric(*args) > 0)
+ assert_(self.mt19937.hypergeometric(*args) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
- mt19937 = Generator(MT19937(0))
- rvsn = mt19937.logseries(0.8, size=N)
+ rvsn = self.mt19937.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
@@ -65,41 +65,38 @@ class TestRegression:
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
- mt19937.multivariate_normal([0], [[0]], size=1)
- mt19937.multivariate_normal([0], [[0]], size=np.int_(1))
- mt19937.multivariate_normal([0], [[0]], size=np.int64(1))
+ self.mt19937.multivariate_normal([0], [[0]], size=1)
+ self.mt19937.multivariate_normal([0], [[0]], size=np.int_(1))
+ self.mt19937.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
- mt19937 = Generator(MT19937(1234567890))
- x = mt19937.beta(0.0001, 0.0001, size=100)
+ x = self.mt19937.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta')
def test_choice_sum_of_probs_tolerance(self):
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
- mt19937 = Generator(MT19937(1234))
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
- c = mt19937.choice(a, p=probs)
+ c = self.mt19937.choice(a, p=probs)
assert_(c in a)
with pytest.raises(ValueError):
- mt19937.choice(a, p=probs*0.9)
+ self.mt19937.choice(a, p=probs*0.9)
def test_shuffle_of_array_of_different_length_strings(self):
# Test that permuting an array of different length strings
# will not cause a segfault on garbage collection
# Tests gh-7710
- mt19937 = Generator(MT19937(1234))
a = np.array(['a', 'a' * 1000])
for _ in range(100):
- mt19937.shuffle(a)
+ self.mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
@@ -109,17 +106,17 @@ class TestRegression:
# Test that permuting an array of objects will not cause
# a segfault on garbage collection.
# See gh-7719
- mt19937 = Generator(MT19937(1234))
a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
- mt19937.shuffle(a)
+ self.mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_permutation_subclass(self):
+
class N(np.ndarray):
pass
@@ -142,9 +139,16 @@ class TestRegression:
assert_array_equal(m.__array__(), np.arange(5))
def test_gamma_0(self):
- assert mt19937.standard_gamma(0.0) == 0.0
- assert_array_equal(mt19937.standard_gamma([0.0]), 0.0)
+ assert self.mt19937.standard_gamma(0.0) == 0.0
+ assert_array_equal(self.mt19937.standard_gamma([0.0]), 0.0)
- actual = mt19937.standard_gamma([0.0], dtype='float')
+ actual = self.mt19937.standard_gamma([0.0], dtype='float')
expected = np.array([0.], dtype=np.float32)
assert_array_equal(actual, expected)
+
+ def test_geometric_tiny_prob(self):
+ # Regression test for gh-17007.
+ # When p = 1e-30, the probability that a sample will exceed 2**63-1
+ # is 0.9999999999907766, so we expect the result to be all 2**63-1.
+ assert_array_equal(self.mt19937.geometric(p=1e-30, size=3),
+ np.iinfo(np.int64).max)
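Note: the 0.9999999999907766 figure in test_geometric_tiny_prob follows from P(X > k) = (1 - p)**k for a geometric variate, with k = 2**63 - 1. A quick numerical check:

    import math

    p = 1e-30
    k = 2**63 - 1
    # Probability that a Geometric(p) sample exceeds the largest int64 value:
    print(math.exp(k * math.log1p(-p)))   # ~0.9999999999907766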
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index 6b4c82bc9..0f4e7925a 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -6,7 +6,7 @@ import numpy as np
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
- suppress_warnings
+ suppress_warnings, IS_WASM
)
from numpy import random
import sys
@@ -1615,6 +1615,7 @@ class TestBroadcast:
assert_raises(ValueError, logseries, bad_p_two * 3)
+@pytest.mark.skipif(IS_WASM, reason="can't start thread")
class TestThread:
# make sure each state produces the same sequence even in threads
def setup_method(self):
diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py
index be9a9c339..3a2961098 100644
--- a/numpy/random/tests/test_randomstate.py
+++ b/numpy/random/tests/test_randomstate.py
@@ -8,7 +8,7 @@ import pytest
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
- suppress_warnings
+ suppress_warnings, IS_WASM
)
from numpy.random import MT19937, PCG64
@@ -812,6 +812,10 @@ class TestRandomDist:
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
+ def test_dirichlet_zero_alpha(self):
+ y = random.default_rng().dirichlet([5, 9, 0, 8])
+ assert_equal(y[2], 0)
+
def test_dirichlet_alpha_non_contiguous(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
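Note: test_dirichlet_zero_alpha above pins down the behavior for a zero concentration parameter; a short usage sketch (seed chosen arbitrarily):

    import numpy as np

    rng = np.random.default_rng(0)
    y = rng.dirichlet([5, 9, 0, 8])
    # The component with alpha == 0 comes out exactly zero,
    # and the draw still sums to 1.
    print(y, y[2] == 0.0, np.isclose(y.sum(), 1.0))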
@@ -1894,6 +1898,7 @@ class TestBroadcast:
assert_raises(ValueError, logseries, bad_p_two * 3)
+@pytest.mark.skipif(IS_WASM, reason="can't start thread")
class TestThread:
# make sure each state produces the same sequence even in threads
def setup_method(self):
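Note: the TestThread classes gated by the new IS_WASM skips all verify the same property: identically seeded generators produce identical streams regardless of which thread drives them. A minimal stand-alone sketch of that property (not the test's exact harness):

    import threading
    import numpy as np

    main_out = np.random.default_rng(1234).random(1000)
    thread_out = np.empty(1000)

    def worker(buf):
        # A separately constructed, identically seeded generator in a worker thread
        buf[:] = np.random.default_rng(1234).random(1000)

    t = threading.Thread(target=worker, args=(thread_out,))
    t.start()
    t.join()
    assert np.array_equal(main_out, thread_out)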