summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com>2021-09-21 09:18:37 +0200
committerDimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com>2021-09-21 20:29:43 +0200
commit83960267dc097742cb67ef575504afa56f82b102 (patch)
tree5de763d6385fc3fc630db0992cd6b2d2ff765ea6
parente467a284d1a2055337ce73cd92aadb491aa9a776 (diff)
downloadnumpy-83960267dc097742cb67ef575504afa56f82b102.tar.gz
DOC: Typos found by codespell
-rwxr-xr-xdoc/preprocess.py2
-rw-r--r--doc/source/dev/development_advanced_debugging.rst8
-rw-r--r--doc/source/dev/development_environment.rst2
-rw-r--r--doc/source/dev/howto-docs.rst4
-rw-r--r--doc/source/reference/arrays.scalars.rst2
-rw-r--r--doc/source/reference/c-api/iterator.rst2
-rw-r--r--doc/source/reference/c-api/types-and-structures.rst4
-rw-r--r--doc/source/reference/random/bit_generators/index.rst2
-rw-r--r--doc/source/reference/routines.ma.rst8
-rw-r--r--doc/source/reference/routines.polynomials.rst4
-rw-r--r--doc/source/reference/simd/simd-optimizations.rst16
-rw-r--r--doc/source/reference/ufuncs.rst2
-rw-r--r--doc/source/release/1.14.0-notes.rst2
-rw-r--r--doc/source/release/1.15.0-notes.rst4
-rw-r--r--doc/source/release/1.16.0-notes.rst4
-rw-r--r--doc/source/release/1.19.0-notes.rst2
-rw-r--r--doc/source/release/1.20.0-notes.rst2
-rw-r--r--doc/source/release/1.21.0-notes.rst2
-rw-r--r--doc/source/release/1.8.0-notes.rst6
-rw-r--r--doc/source/release/1.9.0-notes.rst2
-rw-r--r--doc/source/user/basics.creation.rst2
-rw-r--r--doc/source/user/c-info.how-to-extend.rst2
-rw-r--r--doc/source/user/c-info.python-as-glue.rst2
-rw-r--r--doc/source/user/how-to-how-to.rst2
-rw-r--r--doc/source/user/misc.rst2
-rw-r--r--doc/source/user/numpy-for-matlab-users.rst6
-rw-r--r--doc/ufuncs.rst.txt2
-rw-r--r--numpy/__init__.pyi2
-rw-r--r--numpy/conftest.py2
-rw-r--r--numpy/core/arrayprint.pyi4
-rw-r--r--numpy/core/einsumfunc.pyi2
-rw-r--r--numpy/core/machar.py2
-rw-r--r--numpy/core/records.py2
-rw-r--r--numpy/core/src/_simd/_simd_inc.h.src2
-rw-r--r--numpy/core/src/common/npy_cpu_dispatch.h2
-rw-r--r--numpy/core/src/common/npy_cpu_features.c.src2
-rw-r--r--numpy/core/src/common/npy_cpuinfo_parser.h4
-rw-r--r--numpy/core/src/common/simd/emulate_maskop.h2
-rw-r--r--numpy/core/src/common/simd/intdiv.h2
-rw-r--r--numpy/core/src/common/simd/neon/math.h2
-rw-r--r--numpy/core/src/common/simd/vsx/operators.h2
-rw-r--r--numpy/core/src/multiarray/abstractdtypes.c2
-rw-r--r--numpy/core/src/multiarray/array_coercion.c4
-rw-r--r--numpy/core/src/multiarray/arrayobject.c4
-rw-r--r--numpy/core/src/multiarray/convert_datatype.c4
-rw-r--r--numpy/core/src/multiarray/datetime.c2
-rw-r--r--numpy/core/src/multiarray/dtypemeta.c2
-rw-r--r--numpy/core/src/multiarray/einsum_sumprod.c.src18
-rw-r--r--numpy/core/src/multiarray/lowlevel_strided_loops.c.src2
-rw-r--r--numpy/core/src/multiarray/nditer_api.c6
-rw-r--r--numpy/core/src/multiarray/nditer_constr.c2
-rw-r--r--numpy/core/src/multiarray/nditer_templ.c.src2
-rw-r--r--numpy/core/src/umath/_scaled_float_dtype.c6
-rw-r--r--numpy/core/src/umath/_umath_tests.c.src2
-rw-r--r--numpy/core/src/umath/_umath_tests.dispatch.c2
-rw-r--r--numpy/core/src/umath/loops_utils.h.src2
-rw-r--r--numpy/core/tests/test__exceptions.py2
-rw-r--r--numpy/core/tests/test_array_coercion.py4
-rw-r--r--numpy/core/tests/test_casting_unittests.py2
-rw-r--r--numpy/core/tests/test_cpu_dispatcher.py4
-rw-r--r--numpy/core/tests/test_deprecations.py2
-rw-r--r--numpy/core/tests/test_einsum.py2
-rw-r--r--numpy/core/tests/test_nditer.py4
-rw-r--r--numpy/distutils/command/build_ext.py2
-rw-r--r--numpy/distutils/fcompiler/gnu.py2
-rw-r--r--numpy/distutils/misc_util.py4
-rw-r--r--numpy/distutils/system_info.py3
-rw-r--r--numpy/f2py/cfuncs.py2
-rw-r--r--numpy/f2py/tests/test_return_character.py4
-rw-r--r--numpy/lib/format.py2
-rw-r--r--numpy/lib/nanfunctions.py2
-rw-r--r--numpy/lib/npyio.py2
-rw-r--r--numpy/lib/twodim_base.py4
-rw-r--r--numpy/linalg/linalg.py4
-rw-r--r--numpy/linalg/linalg.pyi2
-rw-r--r--numpy/linalg/tests/test_linalg.py8
-rw-r--r--numpy/ma/mrecords.py8
-rw-r--r--numpy/ma/mrecords.pyi2
-rw-r--r--numpy/ma/tests/test_core.py12
-rw-r--r--numpy/ma/tests/test_mrecords.py2
-rw-r--r--numpy/polynomial/__init__.py2
-rw-r--r--numpy/polynomial/_polybase.py2
-rw-r--r--numpy/polynomial/chebyshev.py8
-rw-r--r--numpy/polynomial/laguerre.py2
-rw-r--r--numpy/random/_generator.pyx2
-rw-r--r--numpy/typing/__init__.py2
-rw-r--r--numpy/typing/_generic_alias.py2
-rw-r--r--numpy/typing/tests/data/fail/bitwise_ops.py2
-rw-r--r--numpy/typing/tests/data/fail/numerictypes.py2
-rw-r--r--numpy/typing/tests/data/pass/simple.py2
-rw-r--r--numpy/typing/tests/test_typing.py6
-rw-r--r--pavement.py2
-rw-r--r--setup.cfg3
-rw-r--r--tools/gitpod/gitpod.Dockerfile2
94 files changed, 159 insertions, 155 deletions
diff --git a/doc/preprocess.py b/doc/preprocess.py
index e88d9608e..870d3e123 100755
--- a/doc/preprocess.py
+++ b/doc/preprocess.py
@@ -9,7 +9,7 @@ def main():
def doxy_gen(root_path):
"""
- Generate Doxygen configration file.
+ Generate Doxygen configuration file.
"""
confs = doxy_config(root_path)
build_path = os.path.join(root_path, "doc", "build", "doxygen")
diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst
index fa4014fdb..18a7f6ae9 100644
--- a/doc/source/dev/development_advanced_debugging.rst
+++ b/doc/source/dev/development_advanced_debugging.rst
@@ -3,8 +3,8 @@ Advanced debugging tools
========================
If you reached here, you want to dive into, or use, more advanced tooling.
-This is usually not necessary for first time contributers and most
-day-to-day developement.
+This is usually not necessary for first time contributors and most
+day-to-day development.
These are used more rarely, for example close to a new NumPy release,
or when a large or particular complex change was made.
@@ -25,7 +25,7 @@ narrow down.
We do not expect any of these tools to be run by most contributors.
However, you can ensure that we can track down such issues more easily easier:
-* Tests should cover all code paths, incluing error paths.
+* Tests should cover all code paths, including error paths.
* Try to write short and simple tests. If you have a very complicated test
consider creating an additional simpler test as well.
This can be helpful, because often it is only easy to find which test
@@ -112,7 +112,7 @@ where ``PYTHONMALLOC=malloc`` is necessary to avoid false positives from python
itself.
Depending on the system and valgrind version, you may see more false positives.
``valgrind`` supports "suppressions" to ignore some of these, and Python does
-have a supression file (and even a compile time option) which may help if you
+have a suppression file (and even a compile time option) which may help if you
find it necessary.
Valgrind helps:
diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst
index 665198c69..37cf6f7af 100644
--- a/doc/source/dev/development_environment.rst
+++ b/doc/source/dev/development_environment.rst
@@ -122,7 +122,7 @@ source tree is to use::
NumPy uses a series of tests to probe the compiler and libc libraries for
-funtions. The results are stored in ``_numpyconfig.h`` and ``config.h`` files
+functions. The results are stored in ``_numpyconfig.h`` and ``config.h`` files
using ``HAVE_XXX`` definitions. These tests are run during the ``build_src``
phase of the ``_multiarray_umath`` module in the ``generate_config_h`` and
``generate_numpyconfig_h`` functions. Since the output of these calls includes
diff --git a/doc/source/dev/howto-docs.rst b/doc/source/dev/howto-docs.rst
index cc17a1feb..93fec509c 100644
--- a/doc/source/dev/howto-docs.rst
+++ b/doc/source/dev/howto-docs.rst
@@ -60,7 +60,7 @@ category, but other rewordings -- even for grammar -- require a judgment call,
which raises the bar. Test the waters by first presenting the fix as an issue.
Some functions/objects like numpy.ndarray.transpose, numpy.array etc. defined in
-C-extension modules have their docstrings defined seperately in `_add_newdocs.py
+C-extension modules have their docstrings defined separately in `_add_newdocs.py
<https://github.com/numpy/numpy/blob/main/numpy/core/_add_newdocs.py>`__
**********************
@@ -72,7 +72,7 @@ Your frustrations using our documents are our best guide to what needs fixing.
If you write a missing doc you join the front line of open source, but it's
a meaningful contribution just to let us know what's missing. If you want to
compose a doc, run your thoughts by the `mailing list
-<https://mail.python.org/mailman/listinfo/numpy-discussion>`__ for futher
+<https://mail.python.org/mailman/listinfo/numpy-discussion>`__ for further
ideas and feedback. If you want to alert us to a gap,
`open an issue <https://github.com/numpy/numpy/issues>`__. See
`this issue <https://github.com/numpy/numpy/issues/15760>`__ for an example.
diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst
index abef66692..ccab0101e 100644
--- a/doc/source/reference/arrays.scalars.rst
+++ b/doc/source/reference/arrays.scalars.rst
@@ -399,7 +399,7 @@ are also provided.
complex256
Alias for `numpy.clongdouble`, named after its size in bits.
- The existance of these aliases depends on the platform.
+ The existence of these aliases depends on the platform.
Other aliases
~~~~~~~~~~~~~
diff --git a/doc/source/reference/c-api/iterator.rst b/doc/source/reference/c-api/iterator.rst
index 2208cdd2f..83644d8b2 100644
--- a/doc/source/reference/c-api/iterator.rst
+++ b/doc/source/reference/c-api/iterator.rst
@@ -1230,7 +1230,7 @@ Functions For Iteration
.. c:function:: npy_intp* NpyIter_GetIndexPtr(NpyIter* iter)
This gives back a pointer to the index being tracked, or NULL
- if no index is being tracked. It is only useable if one of
+ if no index is being tracked. It is only usable if one of
the flags :c:data:`NPY_ITER_C_INDEX` or :c:data:`NPY_ITER_F_INDEX`
were specified during construction.
diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst
index 39a17cc72..36293ce99 100644
--- a/doc/source/reference/c-api/types-and-structures.rst
+++ b/doc/source/reference/c-api/types-and-structures.rst
@@ -94,7 +94,7 @@ PyArray_Type and PyArrayObject
PyArray_Descr *descr;
int flags;
PyObject *weakreflist;
- /* version dependend private members */
+ /* version dependent private members */
} PyArrayObject;
.. c:macro:: PyObject_HEAD
@@ -178,7 +178,7 @@ PyArray_Type and PyArrayObject
.. note::
- Further members are considered private and version dependend. If the size
+ Further members are considered private and version dependent. If the size
of the struct is important for your code, special care must be taken.
A possible use-case when this is relevant is subclassing in C.
If your code relies on ``sizeof(PyArrayObject)`` to be constant,
diff --git a/doc/source/reference/random/bit_generators/index.rst b/doc/source/reference/random/bit_generators/index.rst
index c5c349806..211f0d60e 100644
--- a/doc/source/reference/random/bit_generators/index.rst
+++ b/doc/source/reference/random/bit_generators/index.rst
@@ -4,7 +4,7 @@ Bit Generators
--------------
The random values produced by :class:`~Generator`
-orignate in a BitGenerator. The BitGenerators do not directly provide
+originate in a BitGenerator. The BitGenerators do not directly provide
random numbers and only contains methods used for seeding, getting or
setting the state, jumping or advancing the state, and for accessing
low-level wrappers for consumption by code that can efficiently
diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst
index d961cbf02..2db325293 100644
--- a/doc/source/reference/routines.ma.rst
+++ b/doc/source/reference/routines.ma.rst
@@ -287,11 +287,11 @@ Filling a masked array
_____
-Masked arrays arithmetics
-=========================
+Masked arrays arithmetic
+========================
-Arithmetics
-~~~~~~~~~~~
+Arithmetic
+~~~~~~~~~~
.. autosummary::
:toctree: generated/
diff --git a/doc/source/reference/routines.polynomials.rst b/doc/source/reference/routines.polynomials.rst
index ecfb012f0..4aea963c0 100644
--- a/doc/source/reference/routines.polynomials.rst
+++ b/doc/source/reference/routines.polynomials.rst
@@ -22,7 +22,7 @@ Therefore :mod:`numpy.polynomial` is recommended for new coding.
the polynomial functions prefixed with *poly* accessible from the `numpy`
namespace (e.g. `numpy.polyadd`, `numpy.polyval`, `numpy.polyfit`, etc.).
- The term *polynomial package* refers to the new API definied in
+ The term *polynomial package* refers to the new API defined in
`numpy.polynomial`, which includes the convenience classes for the
different kinds of polynomials (`numpy.polynomial.Polynomial`,
`numpy.polynomial.Chebyshev`, etc.).
@@ -110,7 +110,7 @@ See the documentation for the
`convenience classes <routines.polynomials.classes>`_ for further details on
the ``domain`` and ``window`` attributes.
-Another major difference bewteen the legacy polynomial module and the
+Another major difference between the legacy polynomial module and the
polynomial package is polynomial fitting. In the old module, fitting was
done via the `~numpy.polyfit` function. In the polynomial package, the
`~numpy.polynomial.polynomial.Polynomial.fit` class method is preferred. For
diff --git a/doc/source/reference/simd/simd-optimizations.rst b/doc/source/reference/simd/simd-optimizations.rst
index 956824321..9de6d1734 100644
--- a/doc/source/reference/simd/simd-optimizations.rst
+++ b/doc/source/reference/simd/simd-optimizations.rst
@@ -14,7 +14,7 @@ written only once. There are three layers:
written using the maximum set of intrinsics possible.
- At *compile* time, a distutils command is used to define the minimum and
maximum features to support, based on user choice and compiler support. The
- appropriate macros are overlayed with the platform / architecture intrinsics,
+ appropriate macros are overlaid with the platform / architecture intrinsics,
and the three loops are compiled.
- At *runtime import*, the CPU is probed for the set of supported intrinsic
features. A mechanism is used to grab the pointer to the most appropriate
@@ -89,7 +89,7 @@ NOTES
~~~~~~~~~~~~~
- CPU features and other options are case-insensitive.
-- The order of the requsted optimizations doesn't matter.
+- The order of the requested optimizations doesn't matter.
- Either commas or spaces can be used as a separator, e.g. ``--cpu-dispatch``\ =
"avx2 avx512f" or ``--cpu-dispatch``\ = "avx2, avx512f" both work, but the
@@ -113,7 +113,7 @@ NOTES
compiler native flag ``-march=native`` or ``-xHost`` or ``QxHost`` is
enabled through environment variable ``CFLAGS``
-- The validation process for the requsted optimizations when it comes to
+- The validation process for the requested optimizations when it comes to
``--cpu-baseline`` isn't strict. For example, if the user requested
``AVX2`` but the compiler doesn't support it then we just skip it and return
the maximum optimization that the compiler can handle depending on the
@@ -379,15 +379,15 @@ through ``--cpu-dispatch``, but it can also represent other options such as:
#include "numpy/utils.h" // NPY_CAT, NPY_TOSTR
#ifndef NPY__CPU_TARGET_CURRENT
- // wrapping the dispatch-able source only happens to the addtional optimizations
- // but if the keyword 'baseline' provided within the configuration statments,
+ // wrapping the dispatch-able source only happens to the additional optimizations
+ // but if the keyword 'baseline' provided within the configuration statements,
// the infrastructure will add extra compiling for the dispatch-able source by
// passing it as-is to the compiler without any changes.
#define CURRENT_TARGET(X) X
#define NPY__CPU_TARGET_CURRENT baseline // for printing only
#else
// since we reach to this point, that's mean we're dealing with
- // the addtional optimizations, so it could be SSE42 or AVX512F
+ // the additional optimizations, so it could be SSE42 or AVX512F
#define CURRENT_TARGET(X) NPY_CAT(NPY_CAT(X, _), NPY__CPU_TARGET_CURRENT)
#endif
// Macro 'CURRENT_TARGET' adding the current target as suffux to the exported symbols,
@@ -418,7 +418,7 @@ through ``--cpu-dispatch``, but it can also represent other options such as:
#undef NPY__CPU_DISPATCH_BASELINE_CALL
#undef NPY__CPU_DISPATCH_CALL
// nothing strange here, just a normal preprocessor callback
- // enabled only if 'baseline' spesfied withiin the configration statments
+ // enabled only if 'baseline' specified within the configuration statements
#define NPY__CPU_DISPATCH_BASELINE_CALL(CB, ...) \
NPY__CPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))
// 'NPY__CPU_DISPATCH_CALL' is an abstract macro is used for dispatching
@@ -427,7 +427,7 @@ through ``--cpu-dispatch``, but it can also represent other options such as:
// @param CHK, Expected a macro that can be used to detect CPU features
// in runtime, which takes a CPU feature name without string quotes and
// returns the testing result in a shape of boolean value.
- // NumPy already has macro called "NPY_CPU_HAVE", which fit this requirment.
+ // NumPy already has macro called "NPY_CPU_HAVE", which fits this requirement.
//
// @param CB, a callback macro that expected to be called multiple times depending
// on the required optimizations, the callback should receive the following arguments:
diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst
index b832dad04..6ace5b233 100644
--- a/doc/source/reference/ufuncs.rst
+++ b/doc/source/reference/ufuncs.rst
@@ -185,7 +185,7 @@ attribute of the ufunc. (This list may be missing DTypes not defined
by NumPy.)
The ``signature`` only specifies the DType class/type. For example, it
-can specifiy that the operation should be ``datetime64`` or ``float64``
+can specify that the operation should be ``datetime64`` or ``float64``
operation. It does not specify the ``datetime64`` time-unit or the
``float64`` byte-order.
diff --git a/doc/source/release/1.14.0-notes.rst b/doc/source/release/1.14.0-notes.rst
index 8ee876fd3..346b5af99 100644
--- a/doc/source/release/1.14.0-notes.rst
+++ b/doc/source/release/1.14.0-notes.rst
@@ -332,7 +332,7 @@ eliminating their use internally and two new C-API functions,
* ``PyArray_SetWritebackIfCopyBase``
* ``PyArray_ResolveWritebackIfCopy``,
-have been added together with a complimentary flag,
+have been added together with a complementary flag,
``NPY_ARRAY_WRITEBACKIFCOPY``. Using the new functionality also requires that
some flags be changed when new arrays are created, to wit:
``NPY_ARRAY_INOUT_ARRAY`` should be replaced by ``NPY_ARRAY_INOUT_ARRAY2`` and
diff --git a/doc/source/release/1.15.0-notes.rst b/doc/source/release/1.15.0-notes.rst
index 7235ca915..2d9d068e5 100644
--- a/doc/source/release/1.15.0-notes.rst
+++ b/doc/source/release/1.15.0-notes.rst
@@ -326,8 +326,8 @@ passed explicitly, and are not yet computed automatically.
No longer does an IQR of 0 result in ``n_bins=1``, rather the number of bins
chosen is related to the data size in this situation.
-The edges retuned by `histogram`` and ``histogramdd`` now match the data float type
------------------------------------------------------------------------------------
+The edges returned by `histogram`` and ``histogramdd`` now match the data float type
+------------------------------------------------------------------------------------
When passed ``np.float16``, ``np.float32``, or ``np.longdouble`` data, the
returned edges are now of the same dtype. Previously, ``histogram`` would only
return the same type if explicit bins were given, and ``histogram`` would
diff --git a/doc/source/release/1.16.0-notes.rst b/doc/source/release/1.16.0-notes.rst
index 17d24160a..122f20eba 100644
--- a/doc/source/release/1.16.0-notes.rst
+++ b/doc/source/release/1.16.0-notes.rst
@@ -119,7 +119,7 @@ NaT comparisons
Consistent with the behavior of NaN, all comparisons other than inequality
checks with datetime64 or timedelta64 NaT ("not-a-time") values now always
return ``False``, and inequality checks with NaT now always return ``True``.
-This includes comparisons beteween NaT values. For compatibility with the
+This includes comparisons between NaT values. For compatibility with the
old behavior, use ``np.isnat`` to explicitly check for NaT or convert
datetime64/timedelta64 arrays with ``.astype(np.int64)`` before making
comparisons.
@@ -365,7 +365,7 @@ Alpine Linux (and other musl c library distros) support
We now default to use `fenv.h` for floating point status error reporting.
Previously we had a broken default that sometimes would not report underflow,
overflow, and invalid floating point operations. Now we can support non-glibc
-distrubutions like Alpine Linux as long as they ship `fenv.h`.
+distributions like Alpine Linux as long as they ship `fenv.h`.
Speedup ``np.block`` for large arrays
-------------------------------------
diff --git a/doc/source/release/1.19.0-notes.rst b/doc/source/release/1.19.0-notes.rst
index 8f5c2c0ce..410890697 100644
--- a/doc/source/release/1.19.0-notes.rst
+++ b/doc/source/release/1.19.0-notes.rst
@@ -402,7 +402,7 @@ Ability to disable madvise hugepages
------------------------------------
On Linux NumPy has previously added support for madavise hugepages which can
improve performance for very large arrays. Unfortunately, on older Kernel
-versions this led to peformance regressions, thus by default the support has
+versions this led to performance regressions, thus by default the support has
been disabled on kernels before version 4.6. To override the default, you can
use the environment variable::
diff --git a/doc/source/release/1.20.0-notes.rst b/doc/source/release/1.20.0-notes.rst
index b8b7a0c79..494e4f19e 100644
--- a/doc/source/release/1.20.0-notes.rst
+++ b/doc/source/release/1.20.0-notes.rst
@@ -842,7 +842,7 @@ The compiler command selection for Fortran Portland Group Compiler is changed
in `numpy.distutils.fcompiler`. This only affects the linking command. This
forces the use of the executable provided by the command line option (if
provided) instead of the pgfortran executable. If no executable is provided to
-the command line option it defaults to the pgf90 executable, wich is an alias
+the command line option it defaults to the pgf90 executable, which is an alias
for pgfortran according to the PGI documentation.
(`gh-16730 <https://github.com/numpy/numpy/pull/16730>`__)
diff --git a/doc/source/release/1.21.0-notes.rst b/doc/source/release/1.21.0-notes.rst
index 270cc32de..88a4503de 100644
--- a/doc/source/release/1.21.0-notes.rst
+++ b/doc/source/release/1.21.0-notes.rst
@@ -522,7 +522,7 @@ either of these distributions are produced.
Placeholder annotations have been improved
------------------------------------------
All placeholder annotations, that were previously annotated as ``typing.Any``,
-have been improved. Where appropiate they have been replaced with explicit
+have been improved. Where appropriate they have been replaced with explicit
function definitions, classes or other miscellaneous objects.
(`gh-18934 <https://github.com/numpy/numpy/pull/18934>`__)
diff --git a/doc/source/release/1.8.0-notes.rst b/doc/source/release/1.8.0-notes.rst
index 80c39f8bc..65a471b92 100644
--- a/doc/source/release/1.8.0-notes.rst
+++ b/doc/source/release/1.8.0-notes.rst
@@ -33,7 +33,7 @@ Future Changes
The Datetime64 type remains experimental in this release. In 1.9 there will
-probably be some changes to make it more useable.
+probably be some changes to make it more usable.
The diagonal method currently returns a new array and raises a
FutureWarning. In 1.9 it will return a readonly view.
@@ -315,8 +315,8 @@ If used with the `overwrite_input` option the array will now only be partially
sorted instead of fully sorted.
-Overrideable operand flags in ufunc C-API
------------------------------------------
+Overridable operand flags in ufunc C-API
+----------------------------------------
When creating a ufunc, the default ufunc operand flags can be overridden
via the new op_flags attribute of the ufunc object. For example, to set
the operand flag for the first input to read/write:
diff --git a/doc/source/release/1.9.0-notes.rst b/doc/source/release/1.9.0-notes.rst
index 7ea29e354..a19a05cb7 100644
--- a/doc/source/release/1.9.0-notes.rst
+++ b/doc/source/release/1.9.0-notes.rst
@@ -389,7 +389,7 @@ uses a per-state lock instead of the GIL.
MaskedArray support for more complicated base classes
-----------------------------------------------------
Built-in assumptions that the baseclass behaved like a plain array are being
-removed. In particalur, ``repr`` and ``str`` should now work more reliably.
+removed. In particular, ``repr`` and ``str`` should now work more reliably.
C-API
diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst
index 5fb4f66f6..84ff1c30e 100644
--- a/doc/source/user/basics.creation.rst
+++ b/doc/source/user/basics.creation.rst
@@ -115,7 +115,7 @@ examples are shown::
Note: best practice for :func:`numpy.arange` is to use integer start, end, and
step values. There are some subtleties regarding ``dtype``. In the second
example, the ``dtype`` is defined. In the third example, the array is
-``dtype=float`` to accomodate the step size of ``0.1``. Due to roundoff error,
+``dtype=float`` to accommodate the step size of ``0.1``. Due to roundoff error,
the ``stop`` value is sometimes included.
:func:`numpy.linspace` will create arrays with a specified number of elements, and
diff --git a/doc/source/user/c-info.how-to-extend.rst b/doc/source/user/c-info.how-to-extend.rst
index ebb4b7518..96727a177 100644
--- a/doc/source/user/c-info.how-to-extend.rst
+++ b/doc/source/user/c-info.how-to-extend.rst
@@ -433,7 +433,7 @@ writeable). The syntax is
The requirements flag allows specification of what kind of
array is acceptable. If the object passed in does not satisfy
- this requirements then a copy is made so that thre returned
+ this requirements then a copy is made so that the returned
object will satisfy the requirements. these ndarray can use a
very generic pointer to memory. This flag allows specification
of the desired properties of the returned array object. All
diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst
index 8643d0dd1..2798aa08a 100644
--- a/doc/source/user/c-info.python-as-glue.rst
+++ b/doc/source/user/c-info.python-as-glue.rst
@@ -1115,7 +1115,7 @@ SWIG
Simplified Wrapper and Interface Generator (SWIG) is an old and fairly
stable method for wrapping C/C++-libraries to a large variety of other
languages. It does not specifically understand NumPy arrays but can be
-made useable with NumPy through the use of typemaps. There are some
+made usable with NumPy through the use of typemaps. There are some
sample typemaps in the numpy/tools/swig directory under numpy.i together
with an example module that makes use of them. SWIG excels at wrapping
large C/C++ libraries because it can (almost) parse their headers and
diff --git a/doc/source/user/how-to-how-to.rst b/doc/source/user/how-to-how-to.rst
index 13d2b405f..cdf1ad5c3 100644
--- a/doc/source/user/how-to-how-to.rst
+++ b/doc/source/user/how-to-how-to.rst
@@ -102,7 +102,7 @@ knowledge).
We distinguish both tutorials and how-tos from `Explanations`, which are
deep dives intended to give understanding rather than immediate assistance,
-and `References`, which give complete, autoritative data on some concrete
+and `References`, which give complete, authoritative data on some concrete
part of NumPy (like its API) but aren't obligated to paint a broader picture.
For more on tutorials, see :doc:`content/tutorial-style-guide`
diff --git a/doc/source/user/misc.rst b/doc/source/user/misc.rst
index f0a7f5e4c..316473151 100644
--- a/doc/source/user/misc.rst
+++ b/doc/source/user/misc.rst
@@ -143,7 +143,7 @@ Only a survey of the choices. Little detail on how each works.
- Plusses:
- part of Python standard library
- - good for interfacing to existing sharable libraries, particularly
+ - good for interfacing to existing shareable libraries, particularly
Windows DLLs
- avoids API/reference counting issues
- good numpy support: arrays have all these in their ctypes
diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst
index ed0be82a0..21e23482a 100644
--- a/doc/source/user/numpy-for-matlab-users.rst
+++ b/doc/source/user/numpy-for-matlab-users.rst
@@ -313,11 +313,11 @@ Linear algebra equivalents
* - ``a(:,find(v > 0.5))``
- ``a[:,np.nonzero(v > 0.5)[0]]``
- - extract the columms of ``a`` where vector v > 0.5
+ - extract the columns of ``a`` where vector v > 0.5
* - ``a(:,find(v>0.5))``
- ``a[:, v.T > 0.5]``
- - extract the columms of ``a`` where column vector v > 0.5
+ - extract the columns of ``a`` where column vector v > 0.5
* - ``a(a<0.5)=0``
- ``a[a < 0.5]=0``
@@ -819,6 +819,6 @@ found in the `topical software page <https://scipy.org/topical-software.html>`__
See
`List of Python software: scripting
<https://en.wikipedia.org/wiki/List_of_Python_software#Embedded_as_a_scripting_language>`_
-for a list of softwares that use Python as a scripting language
+for a list of software that use Python as a scripting language
MATLAB® and SimuLink® are registered trademarks of The MathWorks, Inc.
diff --git a/doc/ufuncs.rst.txt b/doc/ufuncs.rst.txt
index d628b3f95..9257d3cb0 100644
--- a/doc/ufuncs.rst.txt
+++ b/doc/ufuncs.rst.txt
@@ -18,7 +18,7 @@ Some benchmarks show that this results in a significant slow-down
The approach is therefore, to loop over the largest-dimension (just like
the NO_BUFFER) portion of the code. All arrays will either have N or
-1 in this last dimension (or their would be a mis-match error). The
+1 in this last dimension (or their would be a mismatch error). The
buffer size is B.
If N <= B (and only if needed), we copy the entire last-dimension into
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 33e2af72c..5adf69988 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -1988,7 +1988,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
# The last overload is for catching recursive objects whose
# nesting is too deep.
# The first overload is for catching `bytes` (as they are a subtype of
- # `Sequence[int]`) and `str`. As `str` is a recusive sequence of
+ # `Sequence[int]`) and `str`. As `str` is a recursive sequence of
# strings, it will pass through the final overload otherwise
@overload
diff --git a/numpy/conftest.py b/numpy/conftest.py
index e15ee0845..fd5fdd77d 100644
--- a/numpy/conftest.py
+++ b/numpy/conftest.py
@@ -33,7 +33,7 @@ hypothesis.settings.register_profile(
suppress_health_check=hypothesis.HealthCheck.all(),
)
# Note that the default profile is chosen based on the presence
-# of pytest.ini, but can be overriden by passing the
+# of pytest.ini, but can be overridden by passing the
# --hypothesis-profile=NAME argument to pytest.
_pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini")
hypothesis.settings.load_profile(
diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi
index df22efed6..3731e6578 100644
--- a/numpy/core/arrayprint.pyi
+++ b/numpy/core/arrayprint.pyi
@@ -1,8 +1,8 @@
from types import TracebackType
from typing import Any, Optional, Callable, Union, Type, Literal, TypedDict, SupportsIndex
-# Using a private class is by no means ideal, but it is simply a consquence
-# of a `contextlib.context` returning an instance of aformentioned class
+# Using a private class is by no means ideal, but it is simply a consequence
+# of a `contextlib.context` returning an instance of aforementioned class
from contextlib import _GeneratorContextManager
from numpy import (
diff --git a/numpy/core/einsumfunc.pyi b/numpy/core/einsumfunc.pyi
index 52025d502..aabb04c47 100644
--- a/numpy/core/einsumfunc.pyi
+++ b/numpy/core/einsumfunc.pyi
@@ -41,7 +41,7 @@ __all__: List[str]
# TODO: Properly handle the `casting`-based combinatorics
# TODO: We need to evaluate the content `__subscripts` in order
# to identify whether or an array or scalar is returned. At a cursory
-# glance this seems like something that can quite easilly be done with
+# glance this seems like something that can quite easily be done with
# a mypy plugin.
# Something like `is_scalar = bool(__subscripts.partition("->")[-1])`
@overload
diff --git a/numpy/core/machar.py b/numpy/core/machar.py
index 04dad4d77..c77be793f 100644
--- a/numpy/core/machar.py
+++ b/numpy/core/machar.py
@@ -1,5 +1,5 @@
"""
-Machine arithmetics - determine the parameters of the
+Machine arithmetic - determine the parameters of the
floating-point arithmetic system
Author: Pearu Peterson, September 2003
diff --git a/numpy/core/records.py b/numpy/core/records.py
index fd5f1ab39..ce206daa1 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -68,7 +68,7 @@ _byteorderconv = {'b':'>',
'i':'|'}
# formats regular expression
-# allows multidimension spec with a tuple syntax in front
+# allows multidimensional spec with a tuple syntax in front
# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '
# are equally allowed
diff --git a/numpy/core/src/_simd/_simd_inc.h.src b/numpy/core/src/_simd/_simd_inc.h.src
index 9858fc0dc..fbdf982c2 100644
--- a/numpy/core/src/_simd/_simd_inc.h.src
+++ b/numpy/core/src/_simd/_simd_inc.h.src
@@ -113,7 +113,7 @@ typedef struct
int is_scalar:1;
// returns '1' if the type represent a vector
int is_vector:1;
- // returns the len of multi-vector if the type reprsent x2 or x3 vector
+ // returns the len of multi-vector if the type represent x2 or x3 vector
// otherwise returns 0, e.g. returns 2 if data type is simd_data_vu8x2
int is_vectorx;
// returns the equivalent scalar data type e.g. simd_data_vu8 -> simd_data_u8
diff --git a/numpy/core/src/common/npy_cpu_dispatch.h b/numpy/core/src/common/npy_cpu_dispatch.h
index 8c2b40c27..e814cd425 100644
--- a/numpy/core/src/common/npy_cpu_dispatch.h
+++ b/numpy/core/src/common/npy_cpu_dispatch.h
@@ -196,7 +196,7 @@
* Example:
* Assume we have a dispatch-able source exporting the following function:
*
- * @targets baseline avx2 avx512_skx // configration statements
+ * @targets baseline avx2 avx512_skx // configuration statements
*
* void NPY_CPU_DISPATCH_CURFX(dispatch_me)(const int *src, int *dst)
* {
diff --git a/numpy/core/src/common/npy_cpu_features.c.src b/numpy/core/src/common/npy_cpu_features.c.src
index 1e0f4a571..a2383c45f 100644
--- a/numpy/core/src/common/npy_cpu_features.c.src
+++ b/numpy/core/src/common/npy_cpu_features.c.src
@@ -230,7 +230,7 @@ npy__cpu_try_disable_env(void)
notsupp_cur[flen] = ' '; notsupp_cur += flen + 1;
goto next;
}
- // Finaly we can disable it
+ // Finally we can disable it
npy__cpu_have[feature_id] = 0;
next:
feature = strtok(NULL, delim);
diff --git a/numpy/core/src/common/npy_cpuinfo_parser.h b/numpy/core/src/common/npy_cpuinfo_parser.h
index 9e85e3a2f..364873a23 100644
--- a/numpy/core/src/common/npy_cpuinfo_parser.h
+++ b/numpy/core/src/common/npy_cpuinfo_parser.h
@@ -123,7 +123,7 @@ read_file(const char* pathname, char* buffer, size_t buffsize)
}
/*
- * Extract the content of a the first occurence of a given field in
+ * Extract the content of the first occurrence of a given field in
* the content of /proc/cpuinfo and return it as a heap-allocated
* string that must be freed by the caller.
*
@@ -138,7 +138,7 @@ extract_cpuinfo_field(const char* buffer, int buflen, const char* field)
int len;
const char *p, *q;
- /* Look for first field occurence, and ensures it starts the line. */
+ /* Look for first field occurrence, and ensures it starts the line. */
p = buffer;
for (;;) {
p = memmem(p, bufend-p, field, fieldlen);
diff --git a/numpy/core/src/common/simd/emulate_maskop.h b/numpy/core/src/common/simd/emulate_maskop.h
index 7e7446bc5..41e397c2d 100644
--- a/numpy/core/src/common/simd/emulate_maskop.h
+++ b/numpy/core/src/common/simd/emulate_maskop.h
@@ -1,5 +1,5 @@
/**
- * This header is used internaly by all current supported SIMD extention,
+ * This header is used internally by all currently supported SIMD extensions,
* execpt for AVX512.
*/
#ifndef NPY_SIMD
diff --git a/numpy/core/src/common/simd/intdiv.h b/numpy/core/src/common/simd/intdiv.h
index f6ea9abf2..5d2ab2906 100644
--- a/numpy/core/src/common/simd/intdiv.h
+++ b/numpy/core/src/common/simd/intdiv.h
@@ -39,7 +39,7 @@
* for (; len >= vstep; src += vstep, dst += vstep, len -= vstep) {
* npyv_s32 a = npyv_load_s32(*src); // load s32 vector from memory
* a = npyv_divc_s32(a, divisor); // divide all elements by x
- * npyv_store_s32(dst, a); // store s32 vector into memroy
+ * npyv_store_s32(dst, a); // store s32 vector into memory
* }
*
** NOTES:
diff --git a/numpy/core/src/common/simd/neon/math.h b/numpy/core/src/common/simd/neon/math.h
index ced82d1de..19ea6f22f 100644
--- a/numpy/core/src/common/simd/neon/math.h
+++ b/numpy/core/src/common/simd/neon/math.h
@@ -31,7 +31,7 @@ NPY_FINLINE npyv_f32 npyv_square_f32(npyv_f32 a)
const npyv_f32 zero = vdupq_n_f32(0.0f);
const npyv_u32 pinf = vdupq_n_u32(0x7f800000);
npyv_u32 is_zero = vceqq_f32(a, zero), is_inf = vceqq_u32(vreinterpretq_u32_f32(a), pinf);
- // guard agianst floating-point division-by-zero error
+ // guard against floating-point division-by-zero error
npyv_f32 guard_byz = vbslq_f32(is_zero, vreinterpretq_f32_u32(pinf), a);
// estimate to (1/√a)
npyv_f32 rsqrte = vrsqrteq_f32(guard_byz);
diff --git a/numpy/core/src/common/simd/vsx/operators.h b/numpy/core/src/common/simd/vsx/operators.h
index 23c5d0dbe..d34057ff3 100644
--- a/numpy/core/src/common/simd/vsx/operators.h
+++ b/numpy/core/src/common/simd/vsx/operators.h
@@ -103,7 +103,7 @@ NPYV_IMPL_VSX_BIN_B64(or)
NPYV_IMPL_VSX_BIN_B64(xor)
// NOT
-// note: we implement npyv_not_b*(boolen types) for internal use*/
+// note: we implement npyv_not_b*(boolean types) for internal use*/
#define NPYV_IMPL_VSX_NOT_INT(VEC_LEN) \
NPY_FINLINE npyv_u##VEC_LEN npyv_not_u##VEC_LEN(npyv_u##VEC_LEN a) \
{ return vec_nor(a, a); } \
diff --git a/numpy/core/src/multiarray/abstractdtypes.c b/numpy/core/src/multiarray/abstractdtypes.c
index 99573f089..cc1d7fad8 100644
--- a/numpy/core/src/multiarray/abstractdtypes.c
+++ b/numpy/core/src/multiarray/abstractdtypes.c
@@ -157,7 +157,7 @@ int_common_dtype(PyArray_DTypeMeta *NPY_UNUSED(cls), PyArray_DTypeMeta *other)
}
else if (PyTypeNum_ISNUMBER(other->type_num) ||
other->type_num == NPY_TIMEDELTA) {
- /* All other numeric types (ant timdelta) are preserved: */
+ /* All other numeric types (and timedelta) are preserved: */
Py_INCREF(other);
return other;
}
diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c
index 90b50097a..847bdafc3 100644
--- a/numpy/core/src/multiarray/array_coercion.c
+++ b/numpy/core/src/multiarray/array_coercion.c
@@ -136,7 +136,7 @@ _prime_global_pytype_to_type_dict(void)
*
* This assumes that the DType class is guaranteed to hold on the
* python type (this assumption is guaranteed).
- * This functionality supercedes ``_typenum_fromtypeobj``.
+ * This functionality supersedes ``_typenum_fromtypeobj``.
*
* @param DType DType to map the python type to
* @param pytype Python type to map from
@@ -1400,7 +1400,7 @@ PyArray_DiscoverDTypeAndShape(
* These should be largely deprecated, and represent only the DType class
* for most `dtype` parameters.
*
- * TODO: This function should eventually recieve a deprecation warning and
+ * TODO: This function should eventually receive a deprecation warning and
* be removed.
*
* @param descr
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index d653bfc22..9b9df08f2 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -858,7 +858,7 @@ _uni_release(char *ptr, int nc)
relfunc(aptr, N1); \
return -1; \
} \
- val = compfunc(aptr, bptr, N1, N2); \
+ val = compfunc(aptr, bptr, N1, N2); \
*dptr = (val CMP 0); \
PyArray_ITER_NEXT(iself); \
PyArray_ITER_NEXT(iother); \
@@ -870,7 +870,7 @@ _uni_release(char *ptr, int nc)
#define _reg_loop(CMP) { \
while(size--) { \
- val = compfunc((void *)iself->dataptr, \
+ val = compfunc((void *)iself->dataptr, \
(void *)iother->dataptr, \
N1, N2); \
*dptr = (val CMP 0); \
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index b6755e91d..12dd99504 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -1649,7 +1649,7 @@ PyArray_ResultType(
}
Py_INCREF(all_DTypes[i_all]);
/*
- * Leave the decriptor empty, if we need it, we will have to go
+ * Leave the descriptor empty, if we need it, we will have to go
* to more extreme lengths unfortunately.
*/
all_descriptors[i_all] = NULL;
@@ -2243,7 +2243,7 @@ PyArray_AddCastingImplementation(PyBoundArrayMethodObject *meth)
* Add a new casting implementation using a PyArrayMethod_Spec.
*
* @param spec
- * @param private If private, allow slots not publically exposed.
+ * @param private If private, allow slots not publicly exposed.
* @return 0 on success -1 on failure
*/
NPY_NO_EXPORT int
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 093090b4c..11a941e72 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -427,7 +427,7 @@ PyArray_DatetimeStructToDatetime(
}
/*NUMPY_API
- * Create a timdelta value from a filled timedelta struct and resolution unit.
+ * Create a timedelta value from a filled timedelta struct and resolution unit.
*
* TO BE REMOVED - NOT USED INTERNALLY.
*/
diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c
index 059ec201e..cbde91b76 100644
--- a/numpy/core/src/multiarray/dtypemeta.c
+++ b/numpy/core/src/multiarray/dtypemeta.c
@@ -101,7 +101,7 @@ static PyObject *
legacy_dtype_default_new(PyArray_DTypeMeta *self,
PyObject *args, PyObject *kwargs)
{
- /* TODO: This should allow endianess and possibly metadata */
+ /* TODO: This should allow endianness and possibly metadata */
if (NPY_DT_is_parametric(self)) {
/* reject parametric ones since we would need to get unit, etc. info */
PyErr_Format(PyExc_TypeError,
diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src
index 333b8e188..29ceabd71 100644
--- a/numpy/core/src/multiarray/einsum_sumprod.c.src
+++ b/numpy/core/src/multiarray/einsum_sumprod.c.src
@@ -80,7 +80,7 @@ static NPY_GCC_OPT_3 @temptype@ @name@_sum_of_arr(@type@ *data, npy_intp count)
/* Use aligned instructions if possible */
const int is_aligned = EINSUM_IS_ALIGNED(data);
const int vstep = npyv_nlanes_@sfx@;
- npyv_@sfx@ vaccum = npyv_zero_@sfx@();
+ npyv_@sfx@ v_accum = npyv_zero_@sfx@();
const npy_intp vstepx4 = vstep * 4;
/**begin repeat1
@@ -98,15 +98,15 @@ static NPY_GCC_OPT_3 @temptype@ @name@_sum_of_arr(@type@ *data, npy_intp count)
npyv_@sfx@ a01 = npyv_add_@sfx@(a0, a1);
npyv_@sfx@ a23 = npyv_add_@sfx@(a2, a3);
npyv_@sfx@ a0123 = npyv_add_@sfx@(a01, a23);
- vaccum = npyv_add_@sfx@(a0123, vaccum);
+ v_accum = npyv_add_@sfx@(a0123, v_accum);
}
}
/**end repeat1**/
for (; count > 0; count -= vstep, data += vstep) {
npyv_@sfx@ a = npyv_load_tillz_@sfx@(data, count);
- vaccum = npyv_add_@sfx@(a, vaccum);
+ v_accum = npyv_add_@sfx@(a, v_accum);
}
- accum = npyv_sum_@sfx@(vaccum);
+ accum = npyv_sum_@sfx@(v_accum);
npyv_cleanup();
#else
#ifndef NPY_DISABLE_OPTIMIZATION
@@ -485,7 +485,7 @@ static NPY_GCC_OPT_3 void
/* Use aligned instructions if possible */
const int is_aligned = EINSUM_IS_ALIGNED(data0) && EINSUM_IS_ALIGNED(data1);
const int vstep = npyv_nlanes_@sfx@;
- npyv_@sfx@ vaccum = npyv_zero_@sfx@();
+ npyv_@sfx@ v_accum = npyv_zero_@sfx@();
/**begin repeat2
* #cond = if(is_aligned), else#
@@ -501,19 +501,19 @@ static NPY_GCC_OPT_3 void
npyv_@sfx@ a@i@ = npyv_@ld@_@sfx@(data0 + vstep * @i@);
npyv_@sfx@ b@i@ = npyv_@ld@_@sfx@(data1 + vstep * @i@);
/**end repeat3**/
- npyv_@sfx@ ab3 = npyv_muladd_@sfx@(a3, b3, vaccum);
+ npyv_@sfx@ ab3 = npyv_muladd_@sfx@(a3, b3, v_accum);
npyv_@sfx@ ab2 = npyv_muladd_@sfx@(a2, b2, ab3);
npyv_@sfx@ ab1 = npyv_muladd_@sfx@(a1, b1, ab2);
- vaccum = npyv_muladd_@sfx@(a0, b0, ab1);
+ v_accum = npyv_muladd_@sfx@(a0, b0, ab1);
}
}
/**end repeat2**/
for (; count > 0; count -= vstep, data0 += vstep, data1 += vstep) {
npyv_@sfx@ a = npyv_load_tillz_@sfx@(data0, count);
npyv_@sfx@ b = npyv_load_tillz_@sfx@(data1, count);
- vaccum = npyv_muladd_@sfx@(a, b, vaccum);
+ v_accum = npyv_muladd_@sfx@(a, b, v_accum);
}
- accum = npyv_sum_@sfx@(vaccum);
+ accum = npyv_sum_@sfx@(v_accum);
npyv_cleanup();
#else
#ifndef NPY_DISABLE_OPTIMIZATION
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index b32664cc9..e313d2447 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -1849,7 +1849,7 @@ mapiter_@name@(PyArrayMapIterObject *mit)
return -1;
}
#else
- /* The operand order is reveresed here */
+ /* The operand order is reversed here */
char *args[2] = {subspace_ptrs[1], subspace_ptrs[0]};
npy_intp strides[2] = {subspace_strides[1], subspace_strides[0]};
if (NPY_UNLIKELY(cast_info.func(&cast_info.context,
diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c
index 811eece7d..0f0a79ddf 100644
--- a/numpy/core/src/multiarray/nditer_api.c
+++ b/numpy/core/src/multiarray/nditer_api.c
@@ -2130,7 +2130,7 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
/*
* Try to do make the outersize as big as possible. This allows
* it to shrink when processing the last bit of the outer reduce loop,
- * then grow again at the beginnning of the next outer reduce loop.
+ * then grow again at the beginning of the next outer reduce loop.
*/
NBF_REDUCE_OUTERSIZE(bufferdata) = (NAD_SHAPE(reduce_outeraxisdata)-
NAD_INDEX(reduce_outeraxisdata));
@@ -2804,9 +2804,9 @@ npyiter_checkreducesize(NpyIter *iter, npy_intp count,
if (coord != 0) {
/*
* In this case, it is only safe to reuse the buffer if the amount
- * of data copied is not more then the current axes, as is the
+ * of data copied is not more than the current axes, as is the
* case when reuse_reduce_loops was active already.
- * It should be in principle OK when the idim loop returns immidiatly.
+ * It should be in principle OK when the idim loop returns immediately.
*/
NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REUSE_REDUCE_LOOPS;
}
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index 57dbb3a94..bf32e1f6b 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -1405,7 +1405,7 @@ check_mask_for_writemasked_reduction(NpyIter *iter, int iop)
/*
* Check whether a reduction is OK based on the flags and the operand being
* readwrite. This path is deprecated, since usually only specific axes
- * should be reduced. If axes are specified explicitely, the flag is
+ * should be reduced. If axes are specified explicitly, the flag is
* unnecessary.
*/
static int
diff --git a/numpy/core/src/multiarray/nditer_templ.c.src b/numpy/core/src/multiarray/nditer_templ.c.src
index 05ce6ae75..3f91a482b 100644
--- a/numpy/core/src/multiarray/nditer_templ.c.src
+++ b/numpy/core/src/multiarray/nditer_templ.c.src
@@ -132,7 +132,7 @@ npyiter_iternext_itflags@tag_itflags@_dims@tag_ndim@_iters@tag_nop@(
/* Reset the 1st and 2nd indices to 0 */
NAD_INDEX(axisdata0) = 0;
NAD_INDEX(axisdata1) = 0;
- /* Reset the 1st and 2nd pointers to the value of the 3nd */
+ /* Reset the 1st and 2nd pointers to the value of the 3rd */
for (istrides = 0; istrides < nstrides; ++istrides) {
NAD_PTRS(axisdata0)[istrides] = NAD_PTRS(axisdata2)[istrides];
NAD_PTRS(axisdata1)[istrides] = NAD_PTRS(axisdata2)[istrides];
diff --git a/numpy/core/src/umath/_scaled_float_dtype.c b/numpy/core/src/umath/_scaled_float_dtype.c
index 866f636a0..eeef33a3d 100644
--- a/numpy/core/src/umath/_scaled_float_dtype.c
+++ b/numpy/core/src/umath/_scaled_float_dtype.c
@@ -733,9 +733,9 @@ NPY_NO_EXPORT PyObject *
get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args))
{
/* Allow calling the function multiple times. */
- static npy_bool initalized = NPY_FALSE;
+ static npy_bool initialized = NPY_FALSE;
- if (initalized) {
+ if (initialized) {
Py_INCREF(&PyArray_SFloatDType);
return (PyObject *)&PyArray_SFloatDType;
}
@@ -764,6 +764,6 @@ get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args))
return NULL;
}
- initalized = NPY_TRUE;
+ initialized = NPY_TRUE;
return (PyObject *)&PyArray_SFloatDType;
}
diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src
index 0cd673831..ed4c617a4 100644
--- a/numpy/core/src/umath/_umath_tests.c.src
+++ b/numpy/core/src/umath/_umath_tests.c.src
@@ -586,7 +586,7 @@ fail:
return NULL;
}
-// Testing the utilites of the CPU dispatcher
+// Testing the utilities of the CPU dispatcher
#ifndef NPY_DISABLE_OPTIMIZATION
#include "_umath_tests.dispatch.h"
#endif
diff --git a/numpy/core/src/umath/_umath_tests.dispatch.c b/numpy/core/src/umath/_umath_tests.dispatch.c
index 66058550e..9d8df4c86 100644
--- a/numpy/core/src/umath/_umath_tests.dispatch.c
+++ b/numpy/core/src/umath/_umath_tests.dispatch.c
@@ -1,5 +1,5 @@
/**
- * Testing the utilites of the CPU dispatcher
+ * Testing the utilities of the CPU dispatcher
*
* @targets $werror baseline
* SSE2 SSE41 AVX2
diff --git a/numpy/core/src/umath/loops_utils.h.src b/numpy/core/src/umath/loops_utils.h.src
index 1a2a5a32b..762e9ee59 100644
--- a/numpy/core/src/umath/loops_utils.h.src
+++ b/numpy/core/src/umath/loops_utils.h.src
@@ -6,7 +6,7 @@
/**
* Old versions of MSVC causes ambiguous link errors when we deal with large SIMD kernels
- * which lead to break the build, probably releated to the following bug:
+ * which lead to break the build, probably related to the following bug:
* https://developercommunity.visualstudio.com/content/problem/415095/internal-compiler-error-with-perfectly-forwarded-r.html
*/
#if defined(_MSC_VER) && _MSC_VER < 1916
diff --git a/numpy/core/tests/test__exceptions.py b/numpy/core/tests/test__exceptions.py
index c87412aa4..10b87e052 100644
--- a/numpy/core/tests/test__exceptions.py
+++ b/numpy/core/tests/test__exceptions.py
@@ -40,7 +40,7 @@ class TestArrayMemoryError:
# 1023.9999 Mib should round to 1 GiB
assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB'
assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB'
- # larger than sys.maxsize, adding larger prefices isn't going to help
+ # larger than sys.maxsize, adding larger prefixes isn't going to help
# anyway.
assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB'
diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py
index 076d8e43f..293f5a68f 100644
--- a/numpy/core/tests/test_array_coercion.py
+++ b/numpy/core/tests/test_array_coercion.py
@@ -376,7 +376,7 @@ class TestScalarDiscovery:
def test_scalar_to_int_coerce_does_not_cast(self, dtype):
"""
Signed integers are currently different in that they do not cast other
- NumPy scalar, but instead use scalar.__int__(). The harcoded
+ NumPy scalar, but instead use scalar.__int__(). The hardcoded
exception to this rule is `np.array(scalar, dtype=integer)`.
"""
dtype = np.dtype(dtype)
@@ -444,7 +444,7 @@ class TestTimeScalars:
# never use casting. This is because casting will error in this
# case, and traditionally in most cases the behaviour is maintained
# like this. (`np.array(scalar, dtype="U6")` would have failed before)
- # TODO: This discrepency _should_ be resolved, either by relaxing the
+ # TODO: This discrepancy _should_ be resolved, either by relaxing the
# cast, or by deprecating the first part.
scalar = np.datetime64(val, unit)
dtype = np.dtype(dtype)
diff --git a/numpy/core/tests/test_casting_unittests.py b/numpy/core/tests/test_casting_unittests.py
index a13e807e2..d41d6dcc0 100644
--- a/numpy/core/tests/test_casting_unittests.py
+++ b/numpy/core/tests/test_casting_unittests.py
@@ -127,7 +127,7 @@ CAST_TABLE = _get_cancast_table()
class TestChanges:
"""
- These test cases excercise some behaviour changes
+ These test cases exercise some behaviour changes
"""
@pytest.mark.parametrize("string", ["S", "U"])
@pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
diff --git a/numpy/core/tests/test_cpu_dispatcher.py b/numpy/core/tests/test_cpu_dispatcher.py
index 8712dee1a..2f7eac7e8 100644
--- a/numpy/core/tests/test_cpu_dispatcher.py
+++ b/numpy/core/tests/test_cpu_dispatcher.py
@@ -4,7 +4,7 @@ from numpy.testing import assert_equal
def test_dispatcher():
"""
- Testing the utilites of the CPU dispatcher
+ Testing the utilities of the CPU dispatcher
"""
targets = (
"SSE2", "SSE41", "AVX2",
@@ -16,7 +16,7 @@ def test_dispatcher():
for feature in reversed(targets):
# skip baseline features, by the default `CCompilerOpt` do not generate separated objects
# for the baseline, just one object combined all of them via 'baseline' option
- # within the configuration statments.
+ # within the configuration statements.
if feature in __cpu_baseline__:
continue
# check compiler and running machine support
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 44c76e0b8..1d0c5dfac 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -791,7 +791,7 @@ class TestFutureWarningArrayLikeNotIterable(_DeprecationTestCase):
*not* define the sequence protocol.
NOTE: Tests for the versions including __len__ and __getitem__ exist
- in `test_array_coercion.py` and they can be modified or ammended
+ in `test_array_coercion.py` and they can be modified or amended
when this deprecation expired.
"""
blueprint = np.arange(10)
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index c697d0c2d..78c5e527b 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -1025,7 +1025,7 @@ class TestEinsumPath:
self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
def test_path_type_input(self):
- # Test explicit path handeling
+ # Test explicit path handling
path_test = self.build_operands('dcc,fce,ea,dbf->ab')
path, path_str = np.einsum_path(*path_test, optimize=False)
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 6b743ab27..fbf6da0e1 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -2819,7 +2819,7 @@ def test_iter_writemasked_decref():
for buf, mask_buf in it:
buf[...] = (3, singleton)
- del buf, mask_buf, it # delete everything to ensure corrrect cleanup
+ del buf, mask_buf, it # delete everything to ensure correct cleanup
if HAS_REFCOUNT:
# The buffer would have included additional items, they must be
@@ -3202,7 +3202,7 @@ def test_debug_print(capfd):
Currently uses a subprocess to avoid dealing with the C level `printf`s.
"""
# the expected output with all addresses and sizes stripped (they vary
- # and/or are platform dependend).
+ # and/or are platform dependent).
expected = """
------ BEGIN ITERATOR DUMP ------
| Iterator Address:
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index 84ec8aa2c..b8378d473 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -602,7 +602,7 @@ class build_ext (old_build_ext):
# Expand possible fake static libraries to objects;
# make sure to iterate over a copy of the list as
# "fake" libraries will be removed as they are
- # enountered
+ # encountered
for lib in libraries[:]:
for libdir in library_dirs:
fake_lib = os.path.join(libdir, lib + '.fobjects')
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index 02372f5e6..f9891e93b 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -113,7 +113,7 @@ class GnuFCompiler(FCompiler):
# If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
# and leave it alone. But, distutils will complain if the
# environment's value is different from the one in the Python
- # Makefile used to build Python. We let disutils handle this
+ # Makefile used to build Python. We let distutils handle this
# error checking.
if not target:
# If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index a903f3ea3..c9e051237 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -128,8 +128,8 @@ def quote_args(args):
def allpath(name):
"Convert a /-separated pathname to one using the OS's path separator."
- splitted = name.split('/')
- return os.path.join(*splitted)
+ split = name.split('/')
+ return os.path.join(*split)
def rel_path(path, parent_path):
"""Return path relative to parent_path."""
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 8467e1c19..c0404b0e8 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -414,7 +414,8 @@ def get_standard_file(fname):
def _parse_env_order(base_order, env):
""" Parse an environment variable `env` by splitting with "," and only returning elements from `base_order`
- This method will sequence the environment variable and check for their invidual elements in `base_order`.
+ This method will sequence the environment variable and check for their
+ individual elements in `base_order`.
The items in the environment variable may be negated via '^item' or '!itema,itemb'.
It must start with ^/! to negate all options.
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index 8f42b4029..fb1688744 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -487,7 +487,7 @@ STRINGPADN replaces null values with padding values from the right.
`to` must have size of at least N bytes.
If the `to[N-1]` has null value, then replace it and all the
-preceeding nulls with the given padding.
+preceding nulls with the given padding.
STRINGPADN(to, N, PADDING, NULLVALUE) is an inverse operation.
*/
diff --git a/numpy/f2py/tests/test_return_character.py b/numpy/f2py/tests/test_return_character.py
index 7d4ced914..2c999ed0b 100644
--- a/numpy/f2py/tests/test_return_character.py
+++ b/numpy/f2py/tests/test_return_character.py
@@ -80,7 +80,7 @@ cf2py intent(out) ts
end
"""
- @pytest.mark.xfail(IS_S390X, reason="calback returns ' '")
+ @pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
@pytest.mark.parametrize('name', 't0,t1,t5,s0,s1,s5,ss'.split(','))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
@@ -139,7 +139,7 @@ module f90_return_char
end module f90_return_char
"""
- @pytest.mark.xfail(IS_S390X, reason="calback returns ' '")
+ @pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
@pytest.mark.parametrize('name', 't0,t1,t5,ts,s0,s1,s5,ss'.split(','))
def test_all(self, name):
self.check_function(getattr(self.module.f90_return_char, name), name)
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index e566e253d..3967b43ee 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -291,7 +291,7 @@ def descr_to_dtype(descr):
Parameters
----------
descr : object
- The object retreived by dtype.descr. Can be passed to
+ The object retrieved by dtype.descr. Can be passed to
`numpy.dtype()` in order to replicate the input dtype.
Returns
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 658ec5255..4e77f0d92 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -975,7 +975,7 @@ def _nanmedian1d(arr1d, overwrite_input=False):
)
if arr1d_parsed.size == 0:
- # Ensure that a nan-esque scalar of the appropiate type (and unit)
+ # Ensure that a nan-esque scalar of the appropriate type (and unit)
# is returned for `timedelta64` and `complexfloating`
return arr1d[-1]
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index b91bf440f..a40b1ca66 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -1634,7 +1634,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first line after
- the first `skip_header` lines. This line can optionally be preceeded
+ the first `skip_header` lines. This line can optionally be preceded
by a comment delimiter. If `names` is a sequence or a single-string of
comma-separated names, the names will be used to define the field names
in a structured dtype. If `names` is None, the names of the dtype
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 83c028061..811faff79 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -67,7 +67,7 @@ def fliplr(m):
See Also
--------
flipud : Flip array in the up/down direction.
- flip : Flip array in one or more dimesions.
+ flip : Flip array in one or more dimensions.
rot90 : Rotate array counterclockwise.
Notes
@@ -120,7 +120,7 @@ def flipud(m):
See Also
--------
fliplr : Flip array in the left/right direction.
- flip : Flip array in one or more dimesions.
+ flip : Flip array in one or more dimensions.
rot90 : Rotate array counterclockwise.
Notes
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 2b686839a..95780d19d 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -1864,7 +1864,7 @@ def matrix_rank(A, tol=None, hermitian=False):
References
----------
- .. [1] MATLAB reference documention, "Rank"
+ .. [1] MATLAB reference documentation, "Rank"
https://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
@@ -2159,7 +2159,7 @@ def lstsq(a, b, rcond="warn"):
r"""
Return the least-squares solution to a linear matrix equation.
- Computes the vector `x` that approximatively solves the equation
+ Computes the vector `x` that approximately solves the equation
``a @ x = b``. The equation may be under-, well-, or over-determined
(i.e., the number of linearly independent rows of `a` can be less than,
equal to, or greater than its number of linearly independent columns).
diff --git a/numpy/linalg/linalg.pyi b/numpy/linalg/linalg.pyi
index a35207d32..a60b9539e 100644
--- a/numpy/linalg/linalg.pyi
+++ b/numpy/linalg/linalg.pyi
@@ -97,7 +97,7 @@ def inv(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
@overload
def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
-# TODO: The supported input and output dtypes are dependant on the value of `n`.
+# TODO: The supported input and output dtypes are dependent on the value of `n`.
# For example: `n < 0` always casts integer types to float64
def matrix_power(
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index dd059fb63..a45323bb3 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -1956,8 +1956,8 @@ class TestMultiDot:
assert_almost_equal(multi_dot([A, B]), A.dot(B))
assert_almost_equal(multi_dot([A, B]), np.dot(A, B))
- def test_basic_function_with_dynamic_programing_optimization(self):
- # multi_dot with four or more arguments uses the dynamic programing
+ def test_basic_function_with_dynamic_programming_optimization(self):
+ # multi_dot with four or more arguments uses the dynamic programming
# optimization and therefore deserve a separate
A = np.random.random((6, 2))
B = np.random.random((2, 6))
@@ -2018,8 +2018,8 @@ class TestMultiDot:
assert_almost_equal(out, A.dot(B))
assert_almost_equal(out, np.dot(A, B))
- def test_dynamic_programing_optimization_and_out(self):
- # multi_dot with four or more arguments uses the dynamic programing
+ def test_dynamic_programming_optimization_and_out(self):
+ # multi_dot with four or more arguments uses the dynamic programming
# optimization and therefore deserves a separate test
A = np.random.random((6, 2))
B = np.random.random((2, 6))
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py
index 10b1b209c..bdce8b3bd 100644
--- a/numpy/ma/mrecords.py
+++ b/numpy/ma/mrecords.py
@@ -667,7 +667,7 @@ def openfile(fname):
raise NotImplementedError("Wow, binary file")
-def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
+def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='',
varnames=None, vartypes=None):
"""
Creates a mrecarray from data stored in the file `filename`.
@@ -676,7 +676,7 @@ def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
----------
fname : {file name/handle}
Handle of an opened file.
- delimitor : {None, string}, optional
+ delimiter : {None, string}, optional
Alphanumeric character used to separate columns in the file.
If None, any (group of) whitespace string(s) will be used.
commentchar : {'#', string}, optional
@@ -699,14 +699,14 @@ def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
while True:
line = ftext.readline()
firstline = line[:line.find(commentchar)].strip()
- _varnames = firstline.split(delimitor)
+ _varnames = firstline.split(delimiter)
if len(_varnames) > 1:
break
if varnames is None:
varnames = _varnames
# Get the data.
- _variables = masked_array([line.strip().split(delimitor) for line in ftext
+ _variables = masked_array([line.strip().split(delimiter) for line in ftext
if line[0] != commentchar and len(line) > 1])
(_, nfields) = _variables.shape
ftext.close()
diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi
index 92d5afb89..cdd5347d6 100644
--- a/numpy/ma/mrecords.pyi
+++ b/numpy/ma/mrecords.pyi
@@ -78,7 +78,7 @@ def fromrecords(
def fromtextfile(
fname,
- delimitor=...,
+ delimiter=...,
commentchar=...,
missingchar=...,
varnames=...,
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 2fd353d23..7e9522b3a 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -1071,7 +1071,7 @@ class TestMaskedArrayArithmetic:
assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])
def test_mixed_arithmetic(self):
- # Tests mixed arithmetics.
+ # Tests mixed arithmetic.
na = np.array([1])
ma = array([1])
assert_(isinstance(na + ma, MaskedArray))
@@ -1084,7 +1084,7 @@ class TestMaskedArrayArithmetic:
assert_equal(getmaskarray(2 / a), [1, 0, 1])
def test_masked_singleton_arithmetic(self):
- # Tests some scalar arithmetics on MaskedArrays.
+ # Tests some scalar arithmetic on MaskedArrays.
# Masked singleton should remain masked no matter what
xm = array(0, mask=1)
assert_((1 / array(0)).mask)
@@ -1804,7 +1804,7 @@ class TestMaskedArrayArithmetic:
assert_equal(test.mask, [[False, True],
[False, True]])
- def test_numpyarithmetics(self):
+ def test_numpyarithmetic(self):
# Check that the mask is not back-propagated when using numpy functions
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
@@ -2479,8 +2479,8 @@ class TestUfuncs:
# also check that allclose uses ma ufuncs, to avoid warning
allclose(m, 0.5)
-class TestMaskedArrayInPlaceArithmetics:
- # Test MaskedArray Arithmetics
+class TestMaskedArrayInPlaceArithmetic:
+ # Test MaskedArray Arithmetic
def setup(self):
x = arange(10)
@@ -3464,7 +3464,7 @@ class TestMaskedArrayMethods:
# Test sort on dtype with subarray (gh-8069)
# Just check that the sort does not error, structured array subarrays
# are treated as byte strings and that leads to differing behavior
- # depending on endianess and `endwith`.
+ # depending on endianness and `endwith`.
dt = np.dtype([('v', int, 2)])
a = a.view(dt)
test = sort(a)
diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py
index 27df519d2..4b2c01df9 100644
--- a/numpy/ma/tests/test_mrecords.py
+++ b/numpy/ma/tests/test_mrecords.py
@@ -468,7 +468,7 @@ class TestMRecordsImport:
with temppath() as path:
with open(path, 'w') as f:
f.write(fcontent)
- mrectxt = fromtextfile(path, delimitor=',', varnames='ABCDEFG')
+ mrectxt = fromtextfile(path, delimiter=',', varnames='ABCDEFG')
assert_(isinstance(mrectxt, MaskedRecords))
assert_equal(mrectxt.F, [1, 1, 1, 1])
assert_equal(mrectxt.E._mask, [1, 1, 1, 1])
diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py
index 4b4361163..5a3addf4c 100644
--- a/numpy/polynomial/__init__.py
+++ b/numpy/polynomial/__init__.py
@@ -164,7 +164,7 @@ def set_default_printstyle(style):
1.0 + 2.0 x**1 + 3.0 x**2
>>> print(c)
1.0 + 2.0 T_1(x) + 3.0 T_2(x)
- >>> # Formatting supercedes all class/package-level defaults
+ >>> # Formatting supersedes all class/package-level defaults
>>> print(f"{p:unicode}")
1.0 + 2.0·x¹ + 3.0·x²
"""
diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py
index 5525b232b..4b9f7c661 100644
--- a/numpy/polynomial/_polybase.py
+++ b/numpy/polynomial/_polybase.py
@@ -344,7 +344,7 @@ class ABCPolyBase(abc.ABC):
# Polynomial coefficient
# The coefficient array can be an object array with elements that
# will raise a TypeError with >= 0 (e.g. strings or Python
- # complex). In this case, represent the coeficient as-is.
+ # complex). In this case, represent the coefficient as-is.
try:
if coef >= 0:
next_term = f"+ {coef}"
diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py
index 210000ec4..8288c6120 100644
--- a/numpy/polynomial/chebyshev.py
+++ b/numpy/polynomial/chebyshev.py
@@ -131,9 +131,9 @@ chebtrim = pu.trimcoef
#
def _cseries_to_zseries(c):
- """Covert Chebyshev series to z-series.
+ """Convert Chebyshev series to z-series.
- Covert a Chebyshev series to the equivalent z-series. The result is
+ Convert a Chebyshev series to the equivalent z-series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
@@ -156,9 +156,9 @@ def _cseries_to_zseries(c):
def _zseries_to_cseries(zs):
- """Covert z-series to a Chebyshev series.
+ """Convert z-series to a Chebyshev series.
- Covert a z series to the equivalent Chebyshev series. The result is
+ Convert a z series to the equivalent Chebyshev series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index d3b6432dc..72d068e31 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -1030,7 +1030,7 @@ def lagval3d(x, y, z, c):
Returns
-------
values : ndarray, compatible object
- The values of the multidimension polynomial on points formed with
+ The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index 60b6bfc72..8db1f0269 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -3624,7 +3624,7 @@ cdef class Generator:
from numpy.linalg import cholesky
l = cholesky(cov)
- # make sure check_valid is ignored whe method == 'cholesky'
+ # make sure check_valid is ignored when method == 'cholesky'
# since the decomposition will have failed if cov is not valid.
if check_valid != 'ignore' and method != 'cholesky':
if check_valid != 'warn' and check_valid != 'raise':
diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py
index 2bea3be86..81ab1afad 100644
--- a/numpy/typing/__init__.py
+++ b/numpy/typing/__init__.py
@@ -154,7 +154,7 @@ else:
__path__: List[str]
-@final # Dissallow the creation of arbitrary `NBitBase` subclasses
+@final # Disallow the creation of arbitrary `NBitBase` subclasses
class NBitBase:
"""
An object representing `numpy.number` precision during static type checking.
diff --git a/numpy/typing/_generic_alias.py b/numpy/typing/_generic_alias.py
index 8d65ef855..5ad5e580c 100644
--- a/numpy/typing/_generic_alias.py
+++ b/numpy/typing/_generic_alias.py
@@ -51,7 +51,7 @@ def _parse_parameters(args: Iterable[Any]) -> Generator[TypeVar, None, None]:
def _reconstruct_alias(alias: _T, parameters: Iterator[TypeVar]) -> _T:
- """Recursivelly replace all typevars with those from `parameters`.
+ """Recursively replace all typevars with those from `parameters`.
Helper function for `_GenericAlias.__getitem__`.
diff --git a/numpy/typing/tests/data/fail/bitwise_ops.py b/numpy/typing/tests/data/fail/bitwise_ops.py
index 8a8f89755..ee9090007 100644
--- a/numpy/typing/tests/data/fail/bitwise_ops.py
+++ b/numpy/typing/tests/data/fail/bitwise_ops.py
@@ -16,5 +16,5 @@ u8 & f8 # E: No overload variant
~f8 # E: Unsupported operand type
# mypy's error message for `NoReturn` is unfortunately pretty bad
-# TODO: Reenable this once we add support for numerical precision for `number`s
+# TODO: Re-enable this once we add support for numerical precision for `number`s
# a = u8 | 0 # E: Need type annotation
diff --git a/numpy/typing/tests/data/fail/numerictypes.py b/numpy/typing/tests/data/fail/numerictypes.py
index 9a81cd9dc..a5c2814ef 100644
--- a/numpy/typing/tests/data/fail/numerictypes.py
+++ b/numpy/typing/tests/data/fail/numerictypes.py
@@ -1,6 +1,6 @@
import numpy as np
-# Techincally this works, but probably shouldn't. See
+# Technically this works, but probably shouldn't. See
#
# https://github.com/numpy/numpy/issues/16366
#
diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py
index 243caf229..85965e0de 100644
--- a/numpy/typing/tests/data/pass/simple.py
+++ b/numpy/typing/tests/data/pass/simple.py
@@ -47,7 +47,7 @@ np.dtype(object_dtype)
np.dtype((np.int32, (np.int8, 4)))
-# Dtype comparision
+# Dtype comparison
np.dtype(float) == float
np.dtype(float) != np.float64
np.dtype(float) < None
diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py
index 35558c880..81863c780 100644
--- a/numpy/typing/tests/test_typing.py
+++ b/numpy/typing/tests/test_typing.py
@@ -36,7 +36,7 @@ OUTPUT_MYPY: Dict[str, List[str]] = {}
def _key_func(key: str) -> str:
- """Split at the first occurance of the ``:`` character.
+ """Split at the first occurrence of the ``:`` character.
Windows drive-letters (*e.g.* ``C:``) are ignored herein.
"""
@@ -246,8 +246,8 @@ def _parse_reveals(file: IO[str]) -> List[str]:
comments_array = np.char.partition(string.split("\n"), sep=" # E: ")[:, 2]
comments = "/n".join(comments_array)
- # Only search for the `{*}` pattern within comments,
- # otherwise there is the risk of accidently grabbing dictionaries and sets
+ # Only search for the `{*}` pattern within comments, otherwise
+ # there is the risk of accidentally grabbing dictionaries and sets
key_set = set(re.findall(r"\{(.*?)\}", comments))
kwargs = {
k: FORMAT_DICT.get(k, f"<UNRECOGNIZED FORMAT KEY {k!r}>") for k in key_set
diff --git a/pavement.py b/pavement.py
index 43ed14a51..6fdaae975 100644
--- a/pavement.py
+++ b/pavement.py
@@ -168,7 +168,7 @@ def compute_sha256(idirs):
def write_release_task(options, filename='README'):
"""Append hashes of release files to release notes.
- This appends file hashes to the release notes ane creates
+ This appends file hashes to the release notes and creates
four README files of the result in various formats:
- README.rst
diff --git a/setup.cfg b/setup.cfg
index 5bca14ba0..2dbdf0566 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,3 +1,6 @@
+[codespell]
+skip = *-changelog.rst,*-notes.rst,neps,f2c_blas.c,f2c_c_lapack.c,f2c_d_lapack.c,f2c_s_lapack.c,f2c_z_lapack.c
+
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
diff --git a/tools/gitpod/gitpod.Dockerfile b/tools/gitpod/gitpod.Dockerfile
index 7791df191..538963bc0 100644
--- a/tools/gitpod/gitpod.Dockerfile
+++ b/tools/gitpod/gitpod.Dockerfile
@@ -39,7 +39,7 @@ RUN conda activate ${CONDA_ENV} && \
ccache -s
# Gitpod will load the repository into /workspace/numpy. We remove the
-# directoy from the image to prevent conflicts
+# directory from the image to prevent conflicts
RUN rm -rf ${WORKSPACE}
# -----------------------------------------------------------------------------