author    Logan Thomas <logan.thomas005@gmail.com>  2022-04-10 04:57:42 -0500
committer GitHub <noreply@github.com>  2022-04-10 11:57:42 +0200
commit    b2e7534466abd6eded6b4d154fa0ea2a74369607 (patch)
tree      314f58b80aad45b514c438515feeff26425763a6
parent    b1b21a9e67699986e566a2ef42938a2c5abb2cb7 (diff)
download  numpy-b2e7534466abd6eded6b4d154fa0ea2a74369607.tar.gz
DOC: various spell checks and typo fixes (#21314)
* DOC: contigous -> contiguous
* DOC: enlongated -> elongated
* DOC: thuse -> thus
* DOC: quantityt -> quantity
* DOC: suppled -> supplied
* DOC: intgrally -> integrally
* DOC: assignnent -> assignment
* DOC: homoegeneous -> homogeneous
* DOC: interpereted -> interpreted
* DOC: optimised -> optimized
* DOC: Advantanges -> Advantages
* DOC: realised -> realized
* DOC: parametrizing -> parameterizing
* DOC: realised -> realized
* DOC: intrisics -> intrinsics
* DOC: ablility -> ability
* DOC: intrisic -> intrinsic
* DOC: unversal -> universal
* DOC: machnisms -> mechanisms
* DOC: specfiy -> specify
* DOC: exclution -> exclusion
* DOC: optimzations -> optimizations
* DOC: declrations -> declarations
* DOC: auto-gernreated -> auto-generated
* DOC: it highely recomaned -> it is highly recommended
* DOC: exectuing -> executing
* DOC: strectched -> stretched
* DOC: foriegn -> foreign
* DOC: indeded -> intended
* DOC: multimdimensional -> multidimensional
* DOC: supserseded -> superseded
* DOC: generalisation -> generalization
* FIX: whitespace before comma
-rw-r--r--  benchmarks/benchmarks/bench_linalg.py               50
-rw-r--r--  doc/C_STYLE_GUIDE.rst.txt                            2
-rw-r--r--  doc/EXAMPLE_DOCSTRING.rst.txt                        2
-rw-r--r--  doc/neps/nep-0002-warnfix.rst                        2
-rw-r--r--  doc/neps/nep-0009-structured_array_extensions.rst    2
-rw-r--r--  doc/neps/nep-0012-missing-data.rst                   2
-rw-r--r--  doc/neps/nep-0017-split-out-maskedarray.rst          2
-rw-r--r--  doc/neps/nep-0024-missing-data-2.rst                 2
-rw-r--r--  doc/neps/nep-0027-zero-rank-arrarys.rst              4
-rw-r--r--  doc/neps/nep-0031-uarray.rst                        16
-rw-r--r--  doc/neps/nep-0038-SIMD-optimizations.rst            16
-rw-r--r--  doc/neps/tools/build_index.py                        2
-rw-r--r--  doc/source/dev/howto-docs.rst                        2
-rw-r--r--  doc/source/glossary.rst                             16
-rw-r--r--  doc/source/reference/random/new-or-different.rst     2
-rw-r--r--  doc/source/reference/simd/how-it-works.rst           8
-rw-r--r--  doc/source/user/basics.broadcasting.rst             14
-rw-r--r--  doc/source/user/basics.interoperability.rst         14
-rw-r--r--  doc/source/user/basics.rec.rst                       4
19 files changed, 81 insertions, 81 deletions
diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py
index 5ed5b6eec..02e657668 100644
--- a/benchmarks/benchmarks/bench_linalg.py
+++ b/benchmarks/benchmarks/bench_linalg.py
@@ -117,11 +117,11 @@ class Einsum(Benchmark):
self.two_dim = np.arange(240000, dtype=dtype).reshape(400, 600)
self.three_dim_small = np.arange(10000, dtype=dtype).reshape(10,100,10)
self.three_dim = np.arange(24000, dtype=dtype).reshape(20, 30, 40)
- # non_contigous arrays
- self.non_contigous_dim1_small = np.arange(1, 80, 2, dtype=dtype)
- self.non_contigous_dim1 = np.arange(1, 4000, 2, dtype=dtype)
- self.non_contigous_dim2 = np.arange(1, 2400, 2, dtype=dtype).reshape(30, 40)
- self.non_contigous_dim3 = np.arange(1, 48000, 2, dtype=dtype).reshape(20, 30, 40)
+ # non_contiguous arrays
+ self.non_contiguous_dim1_small = np.arange(1, 80, 2, dtype=dtype)
+ self.non_contiguous_dim1 = np.arange(1, 4000, 2, dtype=dtype)
+ self.non_contiguous_dim2 = np.arange(1, 2400, 2, dtype=dtype).reshape(30, 40)
+ self.non_contiguous_dim3 = np.arange(1, 48000, 2, dtype=dtype).reshape(20, 30, 40)
# outer(a,b): trigger sum_of_products_contig_stride0_outcontig_two
def time_einsum_outer(self, dtype):
@@ -130,7 +130,7 @@ class Einsum(Benchmark):
# multiply(a, b):trigger sum_of_products_contig_two
def time_einsum_multiply(self, dtype):
np.einsum("..., ...", self.two_dim_small, self.three_dim , optimize=True)
-
+
# sum and multiply:trigger sum_of_products_contig_stride0_outstride0_two
def time_einsum_sum_mul(self, dtype):
np.einsum(",i...->", 300, self.three_dim_small, optimize=True)
@@ -138,11 +138,11 @@ class Einsum(Benchmark):
# sum and multiply:trigger sum_of_products_stride0_contig_outstride0_two
def time_einsum_sum_mul2(self, dtype):
np.einsum("i...,->", self.three_dim_small, 300, optimize=True)
-
+
# scalar mul: trigger sum_of_products_stride0_contig_outcontig_two
def time_einsum_mul(self, dtype):
np.einsum("i,->i", self.one_dim_big, 300, optimize=True)
-
+
# trigger contig_contig_outstride0_two
def time_einsum_contig_contig(self, dtype):
np.einsum("ji,i->", self.two_dim, self.one_dim_small, optimize=True)
@@ -151,30 +151,30 @@ class Einsum(Benchmark):
def time_einsum_contig_outstride0(self, dtype):
np.einsum("i->", self.one_dim_big, optimize=True)
- # outer(a,b): non_contigous arrays
+ # outer(a,b): non_contiguous arrays
def time_einsum_noncon_outer(self, dtype):
- np.einsum("i,j", self.non_contigous_dim1, self.non_contigous_dim1, optimize=True)
+ np.einsum("i,j", self.non_contiguous_dim1, self.non_contiguous_dim1, optimize=True)
- # multiply(a, b):non_contigous arrays
+ # multiply(a, b):non_contiguous arrays
def time_einsum_noncon_multiply(self, dtype):
- np.einsum("..., ...", self.non_contigous_dim2, self.non_contigous_dim3 , optimize=True)
-
- # sum and multiply:non_contigous arrays
+ np.einsum("..., ...", self.non_contiguous_dim2, self.non_contiguous_dim3, optimize=True)
+
+ # sum and multiply:non_contiguous arrays
def time_einsum_noncon_sum_mul(self, dtype):
- np.einsum(",i...->", 300, self.non_contigous_dim3, optimize=True)
+ np.einsum(",i...->", 300, self.non_contiguous_dim3, optimize=True)
- # sum and multiply:non_contigous arrays
+ # sum and multiply:non_contiguous arrays
def time_einsum_noncon_sum_mul2(self, dtype):
- np.einsum("i...,->", self.non_contigous_dim3, 300, optimize=True)
-
- # scalar mul: non_contigous arrays
+ np.einsum("i...,->", self.non_contiguous_dim3, 300, optimize=True)
+
+ # scalar mul: non_contiguous arrays
def time_einsum_noncon_mul(self, dtype):
- np.einsum("i,->i", self.non_contigous_dim1, 300, optimize=True)
-
- # contig_contig_outstride0_two: non_contigous arrays
+ np.einsum("i,->i", self.non_contiguous_dim1, 300, optimize=True)
+
+ # contig_contig_outstride0_two: non_contiguous arrays
def time_einsum_noncon_contig_contig(self, dtype):
- np.einsum("ji,i->", self.non_contigous_dim2, self.non_contigous_dim1_small, optimize=True)
+ np.einsum("ji,i->", self.non_contiguous_dim2, self.non_contiguous_dim1_small, optimize=True)
- # sum_of_products_contig_outstride0_one:non_contigous arrays
+ # sum_of_products_contig_outstride0_one:non_contiguous arrays
def time_einsum_noncon_contig_outstride0(self, dtype):
- np.einsum("i->", self.non_contigous_dim1, optimize=True)
+ np.einsum("i->", self.non_contiguous_dim1, optimize=True)
diff --git a/doc/C_STYLE_GUIDE.rst.txt b/doc/C_STYLE_GUIDE.rst.txt
index 4e2f27fbb..60d2d7383 100644
--- a/doc/C_STYLE_GUIDE.rst.txt
+++ b/doc/C_STYLE_GUIDE.rst.txt
@@ -1,3 +1,3 @@
-The "NumPy C Style Guide" at this page has been supserseded by
+The "NumPy C Style Guide" at this page has been superseded by
"NEP 45 — C Style Guide" at https://numpy.org/neps/nep-0045-c_style_guide.html
diff --git a/doc/EXAMPLE_DOCSTRING.rst.txt b/doc/EXAMPLE_DOCSTRING.rst.txt
index 55294f656..1de0588ec 100644
--- a/doc/EXAMPLE_DOCSTRING.rst.txt
+++ b/doc/EXAMPLE_DOCSTRING.rst.txt
@@ -9,7 +9,7 @@ multivariate_normal(mean, cov[, shape])
Draw samples from a multivariate normal distribution.
The multivariate normal, multinormal or Gaussian distribution is a
-generalisation of the one-dimensional normal distribution to higher
+generalization of the one-dimensional normal distribution to higher
dimensions.
Such a distribution is specified by its mean and covariance matrix,
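
For reference, sampling such a distribution with the modern Generator API looks roughly like this (a sketch assuming `numpy.random.default_rng`; the docstring above describes the legacy function):

    import numpy as np

    rng = np.random.default_rng(seed=0)
    mean = [0.0, 0.0]
    cov = [[1.0, 0.5],
           [0.5, 2.0]]   # must be symmetric positive semi-definite

    samples = rng.multivariate_normal(mean, cov, size=1000)
    print(samples.shape)               # (1000, 2)
    print(np.cov(samples.T).round(1))  # approaches cov as size grows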
diff --git a/doc/neps/nep-0002-warnfix.rst b/doc/neps/nep-0002-warnfix.rst
index a1138b2f1..1608998a6 100644
--- a/doc/neps/nep-0002-warnfix.rst
+++ b/doc/neps/nep-0002-warnfix.rst
@@ -76,7 +76,7 @@ expanded to::
int foo(int * __NPY_UNUSED_TAGGEDdummy __COMP_NPY_UNUSED)
Thus avoiding any accidental use of the variable. The mangling is pure C, and
-thuse portable. The per-variable warning disabling is compiler specific.
+thus portable. The per-variable warning disabling is compiler specific.
signed/unsigned comparison
--------------------------
diff --git a/doc/neps/nep-0009-structured_array_extensions.rst b/doc/neps/nep-0009-structured_array_extensions.rst
index cd6c3f6c3..5912b268b 100644
--- a/doc/neps/nep-0009-structured_array_extensions.rst
+++ b/doc/neps/nep-0009-structured_array_extensions.rst
@@ -9,7 +9,7 @@ NEP 9 — Structured array extensions
1. Create with-style context that makes "named-columns" available as names in the namespace.
with np.columns(array):
- price = unit * quantityt
+ price = unit * quantity
2. Allow structured arrays to be sliced by their column (i.e. one additional indexing option for structured arrays) so that a[:4, 'foo':'bar'] would be allowed.
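
The `'foo':'bar'` slice syntax above is a proposal; in released NumPy, named columns are reached by field indexing. A short sketch of the existing behavior:

    import numpy as np

    a = np.array([(1, 2.0), (3, 4.0)],
                 dtype=[('foo', 'i4'), ('bar', 'f8')])

    print(a['foo'])           # single column: [1 3]
    print(a[['foo', 'bar']])  # multi-field index selects several columns
    print(a[:1]['foo'])       # rows first, then a column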
diff --git a/doc/neps/nep-0012-missing-data.rst b/doc/neps/nep-0012-missing-data.rst
index 4775ea18b..c896c6b6a 100644
--- a/doc/neps/nep-0012-missing-data.rst
+++ b/doc/neps/nep-0012-missing-data.rst
@@ -428,7 +428,7 @@ New functions added to the ndarray are::
arr.copy(..., replacena=np.NA)
Modification to the copy function which replaces NA values,
either masked or with the NA bitpattern, with the 'replacena='
- parameter suppled. When 'replacena' isn't NA, the copied
+ parameter supplied. When 'replacena' isn't NA, the copied
array is unmasked and has the 'NA' part stripped from the
parameterized dtype ('NA[f8]' becomes just 'f8').
diff --git a/doc/neps/nep-0017-split-out-maskedarray.rst b/doc/neps/nep-0017-split-out-maskedarray.rst
index faad68828..fac05e256 100644
--- a/doc/neps/nep-0017-split-out-maskedarray.rst
+++ b/doc/neps/nep-0017-split-out-maskedarray.rst
@@ -69,7 +69,7 @@ how to modify code to use `maskedarray`.
After two releases, `np.ma` will be removed entirely. In order to obtain
`np.ma`, a user will install it via `pip install` or via their package
manager. Subsequently, `importing maskedarray` on a version of NumPy that
-includes it intgrally will raise an `ImportError`.
+includes it integrally will raise an `ImportError`.
Documentation
`````````````
diff --git a/doc/neps/nep-0024-missing-data-2.rst b/doc/neps/nep-0024-missing-data-2.rst
index c0e2d2ce7..ef6e628b5 100644
--- a/doc/neps/nep-0024-missing-data-2.rst
+++ b/doc/neps/nep-0024-missing-data-2.rst
@@ -193,7 +193,7 @@ is obvious in the NA case::
>>> na_arr
array([1., 2., NA], dtype='NA[<f8]')
-Direct assignnent in the masked case is magic and confusing, and so happens only
+Direct assignment in the masked case is magic and confusing, and so happens only
via the mask::
>>> masked_array = np.array([1.0, 2.0, 7.0], masked=True)
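
The `masked=True` constructor above is part of the proposal, not released NumPy. With today's `numpy.ma`, the equivalent mask-mediated assignment is roughly:

    import numpy as np
    import numpy.ma as ma

    arr = ma.array([1.0, 2.0, 7.0], mask=[False, False, True])
    print(arr)         # [1.0 2.0 --]

    # Masking goes through the mask machinery, not a bare value assignment:
    arr[1] = ma.masked
    print(arr.mask)    # [False  True  True]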
diff --git a/doc/neps/nep-0027-zero-rank-arrarys.rst b/doc/neps/nep-0027-zero-rank-arrarys.rst
index eef4bcacc..ed51f3a13 100644
--- a/doc/neps/nep-0027-zero-rank-arrarys.rst
+++ b/doc/neps/nep-0027-zero-rank-arrarys.rst
@@ -105,7 +105,7 @@ arrays to scalars were summarized as follows:
- This results in a special-case checking that is not
pleasant. Fundamentally it lets the user believe that
- somehow multidimensional homoegeneous arrays
+ somehow multidimensional homogeneous arrays
are something like Python lists (which except for
Object arrays they are not).
@@ -166,7 +166,7 @@ Alexander started a `Jan 2006 discussion`_ on scipy-dev
with the following proposal:
... it may be reasonable to allow ``a[...]``. This way
- ellipsis can be interpereted as any number of ``:`` s including zero.
+ ellipsis can be interpreted as any number of ``:`` s including zero.
Another subscript operation that makes sense for scalars would be
``a[...,newaxis]`` or even ``a[{newaxis, }* ..., {newaxis,}*]``, where
``{newaxis,}*`` stands for any number of comma-separated newaxis tokens.
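
Current NumPy accepts both forms, so the distinction is easy to check directly (a quick sketch):

    import numpy as np

    a = np.array(5.0)                # zero-rank array, shape ()
    print(repr(a[...]))              # array(5.): ellipsis yields a 0-d array view
    print(type(a[()]))               # <class 'numpy.float64'>: empty tuple yields the scalar
    print(a[..., np.newaxis].shape)  # (1,): newaxis works as proposed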
diff --git a/doc/neps/nep-0031-uarray.rst b/doc/neps/nep-0031-uarray.rst
index b746c267d..bda35d426 100644
--- a/doc/neps/nep-0031-uarray.rst
+++ b/doc/neps/nep-0031-uarray.rst
@@ -102,7 +102,7 @@ Usage and Impact
This NEP allows for global and context-local overrides, as well as
automatic overrides a-la ``__array_function__``.
-Here are some use-cases this NEP would enable, besides the
+Here are some use-cases this NEP would enable, besides the
first one stated in the motivation section:
The first is allowing alternate dtypes to return their
@@ -114,7 +114,7 @@ respective arrays.
x = unp.ones((5, 5), dtype=xnd_dtype) # Or torch dtype
The second is allowing overrides for parts of the API.
-This is to allow alternate and/or optimised implementations
+This is to allow alternate and/or optimized implementations
for ``np.linalg``, BLAS, and ``np.random``.
.. code:: python
@@ -126,7 +126,7 @@ for ``np.linalg``, BLAS, and ``np.random``.
np.set_global_backend(pyfftw)
# Uses pyfftw without monkeypatching
- np.fft.fft(numpy_array)
+ np.fft.fft(numpy_array)
with np.set_backend(pyfftw) # Or mkl_fft, or numpy
# Uses the backend you specified
@@ -200,10 +200,10 @@ GitHub workflow. There are a few reasons for this:
The reason for this is that there may exist functions in the in these
submodules that need backends, even for ``numpy.ndarray`` inputs.
-Advantanges of ``unumpy`` over other solutions
+Advantages of ``unumpy`` over other solutions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-``unumpy`` offers a number of advantanges over the approach of defining a new
+``unumpy`` offers a number of advantages over the approach of defining a new
protocol for every problem encountered: Whenever there is something requiring
an override, ``unumpy`` will be able to offer a unified API with very minor
changes. For example:
@@ -313,7 +313,7 @@ This is different from monkeypatching in a few different ways:
All this isn't possible at all with ``__array_function__`` or
``__array_ufunc__``.
-It has been formally realised (at least in part) that a backend system is
+It has been formally realized (at least in part) that a backend system is
needed for this, in the `NumPy roadmap <https://numpy.org/neps/roadmap.html#other-functionality>`_.
For ``numpy.random``, it's still necessary to make the C-API fit the one
@@ -347,7 +347,7 @@ dispatchable.
The need for an opt-in module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The need for an opt-in module is realised because of a few reasons:
+The need for an opt-in module is realized because of a few reasons:
* There are parts of the API (like `numpy.asarray`) that simply cannot be
overridden due to incompatibility concerns with C/Cython extensions, however,
@@ -356,7 +356,7 @@ The need for an opt-in module is realised because of a few reasons:
as those mentioned above.
NEP 18 notes that this may require maintenance of two separate APIs. However,
-this burden may be lessened by, for example, parametrizing all tests over
+this burden may be lessened by, for example, parameterizing all tests over
``numpy.overridable`` separately via a fixture. This also has the side-effect
of thoroughly testing it, unlike ``__array_function__``. We also feel that it
provides an opportunity to separate the NumPy API contract properly from the
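
All names in the following are hypothetical; it is only a minimal illustration of the context-local override mechanism the NEP describes, not unumpy's actual API:

    import contextvars
    from contextlib import contextmanager

    # Hypothetical backend registry: one context-local variable.
    _backend = contextvars.ContextVar('backend', default='numpy')

    @contextmanager
    def set_backend(name):
        token = _backend.set(name)
        try:
            yield
        finally:
            _backend.reset(token)

    def fft(x):
        # A real multimethod would forward to the active backend here.
        return f"fft({x!r}) via {_backend.get()}"

    print(fft([1, 2]))          # default backend
    with set_backend('pyfftw'):
        print(fft([1, 2]))      # context-local override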
diff --git a/doc/neps/nep-0038-SIMD-optimizations.rst b/doc/neps/nep-0038-SIMD-optimizations.rst
index 2123c4f95..c7d9ce9d9 100644
--- a/doc/neps/nep-0038-SIMD-optimizations.rst
+++ b/doc/neps/nep-0038-SIMD-optimizations.rst
@@ -26,9 +26,9 @@ function that matches the run-time CPU info `is chosen`_ from the candidates.Thi
NEP proposes a mechanism to build on that for many more features and
architectures. The steps proposed are to:
-- Establish a set of well-defined, architecture-agnostic, universal intrisics
+- Establish a set of well-defined, architecture-agnostic, universal intrinsics
which capture features available across architectures.
-- Capture these universal intrisics in a set of C macros and use the macros
+- Capture these universal intrinsics in a set of C macros and use the macros
to build code paths for sets of features from the baseline up to the maximum
set of features available on that architecture. Offer these as a limited
number of compiled alternative code paths.
@@ -43,7 +43,7 @@ Traditionally NumPy has depended on compilers to generate optimal code
specifically for the target architecture.
However few users today compile NumPy locally for their machines. Most use the
binary packages which must provide run-time support for the lowest-common
-denominator CPU architecture. Thus NumPy cannot take advantage of
+denominator CPU architecture. Thus NumPy cannot take advantage of
more advanced features of their CPU processors, since they may not be available
on all users' systems.
@@ -124,7 +124,7 @@ Therefore, such code should only be added if it yields a significant
performance benefit. Assessing this performance benefit can be nontrivial.
To aid with this, the implementation for this NEP will add a way to select
which instruction sets can be used at *runtime* via environment variables.
-(name TBD). This ablility is critical for CI code verification.
+(name TBD). This ability is critical for CI code verification.
Diagnostics
@@ -153,7 +153,7 @@ SIMD loops for many ufuncs. These would likely be the first candidates
to be ported to universal intrinsics. The expectation is that the new
implementation may cause a regression in benchmarks, but not increase the
size of the binary. If the regression is not minimal, we may choose to keep
-the X86-specific code for that platform and use the universal intrisic code
+the X86-specific code for that platform and use the universal intrinsic code
for other platforms.
Any new PRs to implement ufuncs using intrinsics will be expected to use the
@@ -208,12 +208,12 @@ There should be no impact on backwards compatibility.
Detailed description
--------------------
-The CPU-specific are mapped to unversal intrinsics which are
+The CPU-specific are mapped to universal intrinsics which are
similar for all x86 SIMD variants, ARM SIMD variants etc. For example, the
NumPy universal intrinsic ``npyv_load_u32`` maps to:
* ``vld1q_u32`` for ARM based NEON
-* ``_mm256_loadu_si256`` for x86 based AVX2
+* ``_mm256_loadu_si256`` for x86 based AVX2
* ``_mm512_loadu_si512`` for x86 based AVX-512
Anyone writing a SIMD loop will use the ``npyv_load_u32`` macro instead of the
@@ -271,7 +271,7 @@ Current PRs:
The compile-time and runtime code infrastructure are supplied by the first PR.
The second adds a demonstration of use of the infrastructure for a loop. Once
-the NEP is approved, more work is needed to write loops using the machnisms
+the NEP is approved, more work is needed to write loops using the mechanisms
provided by the NEP.
Alternatives
diff --git a/doc/neps/tools/build_index.py b/doc/neps/tools/build_index.py
index 704da5a18..bcf414ddc 100644
--- a/doc/neps/tools/build_index.py
+++ b/doc/neps/tools/build_index.py
@@ -51,7 +51,7 @@ def nep_metadata():
if not tags['Title'].startswith(f'NEP {nr} — '):
raise RuntimeError(
f'Title for NEP {nr} does not start with "NEP {nr} — " '
- '(note that — here is a special, enlongated dash). Got: '
+ '(note that — here is a special, elongated dash). Got: '
f' {tags["Title"]!r}')
if tags['Status'] in ('Accepted', 'Rejected', 'Withdrawn'):
diff --git a/doc/source/dev/howto-docs.rst b/doc/source/dev/howto-docs.rst
index 93fec509c..ff4a9f6d5 100644
--- a/doc/source/dev/howto-docs.rst
+++ b/doc/source/dev/howto-docs.rst
@@ -315,7 +315,7 @@ Sub-config files can accept any of Doxygen_ `configuration options <https://www.
but do not override or re-initialize any configuration option,
rather only use the concatenation operator "+=". For example::
- # to specfiy certain headers
+ # to specify certain headers
INPUT += @CUR_DIR/header1.h \
@CUR_DIR/header2.h
# to add all headers in certain path
diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst
index bd75bf274..eebebd9dc 100644
--- a/doc/source/glossary.rst
+++ b/doc/source/glossary.rst
@@ -166,10 +166,10 @@ Glossary
array scalar
- An :doc:`array scalar <reference/arrays.scalars>` is an instance of the types/classes float32, float64,
- etc.. For uniformity in handling operands, NumPy treats a scalar as
- an array of zero dimension. In contrast, a 0-dimensional array is an :doc:`ndarray <reference/arrays.ndarray>` instance
- containing precisely one value.
+ An :doc:`array scalar <reference/arrays.scalars>` is an instance of the types/classes float32, float64,
+ etc.. For uniformity in handling operands, NumPy treats a scalar as
+ an array of zero dimension. In contrast, a 0-dimensional array is an :doc:`ndarray <reference/arrays.ndarray>` instance
+ containing precisely one value.
axis
@@ -286,7 +286,7 @@ Glossary
- it occupies an unbroken block of memory, and
- array elements with higher indexes occupy higher addresses (that
is, no :term:`stride` is negative).
-
+
There are two types of proper-contiguous NumPy arrays:
- Fortran-contiguous arrays refer to data that is stored column-wise,
@@ -296,8 +296,8 @@ Glossary
stored row-wise, i.e. the indexing of data as stored in memory
starts from the highest dimension.
- For one-dimensional arrays these notions coincide.
-
+ For one-dimensional arrays these notions coincide.
+
For example, a 2x2 array ``A`` is Fortran-contiguous if its elements are
stored in memory in the following order::
@@ -421,7 +421,7 @@ Glossary
both flatten an ndarray. ``ravel`` will return a view if possible;
``flatten`` always returns a copy.
- Flattening collapses a multimdimensional array to a single dimension;
+ Flattening collapses a multidimensional array to a single dimension;
details of how this is done (for instance, whether ``a[n+1]`` should be
the next row or next column) are parameters.
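
The view-versus-copy distinction is easy to verify; a short sketch using `np.shares_memory`:

    import numpy as np

    a = np.arange(6).reshape(2, 3)

    print(np.shares_memory(a, a.ravel()))    # True: contiguous, so a view
    print(np.shares_memory(a, a.flatten()))  # False: always a copy

    # A non-contiguous input forces ravel to copy as well:
    print(np.shares_memory(a, a[:, ::2].ravel()))  # False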
diff --git a/doc/source/reference/random/new-or-different.rst b/doc/source/reference/random/new-or-different.rst
index 7a206a2ce..8f4a70540 100644
--- a/doc/source/reference/random/new-or-different.rst
+++ b/doc/source/reference/random/new-or-different.rst
@@ -40,7 +40,7 @@ Feature Older Equivalent Notes
supported.
------------------ -------------------- -------------
``integers`` ``randint``, Use the ``endpoint`` kwarg to adjust
- ``random_integers`` the inclusion or exclution of the
+ ``random_integers`` the inclusion or exclusion of the
``high`` interval endpoint
================== ==================== =============
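
Concretely, the `endpoint` keyword covers both legacy conventions; a sketch with the Generator API:

    import numpy as np

    rng = np.random.default_rng(seed=0)

    # randint-style half-open interval [1, 6):
    print(rng.integers(1, 6, size=5))

    # random_integers-style closed interval [1, 6]:
    print(rng.integers(1, 6, size=5, endpoint=True))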
diff --git a/doc/source/reference/simd/how-it-works.rst b/doc/source/reference/simd/how-it-works.rst
index a2882f484..19b3dba45 100644
--- a/doc/source/reference/simd/how-it-works.rst
+++ b/doc/source/reference/simd/how-it-works.rst
@@ -59,7 +59,7 @@ The compiler supports ``--cpu-baseline="sse sse2 sse3"`` and
// The header should be located at numpy/numpy/core/src/common/_cpu_dispatch.h
/**NOTE
** C definitions prefixed with "NPY_HAVE_" represent
- ** the required optimzations.
+ ** the required optimizations.
**
** C definitions prefixed with 'NPY__CPU_TARGET_' are protected and
** shouldn't be used by any NumPy C sources.
@@ -326,7 +326,7 @@ through ``--cpu-dispatch``, but it can also represent other options such as:
else { FN NPY_EXPAND(ARGS); }
// NumPy has a macro called 'NPY_CPU_DISPATCH_DECLARE' can be used
- // for forward declrations any kind of prototypes based on
+ // for forward declarations any kind of prototypes based on
// 'NPY__CPU_DISPATCH_CALL' and 'NPY__CPU_DISPATCH_BASELINE_CALL'.
// However in this example, we just handle it manually.
void simd_whoami(const char *extra_info);
@@ -335,10 +335,10 @@ through ``--cpu-dispatch``, but it can also represent other options such as:
void trigger_me(void)
{
- // bring the auto-gernreated config header
+ // bring the auto-generated config header
// which contains config macros 'NPY__CPU_DISPATCH_CALL' and
// 'NPY__CPU_DISPATCH_BASELINE_CALL'.
- // it highely recomaned to include the config header before exectuing
+ // it is highly recommended to include the config header before executing
// the dispatching macros in case if there's another header in the scope.
#include "hello.dispatch.h"
DISPATCH_CALL_ALL(simd_whoami, ("all"))
diff --git a/doc/source/user/basics.broadcasting.rst b/doc/source/user/basics.broadcasting.rst
index 5d20fd867..8e8add41e 100644
--- a/doc/source/user/basics.broadcasting.rst
+++ b/doc/source/user/basics.broadcasting.rst
@@ -6,7 +6,7 @@ Broadcasting
************
.. seealso::
- :class:`numpy.broadcast`
+ :class:`numpy.broadcast`
The term broadcasting describes how NumPy treats arrays with different
@@ -73,8 +73,8 @@ way left. Two dimensions are compatible when
2) one of them is 1
If these conditions are not met, a
-``ValueError: operands could not be broadcast together`` exception is
-thrown, indicating that the arrays have incompatible shapes. The size of
+``ValueError: operands could not be broadcast together`` exception is
+thrown, indicating that the arrays have incompatible shapes. The size of
the resulting array is the size that is not 1 along each axis of the inputs.
Arrays do not need to have the same *number* of dimensions. For example,
@@ -169,7 +169,7 @@ An example of broadcasting when a 1-d array is added to a 2-d array::
[21., 22., 23.],
[31., 32., 33.]])
>>> b = np.array([1.0, 2.0, 3.0, 4.0])
- >>> a + b
+ >>> a + b
Traceback (most recent call last):
ValueError: operands could not be broadcast together with shapes (4,3) (4,)
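
The same compatibility check can be made without allocating any data via `np.broadcast_shapes` (available since NumPy 1.20), for example:

    import numpy as np

    print(np.broadcast_shapes((4, 3), (3,)))    # (4, 3)
    print(np.broadcast_shapes((4, 3), (4, 1)))  # (4, 3)

    try:
        np.broadcast_shapes((4, 3), (4,))       # the failing case above
    except ValueError as e:
        print(e)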
@@ -178,7 +178,7 @@ In :ref:`broadcasting.figure-3`, an exception is raised because of the
incompatible shapes.
.. figure:: broadcasting_2.png
- :alt: A 1-d array with shape (3) is strectched to match the 2-d array of
+ :alt: A 1-d array with shape (3) is stretched to match the 2-d array of
shape (4, 3) it is being added to, and the result is a 2-d array of shape
(4, 3).
:name: broadcasting.figure-2
@@ -266,7 +266,7 @@ the shape of the ``codes`` array::
gymnast, marathon runner, basketball player, football
lineman and the athlete to be classified. Shortest distance
is found between the basketball player and the athlete
- to be classified.
+ to be classified.
:name: broadcasting.figure-5
*Figure 5*
@@ -281,7 +281,7 @@ are compared to a set of ``codes``. Consider this scenario::
Observation (2d array): 10 x 3
Codes (2d array): 5 x 3
- Diff (3d array): 5 x 10 x 3
+ Diff (3d array): 5 x 10 x 3
The three-dimensional array, ``diff``, is a consequence of broadcasting, not a
necessity for the calculation. Large data sets will generate a large
diff --git a/doc/source/user/basics.interoperability.rst b/doc/source/user/basics.interoperability.rst
index 853f324ba..668058a9c 100644
--- a/doc/source/user/basics.interoperability.rst
+++ b/doc/source/user/basics.interoperability.rst
@@ -21,7 +21,7 @@ functionality on top of the NumPy API.
Yet, users still want to work with these arrays using the familiar NumPy API and
re-use existing code with minimal (ideally zero) porting overhead. With this
goal in mind, various protocols are defined for implementations of
-multi-dimensional arrays with high-level APIs matching NumPy.
+multi-dimensional arrays with high-level APIs matching NumPy.
Broadly speaking, there are three groups of features used for interoperability
with NumPy:
@@ -55,7 +55,7 @@ describes its memory layout and NumPy does everything else (zero-copy if
possible). If that's not possible, the object itself is responsible for
returning a ``ndarray`` from ``__array__()``.
-:doc:`DLPack <dlpack:index>` is yet another protocol to convert foriegn objects
+:doc:`DLPack <dlpack:index>` is yet another protocol to convert foreign objects
to NumPy arrays in a language and device agnostic manner. NumPy doesn't implicitly
convert objects to ndarrays using DLPack. It provides the function
`numpy.from_dlpack` that accepts any object implementing the ``__dlpack__`` method
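
A minimal round trip, assuming a PyTorch build recent enough to implement `__dlpack__` (roughly torch >= 1.10):

    import numpy as np
    import torch  # assumption: PyTorch is installed

    t = torch.arange(4)
    a = np.from_dlpack(t)   # imports the tensor's buffer; zero-copy on CPU
    print(a, a.dtype)       # [0 1 2 3] int64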
@@ -88,7 +88,7 @@ data in place:
>>> class wrapper():
... pass
- ...
+ ...
>>> arr = np.array([1, 2, 3, 4])
>>> buf = arr.__array_interface__
>>> buf
@@ -170,7 +170,7 @@ We can apply ``f`` to a NumPy ndarray object directly:
21.1977562209304
We would like this function to work equally well with any NumPy-like array
-object.
+object.
NumPy allows a class to indicate that it would like to handle computations in a
custom-defined way through the following interfaces:
@@ -182,7 +182,7 @@ custom-defined way through the following interfaces:
As long as foreign objects implement the ``__array_ufunc__`` or
``__array_function__`` protocols, it is possible to operate on them without the
-need for explicit conversion.
+need for explicit conversion.
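
A minimal sketch of the first protocol: a wrapper that unwraps its inputs, runs the ufunc, and re-wraps the result.

    import numpy as np

    class Wrapped:
        def __init__(self, data):
            self.data = np.asarray(data)

        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            # Unwrap Wrapped inputs, execute the ufunc, re-wrap the result.
            args = [x.data if isinstance(x, Wrapped) else x for x in inputs]
            return Wrapped(getattr(ufunc, method)(*args, **kwargs))

    w = Wrapped([1.0, 2.0, 3.0])
    print(np.multiply(w, 2).data)   # [2. 4. 6.], no explicit conversion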
The ``__array_ufunc__`` protocol
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -324,7 +324,7 @@ explicit conversion:
[20.0855, 54.5982]], dtype=torch.float64)
Also, note that the return type of this function is compatible with the initial
-data type.
+data type.
.. admonition:: Warning
@@ -402,7 +402,7 @@ Example: Dask arrays
Dask is a flexible library for parallel computing in Python. Dask Array
implements a subset of the NumPy ndarray interface using blocked algorithms,
cutting up the large array into many small arrays. This allows computations on
-larger-than-memory arrays using multiple cores.
+larger-than-memory arrays using multiple cores.
Dask supports ``__array__()`` and ``__array_ufunc__``.
diff --git a/doc/source/user/basics.rec.rst b/doc/source/user/basics.rec.rst
index 812f54062..eec2394e9 100644
--- a/doc/source/user/basics.rec.rst
+++ b/doc/source/user/basics.rec.rst
@@ -1,7 +1,7 @@
.. _structured_arrays:
*****************
-Structured arrays
+Structured arrays
*****************
Introduction
@@ -472,7 +472,7 @@ missing.
Furthermore, numpy now provides a new function
:func:`numpy.lib.recfunctions.structured_to_unstructured` which is a safer
and more efficient alternative for users who wish to convert structured
- arrays to unstructured arrays, as the view above is often indeded to do.
+ arrays to unstructured arrays, as the view above is often intended to do.
This function allows safe conversion to an unstructured type taking into
account padding, often avoids a copy, and also casts the datatypes
as needed, unlike the view. Code such as:
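
A hedged sketch of calling the recommended function, with hypothetical field names (not the snippet the guide itself shows):

    import numpy as np
    from numpy.lib import recfunctions as rfn

    a = np.zeros(3, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    a['x'] = [1, 2, 3]

    u = rfn.structured_to_unstructured(a)  # safe, padding-aware conversion
    print(u.shape, u.dtype)                # (3, 3) float32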