author     Dimitris Apostolou <dimitris.apostolou@icloud.com>    2022-01-04 12:06:35 +0200
committer  GitHub <noreply@github.com>                           2022-01-04 05:06:35 -0500
commit     715a0bcce3316a3785eb41a5080853120f9e1f41
tree       7ac00f38e562d8c2d113ca8a30f861c10476d228
parent     d994989cda15fe92c10bd7638d98968b1dc17c05
Fix typos (PR# 1099)
Diffstat (limited to 'ppc_simd.h')
-rw-r--r--  ppc_simd.h  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/ppc_simd.h b/ppc_simd.h
index 1ed0b560..dc47bf2e 100644
--- a/ppc_simd.h
+++ b/ppc_simd.h
@@ -4,8 +4,8 @@
/// \brief Support functions for PowerPC and vector operations
/// \details This header provides an agnostic interface into Clang, GCC
/// and IBM XL C/C++ compilers modulo their different built-in functions
-/// for accessing vector intructions.
-/// \details The abstractions are necesssary to support back to GCC 4.8 and
+/// for accessing vector instructions.
+/// \details The abstractions are necessary to support back to GCC 4.8 and
/// XLC 11 and 12. GCC 4.8 and 4.9 are still popular, and they are the
/// default compiler for GCC112, GCC119 and others on the compile farm.
/// Older IBM XL C/C++ compilers also have the need due to lack of
@@ -300,7 +300,7 @@ inline T VecReverseBE(const T data)
/// of <tt>src</tt> is aligned. If unaligned it uses <tt>vec_lvsl</tt>,
/// <tt>vec_ld</tt>, <tt>vec_perm</tt> and <tt>src</tt>. The fixups using
/// <tt>vec_lvsl</tt> and <tt>vec_perm</tt> are relatively expensive so
-/// you should provide aligned memory adresses.
+/// you should provide aligned memory addresses.
/// \par Wraps
/// vec_ld, vec_lvsl, vec_perm
/// \sa VecLoad, VecLoadAligned
@@ -331,7 +331,7 @@ inline uint32x4_p VecLoad_ALTIVEC(const byte src[16])
/// of <tt>src</tt> is aligned. If unaligned it uses <tt>vec_lvsl</tt>,
/// <tt>vec_ld</tt>, <tt>vec_perm</tt> and <tt>src</tt>.
/// \details The fixups using <tt>vec_lvsl</tt> and <tt>vec_perm</tt> are
-/// relatively expensive so you should provide aligned memory adresses.
+/// relatively expensive so you should provide aligned memory addresses.
/// \par Wraps
/// vec_ld, vec_lvsl, vec_perm
/// \sa VecLoad, VecLoadAligned
@@ -806,7 +806,7 @@ inline uint32x4_p VecLoadBE(int off, const byte src[16])
/// \details VecStore_ALTIVEC() uses <tt>vec_st</tt> if the effective address
/// of <tt>dest</tt> is aligned, and uses <tt>vec_ste</tt> otherwise.
/// <tt>vec_ste</tt> is relatively expensive so you should provide aligned
-/// memory adresses.
+/// memory addresses.
/// \details VecStore_ALTIVEC() is used when POWER7 or above
/// and unaligned loads is not available.
/// \par Wraps
@@ -846,7 +846,7 @@ inline void VecStore_ALTIVEC(const T data, byte dest[16])
/// \details VecStore_ALTIVEC() uses <tt>vec_st</tt> if the effective address
/// of <tt>dest</tt> is aligned, and uses <tt>vec_ste</tt> otherwise.
/// <tt>vec_ste</tt> is relatively expensive so you should provide aligned
-/// memory adresses.
+/// memory addresses.
/// \details VecStore_ALTIVEC() is used when POWER7 or above
/// and unaligned loads is not available.
/// \par Wraps
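
Every hunk above patches the same recurring note in the documentation: the Altivec helpers are only cheap on 16-byte-aligned addresses, since an unaligned load falls back to vec_lvsl/vec_perm fixups and an unaligned store falls back to vec_ste. A minimal sketch of the round trip those comments describe, assuming a PowerPC build with Altivec enabled and the Crypto++ headers on the include path (CRYPTOPP_ALIGN_DATA comes from the library's config.h; the example is illustrative, not part of this commit):

    // Sketch: load/store round trip through the helpers documented
    // above. Assumes an Altivec-capable PowerPC target.
    #include "ppc_simd.h"
    #include <cstring>

    int main()
    {
        using namespace CryptoPP;

        // 16-byte alignment keeps VecLoad_ALTIVEC on the vec_ld path
        // and VecStore_ALTIVEC on the vec_st path, avoiding the
        // vec_lvsl/vec_perm and vec_ste fixups the comments warn about.
        CRYPTOPP_ALIGN_DATA(16) byte block[16];
        std::memset(block, 0xAA, 16);

        uint32x4_p v = VecLoad_ALTIVEC(block);
        VecStore_ALTIVEC(v, block);
        return 0;
    }

Passing an unaligned pointer still works, but each call then pays for the permute (load) or per-element store (store) fixup, which is why the patched comments keep steering callers toward aligned buffers.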