path: root/secblock.h
author     Jeffrey Walton <noloader@gmail.com>  2018-08-24 08:13:23 -0400
committer  GitHub <noreply@github.com>          2018-08-24 08:13:23 -0400
commit     1bbbfb6b7538e88f979c68aa70d6427ce2e98cc0 (patch)
tree       d96a2f70a7a526e09e42458593089d4429675052 /secblock.h
parent     243673c32acee63a7b0b23aeebac9b2ccfd80e7d (diff)
download   cryptopp-git-1bbbfb6b7538e88f979c68aa70d6427ce2e98cc0.tar.gz
Fix partial specializations for FixedSizeAllocatorWithCleanup (PR #710)
Commit afbd3e60f68f effectively treated a symptom and not the underlying problem. The problem was that linkers on 32-bit systems ignore the CRYPTOPP_ALIGN_DATA(16) passed down by the compiler and align to 8 bytes or less. We have to use Wei's original code in some places. That is not a bad thing, but the bit fiddling is something we would like to contain a little more by depending more on language or platform features. This commit keeps the earlier changes which improve the partial specializations, but fixes the 32-bit linker behavior by effectively reverting afbd3e60f68f and e054d36dc88d00. We also add more comments so the next person understands why things are done the way they are.
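
The fix relies on pointer arithmetic rather than a declarative 16-byte alignment attribute. The following standalone sketch (not part of the patch; the storage name and sizes are illustrative only) shows the round-up the patch depends on: given storage the toolchain aligns to only 8 bytes, adding (0 - address) % 16 bytes lands the pointer on a 16-byte boundary while consuming at most 8 bytes of slack.

// Standalone illustration, not Crypto++ code.
#include <cassert>
#include <cstddef>
#include <cstdio>

int main()
{
    // Hypothetical 8-byte aligned backing store with 8 bytes of slack,
    // standing in for CRYPTOPP_ALIGN_DATA(8) T m_array[S+PAD].
    alignas(8) unsigned char storage[32 + 8];

    // Round the address up to a multiple of 16. The adjustment
    // (0 - addr) % 16 is 0 or 8 when addr is already 8-byte aligned.
    size_t addr = (size_t)storage;
    unsigned char* aligned = storage + (0 - addr) % 16;

    assert(((size_t)aligned % 16) == 0);   // result is 16-byte aligned
    assert(aligned - storage <= 8);        // never uses more than the slack
    printf("adjustment: %u bytes\n", (unsigned)(aligned - storage));
    return 0;
}
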
Diffstat (limited to 'secblock.h')
-rw-r--r--   secblock.h   43
1 file changed, 33 insertions, 10 deletions
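
The hunk below also pads the array by PAD elements so the round-up never runs past the end of the array. A second standalone sketch (again illustrative; PadElements is a hypothetical helper mirroring the enum the hunk introduces) checks that the element-count rule yields at least 8 bytes of slack for every element size, unlike the earlier 8/sizeof(T) expression, which is 0 for sizeof(T) > 8.

// Standalone illustration, not Crypto++ code.
#include <cstdio>

// Same selection the diff introduces, written over a runtime size.
static unsigned PadElements(unsigned q)
{
    return (q >= 8) ? 1 : (q >= 4) ? 2 : (q >= 2) ? 4 : 8;
}

int main()
{
    // Element sizes a SecBlock might see: byte, word16, word32, word64, larger.
    const unsigned sizes[] = {1, 2, 4, 8, 16, 64};
    for (unsigned q : sizes)
    {
        unsigned pad = PadElements(q);
        // The new rule always yields at least 8 bytes of slack, whereas the
        // old 8/sizeof(T) pad is 0 elements for sizes larger than 8 bytes.
        printf("sizeof(T)=%2u  PAD=%u  slack=%u bytes\n", q, pad, pad * q);
    }
    return 0;
}
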
diff --git a/secblock.h b/secblock.h
index 4c4650bb..62d872bc 100644
--- a/secblock.h
+++ b/secblock.h
@@ -468,24 +468,49 @@ public:
private:
-#if defined(CRYPTOPP_BOOL_ALIGN16) && defined(CRYPTOPP_ALIGN_ATTRIBUTE)
+#if defined(CRYPTOPP_BOOL_ALIGN16) && (defined(_M_X64) || defined(__x86_64__))
+ // Before we can add additional platforms we need to check the
+ // linker documentation for alignment behavior for stack variables.
+ // CRYPTOPP_ALIGN_DATA(16) is known OK on Linux, OS X, Solaris.
+ // Also see http://stackoverflow.com/a/1468656/608639.
T* GetAlignedArray() {
CRYPTOPP_ASSERT(IsAlignedOn(m_array, 16));
return m_array;
}
CRYPTOPP_ALIGN_DATA(16) T m_array[S];
+
#elif defined(CRYPTOPP_BOOL_ALIGN16)
- // There be demons here... Some platforms and small datatypes can
- // make things go sideways. We experienced it on AIX with XLC. If
- // we see anymore problems we should probably avoid the stack and
- // move to aligned heap allocations.
+
+ // There be demons here... We cannot use CRYPTOPP_ALIGN_DATA(16)
+ // because linkers on 32-bit machines (and some 64-bit machines)
+ // align the stack to 8-bytes or less by default, not 16-bytes as
+ // requested. Additionally, the AIX linker seems to use 4-bytes
+ // by default. However, all linkers tested appear to honor
+ // CRYPTOPP_ALIGN_DATA(8). Given we can achieve 8-byte array
+ // alignment, we need to transform the address returned from
+ // GetAlignedArray() to a 16-byte alignment.
+ // Also see http://stackoverflow.com/a/1468656/608639.
+ //
+ // The 16-byte alignment is achieved by padding the requested
+ // size with extra elements so we have at least 8-bytes of slack
+ // to work with. Then the pointer is rounded up to the next
+ // 16-byte boundary within that slack.
+ //
+ // The extra 8 bytes of slack introduces a small secondary issue:
+ // for a large T, the old expression 8/sizeof(T) evaluates to 0 and
+ // adds no slack. The library is OK but users may hit it. So we need
+ // to guard for a large T, and that is what PAD achieves.
T* GetAlignedArray() {
T* p_array = (T*)(void*)(((byte*)m_array) + (0-(size_t)m_array)%16);
CRYPTOPP_ASSERT(IsAlignedOn(p_array, 16));
return p_array;
}
- T m_array[S+8/sizeof(T)];
+ // PAD is elements, not bytes, and rounded up to ensure no overflow.
+ enum { Q = sizeof(T), PAD = (Q >= 8) ? 1 : (Q >= 4) ? 2 : (Q >= 2) ? 4 : 8 };
+ CRYPTOPP_ALIGN_DATA(8) T m_array[S+PAD];
+
#else
+
T* GetAlignedArray() {return m_array;}
T m_array[S];
#endif
@@ -576,10 +601,8 @@ public:
{
if (ptr == GetAlignedArray())
{
- // If the m_allocated assert fires then the bit twiddling for
- // GetAlignedArray() is probably incorrect for the platform.
- // Be sure to check CRYPTOPP_ALIGN_DATA(8). The platform may
- // not have a way to declaritively align data to 8.
+ // If the m_allocated assert fires then
+ // something overwrote the flag.
CRYPTOPP_ASSERT(size <= S);
CRYPTOPP_ASSERT(m_allocated);
m_allocated = false;