author     Yves Orton <demerphq@gmail.com>    2017-03-22 16:40:28 +0100
committer  Yves Orton <demerphq@gmail.com>    2017-04-23 11:44:17 +0200
commit     a3bf60fbb1f05cd2c69d4ff0a2ef99537afdaba7 (patch)
tree       dcd0cbf4be0ef56b631affe55f775c6ed94452a9 /stadtx_hash.h
parent     05f97de032fe95cabe8c9f6d6c0a5897b1616194 (diff)
download   perl-a3bf60fbb1f05cd2c69d4ff0a2ef99537afdaba7.tar.gz
Add new hashing and "hash with state" infrastructure
This adds support for three new hash functions, StadtX, Zaphod32, and SBOX,
and reworks some of our hash internals infrastructure to support them.
SBOX is special in that it is designed to be used in conjunction with any
other hash function for hashing short strings very efficiently and very
securely. It features compile-time options that control how much memory and
startup time are traded off against the length of keys that SBOX can hash.
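
To make that trade-off concrete, here is a hypothetical sketch of the
underlying technique (tabulation hashing); it is not the actual
sbox32_hash.h code, and all SBOX_DEMO_* names are illustrative. Each
supported key position gets a table of 256 seeded words, so memory and
seeding time grow with the maximum key length (e.g. 24 positions x 256
entries x 4 bytes = 24 KB), while hashing stays one lookup and XOR per byte:

    #include <stdint.h>
    #include <stddef.h>

    #define SBOX_DEMO_MAX_LEN 24  /* compile-time knob: higher = more memory/startup time */

    static uint32_t sbox_demo_table[SBOX_DEMO_MAX_LEN][256];

    /* Fill the tables from a seed at startup; cost grows with SBOX_DEMO_MAX_LEN. */
    static void sbox_demo_seed(uint64_t seed) {
        size_t i, j;
        for (i = 0; i < SBOX_DEMO_MAX_LEN; i++) {
            for (j = 0; j < 256; j++) {
                /* splitmix64-style mixing to derive table entries */
                uint64_t z = (seed += 0x9e3779b97f4a7c15ULL);
                z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9ULL;
                z = (z ^ (z >> 27)) * 0x94d049bb133111ebULL;
                sbox_demo_table[i][j] = (uint32_t)(z ^ (z >> 31));
            }
        }
    }

    /* Hashing is one table lookup and XOR per byte; len must be
     * <= SBOX_DEMO_MAX_LEN, longer keys need a general-purpose fallback hash. */
    static uint32_t sbox_demo_hash(const unsigned char *key, size_t len) {
        uint32_t hash = 0;
        size_t i;
        for (i = 0; i < len; i++)
            hash ^= sbox_demo_table[i][key[i]];
        return hash;
    }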
This also adds support for caching the hash values of single-byte characters.
This cache can be used in conjunction with any other hash, including SBOX,
although SBOX itself is as fast as the lookup cache, so typically you
wouldn't use both at the same time.
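
The caching idea is simple enough to sketch; assuming the stadtx functions
added by this patch, a hypothetical fill routine (illustrative names, not
the patch's actual implementation) might look like:

    #include <stdint.h>
    #include "stadtx_hash.h"

    /* Hash each of the 256 possible one-byte keys once at seed time, so
     * hashing any length-1 string later is a single array lookup. */
    static uint64_t one_byte_cache[256];

    static void fill_one_byte_cache(const uint8_t *state /* 4 x U64, already seeded */) {
        unsigned int i;
        for (i = 0; i < 256; i++) {
            uint8_t ch = (uint8_t)i;
            one_byte_cache[i] = stadtx_hash_with_state(state, &ch, 1);
        }
    }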
This also *removes* support for Jenkins One-At-A-Time. It has served us
well, but its day is done.
This patch adds three new files: zaphod32_hash.h, stadtx_hash.h, and
sbox32_hash.h.
Diffstat (limited to 'stadtx_hash.h')
-rw-r--r--  stadtx_hash.h  301
1 file changed, 301 insertions, 0 deletions
diff --git a/stadtx_hash.h b/stadtx_hash.h
new file mode 100644
index 0000000000..3a5d81e7cb
--- /dev/null
+++ b/stadtx_hash.h
@@ -0,0 +1,301 @@
+#ifndef STADTX_HASH_H
+#define STADTX_HASH_H
+
+#ifndef DEBUG_STADTX_HASH
+#define DEBUG_STADTX_HASH 0
+#endif
+
+
+#ifndef ROTL64
+#define _ROTL_SIZED(x,r,s) ( ((x) << (r)) | ((x) >> ((s) - (r))) )
+#define _ROTR_SIZED(x,r,s) ( ((x) << ((s) - (r))) | ((x) >> (r)) )
+#define ROTL64(x,r) _ROTL_SIZED(x,r,64)
+#define ROTR64(x,r) _ROTR_SIZED(x,r,64)
+#endif
+
+#ifndef PERL_SEEN_HV_FUNC_H
+
+#if !defined(U64)
+    #include <stdint.h>
+    #define U64 uint64_t
+#endif
+
+#if !defined(U32)
+    #define U32 uint32_t
+#endif
+
+#if !defined(U8)
+    #define U8 unsigned char
+#endif
+
+#if !defined(U16)
+    #define U16 uint16_t
+#endif
+
+#ifndef STRLEN
+#define STRLEN int
+#endif
+#endif
+
+#ifndef STADTX_STATIC_INLINE
+#ifdef PERL_STATIC_INLINE
+#define STADTX_STATIC_INLINE PERL_STATIC_INLINE
+#else
+#define STADTX_STATIC_INLINE static inline
+#endif
+#endif
+
+#ifndef STMT_START
+#define STMT_START do
+#define STMT_END while(0)
+#endif
+
+#ifndef STADTX_ALLOW_UNALIGNED_AND_LITTLE_ENDIAN
+#define STADTX_ALLOW_UNALIGNED_AND_LITTLE_ENDIAN 1
+#endif
+
+#if STADTX_ALLOW_UNALIGNED_AND_LITTLE_ENDIAN
+    #ifndef U8TO64_LE
+        #define U8TO64_LE(ptr)  (*((const U64 *)(ptr)))
+    #endif
+    #ifndef U8TO32_LE
+        #define U8TO32_LE(ptr)  (*((const U32 *)(ptr)))
+    #endif
+    #ifndef U8TO16_LE
+        #define U8TO16_LE(ptr)  (*((const U16 *)(ptr)))
+    #endif
+#else
+    #ifndef U8TO64_LE
+        #define U8TO64_LE(ptr)  (\
+            (U64)(ptr)[7] << 56 | \
+            (U64)(ptr)[6] << 48 | \
+            (U64)(ptr)[5] << 40 | \
+            (U64)(ptr)[4] << 32 | \
+            (U64)(ptr)[3] << 24 | \
+            (U64)(ptr)[2] << 16 | \
+            (U64)(ptr)[1] << 8  | \
+            (U64)(ptr)[0]         \
+        )
+    #endif
+    #ifndef U8TO32_LE
+        #define U8TO32_LE(ptr)  (\
+            (U32)(ptr)[3] << 24 | \
+            (U32)(ptr)[2] << 16 | \
+            (U32)(ptr)[1] << 8  | \
+            (U32)(ptr)[0]         \
+        )
+    #endif
+    #ifndef U8TO16_LE
+        #define U8TO16_LE(ptr)  (\
+            (U16)(ptr)[1] << 8  | \
+            (U16)(ptr)[0]         \
+        )
+    #endif
+#endif
+
+/* Do a Marsaglia xor-shift permutation followed by a
+ * multiply by a (presumably large) prime and another
+ * Marsaglia xor-shift permutation.
+ * One of these thoroughly changes the bits of the input.
+ * Two of these with different primes pass the Strict Avalanche Criterion
+ * in all the tests I did.
+ *
+ * Note that v cannot end up zero after a scramble64 unless it
+ * was zero in the first place.
+ */
+#define STADTX_SCRAMBLE64(v,prime) STMT_START {    \
+    v ^= (v >> 13);                                \
+    v ^= (v << 35);                                \
+    v ^= (v >> 30);                                \
+    v *= prime;                                    \
+    v ^= (v >> 19);                                \
+    v ^= (v << 15);                                \
+    v ^= (v >> 46);                                \
+} STMT_END
+
+
+STADTX_STATIC_INLINE void stadtx_seed_state (
+    const U8 *seed_ch,
+    U8 *state_ch
+) {
+    U64 *seed= (U64 *)seed_ch;
+    U64 *state= (U64 *)state_ch;
+    /* First we apply two masks to each word of the seed; this ensures that
+     * a) at least one of state[0] and state[2] is nonzero,
+     * b) at least one of state[1] and state[3] is nonzero,
+     * c) state[0] and state[2] are different,
+     * d) state[1] and state[3] are different,
+     * e) the replacement value for any zeros is totally different from the seed value
+     *    (IOW, if seed[0] is 0x43f6a8885a308d31UL then state[0] becomes 0, which is then
+     *    replaced with 1, which is totally different). */
+    /* hex expansion of pi, skipping first two digits: pi = 3.2[43f6...] */
+    /* pi value in hex from here:
+     * http://turner.faculty.swau.edu/mathematics/materialslibrary/pi/pibases.html */
+    state[0]= seed[0] ^ 0x43f6a8885a308d31UL;
+    state[1]= seed[1] ^ 0x3198a2e03707344aUL;
+    state[2]= seed[0] ^ 0x4093822299f31d00UL;
+    state[3]= seed[1] ^ 0x82efa98ec4e6c894UL;
+    if (!state[0]) state[0]=1;
+    if (!state[1]) state[1]=2;
+    if (!state[2]) state[2]=4;
+    if (!state[3]) state[3]=8;
+    /* and now for good measure we double scramble all four -
+     * a double scramble guarantees a complete avalanche of all the
+     * bits in the seed - IOW, by the time we are hashing, the
+     * four state vectors should be completely different and utterly
+     * unrecognizable from the input seed bits */
+    STADTX_SCRAMBLE64(state[0],0x801178846e899d17UL);
+    STADTX_SCRAMBLE64(state[0],0xdd51e5d1c9a5a151UL);
+    STADTX_SCRAMBLE64(state[1],0x93a7d6c8c62e4835UL);
+    STADTX_SCRAMBLE64(state[1],0x803340f36895c2b5UL);
+    STADTX_SCRAMBLE64(state[2],0xbea9344eb7565eebUL);
+    STADTX_SCRAMBLE64(state[2],0xcd95d1e509b995cdUL);
+    STADTX_SCRAMBLE64(state[3],0x9999791977e30c13UL);
+    STADTX_SCRAMBLE64(state[3],0xaab8b6b05abfc6cdUL);
+}
+
+#define STADTX_K0_U64 0xb89b0f8e1655514fUL
+#define STADTX_K1_U64 0x8c6f736011bd5127UL
+#define STADTX_K2_U64 0x8f29bd94edce7b39UL
+#define STADTX_K3_U64 0x9c1b8e1e9628323fUL
+
+#define STADTX_K2_U32 0x802910e3
+#define STADTX_K3_U32 0x819b13af
+#define STADTX_K4_U32 0x91cb27e5
+#define STADTX_K5_U32 0xc1a269c1
+
+STADTX_STATIC_INLINE U64 stadtx_hash_with_state(
+    const U8 *state_ch,
+    const U8 *key,
+    const STRLEN key_len
+) {
+    U64 *state= (U64 *)state_ch;
+    U64 len = key_len;
+    U64 v0= state[0] ^ ((key_len+1) * STADTX_K0_U64);
+    U64 v1= state[1] ^ ((key_len+2) * STADTX_K1_U64);
+    if (len < 32) {
+        switch(len >> 3) { /* cases fall through deliberately */
+            case 3:
+                v0 += U8TO64_LE(key) * STADTX_K3_U64;
+                v0= ROTR64(v0, 17) ^ v1;
+                v1= ROTR64(v1, 53) + v0;
+                key += 8;
+            case 2:
+                v0 += U8TO64_LE(key) * STADTX_K3_U64;
+                v0= ROTR64(v0, 17) ^ v1;
+                v1= ROTR64(v1, 53) + v0;
+                key += 8;
+            case 1:
+                v0 += U8TO64_LE(key) * STADTX_K3_U64;
+                v0= ROTR64(v0, 17) ^ v1;
+                v1= ROTR64(v1, 53) + v0;
+                key += 8;
+            case 0:
+            default: break;
+        }
+        switch ( len & 0x7 ) { /* cases fall through until the next break */
+            case 7: v0 += (U64)key[6] << 32;
+            case 6: v1 += (U64)key[5] << 48;
+            case 5: v0 += (U64)key[4] << 16;
+            case 4: v1 += (U64)U8TO32_LE(key);
+                    break;
+            case 3: v0 += (U64)key[2] << 48;
+            case 2: v1 += (U64)U8TO16_LE(key);
+                    break;
+            case 1: v0 += (U64)key[0];
+            case 0: v1 = ROTL64(v1, 32) ^ 0xFF;
+                    break;
+        }
+        v1 ^= v0;
+        v0 = ROTR64(v0,33) + v1;
+        v1 = ROTL64(v1,17) ^ v0;
+        v0 = ROTL64(v0,43) + v1;
+        v1 = ROTL64(v1,31) - v0;
+        v0 = ROTL64(v0,13) ^ v1;
+        v1 -= v0;
+        v0 = ROTL64(v0,41) + v1;
+        v1 = ROTL64(v1,37) ^ v0;
+        v0 = ROTR64(v0,39) + v1;
+        v1 = ROTR64(v1,15) + v0;
+        v0 = ROTL64(v0,15) ^ v1;
+        v1 = ROTR64(v1, 5);
+        return v0 ^ v1;
+    } else {
+        U64 v2= state[2] ^ ((key_len+3) * STADTX_K2_U64);
+        U64 v3= state[3] ^ ((key_len+4) * STADTX_K3_U64);
+
+        do {
+            v0 += (U64)U8TO64_LE(key+ 0) * STADTX_K2_U32; v0= ROTL64(v0,57) ^ v3;
+            v1 += (U64)U8TO64_LE(key+ 8) * STADTX_K3_U32; v1= ROTL64(v1,63) ^ v2;
+            v2 += (U64)U8TO64_LE(key+16) * STADTX_K4_U32; v2= ROTR64(v2,47) + v0;
+            v3 += (U64)U8TO64_LE(key+24) * STADTX_K5_U32; v3= ROTR64(v3,11) - v1;
+            key += 32;
+            len -= 32;
+        } while ( len >= 32 );
+
+        switch ( len >> 3 ) { /* cases fall through deliberately */
+            case 3: v0 += ((U64)U8TO64_LE(key) * STADTX_K2_U32); key += 8; v0= ROTL64(v0,57) ^ v3;
+            case 2: v1 += ((U64)U8TO64_LE(key) * STADTX_K3_U32); key += 8; v1= ROTL64(v1,63) ^ v2;
+            case 1: v2 += ((U64)U8TO64_LE(key) * STADTX_K4_U32); key += 8; v2= ROTR64(v2,47) + v0;
+            case 0: v3 = ROTR64(v3,11) - v1;
+        }
+        v0 ^= (len+1) * STADTX_K3_U64;
+        switch ( len & 0x7 ) { /* cases fall through until the next break */
+            case 7: v1 += (U64)key[6];
+            case 6: v2 += (U64)U8TO16_LE(key+4);
+                    v3 += (U64)U8TO32_LE(key);
+                    break;
+            case 5: v1 += (U64)key[4];
+            case 4: v2 += (U64)U8TO32_LE(key);
+                    break;
+            case 3: v3 += (U64)key[2];
+            case 2: v1 += (U64)U8TO16_LE(key);
+                    break;
+            case 1: v2 += (U64)key[0];
+            case 0: v3 = ROTL64(v3, 32) ^ 0xFF;
+                    break;
+        }
+
+        v1 -= v2;
+        v0 = ROTR64(v0,19);
+        v1 -= v0;
+        v1 = ROTR64(v1,53);
+        v3 ^= v1;
+        v0 -= v3;
+        v3 = ROTL64(v3,43);
+        v0 += v3;
+        v0 = ROTR64(v0, 3);
+        v3 -= v0;
+        v2 = ROTR64(v2,43) - v3;
+        v2 = ROTL64(v2,55) ^ v0;
+        v1 -= v2;
+        v3 = ROTR64(v3, 7) - v2;
+        v2 = ROTR64(v2,31);
+        v3 += v2;
+        v2 -= v1;
+        v3 = ROTR64(v3,39);
+        v2 ^= v3;
+        v3 = ROTR64(v3,17) ^ v2;
+        v1 += v3;
+        v1 = ROTR64(v1, 9);
+        v2 ^= v1;
+        v2 = ROTL64(v2,24);
+        v3 ^= v2;
+        v3 = ROTR64(v3,59);
+        v0 = ROTR64(v0, 1) - v1;
+
+        return v0 ^ v1 ^ v2 ^ v3;
+    }
+}
+
+STADTX_STATIC_INLINE U64 stadtx_hash(
+    const U8 *seed_ch,
+    const U8 *key,
+    const STRLEN key_len
+) {
+    U64 state[4];
+    stadtx_seed_state(seed_ch,(U8*)state);
+    return stadtx_hash_with_state((U8*)state,key,key_len);
+}
+
+#endif
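
For reference, a minimal caller sketch (not part of the patch), based on the
signatures above: stadtx_seed_state() reads two U64s from the seed buffer
and writes four U64s of derived state, which can then be reused across many
keys.

    #include <stdio.h>
    #include <string.h>
    #include "stadtx_hash.h"

    int main(void) {
        /* stadtx_seed_state() consumes 16 bytes of seed (two U64s) and
         * derives 32 bytes of state (four U64s). */
        U64 seed[2] = { 0x0123456789abcdefUL, 0xfedcba9876543210UL };
        U64 state[4];
        const char *key = "hello, world";

        stadtx_seed_state((const U8 *)seed, (U8 *)state);

        /* Derive the state once, then hash any number of keys with it... */
        U64 h1 = stadtx_hash_with_state((const U8 *)state, (const U8 *)key,
                                        (STRLEN)strlen(key));

        /* ...or use the one-shot wrapper, which seeds and hashes together. */
        U64 h2 = stadtx_hash((const U8 *)seed, (const U8 *)key,
                             (STRLEN)strlen(key));

        /* The two calls produce identical results for the same seed and key. */
        printf("%016llx %016llx\n",
               (unsigned long long)h1, (unsigned long long)h2);
        return h1 == h2 ? 0 : 1;
    }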
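
The comment on STADTX_SCRAMBLE64 makes two testable claims: the permutation
is a bijection (each xor-shift step and the multiply by an odd prime are
invertible mod 2^64, so only zero maps to zero), and two rounds with
different primes give thorough avalanche. A small harness to eyeball the
second claim (illustrative, not part of the patch; __builtin_popcountll
assumes GCC/Clang):

    #include <stdio.h>
    #include <stdint.h>
    #include "stadtx_hash.h"  /* for STADTX_SCRAMBLE64 and STMT_START/STMT_END */

    /* Count how many output bits flip, on average, when one input bit flips.
     * Good avalanche means close to 32 of the 64 bits. */
    int main(void) {
        uint64_t total = 0;
        uint64_t x = 0x0123456789abcdefULL;
        int bit;
        for (bit = 0; bit < 64; bit++) {
            uint64_t a = x, b = x ^ ((uint64_t)1 << bit);
            /* double scramble with two different primes, as stadtx_seed_state does */
            STADTX_SCRAMBLE64(a, 0x801178846e899d17UL);
            STADTX_SCRAMBLE64(a, 0xdd51e5d1c9a5a151UL);
            STADTX_SCRAMBLE64(b, 0x801178846e899d17UL);
            STADTX_SCRAMBLE64(b, 0xdd51e5d1c9a5a151UL);
            total += (uint64_t)__builtin_popcountll(a ^ b);
        }
        printf("average flipped bits: %.2f (ideal ~32)\n", total / 64.0);
        return 0;
    }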