diff options
-rw-r--r-- | Makefile | 18 | ||||
-rw-r--r-- | firmware/2lib/2sha256.c | 32 | ||||
-rw-r--r-- | firmware/2lib/2sha256_x86.c | 255 | ||||
-rw-r--r-- | firmware/2lib/2stub_hwcrypto.c | 2 | ||||
-rw-r--r-- | firmware/2lib/include/2sha_private.h | 38 | ||||
-rw-r--r-- | tests/vb2_sha256_x86_tests.c | 121 |
6 files changed, 445 insertions, 21 deletions
@@ -445,6 +445,14 @@ FWLIB_SRCS += \ firmware/lib/tpm_lite/mocked_tlcl.c endif +ifneq ($(filter-out 0,${X86_SHA_EXT}),) +CFLAGS += -DX86_SHA_EXT +FWLIB_SRCS += \ + firmware/2lib/2sha256_x86.c +endif +# Even if X86_SHA_EXT is 0, we still need these CFLAGS since this file will be compiled for tests +${BUILD}/firmware/2lib/2sha256_x86.o: CFLAGS += -mssse3 -mno-avx -msha + ifeq (${FIRMWARE_ARCH},) # Include BIOS stubs in the firmware library when compiling for host # TODO: split out other stub funcs too @@ -774,6 +782,11 @@ TEST21_NAMES = \ TEST_NAMES += ${TEST2X_NAMES} ${TEST20_NAMES} ${TEST21_NAMES} +# This is a build-only test, since we can't run it without the +# SHA-NI extension on x86. To run this test, you have to +# manually copy the executable to a compatible machine and run it. +TEST_NAMES += tests/vb2_sha256_x86_tests + # And a few more... ifeq (${TPM2_MODE},) TLCL_TEST_NAMES = \ @@ -1095,6 +1108,11 @@ ${TEST20_BINS}: ${FWLIB} ${TEST20_BINS}: LIBS += ${FWLIB} ${TEST20_BINS}: LDLIBS += ${CRYPTO_LIBS} +# Special build rule for the sha256_x86 test +X86_SHA256_TEST = ${BUILD_RUN}/tests/vb2_sha256_x86_tests +${X86_SHA256_TEST}: ${BUILD}/firmware/2lib/2sha256_x86.o +${X86_SHA256_TEST}: LIBS += ${BUILD}/firmware/2lib/2sha256_x86.o + ${TESTLIB}: ${TESTLIB_OBJS} @${PRINTF} " RM $(subst ${BUILD}/,,$@)\n" ${Q}rm -f $@ diff --git a/firmware/2lib/2sha256.c b/firmware/2lib/2sha256.c index c3612377..00441131 100644 --- a/firmware/2lib/2sha256.c +++ b/firmware/2lib/2sha256.c @@ -1,3 +1,8 @@ +/* Copyright 2021 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + /* SHA-256 and SHA-512 implementation based on code by Oliver Gay * <olivier.gay@a3.epfl.ch> under a BSD-style license. See below. 
*/ @@ -37,6 +42,7 @@ #include "2common.h" #include "2sha.h" +#include "2sha_private.h" #include "2sysincludes.h" #define SHFR(x, n) (x >> n) @@ -50,22 +56,6 @@ #define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3)) #define SHA256_F4(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHFR(x, 10)) -#define UNPACK32(x, str) \ - { \ - *((str) + 3) = (uint8_t) ((x) ); \ - *((str) + 2) = (uint8_t) ((x) >> 8); \ - *((str) + 1) = (uint8_t) ((x) >> 16); \ - *((str) + 0) = (uint8_t) ((x) >> 24); \ - } - -#define PACK32(str, x) \ - { \ - *(x) = ((uint32_t) *((str) + 3) ) \ - | ((uint32_t) *((str) + 2) << 8) \ - | ((uint32_t) *((str) + 1) << 16) \ - | ((uint32_t) *((str) + 0) << 24); \ - } - /* Macros used for loops unrolling */ #define SHA256_SCR(i) \ @@ -77,13 +67,13 @@ #define SHA256_EXP(a, b, c, d, e, f, g, h, j) \ { \ t1 = wv[h] + SHA256_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) \ - + sha256_k[j] + w[j]; \ + + vb2_sha256_k[j] + w[j]; \ t2 = SHA256_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]); \ wv[d] += t1; \ wv[h] = t1 + t2; \ } -static const uint32_t sha256_h0[8] = { +const uint32_t vb2_sha256_h0[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 }; @@ -93,7 +83,7 @@ static const uint32_t sha224_h0[8] = { 0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4 }; -static const uint32_t sha256_k[64] = { +const uint32_t vb2_sha256_k[64] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, @@ -116,7 +106,7 @@ static const uint32_t sha256_k[64] = { void vb2_sha256_init(struct vb2_sha256_context *ctx, enum vb2_hash_algorithm algo) { - const uint32_t *h0 = algo == VB2_HASH_SHA224 ? sha224_h0 : sha256_h0; + const uint32_t *h0 = algo == VB2_HASH_SHA224 ? 
sha224_h0 : vb2_sha256_h0; #ifndef UNROLL_LOOPS int i; @@ -167,7 +157,7 @@ static void vb2_sha256_transform(struct vb2_sha256_context *ctx, for (j = 0; j < 64; j++) { t1 = wv[7] + SHA256_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) - + sha256_k[j] + w[j]; + + vb2_sha256_k[j] + w[j]; t2 = SHA256_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]); wv[7] = wv[6]; wv[6] = wv[5]; diff --git a/firmware/2lib/2sha256_x86.c b/firmware/2lib/2sha256_x86.c new file mode 100644 index 00000000..e80477e0 --- /dev/null +++ b/firmware/2lib/2sha256_x86.c @@ -0,0 +1,255 @@ +/* Copyright 2021 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + * + * SHA256 implementation using x86 SHA extension. + * Mainly from https://github.com/noloader/SHA-Intrinsics/blob/master/sha256-x86.c, + * Written and place in public domain by Jeffrey Walton + * Based on code from Intel, and by Sean Gulley for + * the miTLS project. + */ +#include "2common.h" +#include "2sha.h" +#include "2sha_private.h" +#include "2api.h" + +static struct vb2_sha256_context sha_ctx; + +typedef int vb2_m128i __attribute__ ((vector_size(16))); + +static inline vb2_m128i vb2_loadu_si128(vb2_m128i *ptr) +{ + vb2_m128i result; + asm volatile ("movups %1, %0" : "=x"(result) : "m"(*ptr)); + return result; +} + +static inline void vb2_storeu_si128(vb2_m128i *to, vb2_m128i from) +{ + asm volatile ("movups %1, %0" : "=m"(*to) : "x"(from)); +} + +static inline vb2_m128i vb2_add_epi32(vb2_m128i a, vb2_m128i b) +{ + return a + b; +} + +static inline vb2_m128i vb2_shuffle_epi8(vb2_m128i value, vb2_m128i mask) +{ + asm ("pshufb %1, %0" : "+x"(value) : "xm"(mask)); + return value; +} + +static inline vb2_m128i vb2_shuffle_epi32(vb2_m128i value, int mask) +{ + vb2_m128i result; + asm ("pshufd %2, %1, %0" : "=x"(result) : "xm"(value), "i" (mask)); + return result; +} + +static inline vb2_m128i vb2_alignr_epi8(vb2_m128i a, vb2_m128i b, int imm8) +{ + asm ("palignr 
%2, %1, %0" : "+x"(a) : "xm"(b), "i"(imm8)); + return a; +} + +static inline vb2_m128i vb2_sha256msg1_epu32(vb2_m128i a, vb2_m128i b) +{ + asm ("sha256msg1 %1, %0" : "+x"(a) : "xm"(b)); + return a; +} + +static inline vb2_m128i vb2_sha256msg2_epu32(vb2_m128i a, vb2_m128i b) +{ + asm ("sha256msg2 %1, %0" : "+x"(a) : "xm"(b)); + return a; +} + +static inline vb2_m128i vb2_sha256rnds2_epu32(vb2_m128i a, vb2_m128i b, + vb2_m128i k) +{ + asm ("sha256rnds2 %1, %0" : "+x"(a) : "xm"(b), "Yz"(k)); + return a; +} + +#define SHA256_X86_PUT_STATE1(j, i) \ + { \ + msgtmp[j] = vb2_loadu_si128((vb2_m128i *) \ + (message + (i << 6) + (j * 16))); \ + msgtmp[j] = vb2_shuffle_epi8(msgtmp[j], shuf_mask); \ + msg = vb2_add_epi32(msgtmp[j], \ + vb2_loadu_si128((vb2_m128i *)&vb2_sha256_k[j * 4])); \ + state1 = vb2_sha256rnds2_epu32(state1, state0, msg); \ + } + +#define SHA256_X86_PUT_STATE0() \ + { \ + msg = vb2_shuffle_epi32(msg, 0x0E); \ + state0 = vb2_sha256rnds2_epu32(state0, state1, msg); \ + } + +#define SHA256_X86_LOOP(j) \ + { \ + int k = j & 3; \ + int prev_k = (k + 3) & 3; \ + int next_k = (k + 1) & 3; \ + msg = vb2_add_epi32(msgtmp[k], \ + vb2_loadu_si128((vb2_m128i *)&vb2_sha256_k[j * 4])); \ + state1 = vb2_sha256rnds2_epu32(state1, state0, msg); \ + tmp = vb2_alignr_epi8(msgtmp[k], msgtmp[prev_k], 4); \ + msgtmp[next_k] = vb2_add_epi32(msgtmp[next_k], tmp); \ + msgtmp[next_k] = vb2_sha256msg2_epu32(msgtmp[next_k], \ + msgtmp[k]); \ + SHA256_X86_PUT_STATE0(); \ + msgtmp[prev_k] = vb2_sha256msg1_epu32(msgtmp[prev_k], \ + msgtmp[k]); \ + } + +static void vb2_sha256_transform_x86ext(const uint8_t *message, + unsigned int block_nb) +{ + vb2_m128i state0, state1, msg, abef_save, cdgh_save; + vb2_m128i msgtmp[4]; + vb2_m128i tmp; + int i; + const vb2_m128i shuf_mask = {0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f}; + + state0 = vb2_loadu_si128((vb2_m128i *)&sha_ctx.h[0]); + state1 = vb2_loadu_si128((vb2_m128i *)&sha_ctx.h[4]); + for (i = 0; i < (int) block_nb; i++) { + abef_save 
= state0; + cdgh_save = state1; + + SHA256_X86_PUT_STATE1(0, i); + SHA256_X86_PUT_STATE0(); + + SHA256_X86_PUT_STATE1(1, i); + SHA256_X86_PUT_STATE0(); + msgtmp[0] = vb2_sha256msg1_epu32(msgtmp[0], msgtmp[1]); + + SHA256_X86_PUT_STATE1(2, i); + SHA256_X86_PUT_STATE0(); + msgtmp[1] = vb2_sha256msg1_epu32(msgtmp[1], msgtmp[2]); + + SHA256_X86_PUT_STATE1(3, i); + tmp = vb2_alignr_epi8(msgtmp[3], msgtmp[2], 4); + msgtmp[0] = vb2_add_epi32(msgtmp[0], tmp); + msgtmp[0] = vb2_sha256msg2_epu32(msgtmp[0], msgtmp[3]); + SHA256_X86_PUT_STATE0(); + msgtmp[2] = vb2_sha256msg1_epu32(msgtmp[2], msgtmp[3]); + + SHA256_X86_LOOP(4); + SHA256_X86_LOOP(5); + SHA256_X86_LOOP(6); + SHA256_X86_LOOP(7); + SHA256_X86_LOOP(8); + SHA256_X86_LOOP(9); + SHA256_X86_LOOP(10); + SHA256_X86_LOOP(11); + SHA256_X86_LOOP(12); + SHA256_X86_LOOP(13); + SHA256_X86_LOOP(14); + + msg = vb2_add_epi32(msgtmp[3], + vb2_loadu_si128((vb2_m128i *)&vb2_sha256_k[15 * 4])); + state1 = vb2_sha256rnds2_epu32(state1, state0, msg); + SHA256_X86_PUT_STATE0(); + + state0 = vb2_add_epi32(state0, abef_save); + state1 = vb2_add_epi32(state1, cdgh_save); + + } + + vb2_storeu_si128((vb2_m128i *)&sha_ctx.h[0], state0); + vb2_storeu_si128((vb2_m128i *)&sha_ctx.h[4], state1); +} + +vb2_error_t vb2ex_hwcrypto_digest_init(enum vb2_hash_algorithm hash_alg, + uint32_t data_size) +{ + if (hash_alg != VB2_HASH_SHA256) + return VB2_ERROR_EX_HWCRYPTO_UNSUPPORTED; + + sha_ctx.h[0] = vb2_sha256_h0[5]; + sha_ctx.h[1] = vb2_sha256_h0[4]; + sha_ctx.h[2] = vb2_sha256_h0[1]; + sha_ctx.h[3] = vb2_sha256_h0[0]; + sha_ctx.h[4] = vb2_sha256_h0[7]; + sha_ctx.h[5] = vb2_sha256_h0[6]; + sha_ctx.h[6] = vb2_sha256_h0[3]; + sha_ctx.h[7] = vb2_sha256_h0[2]; + sha_ctx.size = 0; + sha_ctx.total_size = 0; + memset(sha_ctx.block, 0, sizeof(sha_ctx.block)); + + return VB2_SUCCESS; +} + +vb2_error_t vb2ex_hwcrypto_digest_extend(const uint8_t *buf, uint32_t size) +{ + unsigned int remaining_blocks; + unsigned int new_size, rem_size, tmp_size; + const uint8_t 
*shifted_data; + + tmp_size = VB2_SHA256_BLOCK_SIZE - sha_ctx.size; + rem_size = size < tmp_size ? size : tmp_size; + + memcpy(&sha_ctx.block[sha_ctx.size], buf, rem_size); + + if (sha_ctx.size + size < VB2_SHA256_BLOCK_SIZE) { + sha_ctx.size += size; + return VB2_SUCCESS; + } + + new_size = size - rem_size; + remaining_blocks = new_size / VB2_SHA256_BLOCK_SIZE; + + shifted_data = buf + rem_size; + + vb2_sha256_transform_x86ext(sha_ctx.block, 1); + vb2_sha256_transform_x86ext(shifted_data, remaining_blocks); + + rem_size = new_size % VB2_SHA256_BLOCK_SIZE; + + memcpy(sha_ctx.block, &shifted_data[remaining_blocks * VB2_SHA256_BLOCK_SIZE], + rem_size); + + sha_ctx.size = rem_size; + sha_ctx.total_size += (remaining_blocks + 1) * VB2_SHA256_BLOCK_SIZE; + return VB2_SUCCESS; +} + +vb2_error_t vb2ex_hwcrypto_digest_finalize(uint8_t *digest, + uint32_t digest_size) +{ + unsigned int block_nb; + unsigned int pm_size; + unsigned int size_b; + unsigned int block_rem_size = sha_ctx.size % VB2_SHA256_BLOCK_SIZE; + if (digest_size != VB2_SHA256_DIGEST_SIZE) { + VB2_DEBUG("ERROR: Digest size does not match expected length.\n"); + return VB2_ERROR_SHA_FINALIZE_DIGEST_SIZE; + } + + block_nb = (1 + ((VB2_SHA256_BLOCK_SIZE - SHA256_MIN_PAD_LEN) + < block_rem_size)); + + size_b = (sha_ctx.total_size + sha_ctx.size) * 8; + pm_size = block_nb * VB2_SHA256_BLOCK_SIZE; + + memset(sha_ctx.block + sha_ctx.size, 0, pm_size - sha_ctx.size); + sha_ctx.block[sha_ctx.size] = SHA256_PAD_BEGIN; + UNPACK32(size_b, sha_ctx.block + pm_size - 4); + + vb2_sha256_transform_x86ext(sha_ctx.block, block_nb); + + UNPACK32(sha_ctx.h[3], &digest[ 0]); + UNPACK32(sha_ctx.h[2], &digest[ 4]); + UNPACK32(sha_ctx.h[7], &digest[ 8]); + UNPACK32(sha_ctx.h[6], &digest[12]); + UNPACK32(sha_ctx.h[1], &digest[16]); + UNPACK32(sha_ctx.h[0], &digest[20]); + UNPACK32(sha_ctx.h[5], &digest[24]); + UNPACK32(sha_ctx.h[4], &digest[28]); + return VB2_SUCCESS; +} diff --git a/firmware/2lib/2stub_hwcrypto.c 
b/firmware/2lib/2stub_hwcrypto.c index 56272ad4..87fb7333 100644 --- a/firmware/2lib/2stub_hwcrypto.c +++ b/firmware/2lib/2stub_hwcrypto.c @@ -7,6 +7,7 @@ #include "2api.h" +#ifndef X86_SHA_EXT __attribute__((weak)) vb2_error_t vb2ex_hwcrypto_digest_init(enum vb2_hash_algorithm hash_alg, uint32_t data_size) @@ -26,6 +27,7 @@ vb2_error_t vb2ex_hwcrypto_digest_finalize(uint8_t *digest, { return VB2_ERROR_SHA_FINALIZE_ALGORITHM; /* Should not be called. */ } +#endif __attribute__((weak)) vb2_error_t vb2ex_hwcrypto_rsa_verify_digest(const struct vb2_public_key *key, diff --git a/firmware/2lib/include/2sha_private.h b/firmware/2lib/include/2sha_private.h new file mode 100644 index 00000000..ffa79f27 --- /dev/null +++ b/firmware/2lib/include/2sha_private.h @@ -0,0 +1,38 @@ +/* Copyright 2021 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + * + * Constants & macros for SHA algorithms. 
 + */ +#define SHA256_MIN_PAD_LEN 9 + +/* The first byte of SHA-256 padding is always 0x80 when messages are byte-aligned + */ +#define SHA256_PAD_BEGIN 0x80 + +extern const uint32_t vb2_sha256_h0[8]; +extern const uint32_t vb2_sha256_k[64]; + +#define UNPACK32(x, str) \ + { \ + *((str) + 3) = (uint8_t) ((x) ); \ + *((str) + 2) = (uint8_t) ((x) >> 8); \ + *((str) + 1) = (uint8_t) ((x) >> 16); \ + *((str) + 0) = (uint8_t) ((x) >> 24); \ + } + +#define PACK32(str, x) \ + { \ + *(x) = ((uint32_t) *((str) + 3) ) \ + | ((uint32_t) *((str) + 2) << 8) \ + | ((uint32_t) *((str) + 1) << 16) \ + | ((uint32_t) *((str) + 0) << 24); \ + } +#endif /* VBOOT_REFERENCE_2SHA_PRIVATE_H_ */ diff --git a/tests/vb2_sha256_x86_tests.c b/tests/vb2_sha256_x86_tests.c new file mode 100644 index 00000000..98be77ef --- /dev/null +++ b/tests/vb2_sha256_x86_tests.c @@ -0,0 +1,121 @@ +/* Copyright 2021 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +/* FIPS 180-2 Tests for message digest functions. 
*/ + +#include <stdio.h> + +#include "2api.h" +#include "2sha.h" +#include "sha_test_vectors.h" +#include "test_common.h" + +vb2_error_t vb2_digest_buffer(const uint8_t *buf, uint32_t size, + enum vb2_hash_algorithm hash_alg, uint8_t *digest, + uint32_t digest_size) +{ + VB2_TRY(vb2ex_hwcrypto_digest_init(hash_alg, size)); + VB2_TRY(vb2ex_hwcrypto_digest_extend(buf, size)); + + return vb2ex_hwcrypto_digest_finalize(digest, digest_size); + +} + +static void sha256_tests(void) +{ + uint8_t digest[VB2_SHA256_DIGEST_SIZE]; + uint8_t *test_inputs[3]; + const uint8_t expect_multiple[VB2_SHA256_DIGEST_SIZE] = { + 0x07, 0x08, 0xb4, 0xca, 0x46, 0x4c, 0x40, 0x39, + 0x07, 0x06, 0x88, 0x80, 0x30, 0x55, 0x5d, 0x86, + 0x0e, 0x4a, 0x0d, 0x2b, 0xc6, 0xc4, 0x87, 0x39, + 0x2c, 0x16, 0x55, 0xb0, 0x82, 0x13, 0x16, 0x29 }; + int i; + + test_inputs[0] = (uint8_t *) oneblock_msg; + test_inputs[1] = (uint8_t *) multiblock_msg1; + test_inputs[2] = (uint8_t *) long_msg; + + for (i = 0; i < 3; i++) { + TEST_SUCC(vb2_digest_buffer(test_inputs[i], + strlen((char *)test_inputs[i]), + VB2_HASH_SHA256, + digest, sizeof(digest)), + "vb2_digest_buffer() SHA256"); + TEST_EQ(memcmp(digest, sha256_results[i], sizeof(digest)), + 0, "SHA-256 digest"); + } + + TEST_EQ(vb2_digest_buffer(test_inputs[0], + strlen((char *)test_inputs[0]), + VB2_HASH_SHA256, digest, sizeof(digest) - 1), + VB2_ERROR_SHA_FINALIZE_DIGEST_SIZE, + "vb2_digest_buffer() too small"); + + /* Test multiple small extends */ + vb2ex_hwcrypto_digest_init(VB2_HASH_SHA256, 15); + vb2ex_hwcrypto_digest_extend((uint8_t *)"test1", 5); + vb2ex_hwcrypto_digest_extend((uint8_t *)"test2", 5); + vb2ex_hwcrypto_digest_extend((uint8_t *)"test3", 5); + vb2ex_hwcrypto_digest_finalize(digest, VB2_SHA256_DIGEST_SIZE); + TEST_EQ(memcmp(digest, expect_multiple, sizeof(digest)), 0, + "SHA-256 multiple extends"); + + TEST_EQ(vb2_hash_block_size(VB2_HASH_SHA256), VB2_SHA256_BLOCK_SIZE, + "vb2_hash_block_size(VB2_HASH_SHA256)"); + +} + +static void 
known_value_tests(void) +{ + const char sentinel[] = "keepme"; + struct { + struct vb2_hash hash; + uint8_t overflow[8]; + } test; + +#define TEST_KNOWN_VALUE(algo, str, value) \ + TEST_EQ(vb2_digest_size(algo), sizeof(value) - 1, \ + "Known hash size " #algo ": " #str); \ + strcpy((char *)&test.hash.raw[sizeof(value) - 1], sentinel); \ + TEST_SUCC(vb2_digest_buffer((const uint8_t *)str, sizeof(str) - 1, \ + algo, test.hash.raw, vb2_digest_size(algo)), \ + "Calculate known hash " #algo ": " #str); \ + TEST_EQ(memcmp(test.hash.raw, value, sizeof(value) - 1), 0, \ + "Known hash " #algo ": " #str); \ + TEST_EQ(strcmp((char *)&test.hash.raw[sizeof(value) - 1], sentinel), 0,\ + "Overflow known hash " #algo ": " #str); + + TEST_KNOWN_VALUE(VB2_HASH_SHA256, "", + "\xe3\xb0\xc4\x42\x98\xfc\x1c\x14\x9a\xfb\xf4\xc8\x99\x6f\xb9" + "\x24\x27\xae\x41\xe4\x64\x9b\x93\x4c\xa4\x95\x99\x1b\x78\x52" + "\xb8\x55"); + + const char long_test_string[] = "abcdefghbcdefghicdefghijdefghijkefgh" + "ijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrs" + "mnopqrstnopqrstu"; + TEST_KNOWN_VALUE(VB2_HASH_SHA256, long_test_string, + "\xcf\x5b\x16\xa7\x78\xaf\x83\x80\x03\x6c\xe5\x9e\x7b\x04\x92" + "\x37\x0b\x24\x9b\x11\xe8\xf0\x7a\x51\xaf\xac\x45\x03\x7a\xfe" + "\xe9\xd1"); + + /* vim helper to escape hex: <Shift+V>:s/\([a-f0-9]\{2\}\)/\\x\1/g */ +#undef TEST_KNOWN_VALUE +} + +int main(int argc, char *argv[]) +{ + /* Initialize long_msg with 'a' x 1,000,000 */ + long_msg = (char *) malloc(1000001); + memset(long_msg, 'a', 1000000); + long_msg[1000000]=0; + + sha256_tests(); + known_value_tests(); + + free(long_msg); + + return gTestSuccess ? 0 : 255; +} |