diff options
| author | Russell Belfer <rb@github.com> | 2014-01-30 09:59:15 -0800 |
|---|---|---|
| committer | Russell Belfer <rb@github.com> | 2014-01-30 10:00:00 -0800 |
| commit | 8606f33beadf5df48b36a64359c99d50aeb0f496 (patch) | |
| tree | c8369b522816069a94ac4394fd287f05339ab15c /tests/core | |
| parent | d9b04d78a396f771eada206b726ad6e8259a19b8 (diff) | |
| download | libgit2-8606f33beadf5df48b36a64359c99d50aeb0f496.tar.gz | |
Expand zstream tests and fix off-by-one error
Diffstat (limited to 'tests/core')
| -rw-r--r-- | tests/core/zstream.c | 79 |
1 file changed, 62 insertions, 17 deletions
diff --git a/tests/core/zstream.c b/tests/core/zstream.c index 63ff8c93a..7ba9424ba 100644 --- a/tests/core/zstream.c +++ b/tests/core/zstream.c @@ -68,31 +68,76 @@ void test_core_zstream__buffer(void) #define BIG_STRING_PART "Big Data IS Big - Long Data IS Long - We need a buffer larger than 1024 x 1024 to make sure we trigger chunked compression - Big Big Data IS Bigger than Big - Long Long Data IS Longer than Long" -void test_core_zstream__big_data(void) +static void compress_input_various_ways(git_buf *input) { - git_buf in = GIT_BUF_INIT; - git_buf out = GIT_BUF_INIT; - size_t scan; + git_buf out1 = GIT_BUF_INIT, out2 = GIT_BUF_INIT; + size_t i, fixed_size = max(input->size / 2, 256); + char *fixed = git__malloc(fixed_size); + cl_assert(fixed); - /* make a big string that's easy to compress */ - while (in.size < 1024 * 1024) - cl_git_pass(git_buf_put(&in, BIG_STRING_PART, strlen(BIG_STRING_PART))); + /* compress with deflatebuf */ - cl_git_pass(git_zstream_deflatebuf(&out, in.ptr, in.size)); - assert_zlib_equal(in.ptr, in.size, out.ptr, out.size); + cl_git_pass(git_zstream_deflatebuf(&out1, input->ptr, input->size)); + assert_zlib_equal(input->ptr, input->size, out1.ptr, out1.size); - git_buf_free(&out); + /* compress with various fixed size buffer (accumulating the output) */ - /* make a big string that's hard to compress */ + for (i = 0; i < 3; ++i) { + git_zstream zs = GIT_ZSTREAM_INIT; + size_t use_fixed_size; - srand(0xabad1dea); - for (scan = 0; scan < in.size; ++scan) - in.ptr[scan] = (char)rand(); + switch (i) { + case 0: use_fixed_size = 256; break; + case 1: use_fixed_size = fixed_size / 2; break; + case 2: use_fixed_size = fixed_size; break; + } + cl_assert(use_fixed_size <= fixed_size); - cl_git_pass(git_zstream_deflatebuf(&out, in.ptr, in.size)); - assert_zlib_equal(in.ptr, in.size, out.ptr, out.size); + cl_git_pass(git_zstream_init(&zs)); + cl_git_pass(git_zstream_set_input(&zs, input->ptr, input->size)); - git_buf_free(&out); + while (!git_zstream_done(&zs)) { + size_t written = use_fixed_size; + cl_git_pass(git_zstream_get_output(fixed, &written, &zs)); + cl_git_pass(git_buf_put(&out2, fixed, written)); + } + + git_zstream_free(&zs); + assert_zlib_equal(input->ptr, input->size, out2.ptr, out2.size); + + /* did both approaches give the same data? */ + cl_assert_equal_sz(out1.size, out2.size); + cl_assert(!memcmp(out1.ptr, out2.ptr, out1.size)); + + git_buf_free(&out2); + } + + git_buf_free(&out1); + git__free(fixed); +} + +void test_core_zstream__big_data(void) +{ + git_buf in = GIT_BUF_INIT; + size_t scan, target; + + for (target = 1024; target <= 1024 * 1024 * 4; target *= 8) { + + /* make a big string that's easy to compress */ + git_buf_clear(&in); + while (in.size < target) + cl_git_pass( + git_buf_put(&in, BIG_STRING_PART, strlen(BIG_STRING_PART))); + + compress_input_various_ways(&in); + + /* make a big string that's hard to compress */ + srand(0xabad1dea); + for (scan = 0; scan < in.size; ++scan) + in.ptr[scan] = (char)rand(); + + compress_input_various_ways(&in); + } git_buf_free(&in); } |
