diff options
author | Snappy Team <no-reply@google.com> | 2020-11-16 15:10:23 +0000 |
---|---|---|
committer | Victor Costan <costan@google.com> | 2020-11-18 23:21:55 +0000 |
commit | e4a6e97b91da8109840b59697a3b77f93ade956c (patch) | |
tree | c78f83bc2559b57c7a9e44aed8f075ee4f7c2382 | |
parent | 719bed0ae21071963ab6e8cc09c51e88212cbcb0 (diff) | |
download | snappy-git-e4a6e97b91da8109840b59697a3b77f93ade956c.tar.gz |
Extend validate benchmarks over all types and also add a medley for validation.
I also made the compression happen only once per benchmark. This way we get a cleaner measurement of #branch-misses using "perf stat". Compression naturally suffers from a large number of branch misses, which were polluting the measurements.
This showed that with the new decompression the branch-miss rate is actually much lower than initially reported — only 0.2% and very stable, i.e., it doesn't really fluctuate with how you execute the benchmarks.
PiperOrigin-RevId: 342628576
-rw-r--r-- | snappy_unittest.cc | 51 |
1 file changed, 36 insertions, 15 deletions
diff --git a/snappy_unittest.cc b/snappy_unittest.cc index d433545..0e207db 100644 --- a/snappy_unittest.cc +++ b/snappy_unittest.cc @@ -1284,27 +1284,33 @@ static void BM_UFlat(int iters, int arg) { } BENCHMARK(BM_UFlat)->DenseRange(0, ARRAYSIZE(files) - 1); -static void BM_UFlatMedley(testing::benchmark::State& state) { - constexpr int kFiles = ARRAYSIZE(files); +struct SourceFiles { + SourceFiles() { + for (int i = 0; i < kFiles; i++) { + std::string contents = + ReadTestDataFile(files[i].filename, files[i].size_limit); + max_size = std::max(max_size, contents.size()); + sizes[i] = contents.size(); + snappy::Compress(contents.data(), contents.size(), &zcontents[i]); + } + } + static constexpr int kFiles = ARRAYSIZE(files); std::string zcontents[kFiles]; size_t sizes[kFiles]; size_t max_size = 0; - for (int i = 0; i < kFiles; i++) { - std::string contents = - ReadTestDataFile(files[i].filename, files[i].size_limit); - max_size = std::max(max_size, contents.size()); - sizes[i] = contents.size(); - snappy::Compress(contents.data(), contents.size(), &zcontents[i]); - } +}; + +static void BM_UFlatMedley(testing::benchmark::State& state) { + static const SourceFiles* const source = new SourceFiles(); - std::vector<char> dst(max_size); + std::vector<char> dst(source->max_size); size_t processed = 0; for (auto s : state) { - for (int i = 0; i < kFiles; i++) { - CHECK(snappy::RawUncompress(zcontents[i].data(), zcontents[i].size(), - dst.data())); - processed += sizes[i]; + for (int i = 0; i < SourceFiles::kFiles; i++) { + CHECK(snappy::RawUncompress(source->zcontents[i].data(), + source->zcontents[i].size(), dst.data())); + processed += source->sizes[i]; } } SetBenchmarkBytesProcessed(processed); @@ -1332,7 +1338,22 @@ static void BM_UValidate(int iters, int arg) { } StopBenchmarkTiming(); } -BENCHMARK(BM_UValidate)->DenseRange(0, 4); +BENCHMARK(BM_UValidate)->DenseRange(0, ARRAYSIZE(files) - 1); + +static void BM_UValidateMedley(testing::benchmark::State& state) { + 
static const SourceFiles* const source = new SourceFiles(); + + size_t processed = 0; + for (auto s : state) { + for (int i = 0; i < SourceFiles::kFiles; i++) { + CHECK(snappy::IsValidCompressedBuffer(source->zcontents[i].data(), + source->zcontents[i].size())); + processed += source->sizes[i]; + } + } + SetBenchmarkBytesProcessed(processed); +} +BENCHMARK(BM_UValidateMedley); static void BM_UIOVec(int iters, int arg) { StopBenchmarkTiming(); |