summaryrefslogtreecommitdiff
path: root/snappy-test.cc
diff options
context:
space:
mode:
authorVictor Costan <costan@google.com>2020-12-14 21:26:01 +0000
committerVictor Costan <costan@google.com>2020-12-14 21:27:31 +0000
commit549685a5987203ab4e94ce6ba964d3cb716951e1 (patch)
tree01ea3876ffc6aafc773de607695a65e3ae013298 /snappy-test.cc
parent11f9a77a2f3bd3923b6118b0aba741f628373a8b (diff)
downloadsnappy-git-549685a5987203ab4e94ce6ba964d3cb716951e1.tar.gz
Remove custom testing and benchmarking code.
Snappy includes a testing framework, which implements a subset of the Google Test API, and can be used when Google Test is not available. Snappy also includes a micro-benchmark framework, which implements an old version of the Google Benchmark API. This CL replaces the custom test and micro-benchmark frameworks with google/googletest and google/benchmark. The code is vendored in third_party/ via git submodules. The setup is similar to google/crc32c and google/leveldb. This CL also updates the benchmarking code to the modern Google Benchmark API. Benchmark results are expected to be more precise, as the old framework ran each benchmark with a fixed number of iterations, whereas Google Benchmark keeps iterating until the noise is low. PiperOrigin-RevId: 347456142
Diffstat (limited to 'snappy-test.cc')
-rw-r--r--snappy-test.cc185
1 file changed, 0 insertions, 185 deletions
diff --git a/snappy-test.cc b/snappy-test.cc
index e7762a3..4f6a2bf 100644
--- a/snappy-test.cc
+++ b/snappy-test.cc
@@ -76,191 +76,6 @@ std::string StrFormat(const char* format, ...) {
return buf;
}
-bool benchmark_running = false;
-int64_t benchmark_real_time_us = 0;
-int64_t benchmark_cpu_time_us = 0;
-std::string* benchmark_label = nullptr;
-int64_t benchmark_bytes_processed = 0;
-
-void ResetBenchmarkTiming() {
- benchmark_real_time_us = 0;
- benchmark_cpu_time_us = 0;
-}
-
-#ifdef WIN32
-LARGE_INTEGER benchmark_start_real;
-FILETIME benchmark_start_cpu;
-#else // WIN32
-struct timeval benchmark_start_real;
-struct rusage benchmark_start_cpu;
-#endif // WIN32
-
-void StartBenchmarkTiming() {
-#ifdef WIN32
- QueryPerformanceCounter(&benchmark_start_real);
- FILETIME dummy;
- CHECK(GetProcessTimes(
- GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_start_cpu));
-#else
- gettimeofday(&benchmark_start_real, NULL);
- if (getrusage(RUSAGE_SELF, &benchmark_start_cpu) == -1) {
- std::perror("getrusage(RUSAGE_SELF)");
- std::exit(1);
- }
-#endif
- benchmark_running = true;
-}
-
-void StopBenchmarkTiming() {
- if (!benchmark_running) {
- return;
- }
-
-#ifdef WIN32
- LARGE_INTEGER benchmark_stop_real;
- LARGE_INTEGER benchmark_frequency;
- QueryPerformanceCounter(&benchmark_stop_real);
- QueryPerformanceFrequency(&benchmark_frequency);
-
- double elapsed_real = static_cast<double>(
- benchmark_stop_real.QuadPart - benchmark_start_real.QuadPart) /
- benchmark_frequency.QuadPart;
- benchmark_real_time_us += elapsed_real * 1e6 + 0.5;
-
- FILETIME benchmark_stop_cpu, dummy;
- CHECK(GetProcessTimes(
- GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_stop_cpu));
-
- ULARGE_INTEGER start_ulargeint;
- start_ulargeint.LowPart = benchmark_start_cpu.dwLowDateTime;
- start_ulargeint.HighPart = benchmark_start_cpu.dwHighDateTime;
-
- ULARGE_INTEGER stop_ulargeint;
- stop_ulargeint.LowPart = benchmark_stop_cpu.dwLowDateTime;
- stop_ulargeint.HighPart = benchmark_stop_cpu.dwHighDateTime;
-
- benchmark_cpu_time_us +=
- (stop_ulargeint.QuadPart - start_ulargeint.QuadPart + 5) / 10;
-#else // WIN32
- struct timeval benchmark_stop_real;
- gettimeofday(&benchmark_stop_real, NULL);
- benchmark_real_time_us +=
- 1000000 * (benchmark_stop_real.tv_sec - benchmark_start_real.tv_sec);
- benchmark_real_time_us +=
- (benchmark_stop_real.tv_usec - benchmark_start_real.tv_usec);
-
- struct rusage benchmark_stop_cpu;
- if (getrusage(RUSAGE_SELF, &benchmark_stop_cpu) == -1) {
- std::perror("getrusage(RUSAGE_SELF)");
- std::exit(1);
- }
- benchmark_cpu_time_us += 1000000 * (benchmark_stop_cpu.ru_utime.tv_sec -
- benchmark_start_cpu.ru_utime.tv_sec);
- benchmark_cpu_time_us += (benchmark_stop_cpu.ru_utime.tv_usec -
- benchmark_start_cpu.ru_utime.tv_usec);
-#endif // WIN32
-
- benchmark_running = false;
-}
-
-void SetBenchmarkLabel(const std::string& str) {
- if (benchmark_label) {
- delete benchmark_label;
- }
- benchmark_label = new std::string(str);
-}
-
-void SetBenchmarkBytesProcessed(int64_t bytes) {
- benchmark_bytes_processed = bytes;
-}
-
-struct BenchmarkRun {
- int64_t real_time_us;
- int64_t cpu_time_us;
-};
-
-struct BenchmarkCompareCPUTime {
- bool operator() (const BenchmarkRun& a, const BenchmarkRun& b) const {
- return a.cpu_time_us < b.cpu_time_us;
- }
-};
-
-void Benchmark::Run() {
- for (int test_case_num = start_; test_case_num <= stop_; ++test_case_num) {
- // Run a few iterations first to find out approximately how fast
- // the benchmark is.
- const int kCalibrateIterations = 100;
- ResetBenchmarkTiming();
- StartBenchmarkTiming();
- (*function_)(kCalibrateIterations, test_case_num);
- StopBenchmarkTiming();
-
- // Let each test case run for about 200ms, but at least as many
- // as we used to calibrate.
- // Run five times and pick the median.
- const int kNumRuns = 5;
- const int kMedianPos = kNumRuns / 2;
- int num_iterations = 0;
- if (benchmark_real_time_us > 0) {
- num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
- }
- num_iterations = std::max(num_iterations, kCalibrateIterations);
- BenchmarkRun benchmark_runs[kNumRuns];
-
- for (int run = 0; run < kNumRuns; ++run) {
- ResetBenchmarkTiming();
- StartBenchmarkTiming();
- (*function_)(num_iterations, test_case_num);
- StopBenchmarkTiming();
-
- benchmark_runs[run].real_time_us = benchmark_real_time_us;
- benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
- }
-
- std::string heading = StrFormat("%s/%d", name_.c_str(), test_case_num);
- std::string human_readable_speed;
-
- std::nth_element(benchmark_runs,
- benchmark_runs + kMedianPos,
- benchmark_runs + kNumRuns,
- BenchmarkCompareCPUTime());
- int64_t real_time_us = benchmark_runs[kMedianPos].real_time_us;
- int64_t cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
- if (cpu_time_us <= 0) {
- human_readable_speed = "?";
- } else {
- int64_t bytes_per_second =
- benchmark_bytes_processed * 1000000 / cpu_time_us;
- if (bytes_per_second < 1024) {
- human_readable_speed =
- StrFormat("%dB/s", static_cast<int>(bytes_per_second));
- } else if (bytes_per_second < 1024 * 1024) {
- human_readable_speed = StrFormat(
- "%.1fkB/s", bytes_per_second / 1024.0f);
- } else if (bytes_per_second < 1024 * 1024 * 1024) {
- human_readable_speed = StrFormat(
- "%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
- } else {
- human_readable_speed = StrFormat(
- "%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
- }
- }
-
- std::fprintf(stderr,
-#ifdef WIN32
- "%-18s %10I64d %10I64d %10d %s %s\n",
-#else
- "%-18s %10lld %10lld %10d %s %s\n",
-#endif
- heading.c_str(),
- static_cast<long long>(real_time_us * 1000 / num_iterations),
- static_cast<long long>(cpu_time_us * 1000 / num_iterations),
- num_iterations,
- human_readable_speed.c_str(),
- benchmark_label->c_str());
- }
-}
-
#ifdef HAVE_LIBZ
ZLib::ZLib()