Commit 5d61d204 authored by Benoît Lizé, committed by Commit Bot

tools/android: Improve compression_benchmark.

Instead of compressing only the start of the data to report compression
ratios for smaller chunk sizes, compress all the data chunk by chunk.

This is meant to accurately represent the impact of larger block sizes on
compression ratios, highlighting the effect of compressing whole scripts
rather than individual pages.
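For illustration, a minimal standalone sketch of the per-chunk ratio
measurement (hypothetical helper, not part of this patch; it calls zlib's
compress2() directly instead of the compression::GzipCompress wrapper the
tool uses, so the exact ratios differ slightly from the gzip numbers below):

// Sketch: compress |contents| in fixed-size chunks and report the
// aggregate compression ratio. Build with -lz.
#include <zlib.h>

#include <cstddef>
#include <string>
#include <vector>

double ChunkedCompressionRatio(const std::string& contents,
                               size_t chunk_size) {
  // As in the patch, any trailing partial chunk is ignored.
  size_t chunk_count = contents.size() / chunk_size;
  size_t compressed_size = 0;
  std::vector<unsigned char> buffer(compressBound(chunk_size));
  for (size_t i = 0; i < chunk_count; ++i) {
    uLongf compressed_length = buffer.size();
    int result = compress2(
        buffer.data(), &compressed_length,
        reinterpret_cast<const unsigned char*>(contents.data()) +
            i * chunk_size,
        chunk_size, Z_DEFAULT_COMPRESSION);
    if (result != Z_OK)
      return 0.;
    compressed_size += compressed_length;
  }
  return static_cast<double>(chunk_count * chunk_size) / compressed_size;
}

The patch below does the same thing via CompressChunks(), with the
compressed chunk sizes summed in BenchmarkCompression().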

On Pixel (1st generation):

Gzip
 Size = 4096
 Compression
  Compression ratio = 2.2817
  Throughput = 21.2864MB/s
  Latency (size = 4096) = 192.423us
4096,21.2864,192.423
 Decompression
  Throughput = 143.899MB/s
  Latency (size = 4096) = 28.4645us
4096,143.899,28.4645
 Size = 8192
 Compression
  Compression ratio = 2.44109
  Throughput = 22.9597MB/s
  Latency (size = 8192) = 356.799us
8192,22.9597,356.799
 Decompression
  Throughput = 175.248MB/s
  Latency (size = 8192) = 46.7452us
8192,175.248,46.7452
 Size = 16384
 Compression
  Compression ratio = 2.56997
  Throughput = 22.1722MB/s
  Latency (size = 16384) = 738.942us
16384,22.1722,738.942
 Decompression
  Throughput = 206.567MB/s
  Latency (size = 16384) = 79.3158us
16384,206.567,79.3158
 Size = 32768
 Compression
  Compression ratio = 2.67076
  Throughput = 19.6633MB/s
  Latency (size = 32768) = 1666.46us
32768,19.6633,1666.46
 Decompression
  Throughput = 229.605MB/s
  Latency (size = 32768) = 142.715us
32768,229.605,142.715
 Size = 65536
 Compression
  Compression ratio = 2.74245
  Throughput = 17.4527MB/s
  Latency (size = 65536) = 3755.06us
65536,17.4527,3755.06
[...]
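To read these numbers: the benchmark computes throughput as total bytes
divided by elapsed microseconds (hence MB/s) and latency as elapsed time
divided by the number of chunks, so throughput is simply chunk size divided
by per-chunk latency. For the 4096-byte gzip row above:

  4096 B / 192.423 us ≈ 21.29 MB/s (the reported 21.2864 MB/s)

The bare CSV lines repeat the same three fields: chunk size, throughput
(MB/s), latency (us).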

Bug: 907489
Change-Id: I71654f91980ad10f1b43fcb774bfb3808bffdaf4
Reviewed-on: https://chromium-review.googlesource.com/c/1352175
Reviewed-by: Egor Pasko <pasko@chromium.org>
Commit-Queue: Benoit L <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#612198}
parent c16433ac
@@ -17,63 +17,75 @@
 namespace {
-void LogThroughputAndLatency(size_t size,
-                             int repeats,
+void LogThroughputAndLatency(size_t chunk_size,
+                             size_t chunk_count,
                              base::TimeTicks tick,
                              base::TimeTicks tock) {
-  size_t total_size = size * repeats;
+  size_t total_size = chunk_size * chunk_count;
   double elapsed_us = (tock - tick).InMicrosecondsF();
   double throughput = total_size / elapsed_us;
-  double latency_us = elapsed_us / repeats;
+  double latency_us = elapsed_us / chunk_count;
   LOG(INFO) << "  Throughput = " << throughput << "MB/s";
-  LOG(INFO) << "  Latency (size = " << size << ") = " << latency_us << "us";
-  LOG(INFO) << size << "," << throughput << "," << latency_us;
+  LOG(INFO) << "  Latency (size = " << chunk_size << ") = " << latency_us
+            << "us";
+  LOG(INFO) << "AS_CSV=" << chunk_size << "," << throughput << ","
+            << latency_us;
 }
 
+void CompressChunks(const std::string& contents,
+                    size_t chunk_size,
+                    bool snappy,
+                    std::vector<std::string>* compressed_chunks) {
+  CHECK(compressed_chunks);
+  size_t chunk_count = contents.size() / chunk_size;
+  for (size_t i = 0; i < chunk_count; ++i) {
+    std::string compressed;
+    base::StringPiece input(contents.c_str() + i * chunk_size, chunk_size);
+    if (snappy)
+      CHECK(snappy::Compress(input.data(), input.size(), &compressed));
+    else
+      CHECK(compression::GzipCompress(input, &compressed));
+    compressed_chunks->push_back(compressed);
+  }
+}
+
 void BenchmarkDecompression(const std::string& contents,
-                            int repeats,
+                            size_t chunk_size,
                             bool snappy) {
-  std::string compressed;
-  if (snappy) {
-    snappy::Compress(contents.c_str(), contents.size(), &compressed);
-  } else {
-    CHECK(compression::GzipCompress(contents, &compressed));
-  }
+  std::vector<std::string> compressed_chunks;
+  CompressChunks(contents, chunk_size, snappy, &compressed_chunks);
   auto tick = base::TimeTicks::Now();
-  for (int i = 0; i < repeats; ++i) {
+  for (const auto& chunk : compressed_chunks) {
     std::string uncompressed;
     if (snappy) {
-      snappy::Uncompress(compressed.c_str(), compressed.size(), &uncompressed);
+      snappy::Uncompress(chunk.c_str(), chunk.size(), &uncompressed);
     } else {
-      CHECK(compression::GzipUncompress(compressed, &uncompressed));
+      CHECK(compression::GzipUncompress(chunk, &uncompressed));
     }
   }
   auto tock = base::TimeTicks::Now();
-  LogThroughputAndLatency(contents.size(), repeats, tick, tock);
+  LogThroughputAndLatency(chunk_size, compressed_chunks.size(), tick, tock);
 }
 
 void BenchmarkCompression(const std::string& contents,
-                          int repeats,
+                          size_t chunk_size,
                           bool snappy) {
-  size_t compressed_size = 0;
   auto tick = base::TimeTicks::Now();
-  for (int i = 0; i < repeats; ++i) {
-    std::string compressed;
-    if (snappy) {
-      compressed_size =
-          snappy::Compress(contents.c_str(), contents.size(), &compressed);
-    } else {
-      CHECK(compression::GzipCompress(contents, &compressed));
-      compressed_size = compressed.size();
-    }
-  }
+  std::vector<std::string> compressed_chunks;
+  CompressChunks(contents, chunk_size, snappy, &compressed_chunks);
   auto tock = base::TimeTicks::Now();
+  size_t compressed_size = 0;
+  for (const auto& compressed_chunk : compressed_chunks)
+    compressed_size += compressed_chunk.size();
   double ratio = contents.size() / static_cast<double>(compressed_size);
   LOG(INFO) << "  Compression ratio = " << ratio;
-  LogThroughputAndLatency(contents.size(), repeats, tick, tock);
+  LogThroughputAndLatency(chunk_size, compressed_chunks.size(), tick, tock);
 }
 
 }  // namespace
@@ -90,18 +102,24 @@ int main(int argc, char** argv) {
   std::string contents;
   CHECK(base::ReadFileToString(path, &contents));
 
+  // Make sure we have at least 40MiB.
   constexpr size_t kPageSize = 1 << 12;
+  constexpr size_t target_size = 40 * 1024 * 1024;
+  std::string repeated_contents;
+  size_t repeats = target_size / contents.size() + 1;
+  repeated_contents.reserve(repeats * contents.size());
+  for (size_t i = 0; i < repeats; ++i)
+    repeated_contents.append(contents);
+
   for (bool use_snappy : {false, true}) {
     LOG(INFO) << "\n\n\n\n" << (use_snappy ? "Snappy" : "Gzip");
-    for (size_t size = kPageSize; size < contents.size() * 2; size *= 2) {
-      size_t actual_size = std::min(contents.size(), size);
-      std::string data = contents.substr(0, actual_size);
-      LOG(INFO) << "Size = " << actual_size;
+    for (size_t size = kPageSize; size < contents.size(); size *= 2) {
+      LOG(INFO) << "Size = " << size;
       LOG(INFO) << "Compression";
-      BenchmarkCompression(data, (10 * 1024 * kPageSize) / actual_size,
-                           use_snappy);  // 40MiB.
+      BenchmarkCompression(repeated_contents, size, use_snappy);
       LOG(INFO) << "Decompression";
-      BenchmarkDecompression(data, 100, use_snappy);
+      BenchmarkDecompression(repeated_contents, size, use_snappy);
     }
   }
   return 0;