Commit 0713ac05 authored by Daniel Murphy, committed by Commit Bot

[BlobStorage] Making perf tests smaller & broader

Bug: 782356
Change-Id: I426c455e028fb70fa86eb39ba0d651bddf3cad2f
Reviewed-on: https://chromium-review.googlesource.com/930241
Reviewed-by: Marijn Kruisselbrink <mek@chromium.org>
Reviewed-by: Ned Nguyen <nednguyen@google.com>
Commit-Queue: Daniel Murphy <dmurph@chromium.org>
Cr-Commit-Position: refs/heads/master@{#542602}
parent 961c6c6c
@@ -12,6 +12,7 @@
 #include "base/bind_helpers.h"
 #include "base/callback.h"
 #include "base/callback_helpers.h"
+#include "base/command_line.h"
 #include "base/containers/small_map.h"
 #include "base/files/file_util.h"
 #include "base/guid.h"
@@ -94,6 +95,8 @@ BlobStorageLimits CalculateBlobStorageLimitsImpl(const FilePath& storage_dir,
                limits.desired_max_disk_space / kMegabyte);
   limits.effective_max_disk_space = limits.desired_max_disk_space;
+
+  CHECK(limits.IsValid());
   return limits;
 }
@@ -367,7 +370,19 @@ class BlobMemoryController::FileQuotaAllocationTask
     // Get the file sizes and total size.
     uint64_t total_size =
         GetTotalSizeAndFileSizes(unreserved_file_items, &file_sizes_);
-    DCHECK_LE(total_size, controller_->GetAvailableFileSpaceForBlobs());
+
+    // When we do perf tests that force the file strategy, these often run
+    // before |CalculateBlobStorageLimitsImpl| is complete. The disk isn't
+    // enabled until after this call returns (|file_paging_enabled_| is false)
+    // and |GetAvailableFileSpaceForBlobs()| will thus return 0. So skip this
+    // check when we have a custom file transportation trigger.
+#if DCHECK_IS_ON()
+    base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+    if (LIKELY(
+            !command_line->HasSwitch(kBlobFileTransportByFileTriggerSwitch))) {
+      DCHECK_LE(total_size, controller_->GetAvailableFileSpaceForBlobs());
+    }
+#endif
     allocation_size_ = total_size;
 
     // Check & set our item states.
@@ -559,14 +574,22 @@ BlobMemoryController::Strategy BlobMemoryController::DetermineStrategy(
       preemptive_transported_bytes <= GetAvailableMemoryForBlobs()) {
     return Strategy::NONE_NEEDED;
   }
+  if (UNLIKELY(limits_.override_file_transport_min_size > 0) &&
+      file_paging_enabled_ &&
+      total_transportation_bytes >= limits_.override_file_transport_min_size) {
+    return Strategy::FILE;
+  }
+  if (total_transportation_bytes <= limits_.max_ipc_memory_size)
+    return Strategy::IPC;
   if (file_paging_enabled_ &&
       total_transportation_bytes <= GetAvailableFileSpaceForBlobs() &&
       total_transportation_bytes > limits_.memory_limit_before_paging()) {
     return Strategy::FILE;
   }
-  if (total_transportation_bytes > limits_.max_ipc_memory_size)
-    return Strategy::SHARED_MEMORY;
-  return Strategy::IPC;
+  return Strategy::SHARED_MEMORY;
 }
 
 bool BlobMemoryController::CanReserveQuota(uint64_t size) const {
...
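(Editorial note: the sketch below is not part of the change; it restates the new decision order in DetermineStrategy() with simplified stand-in types so the reordering above is easier to follow. The Limits defaults are placeholders, not the real storage:: values.)

#include <cstdint>

// Editorial sketch of the post-change strategy order; stand-in types only.
enum class Strategy { IPC, SHARED_MEMORY, FILE };

struct Limits {
  uint64_t override_file_transport_min_size = 0;  // 0 means "no override".
  uint64_t max_ipc_memory_size = 250 * 1024;      // Placeholder default.
  uint64_t memory_limit_before_paging = 0;        // Placeholder value.
};

Strategy Determine(uint64_t bytes, const Limits& limits,
                   bool file_paging_enabled, uint64_t available_file_space) {
  // 1. Perf-test override: force FILE once blobs cross the trigger size.
  if (limits.override_file_transport_min_size > 0 && file_paging_enabled &&
      bytes >= limits.override_file_transport_min_size)
    return Strategy::FILE;
  // 2. Small payloads now take the IPC path first.
  if (bytes <= limits.max_ipc_memory_size)
    return Strategy::IPC;
  // 3. Large payloads page to disk when paging is enabled and space exists.
  if (file_paging_enabled && bytes <= available_file_space &&
      bytes > limits.memory_limit_before_paging)
    return Strategy::FILE;
  // 4. Everything else rides over shared memory.
  return Strategy::SHARED_MEMORY;
}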
@@ -4,9 +4,23 @@
 #include "storage/common/blob_storage/blob_storage_constants.h"
 
+#include "base/command_line.h"
 #include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
 
 namespace storage {
+
+namespace {
+
+// Specifies the minimum file size.
+constexpr const char kBlobFileTransportMinFileSizeSwitch[] =
+    "blob-transport-file-min-size";
+// Specifies the maximum file size.
+constexpr const char kBlobFileTransportMaxFileSizeSwitch[] =
+    "blob-transport-file-max-size";
+// Specifies a custom maximum size of the shared memory segments used to
+// transport blobs.
+const char kBlobSharedMemoryTransportMaxSizeSwitch[] =
+    "blob-transport-shared-memory-max-size";
+
+}  // namespace
 
 static_assert(kDefaultIPCMemorySize < kDefaultSharedMemorySize,
               "IPC transport size must be smaller than shared memory size.");
@@ -15,11 +29,55 @@ static_assert(kDefaultMinPageFileSize < kDefaultMaxPageFileSize,
 static_assert(kDefaultMinPageFileSize < kDefaultMaxBlobInMemorySpace,
               "Page file size must be less than in-memory space.");
 
+BlobStorageLimits::BlobStorageLimits() {
+  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+  if (UNLIKELY(
+          command_line->HasSwitch(kBlobFileTransportByFileTriggerSwitch))) {
+    CHECK(base::StringToUint64(command_line->GetSwitchValueASCII(
+                                   kBlobFileTransportByFileTriggerSwitch),
+                               &override_file_transport_min_size))
+        << "Unable to parse "
+        << command_line->GetSwitchValueASCII(
+               kBlobFileTransportByFileTriggerSwitch);
+  }
+  if (UNLIKELY(
+          command_line->HasSwitch(kBlobSharedMemoryTransportMaxSizeSwitch))) {
+    CHECK(base::StringToSizeT(command_line->GetSwitchValueASCII(
+                                  kBlobSharedMemoryTransportMaxSizeSwitch),
+                              &max_shared_memory_size))
+        << "Unable to parse "
+        << command_line->GetSwitchValueASCII(
+               kBlobSharedMemoryTransportMaxSizeSwitch);
+  }
+  if (UNLIKELY(command_line->HasSwitch(kBlobFileTransportMinFileSizeSwitch))) {
+    CHECK(base::StringToUint64(
+        command_line->GetSwitchValueASCII(kBlobFileTransportMinFileSizeSwitch),
+        &min_page_file_size))
+        << "Unable to parse "
+        << command_line->GetSwitchValueASCII(
+               kBlobFileTransportMinFileSizeSwitch);
+  }
+  if (UNLIKELY(command_line->HasSwitch(kBlobFileTransportMaxFileSizeSwitch))) {
+    CHECK(base::StringToUint64(
+        command_line->GetSwitchValueASCII(kBlobFileTransportMaxFileSizeSwitch),
+        &max_file_size))
+        << "Unable to parse "
+        << command_line->GetSwitchValueASCII(
+               kBlobFileTransportMaxFileSizeSwitch);
+  }
+  CHECK(IsValid());
+}
+
+BlobStorageLimits::~BlobStorageLimits() {}
+
+BlobStorageLimits::BlobStorageLimits(const BlobStorageLimits&) = default;
+BlobStorageLimits& BlobStorageLimits::operator=(const BlobStorageLimits&) =
+    default;
+
 bool BlobStorageLimits::IsValid() const {
-  return max_ipc_memory_size < max_shared_memory_size &&
-         max_ipc_memory_size < max_bytes_data_item_size &&
-         min_page_file_size < max_file_size &&
-         min_page_file_size < max_blob_in_memory_space &&
+  return max_ipc_memory_size <= max_bytes_data_item_size &&
+         max_shared_memory_size <= max_bytes_data_item_size &&
+         min_page_file_size <= max_file_size &&
+         min_page_file_size <= max_blob_in_memory_space &&
          effective_max_disk_space <= desired_max_disk_space;
 }
...
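(Editorial note: a minimal usage sketch, assuming a Chromium test target that links //base and has already run base::CommandLine::Init(); the function name is hypothetical. It shows how the new constructor picks the override up from the process command line.)

#include "base/command_line.h"
#include "storage/common/blob_storage/blob_storage_constants.h"

void ForceFileTransportForPerfTest() {
  // Assumes base::CommandLine::Init() has already run in this process.
  base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
  command_line->AppendSwitchASCII("blob-transport-by-file-trigger", "307300");

  // The constructor parses the switches above and CHECK-fails on input it
  // cannot parse or on a combination that violates IsValid().
  storage::BlobStorageLimits limits;
  // limits.override_file_transport_min_size is now 307300.
}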
@@ -29,8 +29,18 @@ constexpr uint64_t kDefaultMinPageFileSize = 5ull * 1024 * 1024;
 const float kDefaultMaxBlobInMemorySpaceUnderPressureRatio = 0.002f;
 #endif
 
+// Specifies the size at which blob data will be transported by file instead of
+// memory. Overrides internal logic and allows perf tests to use the file path.
+constexpr const char kBlobFileTransportByFileTriggerSwitch[] =
+    "blob-transport-by-file-trigger";
+
 // All sizes are in bytes.
 struct STORAGE_COMMON_EXPORT BlobStorageLimits {
+  BlobStorageLimits();
+  ~BlobStorageLimits();
+  BlobStorageLimits(const BlobStorageLimits&);
+  BlobStorageLimits& operator=(const BlobStorageLimits&);
+
   // Returns if the current configuration is valid.
   bool IsValid() const;
@@ -49,7 +59,9 @@ struct STORAGE_COMMON_EXPORT BlobStorageLimits {
   // This is the maximum amount of memory we can send in an IPC.
   size_t max_ipc_memory_size = kDefaultIPCMemorySize;
 
-  // This is the maximum size of a shared memory handle.
+  // This is the maximum size of a shared memory handle. This can be overridden
+  // using the "blob-transport-shared-memory-max-size" switch (see
+  // BlobMemoryController).
   size_t max_shared_memory_size = kDefaultSharedMemorySize;
 
   // This is the maximum size of a bytes BlobDataItem. Only used for mojo
@@ -77,6 +89,11 @@ struct STORAGE_COMMON_EXPORT BlobStorageLimits {
   uint64_t min_page_file_size = kDefaultMinPageFileSize;
   // This is the maximum file size we can create.
   uint64_t max_file_size = kDefaultMaxPageFileSize;
+
+  // This overrides the minimum size for transporting a blob using the file
+  // strategy. This allows perf tests to force file transportation. This is
+  // usually set using the "blob-transport-by-file-trigger" switch (see
+  // BlobMemoryController).
+  uint64_t override_file_transport_min_size = 0ull;
 };
 
 enum class IPCBlobItemRequestStrategy {
...
@@ -6,14 +6,11 @@ These benchmarks exercise storage apis in a real-life usage way (avoiding microb
 This models an offline load of a Google doc. See [this document](https://docs.google.com/document/d/1JC1RgMyxBAjUPSHjm2Bd1KPzcqpPPvxRomKevOkMPm0/edit) for a breakdown of the database and the transactions, along with the traces used to extract this information.
 
-# Blob Build All Then Read Serially
-This benchmark models the creation and reading of a large number of blobs. The blobs are created first, then read one at a time.
-
-# Blob Build All Then Read in Parallel
-This benchmark models the creation and reading of a large number of blobs. The blobs are created first, then read all at once.
-
-# Blob Build And Read Immediately
-This benchmark models the creation and reading of a large number of blobs. Each blob is read immediately after creation.
+# Blob Perf
+This benchmark models the creation and reading of blobs. It has two parts:
+
+1. Create and read blobs synchronously.
+2. Create and read blobs in parallel (asynchronously).
+
+There is a variant of this test for every transportation type (shared memory, files, and IPC).
\ No newline at end of file
<!doctype html>
<title>Blob Perf (File)</title>
<script src="../resources/runner.js"></script>
<script src="resources/shared.js"></script>
<script>
// Note: This test requires setting the command line flags:
// --blob-transport-by-file-trigger=307300
// --blob-transport-file-min-size=2048
// --blob-transport-file-max-size=10240
// to guarantee these blobs are transported using files.
// There should be 31 files created for each blob.
let runnerParams = { size: 301 * 1024, numBlobs: 10 };
let test = {
description:
'Benchmark for creating blobs using File transport then reading both synchronously and in parallel.',
unit: 'ms',
iterationCount: 20,
tracingCategories: 'Blob',
traceEventsToMeasure: [
'BlobRequest::ReadFileItem', 'Registry::RegisterBlob'
],
path: 'resources/blob-perf-runner.html',
params: runnerParams
}
PerfTestRunner.measurePageLoadTimeAfterDoneMessage(test);
</script>
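(Editorial note: a quick sanity check of the "31 files" claim above, using the flag values from the comment; the constant names are illustrative.)

#include <cstdint>

constexpr uint64_t kBlobSize = 301 * 1024;  // 308224 bytes per blob.
constexpr uint64_t kMaxFileSize = 10240;    // --blob-transport-file-max-size.
constexpr uint64_t kFileTrigger = 307300;   // --blob-transport-by-file-trigger.

// 308224 >= 307300, so each blob takes the file path...
static_assert(kBlobSize >= kFileTrigger, "blob must hit the file trigger");
// ...and splits into ceil(308224 / 10240) = 31 page files.
static_assert((kBlobSize + kMaxFileSize - 1) / kMaxFileSize == 31,
              "each blob should be split across 31 files");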
 <!doctype html>
-<title>Blob Build and Read Immediately</title>
+<title>Blob Perf (IPC)</title>
 <script src="../resources/runner.js"></script>
 <script src="resources/shared.js"></script>
 <script>
-let runnerParams = { size: 2, numBlobs: 200 };
+let runnerParams = { size: 1024, numBlobs: 20 };
 let test = {
   description:
-      'Benchmark for creating blobs and then reading them immediately.',
+      'Benchmark for creating blobs using IPC transport then reading both synchronously and in parallel.',
   unit: 'ms',
-  iterationCount: 10,
+  iterationCount: 50,
   tracingCategories: 'Blob',
   traceEventsToMeasure: [
-    'BlobReader::ReadBytesItem', 'Registry::RegisterBlob',
-    'BlobRequest::ReadRawData', 'BlobRequest'
+    'BlobReader::ReadBytesItem', 'Registry::RegisterBlob'
   ],
-  path: 'resources/blob-build-and-read-immediately-runner.html',
+  path: 'resources/blob-perf-runner.html',
   params: runnerParams
 }
 PerfTestRunner.measurePageLoadTimeAfterDoneMessage(test);
...
 <!doctype html>
-<title>Blob Build All Then Read Serially</title>
+<title>Blob Perf (Shared Memory)</title>
 <script src="../resources/runner.js"></script>
 <script src="resources/shared.js"></script>
 <script>
-let runnerParams = { size: 2, numBlobs: 200 };
+// Note: This test requires setting the command line flag
+// '--blob-transport-shared-memory-max-size=30720' to exercise the shared
+// memory data pipe transfer work.
+let runnerParams = { size: 300 * 1024, numBlobs: 10 };
 let test = {
-  description: 'Benchmark for creating blobs and then reading them serially.',
+  description:
+      'Benchmark for creating blobs using Shared Memory transport then reading both synchronously and in parallel.',
   unit: 'ms',
-  iterationCount: 10,
+  iterationCount: 20,
   tracingCategories: 'Blob',
   traceEventsToMeasure: [
-    'BlobReader::ReadBytesItem', 'Registry::RegisterBlob',
-    'BlobRequest::ReadRawData', 'BlobRequest'
+    'BlobReader::ReadBytesItem', 'Registry::RegisterBlob'
   ],
-  path: 'resources/blob-build-all-then-read-serially-runner.html',
+  path: 'resources/blob-perf-runner.html',
   params: runnerParams
 }
 PerfTestRunner.measurePageLoadTimeAfterDoneMessage(test);
...
 <!doctype html>
-<title>Blob Build All Then Read in Parallel</title>
+<title>Blob Perf (IPC)</title>
 <script src="../resources/runner.js"></script>
 <script src="resources/shared.js"></script>
 <script>
-let runnerParams = { size: 2, numBlobs: 200 };
+let runnerParams = { size: 2, numBlobs: 20 };
 let test = {
   description:
-      'Benchmark for creating blobs and then reading them in parallel.',
+      'Benchmark for creating blobs using IPC transport then reading both synchronously and in parallel.',
   unit: 'ms',
-  iterationCount: 10,
+  iterationCount: 50,
   tracingCategories: 'Blob',
   traceEventsToMeasure: [
-    'BlobReader::ReadBytesItem', 'Registry::RegisterBlob',
-    'BlobRequest::ReadRawData', 'BlobRequest'
+    'BlobReader::ReadBytesItem', 'Registry::RegisterBlob'
   ],
-  path: 'resources/blob-build-all-then-read-parallel-runner.html',
+  path: 'resources/blob-perf-runner.html',
   params: runnerParams
 }
 PerfTestRunner.measurePageLoadTimeAfterDoneMessage(test);
...
<!doctype html>
<title>Blob Build All Then Read in Parallel Runner</title>
<script src="resources/blob-shared.js"></script>
<script src="resources/shared.js"></script>
<body></body>
<script>
async function start(testParams) {
logToDocumentBody(`Starting benchmark: ${testParams.numBlobs} blobs of ` +
`size ${testParams.size}`);
let start = Date.now();
await blobCreateAllThenReadInParallel(testParams.numBlobs, testParams.size);
logToDocumentBody('Time: ' + (Date.now() - start));
reportDone();
}
function getParams() {
let testParams = {
size: Number(document.getElementById('blob_size').value),
numBlobs: Number(document.getElementById('num_blobs').value)
};
start(testParams);
}
if (typeof params !== 'undefined')
start(params);
else
showManualInput();
</script>
<!doctype html>
<title>Blob Build and Read Immediately Runner</title>
<script src="resources/blob-shared.js"></script>
<script src="resources/shared.js"></script>
<body></body>
<script>
async function start(testParams) {
logToDocumentBody(`Starting benchmark: ${testParams.numBlobs} blobs of ` +
`size ${testParams.size}`);
let start = Date.now();
await blobCreateAndImmediatelyRead(testParams.numBlobs, testParams.size);
logToDocumentBody('Time: ' + (Date.now() - start));
reportDone();
}
function getParams() {
let testParams = {
size: Number(document.getElementById('blob_size').value),
numBlobs: Number(document.getElementById('num_blobs').value)
};
start(testParams);
}
if (typeof params !== 'undefined')
start(params);
else
showManualInput();
</script>
 <!doctype html>
-<title>Blob Build All Then Read Serially Runner</title>
+<title>Blob Perf Runner</title>
 <script src="resources/blob-shared.js"></script>
 <script src="resources/shared.js"></script>
 <body></body>
@@ -8,7 +8,12 @@
   logToDocumentBody(`Starting benchmark: ${testParams.numBlobs} blobs of ` +
                     `size ${testParams.size}`);
   let start = Date.now();
-  await blobCreateAllThenReadSerially(testParams.numBlobs, testParams.size);
+  let numReadImmediately = testParams.numBlobs / 2;
+  let numReadParallel = testParams.numBlobs / 2;
+  logToDocumentBody('Creating blobs and reading immediately, synchronously.');
+  await createBlobAndImmediatelyRead(numReadImmediately, testParams.size);
+  logToDocumentBody('Creating blobs and reading immediately, asynchronously.');
+  await createBlobsAndReadInParallel(numReadParallel, testParams.size);
   logToDocumentBody('Time: ' + (Date.now() - start));
   reportDone();
 }
...
-let blobs = [];
 let totalBytes = 0;
 let errors = [];
@@ -27,14 +26,14 @@ function recordError(message) {
 function createBlob(size) {
   let blob = new Blob([new Uint8Array(size)],
                       {type: 'application/octet-string'});
-  blobs.push(blob);
   totalBytes += size;
+  return blob;
 }
 
 function readBlobAsync(blob) {
   const reader = new FileReader();
   return new Promise(resolve => {
-    reader.onerror = reportError;
+    reader.onerror = recordError;
     reader.onloadend = e => { resolve(reader); };
     reader.readAsArrayBuffer(blob);
   });
@@ -50,56 +49,20 @@ async function createAndRead(size) {
     recordError('Error reading blob: Blob size does not match.');
 }
 
-async function readBlobsSerially() {
-  if (blobs.length == 0)
-    return;
-
-  let totalReadSize = 0;
-  for (let i = 0; i < blobs.length; i++) {
-    const reader = await readBlobAsync(blobs[i]);
-    if (reader.error) {
-      recordError(`Error reading blob ${i}: ${reader.error}`);
-      return;
-    }
-    totalReadSize += reader.result.byteLength;
-    if (i == blobs.length - 1 && totalReadSize != totalBytes) {
-      recordError('Error reading blob: Total blob sizes do not match, ' +
-                  `${totalReadSize} vs ${totalBytes}`);
-    }
-  }
-}
-
-function readBlobsInParallel(callback) {
-  if (blobs.length == 0)
-    return;
-
-  let totalReadSize = 0;
-  let numRead = 0;
-  let getReader = e => {
-    const reader = new FileReader();
-    reader.onerror = reportError;
-    reader.onloadend = () => {
-      if (reader.error) {
-        recordError(`Error reading blob: ${reader.error}`);
-      } else {
-        totalReadSize += reader.result.byteLength;
-        if (++numRead == blobs.length) {
-          if (totalReadSize != totalBytes) {
-            recordError('Error reading blob: Total blob sizes do not match, ' +
-                        `${totalReadSize} vs ${totalBytes}`);
-          }
-          callback();
-        }
-      }
-    };
-    return reader;
-  }
-  blobs.map(blob => getReader().readAsArrayBuffer(blob));
-}
+let readBlobAsArrayBuffer = (blob, callback) => {
+  const reader = new FileReader();
+  reader.onerror = recordError;
+  reader.onloadend = () => {
+    if (reader.error) {
+      recordError(`Error reading blob: ${reader.error}`);
+    } else {
+      callback(reader.result);
+    }
+  };
+  reader.readAsArrayBuffer(blob);
+}
 
-async function blobCreateAndImmediatelyRead(numBlobs, size) {
+async function createBlobAndImmediatelyRead(numBlobs, size) {
   let start = performance.now();
   errors = [];
@@ -112,34 +75,34 @@ async function blobCreateAndImmediatelyRead(numBlobs, size) {
   logToDocumentBody('Errors on page: ' + errors.join(', '));
 }
 
-async function blobCreateAllThenReadSerially(numBlobs, size) {
+async function createBlobsAndReadInParallel(numBlobs, size) {
   errors = [];
-  logToDocumentBody(`Creating ${numBlobs} blobs...`);
-  for (let i = 0; i < numBlobs; i++)
-    createBlob(size);
-  logToDocumentBody('Finished creating.');
-
-  logToDocumentBody(`Reading ${numBlobs} blobs serially...`);
-  await readBlobsSerially();
-  logToDocumentBody('Finished reading.');
-
-  if (errors.length)
-    logToDocumentBody('Errors on page: ' + errors.join(', '));
-}
-
-async function blobCreateAllThenReadInParallel(numBlobs, size) {
-  errors = [];
-  logToDocumentBody(`Creating ${numBlobs} blobs...`);
-  for (let i = 0; i < numBlobs; i++)
-    createBlob(size);
-  logToDocumentBody('Finished creating.');
-
-  logToDocumentBody(`Reading ${numBlobs} blobs in parallel...`);
-  await new Promise(readBlobsInParallel);
-  logToDocumentBody('Finished reading.');
-
-  if (errors.length)
-    logToDocumentBody('Errors on page: ' + errors.join(', '));
+  logToDocumentBody(`Creating and reading ${numBlobs} blobs...`);
+  await new Promise(resolve => {
+    let totalSizeRead = 0;
+    let blobsRead = 0;
+    let blobReadCallback = array => {
+      blobsRead += 1;
+      totalSizeRead += array.byteLength;
+      if (blobsRead == numBlobs) {
+        if (totalSizeRead != numBlobs * size) {
+          recordError(`Error reading blob, total sizes don't match ${totalSizeRead} vs ${numBlobs * size}`);
+        }
+        logToDocumentBody('Done reading all blobs.');
+        resolve();
+      }
+    }
+
+    for (let i = 0; i < numBlobs; i++) {
+      let blob = createBlob(size);
+      readBlobAsArrayBuffer(blob, blobReadCallback);
+    }
+  });
+
+  if (errors.length) {
+    let errorStr = errors.join(', ');
+    logToDocumentBody('Errors on page: ' + errorStr);
+    reportError(errorStr);
+  }
 }
...
@@ -416,6 +416,15 @@ class BlinkPerfOWPStorage(_BlinkPerfBenchmark):
   tag = 'owp_storage'
   subdir = 'OWPStorage'
 
+  # This ensures that all blobs >= ~300KB (307300 bytes) will be transported
+  # by files.
+  def SetExtraBrowserOptions(self, options):
+    options.AppendExtraBrowserArgs([
+        '--blob-transport-by-file-trigger=307300',
+        '--blob-transport-file-min-size=2048',
+        '--blob-transport-file-max-size=10240',
+        '--blob-transport-shared-memory-max-size=30720'
+    ])
 
 @benchmark.Owner(emails=['wangxianzhu@chromium.org'])
 class BlinkPerfPaint(_BlinkPerfBenchmark):
...
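(Editorial note: the test blob sizes straddle the 307300-byte trigger on purpose. The illustrative asserts below, with sizes taken from the tests above, make that explicit: the 300 KiB shared-memory blobs stay just below the trigger while the 301 KiB file-test blobs cross it, so each variant exercises the transport it names.)

#include <cstdint>

constexpr uint64_t kFileTrigger = 307300;           // --blob-transport-by-file-trigger.
constexpr uint64_t kShmTestBlobSize = 300 * 1024;   // 307200: stays on shared memory.
constexpr uint64_t kFileTestBlobSize = 301 * 1024;  // 308224: forced onto files.

static_assert(kShmTestBlobSize < kFileTrigger,
              "shared-memory test must stay below the file trigger");
static_assert(kFileTestBlobSize >= kFileTrigger,
              "file test must cross the file trigger");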