Commit d012c58d authored by Carlos Knippschild, committed by Commit Bot

Limitless Prefetching: various small fixes and improvements

This CL includes a group of minor fixes and improvements that will ease
the testing of Prefetching with limitless mode enabled:
* Fixes comments in PrefetchBackgroundTaskScheduler.
* Improves handling of invalid URLs and text output in
  OfflineInternalsUIMessageHandler.
* Improves debug logging in MetricsFinalizationTask.
* Uses a larger bundle size limit for GeneratePageBundleRequest in
  PrefetchNetworkRequestFactoryImpl when limitless mode is enabled
  (see the sketch after this list).
* Completes a TODO to make a method private in SuggestedArticlesObserver.
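
For quick orientation, the bundle-size change amounts to picking between
the existing 20 MB limit and the new 200 MB limitless limit right before
the request is built. Below is a minimal standalone C++ sketch, not the
Chromium code itself: the feature check is stubbed here, whereas in the
actual change it comes from offline_page_feature.h (see the
PrefetchNetworkRequestFactoryImpl hunk further down).

  // Standalone sketch of the new bundle-size selection. The stubbed
  // function below stands in for offline_pages::IsLimitlessPrefetchingEnabled().
  #include <iostream>

  namespace {
  constexpr int kMaxBundleSizeBytes = 20 * 1024 * 1024;               // 20 MB
  constexpr int kMaxBundleSizeForLimitlessBytes = 200 * 1024 * 1024;  // 200 MB

  // Stub for the real feature flag check declared in offline_page_feature.h.
  bool IsLimitlessPrefetchingEnabled() {
    return true;  // Pretend limitless mode is on for this demo.
  }
  }  // namespace

  int main() {
    // Same selection the request factory now performs before building a
    // GeneratePageBundleRequest.
    int max_bundle_size = IsLimitlessPrefetchingEnabled()
                              ? kMaxBundleSizeForLimitlessBytes
                              : kMaxBundleSizeBytes;
    std::cout << "max_bundle_size = " << max_bundle_size << " bytes\n";
    return 0;
  }

Compiled and run on its own, the sketch just prints the limit that would
be passed to GeneratePageBundleRequest.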

Bug: 793109
Cq-Include-Trybots: master.tryserver.chromium.linux:closure_compilation
Change-Id: Ic4c1931e6c8ab8f5bf2fdd8cfec8075f30a9b6cd
Reviewed-on: https://chromium-review.googlesource.com/869373
Commit-Queue: Carlos Knippschild <carlosk@chromium.org>
Reviewed-by: Tommy Li <tommycli@chromium.org>
Reviewed-by: Dmitry Titov <dimich@chromium.org>
Cr-Commit-Position: refs/heads/master@{#534813}
parent 4b8d90d6
@@ -24,8 +24,7 @@ public class PrefetchBackgroundTaskScheduler {
     /**
      * Schedules the default 'NWake' task for the prefetching service. This task will normally be
-     * scheduled on a good network type. But if limitless prefetching is enabled delays are reduced
-     * and network restrictions are loosen.
+     * scheduled on a good network type.
      * TODO(dewittj): Handle skipping work if the battery percentage is too low.
      */
     @CalledByNative
@@ -34,10 +33,8 @@ public class PrefetchBackgroundTaskScheduler {
     }
 
     /**
-     * Schedules the default 'NWake' task for the prefetching service. This task will normally be
-     * scheduled on a good network type. But if limitless prefetching is enabled delays are reduced
-     * and network restrictions are loosen.
-     * TODO(dewittj): Handle skipping work if the battery percentage is too low.
+     * Schedules the limitless version of the 'NWake' task for the prefetching service, with
+     * reduced delays and no network restrictions (device needs only to be online).
      */
     @CalledByNative
     public static void scheduleTaskLimitless(int additionalDelaySeconds) {
...
@@ -58,24 +58,22 @@
         <table class="stored-pages-table">
           <thead>
             <tr>
-              <th>&nbsp;</th>
+              <th>#</th>
+              <th></th>
               <th>URL</th>
               <th>Namespace</th>
               <th>Size (Kb)</th>
-              <th>Expired</th>
-              <th>Request Origin</th>
             </tr>
           </thead>
           <tbody id="stored-pages"> </tbody>
         </table>
         <template id="stored-pages-table-row">
           <tr>
+            <td></td>
             <td><input type="checkbox" name="stored"></td>
             <td><a></a></td>
             <td></td>
             <td></td>
-            <td></td>
-            <td></td>
           </tr>
         </template>
         <div id="page-actions-info" class="dump"></div>
...
@@ -35,18 +35,27 @@ cr.define('offlineInternals', function() {
     var template = $('stored-pages-table-row');
     var td = template.content.querySelectorAll('td');
-    for (let page of pages) {
-      var checkbox = td[0].querySelector('input');
+    for (let pageIndex = 0; pageIndex < pages.length; pageIndex++) {
+      var page = pages[pageIndex];
+      td[0].textContent = pageIndex;
+
+      var checkbox = td[1].querySelector('input');
       checkbox.setAttribute('value', page.id);
-      var link = td[1].querySelector('a');
+      var link = td[2].querySelector('a');
       link.setAttribute('href', page.onlineUrl);
-      link.textContent = page.onlineUrl;
+      var maxUrlCharsPerLine = 50;
+      if (page.onlineUrl.length > maxUrlCharsPerLine) {
+        link.textContent = '';
+        for (let i = 0; i < page.onlineUrl.length; i += maxUrlCharsPerLine) {
+          link.textContent += page.onlineUrl.slice(i, i + maxUrlCharsPerLine);
+          link.textContent += '\r\n';
+        }
+      } else {
+        link.textContent = page.onlineUrl;
+      }
-      td[2].textContent = page.namespace;
-      td[3].textContent = Math.round(page.size / 1024);
-      td[4].textContent = page.isExpired;
-      td[5].textContent = page.requestOrigin;
+      td[3].textContent = page.namespace;
+      td[4].textContent = Math.round(page.size / 1024);
       var row = document.importNode(template.content, true);
       storedPagesTable.appendChild(row);
...
@@ -157,7 +157,6 @@ void OfflineInternalsUIMessageHandler::HandleStoredPagesCallback(
     std::string callback_id,
     const offline_pages::MultipleOfflinePageItemResult& pages) {
   base::ListValue results;
   for (const auto& page : pages) {
     auto offline_page = std::make_unique<base::DictionaryValue>();
     offline_page->SetString("onlineUrl", page.url.spec());
@@ -172,6 +171,13 @@ void OfflineInternalsUIMessageHandler::HandleStoredPagesCallback(
     offline_page->SetString("requestOrigin", page.request_origin);
     results.Append(std::move(offline_page));
   }
+  // Sort by creation order.
+  std::sort(results.GetList().begin(), results.GetList().end(),
+            [](auto& a, auto& b) {
+              return a.FindKey({"creationTime"})->GetDouble() <
+                     b.FindKey({"creationTime"})->GetDouble();
+            });
   ResolveJavascriptCallback(base::Value(callback_id), results);
 }
@@ -317,13 +323,26 @@ void OfflineInternalsUIMessageHandler::HandleGeneratePageBundle(
   for (auto& page_url : page_urls) {
     // Creates a dummy prefetch URL with a bogus ID, and using the URL as the
     // page title.
-    prefetch_urls.push_back(offline_pages::PrefetchURL(
-        "dummy id", GURL(page_url), base::UTF8ToUTF16(page_url)));
+    GURL url(page_url);
+    if (url.is_valid()) {
+      prefetch_urls.push_back(offline_pages::PrefetchURL(
+          "offline-internals", url, base::UTF8ToUTF16(page_url)));
+    }
   }
   prefetch_service_->GetPrefetchDispatcher()->AddCandidatePrefetchURLs(
       offline_pages::kSuggestedArticlesNamespace, prefetch_urls);
-  std::string message("Added candidate URLs.\n");
+  // Note: Static casts are needed here so that both Windows and Android can
+  // compile these printf formats.
+  std::string message =
+      base::StringPrintf("Added %zu candidate URLs.", prefetch_urls.size());
+  if (prefetch_urls.size() < page_urls.size()) {
+    size_t invalid_urls_count = page_urls.size() - prefetch_urls.size();
+    message.append(
+        base::StringPrintf(" Ignored %zu invalid URLs.", invalid_urls_count));
+  }
+  message.append("\n");
   // Construct a JSON array containing all the URLs. To guard against malicious
   // URLs that might contain special characters, we create a ListValue and then
   // serialize it into JSON, instead of doing direct string manipulation.
...
@@ -224,13 +224,13 @@ bool ReportMetricsAndFinalizeSync(sql::Connection* db) {
   if (transaction.Commit()) {
     for (const auto& url : urls) {
-      DVLOG(1) << "Finalized prefetch item: (" << url.offline_id << ", "
-               << url.generate_bundle_attempts << ", "
+      DVLOG(1) << "Finalized prefetch item with error code "
+               << static_cast<int>(url.error_code) << ": (" << url.offline_id
+               << ", " << url.generate_bundle_attempts << ", "
                << url.get_operation_attempts << ", "
                << url.download_initiation_attempts << ", "
                << url.archive_body_length << ", " << url.creation_time << ", "
-               << static_cast<int>(url.error_code) << ", " << url.file_size
-               << ")";
+               << url.file_size << ")";
       ReportMetricsFor(url, now);
     }
     return true;
...
@@ -6,16 +6,25 @@
 #include "base/bind.h"
 #include "base/metrics/histogram_macros.h"
+#include "components/offline_pages/core/offline_page_feature.h"
 #include "components/offline_pages/core/prefetch/generate_page_bundle_request.h"
 #include "components/offline_pages/core/prefetch/get_operation_request.h"
 
 namespace {
+// Max size of all articles archives to be generated from a single request. This
+// 20 MiB value matches the current daily download limit.
 constexpr int kMaxBundleSizeBytes = 20 * 1024 * 1024;  // 20 MB
 
+// Max size of all articles archives to be generated from a single request when
+// limitless prefetching is enabled. The 200 MiB value allows for 100 URLs (the
+// maximum allowed in a single request) with 2 MiB articles (approximately
+// double the average article size).
+constexpr int kMaxBundleSizeForLimitlessBytes = 200 * 1024 * 1024;  // 200 MB
+
 // Max concurrent outstanding requests. If more requests asked to be created,
 // the requests are silently not created (considered failed). This is used
 // as emergency limit that should rarely be encountered in normal operations.
-const int kMaxConcurrentRequests = 10;
+constexpr int kMaxConcurrentRequests = 10;
 }  // namespace
 
 namespace offline_pages {
@@ -55,10 +64,13 @@ void PrefetchNetworkRequestFactoryImpl::MakeGeneratePageBundleRequest(
     const PrefetchRequestFinishedCallback& callback) {
   if (!AddConcurrentRequest())
     return;
+  int max_bundle_size = IsLimitlessPrefetchingEnabled()
+                            ? kMaxBundleSizeForLimitlessBytes
+                            : kMaxBundleSizeBytes;
   uint64_t request_id = GetNextRequestId();
   generate_page_bundle_requests_[request_id] =
       std::make_unique<GeneratePageBundleRequest>(
-          user_agent_, gcm_registration_id, kMaxBundleSizeBytes, url_strings,
+          user_agent_, gcm_registration_id, max_bundle_size, url_strings,
           channel_, request_context_.get(),
           base::Bind(
              &PrefetchNetworkRequestFactoryImpl::GeneratePageBundleRequestDone,
...
@@ -33,9 +33,6 @@ class SuggestedArticlesObserver
   void SetContentSuggestionsServiceAndObserve(
       ntp_snippets::ContentSuggestionsService* service);
 
-  // TODO(dewittj): Make this private when the SQL store is up and running.
-  bool GetCurrentSuggestions(std::vector<PrefetchURL>* result);
-
   // ContentSuggestionsService::Observer overrides.
   void OnNewSuggestions(ntp_snippets::Category category) override;
   void OnCategoryStatusChanged(
@@ -51,6 +48,8 @@ class SuggestedArticlesObserver
   std::vector<ntp_snippets::ContentSuggestion>* GetTestingArticles();
 
  private:
+  bool GetCurrentSuggestions(std::vector<PrefetchURL>* result);
+
   // Unowned, only used when we are called by observer methods (so the
   // pointer will be valid).
   ntp_snippets::ContentSuggestionsService* content_suggestions_service_ =
...