Commit 1882f132 authored by Klaus Weidner, committed by Commit Bot

Track WebVR timings, add WaitPrevStrategy

In preparation for GpuFence synchronization, track GVR acquire/submit
times and report JavaScript waitForPreviousRender times back to
VrShellGl via Mojo.

Refactor waiting for previous transfer and render, using helper
functions to unclutter submitFrame.

Wait for previous transfer is only used for the Android surface path,
move it there.

Wait for previous render now supports different choices for the
execution point, including NEVER for the OpenVR path. The BEFORE_BITMAP
choice is not yet active, it's intended for use with GpuFence-separated
frames. It does work for the current render path too, but IIRC is a bit
slower there due to less parallelism.

BUG=761432

Change-Id: I9d24d6e0a133ad76cfda4f40c2bba84ffd7363ef
Reviewed-on: https://chromium-review.googlesource.com/802756
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: Bill Orr <billorr@chromium.org>
Reviewed-by: Michael Thiessen <mthiesse@chromium.org>
Commit-Queue: Klaus Weidner <klausw@chromium.org>
Cr-Commit-Position: refs/heads/master@{#521735}
parent 2cc2073f
......@@ -4,6 +4,7 @@
#include "chrome/browser/android/vr_shell/vr_shell_gl.h"
#include <algorithm>
#include <chrono>
#include <limits>
#include <utility>
......@@ -25,11 +26,9 @@
#include "chrome/browser/android/vr_shell/vr_usage_monitor.h"
#include "chrome/browser/vr/assets.h"
#include "chrome/browser/vr/elements/ui_element.h"
#include "chrome/browser/vr/fps_meter.h"
#include "chrome/browser/vr/model/camera_model.h"
#include "chrome/browser/vr/model/model.h"
#include "chrome/browser/vr/pose_util.h"
#include "chrome/browser/vr/sliding_average.h"
#include "chrome/browser/vr/ui.h"
#include "chrome/browser/vr/ui_element_renderer.h"
#include "chrome/browser/vr/ui_scene.h"
......@@ -173,10 +172,12 @@ VrShellGl::VrShellGl(GlBrowserInterface* browser_interface,
binding_(this),
browser_(browser_interface),
keyboard_delegate_(keyboard_delegate),
fps_meter_(new vr::FPSMeter()),
webvr_js_time_(new vr::SlidingTimeDeltaAverage(kWebVRSlidingAverageSize)),
webvr_render_time_(
new vr::SlidingTimeDeltaAverage(kWebVRSlidingAverageSize)),
fps_meter_(),
webvr_js_time_(kWebVRSlidingAverageSize),
webvr_render_time_(kWebVRSlidingAverageSize),
webvr_js_wait_time_(kWebVRSlidingAverageSize),
webvr_acquire_time_(kWebVRSlidingAverageSize),
webvr_submit_time_(kWebVRSlidingAverageSize),
weak_ptr_factory_(this) {
GvrInit(gvr_api);
}
......@@ -311,7 +312,8 @@ void VrShellGl::CreateOrResizeWebVRSurface(const gfx::Size& size) {
}
void VrShellGl::SubmitFrame(int16_t frame_index,
const gpu::MailboxHolder& mailbox) {
const gpu::MailboxHolder& mailbox,
base::TimeDelta time_waited) {
TRACE_EVENT0("gpu", "VrShellGl::SubmitWebVRFrame");
// submit_client_ could be null when we exit presentation, if there were
......@@ -331,6 +333,16 @@ void VrShellGl::SubmitFrame(int16_t frame_index,
webvr_time_js_submit_[frame_index % kPoseRingBufferSize] =
base::TimeTicks::Now();
// The JavaScript wait time is supplied externally and not trustworthy. Clamp
// to a reasonable range to avoid math errors.
if (time_waited < base::TimeDelta())
time_waited = base::TimeDelta();
if (time_waited > base::TimeDelta::FromSeconds(1))
time_waited = base::TimeDelta::FromSeconds(1);
webvr_js_wait_time_.AddSample(time_waited);
TRACE_COUNTER1("gpu", "WebVR JS wait (ms)",
webvr_js_wait_time_.GetAverage().InMilliseconds());
// Swapping twice on a Surface without calling updateTexImage in
// between can lose frames, so don't draw+swap if we already have
// a pending frame we haven't consumed yet.
......@@ -847,7 +859,9 @@ void VrShellGl::DrawFrame(int16_t frame_index, base::TimeTicks current_time) {
return;
TRACE_EVENT_BEGIN0("gpu", "VrShellGl::AcquireFrame");
base::TimeTicks acquire_start = base::TimeTicks::Now();
acquired_frame_ = swap_chain_->AcquireFrame();
webvr_acquire_time_.AddSample(base::TimeTicks::Now() - acquire_start);
TRACE_EVENT_END0("gpu", "VrShellGl::AcquireFrame");
if (!acquired_frame_)
return;
......@@ -995,8 +1009,13 @@ void VrShellGl::DrawFrameSubmitNow(int16_t frame_index,
gvr::Mat4f mat;
TransformToGvrMat(head_pose, &mat);
acquired_frame_.Submit(*buffer_viewport_list_, mat);
CHECK(!acquired_frame_);
{
TRACE_EVENT0("gpu", "VrShellGl::SubmitToGvr");
base::TimeTicks submit_start = base::TimeTicks::Now();
acquired_frame_.Submit(*buffer_viewport_list_, mat);
webvr_submit_time_.AddSample(base::TimeTicks::Now() - submit_start);
CHECK(!acquired_frame_);
}
// No need to swap buffers for surfaceless rendering.
if (!surfaceless_rendering_) {
......@@ -1019,15 +1038,15 @@ void VrShellGl::DrawFrameSubmitNow(int16_t frame_index,
webvr_time_pose_[frame_index % kPoseRingBufferSize];
base::TimeTicks js_submit_time =
webvr_time_js_submit_[frame_index % kPoseRingBufferSize];
webvr_js_time_->AddSample(js_submit_time - pose_time);
webvr_render_time_->AddSample(now - js_submit_time);
webvr_js_time_.AddSample(js_submit_time - pose_time);
webvr_render_time_.AddSample(now - js_submit_time);
}
// After saving the timestamp, fps will be available via GetFPS().
// TODO(vollick): enable rendering of this framerate in a HUD.
fps_meter_->AddFrame(base::TimeTicks::Now());
DVLOG(1) << "fps: " << fps_meter_->GetFPS();
TRACE_COUNTER1("gpu", "WebVR FPS", fps_meter_->GetFPS());
fps_meter_.AddFrame(base::TimeTicks::Now());
DVLOG(1) << "fps: " << fps_meter_.GetFPS();
TRACE_COUNTER1("gpu", "WebVR FPS", fps_meter_.GetFPS());
}
bool VrShellGl::ShouldDrawWebVr() {
......@@ -1224,14 +1243,17 @@ base::TimeDelta VrShellGl::GetPredictedFrameTime() {
// If we aim to submit at vsync, that frame will start scanning out
// one vsync later. Add a half frame to split the difference between
// left and right eye.
base::TimeDelta js_time = webvr_js_time_->GetAverageOrDefault(frame_interval);
base::TimeDelta js_time = webvr_js_time_.GetAverageOrDefault(frame_interval);
base::TimeDelta render_time =
webvr_render_time_->GetAverageOrDefault(frame_interval);
webvr_render_time_.GetAverageOrDefault(frame_interval);
base::TimeDelta overhead_time = frame_interval * 3 / 2;
base::TimeDelta expected_frame_time = js_time + render_time + overhead_time;
TRACE_COUNTER2("gpu", "WebVR frame time (ms)", "javascript",
js_time.InMilliseconds(), "rendering",
render_time.InMilliseconds());
TRACE_COUNTER2("gpu", "GVR frame time (ms)", "acquire",
webvr_acquire_time_.GetAverage().InMilliseconds(), "submit",
webvr_submit_time_.GetAverage().InMilliseconds());
TRACE_COUNTER1("gpu", "WebVR pose prediction (ms)",
expected_frame_time.InMilliseconds());
return expected_frame_time;
......
......@@ -20,7 +20,9 @@
#include "chrome/browser/android/vr_shell/vr_controller.h"
#include "chrome/browser/vr/content_input_delegate.h"
#include "chrome/browser/vr/controller_mesh.h"
#include "chrome/browser/vr/fps_meter.h"
#include "chrome/browser/vr/model/controller_model.h"
#include "chrome/browser/vr/sliding_average.h"
#include "chrome/browser/vr/ui_input_manager.h"
#include "chrome/browser/vr/ui_renderer.h"
#include "device/vr/vr_service.mojom.h"
......@@ -161,7 +163,8 @@ class VrShellGl : public device::mojom::VRPresentationProvider {
// VRPresentationProvider
void GetVSync(GetVSyncCallback callback) override;
void SubmitFrame(int16_t frame_index,
const gpu::MailboxHolder& mailbox) override;
const gpu::MailboxHolder& mailbox,
base::TimeDelta time_waited) override;
void SubmitFrameWithTextureHandle(int16_t frame_index,
mojo::ScopedHandle texture_handle) override;
void UpdateLayerBounds(int16_t frame_index,
......@@ -252,10 +255,24 @@ class VrShellGl : public device::mojom::VRPresentationProvider {
// Attributes for gesture detection while holding app button.
gfx::Vector3dF controller_start_direction_;
std::unique_ptr<vr::FPSMeter> fps_meter_;
vr::FPSMeter fps_meter_;
std::unique_ptr<vr::SlidingTimeDeltaAverage> webvr_js_time_;
std::unique_ptr<vr::SlidingTimeDeltaAverage> webvr_render_time_;
// JS time is from SendVSync (pose time) to incoming JS submitFrame.
vr::SlidingTimeDeltaAverage webvr_js_time_;
// Render time is from JS submitFrame to estimated render completion.
// This is an estimate when submitting incomplete frames to GVR.
// If submitFrame blocks, that means the previous frame wasn't done
// rendering yet.
vr::SlidingTimeDeltaAverage webvr_render_time_;
// JS wait time is spent waiting for the previous frame to complete
// rendering, as reported from the Renderer via mojo.
vr::SlidingTimeDeltaAverage webvr_js_wait_time_;
// GVR acquire/submit times for scheduling heuristics.
vr::SlidingTimeDeltaAverage webvr_acquire_time_;
vr::SlidingTimeDeltaAverage webvr_submit_time_;
gfx::Point3F pointer_start_;
......
......@@ -25,7 +25,8 @@ OpenVRRenderLoop::~OpenVRRenderLoop() {
}
void OpenVRRenderLoop::SubmitFrame(int16_t frame_index,
const gpu::MailboxHolder& mailbox) {
const gpu::MailboxHolder& mailbox,
base::TimeDelta time_waited) {
NOTREACHED();
}
......
......@@ -7,6 +7,7 @@
#include "base/memory/scoped_refptr.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "device/vr/vr_service.mojom.h"
#include "mojo/public/cpp/bindings/binding.h"
......@@ -33,7 +34,8 @@ class OpenVRRenderLoop : public base::Thread, mojom::VRPresentationProvider {
// VRPresentationProvider overrides:
void SubmitFrame(int16_t frame_index,
const gpu::MailboxHolder& mailbox) override;
const gpu::MailboxHolder& mailbox,
base::TimeDelta time_waited) override;
void SubmitFrameWithTextureHandle(int16_t frame_index,
mojo::ScopedHandle texture_handle) override;
void UpdateLayerBounds(int16_t frame_id,
......@@ -69,4 +71,4 @@ class OpenVRRenderLoop : public base::Thread, mojom::VRPresentationProvider {
} // namespace device
#endif // DEVICE_VR_OPENVR_RENDER_LOOP_H
\ No newline at end of file
#endif // DEVICE_VR_OPENVR_RENDER_LOOP_H
......@@ -131,7 +131,8 @@ interface VRPresentationProvider {
// no mapping.
GetVSync() => (VRPose? pose, mojo.common.mojom.TimeDelta time, int16 frame_id,
VSyncStatus status);
SubmitFrame(int16 frame_id, gpu.mojom.MailboxHolder mailbox_holder);
SubmitFrame(int16 frame_id, gpu.mojom.MailboxHolder mailbox_holder,
mojo.common.mojom.TimeDelta time_waited);
// TODO(https://crbug.com/676224): Support preprocessing of mojom files, since
// this is Windows only.
......
......@@ -65,7 +65,7 @@ class MockVRPresentationProvider {
this.binding_.bind(request);
}
submitFrame(frameId, mailboxHolder) {
submitFrame(frameId, mailboxHolder, timeWaited) {
// Trigger the submit completion callbacks here. WARNING: The
// Javascript-based mojo mocks are *not* re-entrant. In the current
// default implementation, Javascript calls display.submitFrame, and the
......
......@@ -506,34 +506,54 @@ ScriptPromise VRDisplay::exitPresent(ScriptState* script_state) {
return promise;
}
// Chooses the frame transport mechanism and the wait-for-previous-render
// strategy for the connected VR device. Returns false if no presentation
// path is available for the device (currently both known device classes are
// handled, so this always returns true).
bool VRDisplay::ConfigurePresentationPathForDisplay() {
  // TODO(klausw): capabilities_ should provide such information more directly.
  // Currently, there's only two presentation paths which happen to align with
  // having an external display (desktop devices such as OpenVR) or not (mobile
  // VR on Android).
  if (capabilities_->hasExternalDisplay()) {
    // Desktop path (e.g. OpenVR on Windows): frames are transported as
    // texture handles. No wait is needed since that render pipeline does not
    // overlap frames, so the previous frame is already complete.
    frame_transport_method_ = FrameTransport::kTextureHandle;
    wait_for_previous_render_ = WaitPrevStrategy::kNoWait;
  } else {
    // Mobile path (Android Surface): mailbox transport. Wait for the previous
    // render after fetching the bitmap to maximize parallelism with the
    // previous frame's rendering.
    frame_transport_method_ = FrameTransport::kMailbox;
    wait_for_previous_render_ = WaitPrevStrategy::kAfterBitmap;
  }
  return true;
}
void VRDisplay::BeginPresent() {
Document* doc = this->GetDocument();
if (capabilities_->hasExternalDisplay()) {
// Presenting with external displays has to make a copy of the image
// since the canvas may still be visible at the same time.
present_image_needs_copy_ = true;
DOMException* exception = nullptr;
if (!ConfigurePresentationPathForDisplay()) {
exception = DOMException::Create(
kInvalidStateError, "VRDisplay presentation path not implemented.");
}
if (layer_.source().IsOffscreenCanvas()) {
// TODO(junov, crbug.com/695497): Implement OffscreenCanvas presentation
exception = DOMException::Create(
kInvalidStateError, "OffscreenCanvas presentation not implemented.");
} else {
if (layer_.source().IsHTMLCanvasElement()) {
// TODO(klausw,crbug.com/698923): suppress compositor updates
// since they aren't needed, they do a fair amount of extra
// work.
} else {
DCHECK(layer_.source().IsOffscreenCanvas());
// TODO(junov, crbug.com/695497): Implement OffscreenCanvas presentation
ForceExitPresent();
DOMException* exception = DOMException::Create(
kInvalidStateError, "OffscreenCanvas presentation not implemented.");
while (!pending_present_resolvers_.IsEmpty()) {
ScriptPromiseResolver* resolver =
pending_present_resolvers_.TakeFirst();
resolver->Reject(exception);
}
ReportPresentationResult(
PresentationResult::kPresentationNotSupportedByDisplay);
return;
// A canvas must be either Offscreen or plain HTMLCanvas.
DCHECK(layer_.source().IsHTMLCanvasElement());
}
if (exception) {
ForceExitPresent();
while (!pending_present_resolvers_.IsEmpty()) {
ScriptPromiseResolver* resolver = pending_present_resolvers_.TakeFirst();
resolver->Reject(exception);
}
ReportPresentationResult(
PresentationResult::kPresentationNotSupportedByDisplay);
return;
}
// Presenting with external displays has to make a copy of the image
// since the canvas may still be visible at the same time.
present_image_needs_copy_ = capabilities_->hasExternalDisplay();
if (doc) {
Platform::Current()->RecordRapporURL("VR.WebVR.PresentSuccess",
WebURL(doc->Url()));
......@@ -660,26 +680,17 @@ void VRDisplay::submitFrame() {
UpdateLayerBounds();
}
// There's two types of synchronization needed for submitting frames:
//
// - Before submitting, need to wait for the previous frame to be
// pulled off the transfer surface to avoid lost frames. This
// is currently a compile-time option, normally we always want
// to defer this wait to increase parallelism.
//
// - After submitting, need to wait for the mailbox to be consumed,
// and the image object must remain alive during this time.
// We keep a reference to the image so that we can defer this
// wait. Here, we wait for the previous transfer to complete.
{
TRACE_EVENT0("gpu", "VRDisplay::waitForPreviousTransferToFinish");
while (pending_submit_frame_) {
if (!submit_frame_client_binding_.WaitForIncomingMethodCall()) {
DLOG(ERROR) << "Failed to receive SubmitFrame response";
break;
}
}
}
// Ensure that required device selections were made.
DCHECK(frame_transport_method_ != FrameTransport::kUninitialized);
DCHECK(wait_for_previous_render_ != WaitPrevStrategy::kUninitialized);
WTF::TimeDelta wait_time;
// Conditionally wait for the previous render to finish, to avoid losing
// frames in the Android Surface / GLConsumer pair. An early wait here is
// appropriate when using a GpuFence to separate drawing, the new frame isn't
// complete yet at this stage.
if (wait_for_previous_render_ == WaitPrevStrategy::kBeforeBitmap)
wait_time += WaitForPreviousRenderToFinish();
TRACE_EVENT_BEGIN0("gpu", "VRDisplay::GetStaticBitmapImage");
scoped_refptr<Image> image_ref = rendering_context_->GetStaticBitmapImage();
......@@ -703,8 +714,10 @@ void VRDisplay::submitFrame() {
}
}
if (present_image_needs_copy_) {
if (frame_transport_method_ == FrameTransport::kTextureHandle) {
#if defined(OS_WIN)
// Currently, we assume that this transport needs a copy.
DCHECK(present_image_needs_copy_);
TRACE_EVENT0("gpu", "VRDisplay::CopyImage");
if (!frame_copier_ || !last_transfer_succeeded_) {
frame_copier_ = std::make_unique<GpuMemoryBufferImageCopy>(context_gl_);
......@@ -725,7 +738,11 @@ void VRDisplay::submitFrame() {
#else
NOTIMPLEMENTED();
#endif
} else {
} else if (frame_transport_method_ == FrameTransport::kMailbox) {
// Currently, this transport assumes we don't need to make a separate copy
// of the canvas content.
DCHECK(!present_image_needs_copy_);
// The AcceleratedStaticBitmapImage must be kept alive until the
// mailbox is used via createAndConsumeTextureCHROMIUM, the mailbox
// itself does not keep it alive. We must keep a reference to the
......@@ -736,27 +753,20 @@ void VRDisplay::submitFrame() {
static_image->EnsureMailbox(kVerifiedSyncToken);
TRACE_EVENT_END0("gpu", "VRDisplay::EnsureMailbox");
// Conditionally wait for the previous render to finish. A late wait here
// attempts to overlap work in parallel with the previous frame's
// rendering. This is used if submitting fully rendered frames to GVR, but
// is susceptible to bad GPU scheduling if the new frame competes with the
// previous frame's incomplete rendering.
if (wait_for_previous_render_ == WaitPrevStrategy::kAfterBitmap)
wait_time += WaitForPreviousRenderToFinish();
// Save a reference to the image to keep it alive until next frame,
// where we'll wait for the transfer to finish before overwriting
// it.
// but first wait for the transfer to finish before overwriting it.
// Usually this check is satisfied without waiting.
WaitForPreviousTransfer();
previous_image_ = std::move(image_ref);
// Wait for the previous render to finish, to avoid losing frames in the
// Android Surface / GLConsumer pair. TODO(klausw): make this tunable?
// Other devices may have different preferences. Do this step as late
// as possible before SubmitFrame to ensure we can do as much work as
// possible in parallel with the previous frame's rendering.
{
TRACE_EVENT0("gpu", "waitForPreviousRenderToFinish");
while (pending_previous_frame_render_) {
if (!submit_frame_client_binding_.WaitForIncomingMethodCall()) {
DLOG(ERROR) << "Failed to receive SubmitFrame response";
break;
}
}
}
pending_previous_frame_render_ = true;
pending_submit_frame_ = true;
// Create mailbox and sync token for transfer.
......@@ -767,10 +777,14 @@ void VRDisplay::submitFrame() {
TRACE_EVENT_BEGIN0("gpu", "VRDisplay::SubmitFrame");
vr_presentation_provider_->SubmitFrame(
vr_frame_id_, gpu::MailboxHolder(mailbox, sync_token, GL_TEXTURE_2D));
vr_frame_id_, gpu::MailboxHolder(mailbox, sync_token, GL_TEXTURE_2D),
wait_time);
TRACE_EVENT_END0("gpu", "VRDisplay::SubmitFrame");
} else {
NOTREACHED() << "Unimplemented frame_transport_method_";
}
pending_previous_frame_render_ = true;
did_submit_this_frame_ = true;
// Reset our frame id, since anything we'd want to do (resizing/etc) can
// no-longer happen to this frame.
......@@ -795,6 +809,28 @@ void VRDisplay::OnSubmitFrameRendered() {
pending_previous_frame_render_ = false;
}
// Blocks until the previous frame's transfer has completed, i.e. until
// pending_submit_frame_ is cleared by the corresponding SubmitFrameClient
// callback. This prevents overwriting the previously transferred image
// while it is still in use. Bails out (with an error log) instead of
// spinning forever if the Mojo pipe fails.
void VRDisplay::WaitForPreviousTransfer() {
  TRACE_EVENT0("gpu", "VRDisplay::waitForPreviousTransferToFinish");
  // Pump incoming Mojo method calls on the client binding; one of them is
  // expected to reset pending_submit_frame_.
  while (pending_submit_frame_) {
    if (!submit_frame_client_binding_.WaitForIncomingMethodCall()) {
      DLOG(ERROR) << "Failed to receive SubmitFrame response";
      break;
    }
  }
}
// Blocks until the previous frame has finished rendering, i.e. until
// pending_previous_frame_render_ is cleared (see OnSubmitFrameRendered).
// Returns the wall-clock time spent waiting so that callers can report it
// to the device side (via SubmitFrame's time_waited argument) for
// scheduling heuristics. Bails out (with an error log) if the Mojo pipe
// fails, returning the time waited so far.
WTF::TimeDelta VRDisplay::WaitForPreviousRenderToFinish() {
  TRACE_EVENT0("gpu", "waitForPreviousRenderToFinish");
  WTF::TimeTicks start = WTF::TimeTicks::Now();
  // Pump incoming Mojo method calls; the render-completion callback resets
  // pending_previous_frame_render_.
  while (pending_previous_frame_render_) {
    if (!submit_frame_client_binding_.WaitForIncomingMethodCall()) {
      DLOG(ERROR) << "Failed to receive SubmitFrame response";
      break;
    }
  }
  return WTF::TimeTicks::Now() - start;
}
// Convenience accessor for the Document that owns this display, obtained
// via the NavigatorVR this display was created from. May be null if the
// navigator no longer has an associated document — TODO confirm.
Document* VRDisplay::GetDocument() {
  return navigator_vr_->GetDocument();
}
......@@ -856,6 +892,8 @@ void VRDisplay::StopPresenting() {
pending_submit_frame_ = false;
pending_previous_frame_render_ = false;
did_submit_this_frame_ = false;
frame_transport_method_ = FrameTransport::kUninitialized;
wait_for_previous_render_ = WaitPrevStrategy::kUninitialized;
}
void VRDisplay::OnActivate(device::mojom::blink::VRDisplayEventReason reason,
......
......@@ -123,6 +123,40 @@ class VRDisplay final : public EventTargetWithInlineData,
VRController* Controller();
private:
// Specifies how submitFrame should transport frame data for the presenting
// VR device, set by ConfigurePresentationPathForDisplay().
enum class FrameTransport {
// Invalid default value. Must be changed to a valid choice before starting
// presentation.
kUninitialized,
// Command buffer CHROMIUM_texture_mailbox. Used by the Android Surface
// rendering path.
kMailbox,
// A TextureHandle as extracted from a GpuMemoryBufferHandle. Used with
// DXGI texture handles for OpenVR on Windows.
kTextureHandle,
};
// Some implementations need to synchronize submitting with the completion of
// the previous frame, i.e. the Android surface path needs to wait to avoid
// lost frames in the transfer surface and to avoid overstuffed buffers. The
// strategy choice here indicates at which point in the submission process
// it should wait. kNoWait means to skip this wait entirely. For example,
// the OpenVR render pipeline doesn't overlap frames, so the previous
// frame is already guaranteed complete.
enum class WaitPrevStrategy {
  // Invalid default value. Must be changed to a valid choice before starting
  // presentation.
  kUninitialized,
  // Skip the wait entirely (used by the OpenVR path).
  kNoWait,
  // Wait before fetching the canvas bitmap. Intended for use with
  // GpuFence-separated frames; not yet active.
  kBeforeBitmap,
  // Wait after the bitmap/mailbox is prepared, overlapping work with the
  // previous frame's rendering (used by the Android Surface path).
  kAfterBitmap,
};
bool ConfigurePresentationPathForDisplay();
void WaitForPreviousTransfer();
WTF::TimeDelta WaitForPreviousRenderToFinish();
void OnPresentComplete(bool);
void OnConnected();
......@@ -204,6 +238,9 @@ class VRDisplay final : public EventTargetWithInlineData,
// waitForPreviousTransferToFinish.
scoped_refptr<Image> previous_image_;
FrameTransport frame_transport_method_ = FrameTransport::kUninitialized;
WaitPrevStrategy wait_for_previous_render_ = WaitPrevStrategy::kUninitialized;
TraceWrapperMember<ScriptedAnimationController>
scripted_animation_controller_;
bool pending_vrdisplay_raf_ = false;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment