Commit 21ece16c authored by Dale Curtis, committed by Commit Bot

Use video frame duration metadata instead of waiting for 2 frames.

http://crrev.com/493874 changed the low delay case to wait for two
frames, since we need to be sure we don't resume until we actually
have valid frames; without duration information, the first frame
was considered valid forever.

It turns out we actually do have duration information hanging off
the DecoderStream, so use this information and set it as the
FRAME_DURATION metadata key for each VideoFrame.

We can then have the algorithm use this information for the
estimated end time of frames when only a single frame is present
in the queue.
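
For illustration, a minimal sketch of that single-frame estimate. This
is not the Chromium code; it models base::TimeDelta/VideoFrame with
std::chrono and std::optional, and the names are hypothetical:

#include <chrono>
#include <optional>

using Duration = std::chrono::microseconds;
using Timestamp = std::chrono::microseconds;

// Stand-in for a decoded frame carrying an optional FRAME_DURATION value.
struct FrameInfo {
  Timestamp pts;                              // presentation timestamp
  std::optional<Duration> metadata_duration;  // FRAME_DURATION, if provided
};

// With two or more queued frames the end time comes from the next frame's
// timestamp; with a single frame we can only fall back to its metadata.
std::optional<Timestamp> EstimatedEndTime(const FrameInfo& frame) {
  if (frame.metadata_duration && *frame.metadata_duration > Duration::zero())
    return frame.pts + *frame.metadata_duration;
  return std::nullopt;  // e.g. WebRTC frames, which carry no duration
}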

Note: The video-canvas layout test was showing the wrong frame;
I manually extracted the frames and verified that we were previously
showing the "7" frame at pts=2.0s, which should be the "6" frame.
I've updated the test expectations accordingly.

BUG=786576,767878,709302
TEST=new unittest, old unittests pass w/o modification, manual
test of 4k60 vp9 low latency content doesn't exhibit multiple
stalls after returning to the foreground when the video track
has been disabled, manual test with https://jsfiddle.net/u3enjLzz/

Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I963903d6a173038a2d534db4e040c8f4774825b5
Reviewed-on: https://chromium-review.googlesource.com/780267
Commit-Queue: Dale Curtis <dalecurtis@chromium.org>
Reviewed-by: Matthew Wolenetz <wolenetz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#519961}
parent afedd691
......@@ -387,11 +387,31 @@ void VideoRendererAlgorithm::EnqueueFrame(
// the new frame; this allows EffectiveFramesQueued() to be relatively correct
// immediately after a new frame is queued.
std::vector<base::TimeDelta> media_timestamps(1, frame->timestamp());
// If there are not enough frames to estimate duration based on end time, ask
// the WallClockTimeCB to convert the estimated frame duration into wall clock
// time.
//
// Note: This duration value is not compensated for playback rate and
// thus is different than |average_frame_duration_| which is compensated.
//
// Note: Not all frames have duration. E.g., this class is used with WebRTC
// which does not provide duration information for its frames.
base::TimeDelta metadata_frame_duration;
if (!frame_duration_calculator_.count() &&
frame->metadata()->GetTimeDelta(VideoFrameMetadata::FRAME_DURATION,
&metadata_frame_duration) &&
metadata_frame_duration > base::TimeDelta()) {
media_timestamps.push_back(frame->timestamp() + metadata_frame_duration);
}
std::vector<base::TimeTicks> wall_clock_times;
wall_clock_time_cb_.Run(media_timestamps, &wall_clock_times);
ready_frame.start_time = wall_clock_times[0];
if (frame_duration_calculator_.count())
ready_frame.end_time = ready_frame.start_time + average_frame_duration_;
else if (wall_clock_times.size() > 1u)
ready_frame.end_time = wall_clock_times[1];
// The vast majority of cases should always append to the back, but in rare
// circumstances we get out of order timestamps, http://crbug.com/386551.
......@@ -464,34 +484,69 @@ void VideoRendererAlgorithm::UpdateFrameStatistics() {
for (const auto& ready_frame : frame_queue_)
media_timestamps.push_back(ready_frame.frame->timestamp());
// If there are not enough frames to estimate duration based on end time, ask
// the WallClockTimeCB to convert the estimated frame duration into wall clock
// time.
//
// Note: This duration value is not compensated for playback rate and
// thus is different than |average_frame_duration_| which is compensated.
//
// Note: Not all frames have duration. E.g., this class is used with WebRTC
// which does not provide duration information for its frames.
base::TimeDelta metadata_frame_duration;
const bool use_frame_duration_metadata =
!frame_duration_calculator_.count() && frame_queue_.size() == 1u &&
frame_queue_.front().frame->metadata()->GetTimeDelta(
VideoFrameMetadata::FRAME_DURATION, &metadata_frame_duration) &&
metadata_frame_duration > base::TimeDelta();
if (use_frame_duration_metadata) {
media_timestamps.push_back(frame_queue_.front().frame->timestamp() +
metadata_frame_duration);
}
std::vector<base::TimeTicks> wall_clock_times;
was_time_moving_ =
wall_clock_time_cb_.Run(media_timestamps, &wall_clock_times);
// Transfer the converted wall clock times into our frame queue.
DCHECK_EQ(wall_clock_times.size(), frame_queue_.size());
for (size_t i = 0; i < frame_queue_.size() - 1; ++i) {
ReadyFrame& frame = frame_queue_[i];
const bool new_sample = frame.has_estimated_end_time;
frame.start_time = wall_clock_times[i];
frame.end_time = wall_clock_times[i + 1];
frame.has_estimated_end_time = false;
if (new_sample)
frame_duration_calculator_.AddSample(frame.end_time - frame.start_time);
base::TimeDelta deviation;
if (!use_frame_duration_metadata) {
// Transfer the converted wall clock times into our frame queue.
DCHECK_EQ(wall_clock_times.size(), frame_queue_.size());
for (size_t i = 0; i < frame_queue_.size() - 1; ++i) {
ReadyFrame& frame = frame_queue_[i];
const bool new_sample = frame.has_estimated_end_time;
frame.start_time = wall_clock_times[i];
frame.end_time = wall_clock_times[i + 1];
frame.has_estimated_end_time = false;
if (new_sample)
frame_duration_calculator_.AddSample(frame.end_time - frame.start_time);
}
frame_queue_.back().start_time = wall_clock_times.back();
if (!frame_duration_calculator_.count())
return;
// Compute |average_frame_duration_|, a moving average of the last few
// frames; see kMovingAverageSamples for the exact number.
average_frame_duration_ = frame_duration_calculator_.Average();
deviation = frame_duration_calculator_.Deviation();
// Update the frame end time for the last frame based on the average.
frame_queue_.back().end_time =
frame_queue_.back().start_time + average_frame_duration_;
} else {
DCHECK_EQ(frame_duration_calculator_.count(), 0u);
DCHECK_EQ(wall_clock_times.size(), 2u);
ReadyFrame& frame = frame_queue_.front();
frame.start_time = wall_clock_times[0];
frame.end_time = wall_clock_times[1];
frame.has_estimated_end_time = true;
// Note: This may be called multiple times, so we don't want to update the
// frame duration calculator with our estimate.
average_frame_duration_ = frame.end_time - frame.start_time;
}
frame_queue_.back().start_time = wall_clock_times.back();
if (!frame_duration_calculator_.count())
return;
// Compute |average_frame_duration_|, a moving average of the last few frames;
// see kMovingAverageSamples for the exact number.
average_frame_duration_ = frame_duration_calculator_.Average();
const base::TimeDelta deviation = frame_duration_calculator_.Deviation();
// Update the frame end time for the last frame based on the average.
frame_queue_.back().end_time =
frame_queue_.back().start_time + average_frame_duration_;
// ITU-R BR.265 recommends a maximum acceptable drift of +/- half of the frame
// duration; there are other asymmetric, more lenient measures that we're
......
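
A rough sketch of the selection order implemented above (simplified,
hypothetical types; the real code uses base::TimeDelta and the
frame_duration_calculator_ moving average): prefer measured inter-frame
deltas, then single-frame metadata, else give up.

#include <chrono>
#include <cstddef>
#include <optional>

using Duration = std::chrono::microseconds;

// Mirrors the branch above: once duration samples exist, the moving average
// wins; for a lone queued frame we trust FRAME_DURATION; with neither, no
// end time can be estimated at all.
std::optional<Duration> PickFrameDuration(
    std::size_t duration_samples, Duration average_frame_duration,
    std::size_t frames_queued, std::optional<Duration> metadata_duration) {
  if (duration_samples > 0)
    return average_frame_duration;
  if (frames_queued == 1 && metadata_duration &&
      *metadata_duration > Duration::zero())
    return metadata_duration;
  return std::nullopt;
}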
......@@ -100,8 +100,9 @@ class MEDIA_EXPORT VideoRendererAlgorithm {
// rendered yet. If it has been rendered, the new frame will be dropped.
//
// EnqueueFrame() will compute the current start time and an estimated end
// time of the frame based on previous frames so that EffectiveFramesQueued()
// is relatively accurate immediately after this call.
// time of the frame based on previous frames or, if there are no previous
// frames, the value of VideoFrameMetadata::FRAME_DURATION, so that
// EffectiveFramesQueued() is relatively accurate immediately after this call.
void EnqueueFrame(const scoped_refptr<VideoFrame>& frame);
// Removes all frames from the |frame_queue_| and clears predictors. The
......
......@@ -1216,6 +1216,34 @@ TEST_F(VideoRendererAlgorithmTest, CadenceCalculations) {
ASSERT_EQ("[60]", GetCadence(1, NTSC(60)));
}
TEST_F(VideoRendererAlgorithmTest, RemoveExpiredFramesWithoutRendering) {
TickGenerator tg(tick_clock_->NowTicks(), 50);
// Removing expired frames before anything is enqueued should do nothing.
ASSERT_EQ(0u, algorithm_.RemoveExpiredFrames(tg.current()));
// First verify that frames without a duration are always effective when only
// one frame is present in the queue.
algorithm_.EnqueueFrame(CreateFrame(tg.interval(0)));
ASSERT_EQ(0u, algorithm_.RemoveExpiredFrames(tg.current()));
EXPECT_EQ(1u, EffectiveFramesQueued());
ASSERT_EQ(0u, algorithm_.RemoveExpiredFrames(tg.current() + tg.interval(3)));
EXPECT_EQ(1u, EffectiveFramesQueued());
algorithm_.Reset();
// Now try a frame with duration information; this frame should not be counted
// as effective since we know its duration. It is not removed, though, since we
// only have one frame in the queue.
auto frame = CreateFrame(tg.interval(0));
frame->metadata()->SetTimeDelta(VideoFrameMetadata::FRAME_DURATION,
tg.interval(1));
algorithm_.EnqueueFrame(frame);
ASSERT_EQ(0u, algorithm_.RemoveExpiredFrames(tg.current() + tg.interval(3)));
EXPECT_EQ(0u, EffectiveFramesQueued());
}
TEST_F(VideoRendererAlgorithmTest, RemoveExpiredFrames) {
TickGenerator tg(tick_clock_->NowTicks(), 50);
......
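
The rule the new test exercises can be summarized roughly as follows (a
sketch under assumed semantics, not the actual EffectiveFramesQueued()
implementation):

#include <chrono>
#include <optional>

using Clock = std::chrono::steady_clock;

// A lone frame with a known duration stops being effective once the deadline
// passes its estimated end time; without a duration it stays effective
// forever, which is why the first frame in the test above never expires.
bool LoneFrameIsEffective(Clock::time_point start_time,
                          std::optional<Clock::duration> duration,
                          Clock::time_point deadline) {
  if (!duration)
    return true;
  return deadline < start_time + *duration;
}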
......@@ -518,12 +518,16 @@ void VideoRendererImpl::FrameReady(base::TimeTicks read_time,
UMA_HISTOGRAM_ENUMERATION("Media.VideoFrame.ColorSpace",
ColorSpaceUMAHelper(frame->ColorSpace()),
static_cast<int>(VideoFrameColorSpaceUMA::MAX) + 1);
const bool is_eos =
frame->metadata()->IsTrue(VideoFrameMetadata::END_OF_STREAM);
const bool is_before_start_time =
!is_eos && IsBeforeStartTime(frame->timestamp());
const bool cant_read = !video_frame_stream_->CanReadWithoutStalling();
if (frame->metadata()->IsTrue(VideoFrameMetadata::END_OF_STREAM)) {
if (is_eos) {
DCHECK(!received_end_of_stream_);
received_end_of_stream_ = true;
} else if ((low_delay_ || !video_frame_stream_->CanReadWithoutStalling()) &&
IsBeforeStartTime(frame->timestamp())) {
} else if ((low_delay_ || cant_read) && is_before_start_time) {
// Don't accumulate frames that are earlier than the start time if we
// won't have a chance for a better frame; otherwise we could declare
// HAVE_ENOUGH_DATA and start playback prematurely.
......@@ -543,6 +547,16 @@ void VideoRendererImpl::FrameReady(base::TimeTicks read_time,
has_playback_met_watch_time_duration_requirement_ = true;
}
// Provide frame duration information so that even if we only have one frame
// in the queue we can properly estimate duration. This allows the call to
// RemoveFramesForUnderflowOrBackgroundRendering() below to actually expire
// this frame if it's too far behind the current media time. Without this,
// we may resume too soon after a track change in the low delay case.
if (!frame->metadata()->HasKey(VideoFrameMetadata::FRAME_DURATION)) {
frame->metadata()->SetTimeDelta(VideoFrameMetadata::FRAME_DURATION,
video_frame_stream_->AverageDuration());
}
AddReadyFrame_Locked(frame);
UpdateMaxBufferedFrames();
}
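
A condensed model of that fallback (hypothetical names; the real change
calls video_frame_stream_->AverageDuration() and stores the result under
the FRAME_DURATION metadata key):

#include <chrono>
#include <optional>

using Duration = std::chrono::microseconds;

struct Frame {
  std::optional<Duration> duration;  // stands in for FRAME_DURATION metadata
};

// If the decoder didn't stamp a duration on the frame, borrow the stream's
// running average so a lone frame can still be expired after a track change.
void EnsureFrameDuration(Frame& frame, Duration stream_average_duration) {
  if (!frame.duration)
    frame.duration = stream_average_duration;
}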
......@@ -559,62 +573,32 @@ void VideoRendererImpl::FrameReady(base::TimeTicks read_time,
// Paint the first frame if possible and necessary. Paint ahead of
// HAVE_ENOUGH_DATA to ensure the user sees the frame as early as possible.
bool just_painted_first_frame = false;
if (!sink_started_ && algorithm_->frames_queued() && !painted_first_frame_) {
// We want to paint the first frame under two conditions: Either (1) we have
// enough frames to know it's definitely the first frame or (2) there may be
// no more frames coming (sometimes unless we paint one of them).
//
// For the first condition, we need at least two effective frames, since
// otherwise we may be prerolling frames before the actual start time that
// will be dropped.
bool should_paint_first_frame =
algorithm_->effective_frames_queued() > 1 || received_end_of_stream_ ||
!video_frame_stream_->CanReadWithoutStalling();
// For the very first frame (i.e. not after seeks), we want to paint as fast
// as possible to ensure users don't abandon the playback. For live streams
// with long duration frames, waiting for a second frame may take seconds.
//
// Before time starts progressing we may not know if frames are effective or
// not, so the first frame must check if timestamp >= |start_timestamp_|.
//
// We only do this for the very first frame ever painted, since later frames
// risk being wrong due to the lack of duration on the first frame. This
// avoids any fast-forward or frame-flipping type effects as we try to
// resume after a seek.
if (!have_renderered_frames_ && !should_paint_first_frame) {
should_paint_first_frame =
frame->timestamp() >= start_timestamp_ || low_delay_;
}
if (should_paint_first_frame) {
scoped_refptr<VideoFrame> first_frame =
algorithm_->Render(base::TimeTicks(), base::TimeTicks(), nullptr);
CheckForMetadataChanges(first_frame->format(),
first_frame->natural_size());
sink_->PaintSingleFrame(first_frame);
just_painted_first_frame = painted_first_frame_ = true;
}
//
// We want to paint the first frame under two conditions: Either (1) we have
// enough frames to know it's definitely the first frame or (2) there may be
// no more frames coming (sometimes unless we paint one of them).
//
// We have to check both effective_frames_queued() and |is_before_start_time|
// since prior to the clock starting effective_frames_queued() is a guess.
if (!sink_started_ && !painted_first_frame_ && algorithm_->frames_queued() &&
(received_end_of_stream_ || cant_read ||
(algorithm_->effective_frames_queued() && !is_before_start_time))) {
scoped_refptr<VideoFrame> first_frame =
algorithm_->Render(base::TimeTicks(), base::TimeTicks(), nullptr);
CheckForMetadataChanges(first_frame->format(), first_frame->natural_size());
sink_->PaintSingleFrame(first_frame);
painted_first_frame_ = true;
}
// Signal buffering state if we've met our conditions.
//
// If we've just painted the first frame, require the standard 1 frame for low
// latency playback. If we're resuming after a Flush(), wait until we have two
// frames even in low delay mode to avoid any kind of fast-forward or frame
// flipping effect while we attempt to find the best frame.
if (buffering_state_ == BUFFERING_HAVE_NOTHING &&
HaveEnoughData_Locked(just_painted_first_frame ? 1u : 2u)) {
if (buffering_state_ == BUFFERING_HAVE_NOTHING && HaveEnoughData_Locked())
TransitionToHaveEnough_Locked();
}
// Always request more decoded video if we have capacity.
AttemptRead_Locked();
}
bool VideoRendererImpl::HaveEnoughData_Locked(
size_t low_latency_frames_required) const {
bool VideoRendererImpl::HaveEnoughData_Locked() const {
DCHECK_EQ(state_, kPlaying);
lock_.AssertAcquired();
......@@ -631,16 +615,13 @@ bool VideoRendererImpl::HaveEnoughData_Locked(
if (was_background_rendering_ && frames_decoded_)
return true;
if (!low_delay_ && video_frame_stream_->CanReadWithoutStalling())
return false;
// Note: We still require an effective frame in the stalling case since this
// method is also used to inform TransitionToHaveNothing_Locked() and thus
// would never pause and rebuffer if we always return true here.
if (!video_frame_stream_->CanReadWithoutStalling())
return algorithm_->effective_frames_queued() > 0u;
if (!low_delay_)
return false;
return algorithm_->effective_frames_queued() >= low_latency_frames_required;
return algorithm_->effective_frames_queued() > 0u;
}
void VideoRendererImpl::TransitionToHaveEnough_Locked() {
......
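
Condensing the checks above into one predicate (a simplified model; paths
elided by the diff, such as end of stream, are omitted here):

#include <cstddef>

// Simplified model of HaveEnoughData_Locked() after this change: low delay
// playback now needs just one effective frame, with no special two-frame
// requirement after a Flush().
bool HaveEnoughData(bool was_background_rendering, std::size_t frames_decoded,
                    bool can_read_without_stalling, bool low_delay,
                    std::size_t effective_frames_queued) {
  if (was_background_rendering && frames_decoded > 0)
    return true;  // background rendering already produced frames
  if (!can_read_without_stalling)
    return effective_frames_queued > 0;  // stalling: use what we have
  if (!low_delay)
    return false;  // normal path relies on buffering limits elsewhere
  return effective_frames_queued > 0;  // low delay: one effective frame
}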
......@@ -139,12 +139,7 @@ class MEDIA_EXPORT VideoRendererImpl
// Returns true if the renderer has enough data for playback purposes.
// Note that having enough data may be due to reaching end of stream.
//
// |low_latency_frames_required| indicates the number of frames required to
// have enough for low latency playback. By default it's one frame, but
// during resume after a Flush() we may wait for 2 frames to ensure we have
// effective frames.
bool HaveEnoughData_Locked(size_t low_latency_frames_required = 1u) const;
bool HaveEnoughData_Locked() const;
void TransitionToHaveEnough_Locked();
void TransitionToHaveNothing();
void TransitionToHaveNothing_Locked();
......
......@@ -13,12 +13,14 @@ async_test(function(t) {
var results = {
current: 0,
values: [
{ time: 0, r: 255, g: 255, b: 0 },
{ time: 2, r: 0, g: 9, b: 237 },
{ time: 4, r: 0, g: 32, b: 209 },
{ time: 6, r: 0, g: 54, b: 182 },
{ time: 8, r: 0, g: 77, b: 154 },
{ time: 10, r: 0, g: 97, b: 126 }
// Different platforms may take different RGB conversion paths or
// have slight rounding differences, so allow multiple values here.
{ time: 0, r: [255], g: [255], b: [0] },
{ time: 2, r: [0], g: [3], b: [240, 243] },
{ time: 4, r: [0], g: [31], b: [213, 216] },
{ time: 6, r: [0], g: [48], b: [182, 185] },
{ time: 8, r: [0], g: [75, 76], b: [153, 155] },
{ time: 10, r: [0], g: [96, 97], b: [126, 128] }
]
};
......@@ -46,9 +48,9 @@ async_test(function(t) {
var g = frame.data[4 * 2 * width + 16 + 1];
var b = frame.data[4 * 2 * width + 16 + 2];
assert_equals(r, expected.r);
assert_equals(g, expected.g);
assert_equals(b, expected.b);
assert_true(expected.r.includes(r));
assert_true(expected.g.includes(g));
assert_true(expected.b.includes(b));
if (++results.current == results.values.length)
t.done();
......
This is a testharness.js-based test.
FAIL Test "video" as a source for "canvas". assert_equals: expected 9 but got 8
Harness: the test ran to completion.