Commit dcd1d596 authored by battre's avatar battre Committed by Commit bot

Revert of [Cast] Compute utilization metrics and add performance overlay....

Revert of [Cast] Compute utilization metrics and add performance overlay. (patchset #2 id:40001 of https://codereview.chromium.org/1148233002/)

Reason for revert:
Reverting due to compile failure on Google Chrome Mac builder:
../../media/cast/sender/h264_vt_encoder_unittest.cc:282:38: error: no viable conversion from 'Callback<typename internal::BindState<typename internal::FunctorTraits<void (MetadataRecorder::*)(scoped_ptr<EncodedFrame, DefaultDeleter<EncodedFrame> >)>::RunnableType, typename internal::FunctorTraits<void (MetadataRecorder::*)(scoped_ptr<EncodedFrame, DefaultDeleter<EncodedFrame> >)>::RunType, internal::TypeList<typename internal::CallbackParamTraits<MetadataRecorder *>::StorageType> >::UnboundRunType>' to 'Callback<void (scoped_ptr<media::cast::SenderEncodedFrame>)>'
  VideoEncoder::FrameEncodedCallback cb = base::Bind(
                                     ^    ~~~~~~~~~~~
../../base/callback.h:358:7: note: candidate constructor (the implicit move constructor) not viable: no known conversion from 'base::Callback<typename internal::BindState<typename internal::FunctorTraits<void (MetadataRecorder::*)(scoped_ptr<EncodedFrame, DefaultDeleter<EncodedFrame> >)>::RunnableType, typename internal::FunctorTraits<void (MetadataRecorder::*)(scoped_ptr<EncodedFrame, DefaultDeleter<EncodedFrame> >)>::RunType, internal::TypeList<typename internal::CallbackParamTraits<MetadataRecorder *>::StorageType> >::UnboundRunType>' to 'base::Callback<void (scoped_ptr<media::cast::SenderEncodedFrame, base::DefaultDeleter<media::cast::SenderEncodedFrame> >)> &&' for 1st argument
class Callback;

http://build.chromium.org/p/chromium.chrome/buildstatus?builder=Google%20Chrome%20Mac&number=1293

Original issue's description:
> [Cast] Compute utilization metrics and add performance overlay.
>
> Adds computation of two frame-level utilization metrics to the software
> VP8 encoder in Cast: deadline utilization and lossy utilization.  The
> first is a measure of how long the encoding of each frame takes compared
> to the frame duration.  The second is a measure of the complexity of a
> frame, in terms of the quality versus encoded size trade-off.
>
> In a future change, these utilization metrics will be sent as feedback
> signals to the producer of the video frames, allowing the producer to
> adjust data volumes based on the consumer's capability throughout a
> session.  See bug for more details.
>
> Also, this change adds an overlay display, where frame-level performance
> metrics are rendered in the lower-right corner of each video frame just
> before it is sent.  This provides an "on screen display" of end-to-end
> system performance.  This is turned on with a command line argument:
> --vmodule=performance_metrics_overlay=3
>
> BUG=156767
>
> Committed: https://crrev.com/d9e741512cd6ea56cefd0173b371d0dd29f0cce5
> Cr-Commit-Position: refs/heads/master@{#330896}

TBR=hubbe@chromium.org,miu@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=156767

Review URL: https://codereview.chromium.org/1143363005

Cr-Commit-Position: refs/heads/master@{#330906}
parent 205d8da3
...@@ -121,10 +121,6 @@ source_set("sender") { ...@@ -121,10 +121,6 @@ source_set("sender") {
"sender/fake_software_video_encoder.h", "sender/fake_software_video_encoder.h",
"sender/frame_sender.cc", "sender/frame_sender.cc",
"sender/frame_sender.h", "sender/frame_sender.h",
"sender/performance_metrics_overlay.cc",
"sender/performance_metrics_overlay.h",
"sender/sender_encoded_frame.cc",
"sender/sender_encoded_frame.h",
"sender/size_adaptable_video_encoder_base.cc", "sender/size_adaptable_video_encoder_base.cc",
"sender/size_adaptable_video_encoder_base.h", "sender/size_adaptable_video_encoder_base.h",
"sender/software_video_encoder.h", "sender/software_video_encoder.h",
...@@ -176,12 +172,16 @@ source_set("sender") { ...@@ -176,12 +172,16 @@ source_set("sender") {
"sender/h264_vt_encoder.h", "sender/h264_vt_encoder.h",
] ]
libs += [ "CoreVideo.framework" ] libs += [
"CoreVideo.framework",
]
} }
if (is_mac) { if (is_mac) {
# Required by audio_encoder.cc. # Required by audio_encoder.cc.
libs += [ "AudioToolbox.framework" ] libs += [
"AudioToolbox.framework",
]
} }
} }
......
...@@ -162,10 +162,6 @@ ...@@ -162,10 +162,6 @@
'sender/fake_software_video_encoder.h', 'sender/fake_software_video_encoder.h',
'sender/frame_sender.cc', 'sender/frame_sender.cc',
'sender/frame_sender.h', 'sender/frame_sender.h',
'sender/performance_metrics_overlay.cc',
'sender/performance_metrics_overlay.h',
'sender/sender_encoded_frame.cc',
'sender/sender_encoded_frame.h',
'sender/size_adaptable_video_encoder_base.cc', 'sender/size_adaptable_video_encoder_base.cc',
'sender/size_adaptable_video_encoder_base.h', 'sender/size_adaptable_video_encoder_base.h',
'sender/software_video_encoder.h', 'sender/software_video_encoder.h',
......
...@@ -71,7 +71,7 @@ struct EncodedFrame { ...@@ -71,7 +71,7 @@ struct EncodedFrame {
}; };
EncodedFrame(); EncodedFrame();
virtual ~EncodedFrame(); ~EncodedFrame();
// Convenience accessors to data as an array of uint8 elements. // Convenience accessors to data as an array of uint8 elements.
const uint8* bytes() const { const uint8* bytes() const {
......
...@@ -12,7 +12,6 @@ ...@@ -12,7 +12,6 @@
#include "base/time/time.h" #include "base/time/time.h"
#include "media/cast/cast_config.h" #include "media/cast/cast_config.h"
#include "media/cast/receiver/video_decoder.h" #include "media/cast/receiver/video_decoder.h"
#include "media/cast/sender/sender_encoded_frame.h"
#include "media/cast/sender/vp8_encoder.h" #include "media/cast/sender/vp8_encoder.h"
#include "media/cast/test/utility/default_config.h" #include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/standalone_cast_environment.h" #include "media/cast/test/utility/standalone_cast_environment.h"
...@@ -85,7 +84,7 @@ class VideoDecoderTest : public ::testing::TestWithParam<Codec> { ...@@ -85,7 +84,7 @@ class VideoDecoderTest : public ::testing::TestWithParam<Codec> {
PopulateVideoFrame(video_frame.get(), 0); PopulateVideoFrame(video_frame.get(), 0);
// Encode |frame| into |encoded_frame->data|. // Encode |frame| into |encoded_frame->data|.
scoped_ptr<SenderEncodedFrame> encoded_frame(new SenderEncodedFrame()); scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame());
// Test only supports VP8, currently. // Test only supports VP8, currently.
CHECK_EQ(CODEC_VIDEO_VP8, GetParam()); CHECK_EQ(CODEC_VIDEO_VP8, GetParam());
vp8_encoder_.Encode(video_frame, reference_time, encoded_frame.get()); vp8_encoder_.Encode(video_frame, reference_time, encoded_frame.get());
......
...@@ -200,7 +200,7 @@ class ExternalVideoEncoder::VEAClientImpl ...@@ -200,7 +200,7 @@ class ExternalVideoEncoder::VEAClientImpl
} else if (!in_progress_frame_encodes_.empty()) { } else if (!in_progress_frame_encodes_.empty()) {
const InProgressFrameEncode& request = in_progress_frame_encodes_.front(); const InProgressFrameEncode& request = in_progress_frame_encodes_.front();
scoped_ptr<SenderEncodedFrame> encoded_frame(new SenderEncodedFrame()); scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame());
encoded_frame->dependency = key_frame ? EncodedFrame::KEY : encoded_frame->dependency = key_frame ? EncodedFrame::KEY :
EncodedFrame::DEPENDENT; EncodedFrame::DEPENDENT;
encoded_frame->frame_id = next_frame_id_++; encoded_frame->frame_id = next_frame_id_++;
...@@ -216,8 +216,6 @@ class ExternalVideoEncoder::VEAClientImpl ...@@ -216,8 +216,6 @@ class ExternalVideoEncoder::VEAClientImpl
} }
encoded_frame->data.append( encoded_frame->data.append(
static_cast<const char*>(output_buffer->memory()), payload_size); static_cast<const char*>(output_buffer->memory()), payload_size);
// TODO(miu): Compute and populate the |deadline_utilization| and
// |lossy_utilization| performance metrics in |encoded_frame|.
cast_environment_->PostTask( cast_environment_->PostTask(
CastEnvironment::MAIN, CastEnvironment::MAIN,
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include "base/json/json_writer.h" #include "base/json/json_writer.h"
#include "base/values.h" #include "base/values.h"
#include "media/base/video_frame.h" #include "media/base/video_frame.h"
#include "media/cast/net/cast_transport_config.h"
#ifndef OFFICIAL_BUILD #ifndef OFFICIAL_BUILD
...@@ -29,7 +30,7 @@ void FakeSoftwareVideoEncoder::Initialize() {} ...@@ -29,7 +30,7 @@ void FakeSoftwareVideoEncoder::Initialize() {}
void FakeSoftwareVideoEncoder::Encode( void FakeSoftwareVideoEncoder::Encode(
const scoped_refptr<media::VideoFrame>& video_frame, const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& reference_time, const base::TimeTicks& reference_time,
SenderEncodedFrame* encoded_frame) { EncodedFrame* encoded_frame) {
DCHECK(encoded_frame); DCHECK(encoded_frame);
if (video_frame->visible_rect().size() != last_frame_size_) { if (video_frame->visible_rect().size() != last_frame_size_) {
...@@ -59,14 +60,6 @@ void FakeSoftwareVideoEncoder::Encode( ...@@ -59,14 +60,6 @@ void FakeSoftwareVideoEncoder::Encode(
base::JSONWriter::Write(values, &encoded_frame->data); base::JSONWriter::Write(values, &encoded_frame->data);
encoded_frame->data.resize( encoded_frame->data.resize(
std::max<size_t>(encoded_frame->data.size(), frame_size_), ' '); std::max<size_t>(encoded_frame->data.size(), frame_size_), ' ');
if (encoded_frame->dependency == EncodedFrame::KEY) {
encoded_frame->deadline_utilization = 1.0;
encoded_frame->lossy_utilization = 6.0;
} else {
encoded_frame->deadline_utilization = 0.8;
encoded_frame->lossy_utilization = 0.8;
}
} }
void FakeSoftwareVideoEncoder::UpdateRates(uint32 new_bitrate) { void FakeSoftwareVideoEncoder::UpdateRates(uint32 new_bitrate) {
......
...@@ -21,7 +21,7 @@ class FakeSoftwareVideoEncoder : public SoftwareVideoEncoder { ...@@ -21,7 +21,7 @@ class FakeSoftwareVideoEncoder : public SoftwareVideoEncoder {
void Initialize() final; void Initialize() final;
void Encode(const scoped_refptr<media::VideoFrame>& video_frame, void Encode(const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& reference_time, const base::TimeTicks& reference_time,
SenderEncodedFrame* encoded_frame) final; EncodedFrame* encoded_frame) final;
void UpdateRates(uint32 new_bitrate) final; void UpdateRates(uint32 new_bitrate) final;
void GenerateKeyFrame() final; void GenerateKeyFrame() final;
void LatestFrameIdToReference(uint32 frame_id) final; void LatestFrameIdToReference(uint32 frame_id) final;
......
...@@ -729,7 +729,7 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque, ...@@ -729,7 +729,7 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
// frame. VideoToolbox calls the output callback serially, so this is safe. // frame. VideoToolbox calls the output callback serially, so this is safe.
const uint32 frame_id = ++encoder->last_frame_id_; const uint32 frame_id = ++encoder->last_frame_id_;
scoped_ptr<SenderEncodedFrame> encoded_frame(new SenderEncodedFrame()); scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame());
encoded_frame->frame_id = frame_id; encoded_frame->frame_id = frame_id;
encoded_frame->reference_time = request->reference_time; encoded_frame->reference_time = request->reference_time;
encoded_frame->rtp_timestamp = request->rtp_timestamp; encoded_frame->rtp_timestamp = request->rtp_timestamp;
...@@ -753,9 +753,6 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque, ...@@ -753,9 +753,6 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
if (has_frame_data) if (has_frame_data)
CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe); CopySampleBufferToAnnexBBuffer(sbuf, &encoded_frame->data, keyframe);
// TODO(miu): Compute and populate the |deadline_utilization| and
// |lossy_utilization| performance metrics in |encoded_frame|.
encoder->cast_environment_->PostTask( encoder->cast_environment_->PostTask(
CastEnvironment::MAIN, FROM_HERE, CastEnvironment::MAIN, FROM_HERE,
base::Bind(request->frame_encoded_callback, base::Bind(request->frame_encoded_callback,
......
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/cast/sender/performance_metrics_overlay.h"
#include <algorithm>
#include <string>
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/stringprintf.h"
#include "media/base/video_frame.h"
namespace media {
namespace cast {
namespace {
const int kScale = 4; // Physical pixels per one logical pixel.
const int kCharacterWidth = 3; // Logical pixel width of one character.
const int kCharacterHeight = 5; // Logical pixel height of one character.
const int kCharacterSpacing = 1; // Logical pixels between each character.
const int kLineSpacing = 2; // Logical pixels between each line of characters.
const int kPlane = 0; // Y-plane in YUV formats.
// For each pixel in the |rect| (logical coordinates), either decrease the
// intensity or increase it so that the resulting pixel has a perceivably
// different value than it did before. |p_ul| is a pointer to the pixel at
// coordinate (0,0) in a single-channel 8bpp bitmap. |stride| is the number of
// bytes per row in the output bitmap.
// Perceptibly alters every pixel within |rect| (given in logical coordinates)
// so that rendered overlay strokes remain visible over any background content.
// |p_ul| points at the (0,0) pixel of a single-channel 8bpp bitmap whose rows
// are |stride| bytes apart.
void DivergePixels(const gfx::Rect& rect, uint8* p_ul, int stride) {
  DCHECK(p_ul);
  DCHECK_GT(stride, 0);

  // Tuning derived from experimentation across a wide variety of content,
  // converging on a readable result.  Darker pixels are shifted by a smaller
  // amount because each unit of change is more visually prominent near the
  // dark end of the spectrum.  Per-pixel intensity mapping:
  //
  //   [16,31]  --> [32,63]  (always a difference of +16)
  //   [32,64]  --> 16       (a difference between -16 and -48)
  //   [65,235] --> [17,187] (always a difference of -48)
  const int kDivergeDownThreshold = 32;
  const int kDivergeDownAmount = 48;
  const int kDivergeUpAmount = 32;
  const int kMinIntensity = 16;

  // Convert the logical-coordinate rect to physical pixel bounds.
  const int row_begin = rect.y() * kScale;
  const int row_end = rect.bottom() * kScale;
  const int col_begin = rect.x() * kScale;
  const int col_end = rect.right() * kScale;
  for (int row = row_begin; row < row_end; ++row) {
    uint8* const row_pixels = p_ul + row * stride;
    for (int col = col_begin; col < col_end; ++col) {
      const int value = row_pixels[col];
      row_pixels[col] = static_cast<uint8>(
          value >= kDivergeDownThreshold
              ? std::max(kMinIntensity, value - kDivergeDownAmount)
              : value + kDivergeUpAmount);
    }
  }
}
// Render |line| into |frame| at physical pixel row |top| and aligned to the
// right edge. Only number digits and a smattering of punctuation characters
// will be rendered.
void RenderLineOfText(const std::string& line, int top, VideoFrame* frame) {
  // Compute number of physical pixels wide the rendered |line| would be,
  // including padding.
  const int line_width =
      (((kCharacterWidth + kCharacterSpacing) * static_cast<int>(line.size())) +
           kCharacterSpacing) * kScale;

  // Determine if any characters would render past the left edge of the frame,
  // and compute the index of the first character to be rendered.  Characters
  // before |first_idx| are clipped off entirely rather than partially drawn.
  const int pixels_per_char = (kCharacterWidth + kCharacterSpacing) * kScale;
  const size_t first_idx = (line_width < frame->visible_rect().width()) ? 0u :
      static_cast<size_t>(
          ((line_width - frame->visible_rect().width()) / pixels_per_char) + 1);

  // Compute the pointer to the pixel at the upper-left corner of the first
  // character to be rendered.
  const int stride = frame->stride(kPlane);
  uint8* p_ul =
      // Start at the first pixel in the first row...
      frame->visible_data(kPlane) + (stride * top)
      // ...now move to the right edge of the visible part of the frame...
      + frame->visible_rect().width()
      // ...now move left to where line[0] would be rendered...
      - line_width
      // ...now move right to where line[first_idx] would be rendered.
      + first_idx * pixels_per_char;

  // Render each character.  Each glyph is drawn on a kCharacterWidth x
  // kCharacterHeight logical-pixel grid; the gfx::Rect arguments below are the
  // individual strokes making up each character, and DivergePixels() shifts
  // the underlying pixel intensities so the strokes stand out.
  for (size_t i = first_idx; i < line.size(); ++i, p_ul += pixels_per_char) {
    switch (line[i]) {
      case '0':
        DivergePixels(gfx::Rect(0, 0, 3, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 1, 1, 3), p_ul, stride);
        DivergePixels(gfx::Rect(2, 1, 1, 3), p_ul, stride);
        DivergePixels(gfx::Rect(0, 4, 3, 1), p_ul, stride);
        break;
      case '1':
        DivergePixels(gfx::Rect(1, 0, 1, 5), p_ul, stride);
        break;
      case '2':
        DivergePixels(gfx::Rect(0, 0, 3, 1), p_ul, stride);
        DivergePixels(gfx::Rect(2, 1, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 2, 3, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 3, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 4, 3, 1), p_ul, stride);
        break;
      case '3':
        DivergePixels(gfx::Rect(0, 0, 3, 1), p_ul, stride);
        DivergePixels(gfx::Rect(2, 1, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 2, 3, 1), p_ul, stride);
        DivergePixels(gfx::Rect(2, 3, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 4, 3, 1), p_ul, stride);
        break;
      case '4':
        DivergePixels(gfx::Rect(0, 0, 1, 2), p_ul, stride);
        DivergePixels(gfx::Rect(2, 0, 1, 5), p_ul, stride);
        DivergePixels(gfx::Rect(0, 2, 2, 1), p_ul, stride);
        break;
      case '5':
        DivergePixels(gfx::Rect(0, 0, 3, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 1, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 2, 3, 1), p_ul, stride);
        DivergePixels(gfx::Rect(2, 3, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 4, 3, 1), p_ul, stride);
        break;
      case '6':
        DivergePixels(gfx::Rect(1, 0, 2, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 1, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 2, 3, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 3, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(2, 3, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 4, 3, 1), p_ul, stride);
        break;
      case '7':
        DivergePixels(gfx::Rect(0, 0, 3, 1), p_ul, stride);
        DivergePixels(gfx::Rect(2, 1, 1, 2), p_ul, stride);
        DivergePixels(gfx::Rect(1, 3, 1, 2), p_ul, stride);
        break;
      case '8':
        DivergePixels(gfx::Rect(0, 0, 3, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 1, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(2, 1, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 2, 3, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 3, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(2, 3, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 4, 3, 1), p_ul, stride);
        break;
      case '9':
        DivergePixels(gfx::Rect(0, 0, 3, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 1, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(2, 1, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 2, 3, 1), p_ul, stride);
        DivergePixels(gfx::Rect(2, 3, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 4, 2, 1), p_ul, stride);
        break;
      case 'e':
      case 'E':
        DivergePixels(gfx::Rect(0, 0, 3, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 1, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 2, 2, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 3, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 4, 3, 1), p_ul, stride);
        break;
      case '.':
        DivergePixels(gfx::Rect(1, 4, 1, 1), p_ul, stride);
        break;
      case '+':
        // '+' is '-' plus the two vertical stubs; hence the fall-through.
        DivergePixels(gfx::Rect(1, 1, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(1, 3, 1, 1), p_ul, stride);
        // ...fall through...
      case '-':
        DivergePixels(gfx::Rect(0, 2, 3, 1), p_ul, stride);
        break;
      case 'x':
        DivergePixels(gfx::Rect(0, 1, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(2, 1, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(1, 2, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 3, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(2, 3, 1, 1), p_ul, stride);
        break;
      case ':':
        DivergePixels(gfx::Rect(1, 1, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(1, 3, 1, 1), p_ul, stride);
        break;
      case '%':
        DivergePixels(gfx::Rect(0, 0, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(2, 1, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(1, 2, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(0, 3, 1, 1), p_ul, stride);
        DivergePixels(gfx::Rect(2, 4, 1, 1), p_ul, stride);
        break;
      case ' ':
      default:
        // Spaces and any unsupported characters render as blank cells.
        break;
    }
  }
}
} // namespace
void MaybeRenderPerformanceMetricsOverlay(int target_bitrate,
int frames_ago,
double deadline_utilization,
double lossy_utilization,
VideoFrame* frame) {
if (VideoFrame::PlaneHorizontalBitsPerPixel(frame->format(), kPlane) != 8) {
DLOG(WARNING) << "Cannot render overlay: Plane " << kPlane << " not 8bpp.";
return;
}
// Compute the physical pixel top row for the bottom-most line of text.
const int line_height = (kCharacterHeight + kLineSpacing) * kScale;
int top = frame->visible_rect().height() - line_height;
if (top < 0 || !VLOG_IS_ON(1))
return;
// Line 3: Frame resolution and timestamp.
base::TimeDelta rem = frame->timestamp();
const int minutes = rem.InMinutes();
rem -= base::TimeDelta::FromMinutes(minutes);
const int seconds = static_cast<int>(rem.InSeconds());
rem -= base::TimeDelta::FromSeconds(seconds);
const int hundredth_seconds = static_cast<int>(rem.InMilliseconds() / 10);
RenderLineOfText(base::StringPrintf("%dx%d %d:%02d.%02d",
frame->visible_rect().width(),
frame->visible_rect().height(),
minutes,
seconds,
hundredth_seconds),
top,
frame);
// Move up one line's worth of pixels.
top -= line_height;
if (top < 0 || !VLOG_IS_ON(2))
return;
// Line 2: Capture/frame duration and target bitrate.
int capture_duration_ms = 0;
base::TimeTicks capture_begin_time, capture_end_time;
if (frame->metadata()->GetTimeTicks(VideoFrameMetadata::CAPTURE_BEGIN_TIME,
&capture_begin_time) &&
frame->metadata()->GetTimeTicks(VideoFrameMetadata::CAPTURE_END_TIME,
&capture_end_time)) {
capture_duration_ms = base::saturated_cast<int>(
(capture_end_time - capture_begin_time).InMillisecondsF() + 0.5);
}
int frame_duration_ms = 0;
int frame_duration_ms_frac = 0;
base::TimeDelta frame_duration;
if (frame->metadata()->GetTimeDelta(VideoFrameMetadata::FRAME_DURATION,
&frame_duration)) {
const int decimilliseconds = base::saturated_cast<int>(
frame_duration.InMicroseconds() / 100.0 + 0.5);
frame_duration_ms = decimilliseconds / 10;
frame_duration_ms_frac = decimilliseconds % 10;
}
const int target_kbits = target_bitrate / 1000;
RenderLineOfText(base::StringPrintf("%3.1d %3.1d.%01d %4.1d",
capture_duration_ms,
frame_duration_ms,
frame_duration_ms_frac,
target_kbits),
top,
frame);
// Move up one line's worth of pixels.
top -= line_height;
if (top < 0 || !VLOG_IS_ON(3))
return;
// Line 1: Recent utilization metrics.
const int deadline_pct =
base::saturated_cast<int>(deadline_utilization * 100.0 + 0.5);
const int lossy_pct =
base::saturated_cast<int>(lossy_utilization * 100.0 + 0.5);
RenderLineOfText(base::StringPrintf("%d %3.1d%% %3.1d%%",
frames_ago,
deadline_pct,
lossy_pct),
top,
frame);
}
} // namespace cast
} // namespace media
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_CAST_SENDER_PERFORMANCE_METRICS_OVERLAY_H_
#define MEDIA_CAST_SENDER_PERFORMANCE_METRICS_OVERLAY_H_
// This module provides a display of frame-level performance metrics, rendered
// in the lower-right corner of a VideoFrame. It looks like this:
//
// +----------------------------------------------------------------+
// | @@@@@@@@@@@@@@@@@@@@@@@ |
// | @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ |
// | @@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@@@@ |
// | @@@@@@@@@@@@@ @@@@ |
// | @@@@@@@@@@ @@@@ |
// | @@@@@ @@@ @@@ @@@@ |
// | @@@ @ @@@ @@@@ @@@@ |
// | @@@@ @@@@ @@@@ |
// | @@@@ @@@ @@@ |
// | @@@@ @@ @@@ |
// | @@@@@ @@@ @@@ @@@ |
// | @@@@@ @@@@@ @@@@ @@@@ |
// | @@@@@ @@@@@@@@@@@@@ @@@@ |
// | @@@@@@ @@@@ 1 45% 75% |
// | @@@@@@@@ @@@@@@ 22 16.7 4000 |
// | @@@@@@@@@@@@@@@@ 1280x720 0:15.12 |
// +----------------------------------------------------------------+
//
// Line 1: Reads as, "1 frame ago, the encoder deadline utilization for the
// frame was 45% and the lossy utilization was 75%." Encoder deadline
// utilization is in terms the amount of real-world time it took to encode the
// frame, divided by the maximum amount of time allowed. Lossy utilization is
// the amount of "complexity" in the frame's content versus the target encoded
// byte size, where a value over 100% means the frame's content is too complex
// to encode within the target number of bytes.
//
// Line 2: Reads as, "Capture of this frame took 22 ms. The expected duration
// of this frame is 16.7 ms. The target bitrate for this frame is 4000 kbps."
//
// Line 3: Contains the frame's resolution and media timestamp in
// minutes:seconds.hundredths format.
namespace media {
class VideoFrame;
namespace cast {
// Renders an overlay of frame-level performance metrics in the lower-right
// corner of the |frame|, as described above. The verbose logging level for
// performance_metrics_overlay.cc determines which lines, if any, are rendered: VLOG
// level 1 renders the bottom line only, level 2 renders the bottom and middle
// lines, and level 3 renders all three lines. So, use the
// --vmodule=performance_metrics_overlay=3 command line argument to turn on
// rendering of the entire overlay.
void MaybeRenderPerformanceMetricsOverlay(int target_bitrate,
int frames_ago,
double deadline_utilization,
double lossy_utilization,
VideoFrame* frame);
} // namespace cast
} // namespace media
#endif // MEDIA_CAST_SENDER_PERFORMANCE_METRICS_OVERLAY_H_
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/cast/sender/sender_encoded_frame.h"
namespace media {
namespace cast {
// Both utilization metrics start at -1.0, the sentinel meaning "not computed"
// (see the field documentation in sender_encoded_frame.h).
SenderEncodedFrame::SenderEncodedFrame()
    : deadline_utilization(-1.0), lossy_utilization(-1.0) {}

SenderEncodedFrame::~SenderEncodedFrame() {}
} // namespace cast
} // namespace media
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_CAST_SENDER_SENDER_ENCODED_FRAME_H_
#define MEDIA_CAST_SENDER_SENDER_ENCODED_FRAME_H_
#include "media/cast/net/cast_transport_config.h"
namespace media {
namespace cast {
// Extends EncodedFrame with additional fields used within the sender-side of
// the library.
struct SenderEncodedFrame : public EncodedFrame {
  // Initializes both utilization fields to -1.0 ("not computed"); see the
  // definitions in sender_encoded_frame.cc.
  SenderEncodedFrame();
  ~SenderEncodedFrame() final;

  // The amount of real-world time it took to encode the frame, divided by the
  // maximum amount of time allowed.  Example: For the software VP8 encoder,
  // this would be the elapsed encode time (according to the base::TimeTicks
  // clock) divided by the VideoFrame's duration.
  //
  // Meaningful values are non-negative, with 0.0 [impossibly] representing 0%
  // utilization, 1.0 representing 100% utilization, and values greater than 1.0
  // indicating the encode time took longer than the media duration of the
  // frame.  Negative values indicate the field was not computed.
  double deadline_utilization;

  // The amount of "lossiness" needed to encode the frame within the targeted
  // bandwidth.  More-complex frame content and/or lower target encode bitrates
  // will cause this value to rise.
  //
  // Meaningful values are non-negative, with 0.0 indicating the frame is very
  // simple and/or the target encode bitrate is very large, 1.0 indicating the
  // frame contains very complex content and/or the target encode bitrate is
  // very small, and values greater than 1.0 indicating the encoder cannot
  // encode the frame within the target bitrate (even at its lowest quality
  // setting).  Negative values indicate the field was not computed.
  double lossy_utilization;
};
} // namespace cast
} // namespace media
#endif // MEDIA_CAST_SENDER_SENDER_ENCODED_FRAME_H_
...@@ -156,7 +156,7 @@ void SizeAdaptableVideoEncoderBase::OnEncoderStatusChange( ...@@ -156,7 +156,7 @@ void SizeAdaptableVideoEncoderBase::OnEncoderStatusChange(
void SizeAdaptableVideoEncoderBase::OnEncodedVideoFrame( void SizeAdaptableVideoEncoderBase::OnEncodedVideoFrame(
const FrameEncodedCallback& frame_encoded_callback, const FrameEncodedCallback& frame_encoded_callback,
scoped_ptr<SenderEncodedFrame> encoded_frame) { scoped_ptr<EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
--frames_in_encoder_; --frames_in_encoder_;
DCHECK_GE(frames_in_encoder_, 0); DCHECK_GE(frames_in_encoder_, 0);
......
...@@ -85,7 +85,7 @@ class SizeAdaptableVideoEncoderBase : public VideoEncoder { ...@@ -85,7 +85,7 @@ class SizeAdaptableVideoEncoderBase : public VideoEncoder {
// Called by the |encoder_| with the next EncodedFrame. // Called by the |encoder_| with the next EncodedFrame.
void OnEncodedVideoFrame(const FrameEncodedCallback& frame_encoded_callback, void OnEncodedVideoFrame(const FrameEncodedCallback& frame_encoded_callback,
scoped_ptr<SenderEncodedFrame> encoded_frame); scoped_ptr<EncodedFrame> encoded_frame);
const scoped_refptr<CastEnvironment> cast_environment_; const scoped_refptr<CastEnvironment> cast_environment_;
......
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
#include "base/basictypes.h" #include "base/basictypes.h"
#include "base/memory/ref_counted.h" #include "base/memory/ref_counted.h"
#include "media/cast/sender/sender_encoded_frame.h"
namespace base { namespace base {
class TimeTicks; class TimeTicks;
...@@ -19,6 +18,7 @@ class VideoFrame; ...@@ -19,6 +18,7 @@ class VideoFrame;
namespace media { namespace media {
namespace cast { namespace cast {
struct EncodedFrame;
class SoftwareVideoEncoder { class SoftwareVideoEncoder {
public: public:
...@@ -31,7 +31,7 @@ class SoftwareVideoEncoder { ...@@ -31,7 +31,7 @@ class SoftwareVideoEncoder {
// Encode a raw image (as a part of a video stream). // Encode a raw image (as a part of a video stream).
virtual void Encode(const scoped_refptr<media::VideoFrame>& video_frame, virtual void Encode(const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& reference_time, const base::TimeTicks& reference_time,
SenderEncodedFrame* encoded_frame) = 0; EncodedFrame* encoded_frame) = 0;
// Update the encoder with a new target bit rate. // Update the encoder with a new target bit rate.
virtual void UpdateRates(uint32 new_bitrate) = 0; virtual void UpdateRates(uint32 new_bitrate) = 0;
......
...@@ -12,7 +12,6 @@ ...@@ -12,7 +12,6 @@
#include "media/base/video_frame.h" #include "media/base/video_frame.h"
#include "media/cast/cast_config.h" #include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h" #include "media/cast/cast_environment.h"
#include "media/cast/sender/sender_encoded_frame.h"
#include "media/cast/sender/video_frame_factory.h" #include "media/cast/sender/video_frame_factory.h"
namespace media { namespace media {
...@@ -21,9 +20,7 @@ namespace cast { ...@@ -21,9 +20,7 @@ namespace cast {
// All these functions are called from the main cast thread. // All these functions are called from the main cast thread.
class VideoEncoder { class VideoEncoder {
public: public:
// Callback used to deliver an encoded frame on the Cast MAIN thread. typedef base::Callback<void(scoped_ptr<EncodedFrame>)> FrameEncodedCallback;
using FrameEncodedCallback =
base::Callback<void(scoped_ptr<SenderEncodedFrame>)>;
// Creates a VideoEncoder instance from the given |video_config| and based on // Creates a VideoEncoder instance from the given |video_config| and based on
// the current platform's hardware/library support; or null if no // the current platform's hardware/library support; or null if no
......
...@@ -43,7 +43,7 @@ void EncodeVideoFrameOnEncoderThread( ...@@ -43,7 +43,7 @@ void EncodeVideoFrameOnEncoderThread(
dynamic_config.latest_frame_id_to_reference); dynamic_config.latest_frame_id_to_reference);
encoder->UpdateRates(dynamic_config.bit_rate); encoder->UpdateRates(dynamic_config.bit_rate);
scoped_ptr<SenderEncodedFrame> encoded_frame(new SenderEncodedFrame()); scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame());
encoder->Encode(video_frame, reference_time, encoded_frame.get()); encoder->Encode(video_frame, reference_time, encoded_frame.get());
environment->PostTask( environment->PostTask(
CastEnvironment::MAIN, CastEnvironment::MAIN,
......
...@@ -26,6 +26,9 @@ class VideoEncoderImpl : public VideoEncoder { ...@@ -26,6 +26,9 @@ class VideoEncoderImpl : public VideoEncoder {
int bit_rate; int bit_rate;
}; };
typedef base::Callback<void(scoped_ptr<EncodedFrame>)>
FrameEncodedCallback;
// Returns true if VideoEncoderImpl can be used with the given |video_config|. // Returns true if VideoEncoderImpl can be used with the given |video_config|.
static bool IsSupported(const VideoSenderConfig& video_config); static bool IsSupported(const VideoSenderConfig& video_config);
......
...@@ -219,7 +219,7 @@ class VideoEncoderTest ...@@ -219,7 +219,7 @@ class VideoEncoderTest
uint32 expected_last_referenced_frame_id, uint32 expected_last_referenced_frame_id,
uint32 expected_rtp_timestamp, uint32 expected_rtp_timestamp,
const base::TimeTicks& expected_reference_time, const base::TimeTicks& expected_reference_time,
scoped_ptr<SenderEncodedFrame> encoded_frame) { scoped_ptr<EncodedFrame> encoded_frame) {
EXPECT_TRUE(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); EXPECT_TRUE(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
EXPECT_EQ(expected_frame_id, encoded_frame->frame_id); EXPECT_EQ(expected_frame_id, encoded_frame->frame_id);
...@@ -253,10 +253,6 @@ class VideoEncoderTest ...@@ -253,10 +253,6 @@ class VideoEncoderTest
EXPECT_EQ(expected_last_referenced_frame_id, EXPECT_EQ(expected_last_referenced_frame_id,
encoded_frame->referenced_frame_id); encoded_frame->referenced_frame_id);
EXPECT_FALSE(encoded_frame->data.empty()); EXPECT_FALSE(encoded_frame->data.empty());
ASSERT_TRUE(std::isfinite(encoded_frame->deadline_utilization));
EXPECT_LE(0.0, encoded_frame->deadline_utilization);
ASSERT_TRUE(std::isfinite(encoded_frame->lossy_utilization));
EXPECT_LE(0.0, encoded_frame->lossy_utilization);
} }
++count_frames_delivered_; ++count_frames_delivered_;
......
...@@ -12,7 +12,6 @@ ...@@ -12,7 +12,6 @@
#include "base/trace_event/trace_event.h" #include "base/trace_event/trace_event.h"
#include "media/cast/cast_defines.h" #include "media/cast/cast_defines.h"
#include "media/cast/net/cast_transport_config.h" #include "media/cast/net/cast_transport_config.h"
#include "media/cast/sender/performance_metrics_overlay.h"
#include "media/cast/sender/video_encoder.h" #include "media/cast/sender/video_encoder.h"
namespace media { namespace media {
...@@ -88,8 +87,6 @@ VideoSender::VideoSender( ...@@ -88,8 +87,6 @@ VideoSender::VideoSender(
frames_in_encoder_(0), frames_in_encoder_(0),
last_bitrate_(0), last_bitrate_(0),
playout_delay_change_cb_(playout_delay_change_cb), playout_delay_change_cb_(playout_delay_change_cb),
last_reported_deadline_utilization_(-1.0),
last_reported_lossy_utilization_(-1.0),
weak_factory_(this) { weak_factory_(this) {
video_encoder_ = VideoEncoder::Create( video_encoder_ = VideoEncoder::Create(
cast_environment_, cast_environment_,
...@@ -197,12 +194,6 @@ void VideoSender::InsertRawVideoFrame( ...@@ -197,12 +194,6 @@ void VideoSender::InsertRawVideoFrame(
return; return;
} }
MaybeRenderPerformanceMetricsOverlay(bitrate,
frames_in_encoder_ + 1,
last_reported_deadline_utilization_,
last_reported_lossy_utilization_,
video_frame.get());
if (video_encoder_->EncodeVideoFrame( if (video_encoder_->EncodeVideoFrame(
video_frame, video_frame,
reference_time, reference_time,
...@@ -242,7 +233,7 @@ void VideoSender::OnAck(uint32 frame_id) { ...@@ -242,7 +233,7 @@ void VideoSender::OnAck(uint32 frame_id) {
void VideoSender::OnEncodedVideoFrame( void VideoSender::OnEncodedVideoFrame(
int encoder_bitrate, int encoder_bitrate,
scoped_ptr<SenderEncodedFrame> encoded_frame) { scoped_ptr<EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN)); DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
frames_in_encoder_--; frames_in_encoder_--;
...@@ -251,11 +242,6 @@ void VideoSender::OnEncodedVideoFrame( ...@@ -251,11 +242,6 @@ void VideoSender::OnEncodedVideoFrame(
duration_in_encoder_ = duration_in_encoder_ =
last_enqueued_frame_reference_time_ - encoded_frame->reference_time; last_enqueued_frame_reference_time_ - encoded_frame->reference_time;
last_reported_deadline_utilization_ = encoded_frame->deadline_utilization;
last_reported_lossy_utilization_ = encoded_frame->lossy_utilization;
// TODO(miu): Plumb-in a utilization feedback signal back to the producer of
// the video frames. http://crbug.com/156767
SendEncodedFrame(encoder_bitrate, encoded_frame.Pass()); SendEncodedFrame(encoder_bitrate, encoded_frame.Pass());
} }
......
...@@ -23,7 +23,6 @@ class VideoFrame; ...@@ -23,7 +23,6 @@ class VideoFrame;
namespace cast { namespace cast {
class CastTransportSender; class CastTransportSender;
struct SenderEncodedFrame;
class VideoEncoder; class VideoEncoder;
class VideoFrameFactory; class VideoFrameFactory;
...@@ -68,7 +67,7 @@ class VideoSender : public FrameSender, ...@@ -68,7 +67,7 @@ class VideoSender : public FrameSender,
private: private:
// Called by the |video_encoder_| with the next EncodedFrame to send. // Called by the |video_encoder_| with the next EncodedFrame to send.
void OnEncodedVideoFrame(int encoder_bitrate, void OnEncodedVideoFrame(int encoder_bitrate,
scoped_ptr<SenderEncodedFrame> encoded_frame); scoped_ptr<EncodedFrame> encoded_frame);
// Encodes media::VideoFrame images into EncodedFrames. Per configuration, // Encodes media::VideoFrame images into EncodedFrames. Per configuration,
// this will point to either the internal software-based encoder or a proxy to // this will point to either the internal software-based encoder or a proxy to
...@@ -91,12 +90,6 @@ class VideoSender : public FrameSender, ...@@ -91,12 +90,6 @@ class VideoSender : public FrameSender,
PlayoutDelayChangeCB playout_delay_change_cb_; PlayoutDelayChangeCB playout_delay_change_cb_;
// The video encoder's performance metrics as of the last call to
// OnEncodedVideoFrame(). See header file comments for SenderEncodedFrame for
// an explanation of these values.
double last_reported_deadline_utilization_;
double last_reported_lossy_utilization_;
// NOTE: Weak pointers must be invalidated before all other member variables. // NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<VideoSender> weak_factory_; base::WeakPtrFactory<VideoSender> weak_factory_;
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include "base/logging.h" #include "base/logging.h"
#include "media/base/video_frame.h" #include "media/base/video_frame.h"
#include "media/cast/cast_defines.h" #include "media/cast/cast_defines.h"
#include "media/cast/net/cast_transport_config.h"
#include "third_party/libvpx/source/libvpx/vpx/vp8cx.h" #include "third_party/libvpx/source/libvpx/vpx/vp8cx.h"
namespace media { namespace media {
...@@ -163,15 +164,10 @@ void Vp8Encoder::ConfigureForNewFrameSize(const gfx::Size& frame_size) { ...@@ -163,15 +164,10 @@ void Vp8Encoder::ConfigureForNewFrameSize(const gfx::Size& frame_size) {
void Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame, void Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& reference_time, const base::TimeTicks& reference_time,
SenderEncodedFrame* encoded_frame) { EncodedFrame* encoded_frame) {
DCHECK(thread_checker_.CalledOnValidThread()); DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(encoded_frame); DCHECK(encoded_frame);
// Note: This is used to compute the |deadline_utilization| and so it uses the
// real-world clock instead of the CastEnvironment clock, the latter of which
// might be simulated.
const base::TimeTicks start_time = base::TimeTicks::Now();
// Initialize on-demand. Later, if the video frame size has changed, update // Initialize on-demand. Later, if the video frame size has changed, update
// the encoder configuration. // the encoder configuration.
const gfx::Size frame_size = video_frame->visible_rect().size(); const gfx::Size frame_size = video_frame->visible_rect().size();
...@@ -283,37 +279,8 @@ void Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame, ...@@ -283,37 +279,8 @@ void Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
DCHECK(!encoded_frame->data.empty()) DCHECK(!encoded_frame->data.empty())
<< "BUG: Encoder must provide data since lagged encoding is disabled."; << "BUG: Encoder must provide data since lagged encoding is disabled.";
// Compute deadline utilization as the real-world time elapsed divided by the
// frame duration.
const base::TimeDelta processing_time = base::TimeTicks::Now() - start_time;
encoded_frame->deadline_utilization =
processing_time.InSecondsF() / predicted_frame_duration.InSecondsF();
// Compute lossy utilization. The VP8 encoder took an estimated guess at what
// quantizer value would produce an encoded frame size as close to the target
// as possible. Now that the frame has been encoded and the number of bytes
// is known, the perfect quantizer value (i.e., the one that should have been
// used) can be determined. This perfect quantizer is then normalized and
// used as the lossy utilization.
const double actual_bitrate =
encoded_frame->data.size() * 8.0 / predicted_frame_duration.InSecondsF();
const double target_bitrate = 1000.0 * config_.rc_target_bitrate;
DCHECK_GT(target_bitrate, 0.0);
const double bitrate_utilization = actual_bitrate / target_bitrate;
int quantizer = -1;
CHECK_EQ(vpx_codec_control(&encoder_, VP8E_GET_LAST_QUANTIZER_64, &quantizer),
VPX_CODEC_OK);
const double perfect_quantizer = bitrate_utilization * std::max(0, quantizer);
// Side note: If it was possible for the encoder to encode within the target
// number of bytes, the |perfect_quantizer| will be in the range [0.0,63.0].
// If it was never possible, the value will be greater than 63.0.
encoded_frame->lossy_utilization = perfect_quantizer / 63.0;
DVLOG(2) << "VP8 encoded frame_id " << encoded_frame->frame_id DVLOG(2) << "VP8 encoded frame_id " << encoded_frame->frame_id
<< ", sized: " << encoded_frame->data.size() << ", sized:" << encoded_frame->data.size();
<< ", deadline_utilization: " << encoded_frame->deadline_utilization
<< ", lossy_utilization: " << encoded_frame->lossy_utilization
<< " (quantizer chosen by the encoder was " << quantizer << ')';
if (encoded_frame->dependency == EncodedFrame::KEY) { if (encoded_frame->dependency == EncodedFrame::KEY) {
key_frame_requested_ = false; key_frame_requested_ = false;
......
...@@ -30,7 +30,7 @@ class Vp8Encoder : public SoftwareVideoEncoder { ...@@ -30,7 +30,7 @@ class Vp8Encoder : public SoftwareVideoEncoder {
void Initialize() final; void Initialize() final;
void Encode(const scoped_refptr<media::VideoFrame>& video_frame, void Encode(const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& reference_time, const base::TimeTicks& reference_time,
SenderEncodedFrame* encoded_frame) final; EncodedFrame* encoded_frame) final;
void UpdateRates(uint32 new_bitrate) final; void UpdateRates(uint32 new_bitrate) final;
void GenerateKeyFrame() final; void GenerateKeyFrame() final;
void LatestFrameIdToReference(uint32 frame_id) final; void LatestFrameIdToReference(uint32 frame_id) final;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment