Commit 7f9c1f75 authored by emircan, committed by Commit bot

Refactor VideoTrackRecorder into smaller classes

This CL refactors VideoTrackRecorder by moving each encoder implementation
(VpxEncoder, H264Encoder, VEAEncoder) into its own file. Additionally,
CanEncodeAlphaChannelForTesting() is removed, as it is no longer necessary.

BUG=700433

Review-Url: https://codereview.chromium.org/2793303003
Cr-Commit-Position: refs/heads/master@{#463015}
parent 4706b082
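For illustration only: with the encoders split into their own headers, codec selection over the new classes could look roughly like the sketch below. CreateSoftwareEncoder is a hypothetical helper (not part of this CL) and the CodecId values are assumed from the codecs the recorder supports; the constructor arguments follow the headers added in this change.

// Hypothetical sketch, not part of this CL: selecting one of the split-out
// software encoders by codec. Constructor signatures follow the new headers.
#include "content/public/common/features.h"
#include "content/renderer/media_recorder/video_track_recorder.h"
#include "content/renderer/media_recorder/vpx_encoder.h"
#if BUILDFLAG(RTC_USE_H264)
#include "content/renderer/media_recorder/h264_encoder.h"
#endif

namespace content {

scoped_refptr<VideoTrackRecorder::Encoder> CreateSoftwareEncoder(
    VideoTrackRecorder::CodecId codec,
    const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_cb,
    int32_t bits_per_second) {
  switch (codec) {
    case VideoTrackRecorder::CodecId::VP8:
      return scoped_refptr<VideoTrackRecorder::Encoder>(new VpxEncoder(
          false /* use_vp9 */, on_encoded_video_cb, bits_per_second));
    case VideoTrackRecorder::CodecId::VP9:
      return scoped_refptr<VideoTrackRecorder::Encoder>(new VpxEncoder(
          true /* use_vp9 */, on_encoded_video_cb, bits_per_second));
#if BUILDFLAG(RTC_USE_H264)
    case VideoTrackRecorder::CodecId::H264:
      return scoped_refptr<VideoTrackRecorder::Encoder>(
          new H264Encoder(on_encoded_video_cb, bits_per_second));
#endif
    default:
      return nullptr;
  }
}

}  // namespace content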
@@ -691,8 +691,12 @@ target(link_target_type, "renderer") {
"media_recorder/audio_track_recorder.h", "media_recorder/audio_track_recorder.h",
"media_recorder/media_recorder_handler.cc", "media_recorder/media_recorder_handler.cc",
"media_recorder/media_recorder_handler.h", "media_recorder/media_recorder_handler.h",
"media_recorder/vea_encoder.cc",
"media_recorder/vea_encoder.h",
"media_recorder/video_track_recorder.cc", "media_recorder/video_track_recorder.cc",
"media_recorder/video_track_recorder.h", "media_recorder/video_track_recorder.h",
"media_recorder/vpx_encoder.cc",
"media_recorder/vpx_encoder.h",
"p2p/empty_network_manager.cc", "p2p/empty_network_manager.cc",
"p2p/empty_network_manager.h", "p2p/empty_network_manager.h",
"p2p/filtering_network_manager.cc", "p2p/filtering_network_manager.cc",
...@@ -754,6 +758,10 @@ target(link_target_type, "renderer") { ...@@ -754,6 +758,10 @@ target(link_target_type, "renderer") {
"//third_party/webrtc_overrides:init_webrtc", "//third_party/webrtc_overrides:init_webrtc",
] ]
if (rtc_use_h264) { if (rtc_use_h264) {
sources += [
"media_recorder/h264_encoder.cc",
"media_recorder/h264_encoder.h",
]
deps += [ "//third_party/openh264:encoder" ]
}
} else {
......
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/renderer/media_recorder/h264_encoder.h"
#include <string>
#include "base/bind.h"
#include "base/threading/thread.h"
#include "base/trace_event/trace_event.h"
#include "media/base/video_frame.h"
#include "third_party/openh264/src/codec/api/svc/codec_app_def.h"
#include "third_party/openh264/src/codec/api/svc/codec_def.h"
#include "ui/gfx/geometry/size.h"
using media::VideoFrame;
namespace content {
void H264Encoder::ISVCEncoderDeleter::operator()(ISVCEncoder* codec) {
if (!codec)
return;
const int uninit_ret = codec->Uninitialize();
CHECK_EQ(cmResultSuccess, uninit_ret);
WelsDestroySVCEncoder(codec);
}
// static
void H264Encoder::ShutdownEncoder(std::unique_ptr<base::Thread> encoding_thread,
ScopedISVCEncoderPtr encoder) {
DCHECK(encoding_thread->IsRunning());
encoding_thread->Stop();
// Both |encoding_thread| and |encoder| will be destroyed at end-of-scope.
}
H264Encoder::H264Encoder(
const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback,
int32_t bits_per_second)
: Encoder(on_encoded_video_callback, bits_per_second) {
DCHECK(encoding_thread_->IsRunning());
}
H264Encoder::~H264Encoder() {
main_task_runner_->PostTask(
FROM_HERE,
base::Bind(&H264Encoder::ShutdownEncoder, base::Passed(&encoding_thread_),
base::Passed(&openh264_encoder_)));
}
void H264Encoder::EncodeOnEncodingTaskRunner(
scoped_refptr<VideoFrame> frame,
base::TimeTicks capture_timestamp) {
TRACE_EVENT0("video", "H264Encoder::EncodeOnEncodingTaskRunner");
DCHECK(encoding_task_runner_->BelongsToCurrentThread());
const gfx::Size frame_size = frame->visible_rect().size();
if (!openh264_encoder_ || configured_size_ != frame_size) {
ConfigureEncoderOnEncodingTaskRunner(frame_size);
first_frame_timestamp_ = capture_timestamp;
}
SSourcePicture picture = {};
picture.iPicWidth = frame_size.width();
picture.iPicHeight = frame_size.height();
picture.iColorFormat = EVideoFormatType::videoFormatI420;
picture.uiTimeStamp =
(capture_timestamp - first_frame_timestamp_).InMilliseconds();
picture.iStride[0] = frame->stride(VideoFrame::kYPlane);
picture.iStride[1] = frame->stride(VideoFrame::kUPlane);
picture.iStride[2] = frame->stride(VideoFrame::kVPlane);
picture.pData[0] = frame->visible_data(VideoFrame::kYPlane);
picture.pData[1] = frame->visible_data(VideoFrame::kUPlane);
picture.pData[2] = frame->visible_data(VideoFrame::kVPlane);
SFrameBSInfo info = {};
if (openh264_encoder_->EncodeFrame(&picture, &info) != cmResultSuccess) {
NOTREACHED() << "OpenH264 encoding failed";
return;
}
const media::WebmMuxer::VideoParameters video_params(frame);
frame = nullptr;
std::unique_ptr<std::string> data(new std::string);
const uint8_t kNALStartCode[4] = {0, 0, 0, 1};
for (int layer = 0; layer < info.iLayerNum; ++layer) {
const SLayerBSInfo& layerInfo = info.sLayerInfo[layer];
// Iterate NAL units making up this layer, noting fragments.
size_t layer_len = 0;
for (int nal = 0; nal < layerInfo.iNalCount; ++nal) {
// The following DCHECKs make sure that the header of each NAL unit is OK.
DCHECK_GE(layerInfo.pNalLengthInByte[nal], 4);
DCHECK_EQ(kNALStartCode[0], layerInfo.pBsBuf[layer_len + 0]);
DCHECK_EQ(kNALStartCode[1], layerInfo.pBsBuf[layer_len + 1]);
DCHECK_EQ(kNALStartCode[2], layerInfo.pBsBuf[layer_len + 2]);
DCHECK_EQ(kNALStartCode[3], layerInfo.pBsBuf[layer_len + 3]);
layer_len += layerInfo.pNalLengthInByte[nal];
}
// Copy the entire layer's data (including NAL start codes).
data->append(reinterpret_cast<char*>(layerInfo.pBsBuf), layer_len);
}
const bool is_key_frame = info.eFrameType == videoFrameTypeIDR;
origin_task_runner_->PostTask(
FROM_HERE, base::Bind(OnFrameEncodeCompleted, on_encoded_video_callback_,
video_params, base::Passed(&data), nullptr,
capture_timestamp, is_key_frame));
}
void H264Encoder::ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size) {
DCHECK(encoding_task_runner_->BelongsToCurrentThread());
ISVCEncoder* temp_encoder = nullptr;
if (WelsCreateSVCEncoder(&temp_encoder) != 0) {
NOTREACHED() << "Failed to create OpenH264 encoder";
return;
}
openh264_encoder_.reset(temp_encoder);
configured_size_ = size;
#if DCHECK_IS_ON()
int trace_level = WELS_LOG_INFO;
openh264_encoder_->SetOption(ENCODER_OPTION_TRACE_LEVEL, &trace_level);
#endif
SEncParamExt init_params;
openh264_encoder_->GetDefaultParams(&init_params);
init_params.iUsageType = CAMERA_VIDEO_REAL_TIME;
DCHECK_EQ(AUTO_REF_PIC_COUNT, init_params.iNumRefFrame);
DCHECK(!init_params.bSimulcastAVC);
init_params.uiIntraPeriod = 100; // Same as for VpxEncoder.
init_params.iPicWidth = size.width();
init_params.iPicHeight = size.height();
DCHECK_EQ(RC_QUALITY_MODE, init_params.iRCMode);
DCHECK_EQ(0, init_params.iPaddingFlag);
DCHECK_EQ(UNSPECIFIED_BIT_RATE, init_params.iTargetBitrate);
DCHECK_EQ(UNSPECIFIED_BIT_RATE, init_params.iMaxBitrate);
if (bits_per_second_ > 0) {
init_params.iRCMode = RC_BITRATE_MODE;
init_params.iTargetBitrate = bits_per_second_;
} else {
init_params.iRCMode = RC_OFF_MODE;
}
// Threading model: Set to 1 due to https://crbug.com/583348.
init_params.iMultipleThreadIdc = 1;
// TODO(mcasas): consider reducing complexity if there are few CPUs available.
init_params.iComplexityMode = MEDIUM_COMPLEXITY;
DCHECK(!init_params.bEnableDenoise);
DCHECK(init_params.bEnableFrameSkip);
// The base spatial layer 0 is the only one we use.
DCHECK_EQ(1, init_params.iSpatialLayerNum);
init_params.sSpatialLayers[0].iVideoWidth = init_params.iPicWidth;
init_params.sSpatialLayers[0].iVideoHeight = init_params.iPicHeight;
init_params.sSpatialLayers[0].iSpatialBitrate = init_params.iTargetBitrate;
// When uiSliceMode = SM_FIXEDSLCNUM_SLICE, uiSliceNum = 0 means the slice
// count is chosen automatically based on the number of CPU cores.
// TODO(sprang): Set to 0 when we understand why the rate controller borks
// when uiSliceNum > 1. See https://github.com/cisco/openh264/issues/2591
init_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
init_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
SM_FIXEDSLCNUM_SLICE;
if (openh264_encoder_->InitializeExt(&init_params) != cmResultSuccess) {
NOTREACHED() << "Failed to initialize OpenH264 encoder";
return;
}
int pixel_format = EVideoFormatType::videoFormatI420;
openh264_encoder_->SetOption(ENCODER_OPTION_DATAFORMAT, &pixel_format);
}
} // namespace content
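The layer loop above concatenates OpenH264's output into a single Annex-B buffer in which every NAL unit is preceded by the 4-byte start code that the DCHECKs verify. As a small standalone sketch (not part of the CL), locating those NAL boundaries in the resulting |data| string could be done like this:

// Standalone sketch: find the offsets of Annex-B NAL start codes (00 00 00 01)
// in a buffer like the |data| string assembled above.
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

std::vector<size_t> FindNalStartOffsets(const std::string& annex_b) {
  static const uint8_t kNALStartCode[4] = {0, 0, 0, 1};
  std::vector<size_t> offsets;
  for (size_t i = 0; i + sizeof(kNALStartCode) <= annex_b.size(); ++i) {
    if (std::memcmp(annex_b.data() + i, kNALStartCode,
                    sizeof(kNALStartCode)) == 0) {
      offsets.push_back(i);
    }
  }
  return offsets;
}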
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_RENDERER_MEDIA_RECORDER_H264_ENCODER_H_
#define CONTENT_RENDERER_MEDIA_RECORDER_H264_ENCODER_H_
#include "content/public/common/features.h"
#if !BUILDFLAG(RTC_USE_H264)
#error RTC_USE_H264 should be defined.
#endif // #if BUILDFLAG(RTC_USE_H264)
#include "base/time/time.h"
#include "content/renderer/media_recorder/video_track_recorder.h"
#include "third_party/openh264/src/codec/api/svc/codec_api.h"
namespace content {
// Class encapsulating all openh264 interactions for H264 encoding.
class H264Encoder final : public VideoTrackRecorder::Encoder {
public:
struct ISVCEncoderDeleter {
void operator()(ISVCEncoder* codec);
};
typedef std::unique_ptr<ISVCEncoder, ISVCEncoderDeleter> ScopedISVCEncoderPtr;
static void ShutdownEncoder(std::unique_ptr<base::Thread> encoding_thread,
ScopedISVCEncoderPtr encoder);
H264Encoder(
const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback,
int32_t bits_per_second);
private:
// VideoTrackRecorder::Encoder implementation.
~H264Encoder() override;
void EncodeOnEncodingTaskRunner(scoped_refptr<media::VideoFrame> frame,
base::TimeTicks capture_timestamp) override;
void ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size);
// |openh264_encoder_| is a special scoped pointer to guarantee proper
// destruction, including when reconfiguring due to a parameter change. Only used on
// VideoTrackRecorder::Encoder::encoding_thread_.
gfx::Size configured_size_;
ScopedISVCEncoderPtr openh264_encoder_;
// The capture timestamp of the first received frame. Only used on
// VideoTrackRecorder::Encoder::encoding_thread_.
base::TimeTicks first_frame_timestamp_;
DISALLOW_COPY_AND_ASSIGN(H264Encoder);
};
} // namespace content
#endif // CONTENT_RENDERER_MEDIA_RECORDER_H264_ENCODER_H_
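A minimal usage sketch, assuming a Chromium renderer environment with a message loop (not code from this CL; the function names are illustrative). The callback signature matches OnEncodedVideoCB in video_track_recorder.h, and frames are fed through the base-class entry point StartFrameEncode():

// Minimal usage sketch; assumptions noted above.
#include "base/bind.h"
#include "base/time/time.h"
#include "content/renderer/media_recorder/h264_encoder.h"
#include "media/base/video_frame.h"
#include "ui/gfx/geometry/size.h"

void OnEncoded(const media::WebmMuxer::VideoParameters& params,
               std::unique_ptr<std::string> data,
               std::unique_ptr<std::string> alpha_data,
               base::TimeTicks capture_timestamp,
               bool is_key_frame) {
  // |data| holds the Annex-B H.264 bitstream for one frame; hand it to the
  // muxer. |alpha_data| is always null for H.264.
}

void EncodeOneBlackFrame() {
  scoped_refptr<content::H264Encoder> encoder(new content::H264Encoder(
      base::Bind(&OnEncoded), 1000000 /* bits_per_second */));
  scoped_refptr<media::VideoFrame> frame =
      media::VideoFrame::CreateBlackFrame(gfx::Size(640, 480));
  // Encoding happens asynchronously on the encoder's own thread; the result
  // is returned on the thread that called StartFrameEncode().
  encoder->StartFrameEncode(frame, base::TimeTicks::Now());
}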
This diff is collapsed.
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_RENDERER_MEDIA_RECORDER_VEA_ENCODER_H_
#define CONTENT_RENDERER_MEDIA_RECORDER_VEA_ENCODER_H_
#include <queue>
#include "content/renderer/media_recorder/video_track_recorder.h"
#include "media/video/video_encode_accelerator.h"
#include "ui/gfx/geometry/size.h"
namespace base {
class WaitableEvent;
} // namespace base
namespace media {
class GpuVideoAcceleratorFactories;
} // namespace media
namespace content {
// Class encapsulating VideoEncodeAccelerator interactions.
// This class is created and destroyed on its owner thread. All other methods
// operate on the task runner provided by |gpu_factories_|.
class VEAEncoder final : public VideoTrackRecorder::Encoder,
public media::VideoEncodeAccelerator::Client {
public:
VEAEncoder(
const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback,
const VideoTrackRecorder::OnErrorCB& on_error_callback,
int32_t bits_per_second,
media::VideoCodecProfile codec,
const gfx::Size& size);
// media::VideoEncodeAccelerator::Client implementation.
void RequireBitstreamBuffers(unsigned int input_count,
const gfx::Size& input_coded_size,
size_t output_buffer_size) override;
void BitstreamBufferReady(int32_t bitstream_buffer_id,
size_t payload_size,
bool key_frame,
base::TimeDelta timestamp) override;
void NotifyError(media::VideoEncodeAccelerator::Error error) override;
private:
using VideoFrameAndTimestamp =
std::pair<scoped_refptr<media::VideoFrame>, base::TimeTicks>;
using VideoParamsAndTimestamp =
std::pair<media::WebmMuxer::VideoParameters, base::TimeTicks>;
void UseOutputBitstreamBufferId(int32_t bitstream_buffer_id);
void FrameFinished(std::unique_ptr<base::SharedMemory> shm);
// VideoTrackRecorder::Encoder implementation.
~VEAEncoder() override;
void EncodeOnEncodingTaskRunner(scoped_refptr<media::VideoFrame> frame,
base::TimeTicks capture_timestamp) override;
void ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size);
void DestroyOnEncodingTaskRunner(base::WaitableEvent* async_waiter);
media::GpuVideoAcceleratorFactories* const gpu_factories_;
const media::VideoCodecProfile codec_;
// The underlying VEA to perform encoding on.
std::unique_ptr<media::VideoEncodeAccelerator> video_encoder_;
// Shared memory buffers for output with the VEA.
std::vector<std::unique_ptr<base::SharedMemory>> output_buffers_;
// Shared memory buffers for input to the VEA, used as a FIFO.
std::queue<std::unique_ptr<base::SharedMemory>> input_buffers_;
// Tracks error status.
bool error_notified_;
// Tracks the last frame whose encoding was delayed.
std::unique_ptr<VideoFrameAndTimestamp> last_frame_;
// Size used to initialize encoder.
gfx::Size input_visible_size_;
// Coded size that encoder requests as input.
gfx::Size vea_requested_input_coded_size_;
// Frames and corresponding timestamps in encode as FIFO.
std::queue<VideoParamsAndTimestamp> frames_in_encode_;
// This callback can be exercised on any thread.
const VideoTrackRecorder::OnErrorCB on_error_callback_;
};
} // namespace content
#endif // CONTENT_RENDERER_MEDIA_RECORDER_VEA_ENCODER_H_
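A hedged sketch, not from this CL, of constructing the hardware encoder with an error callback bounced back to the owning thread so that a failure can trigger a software fallback; the function names and the fallback body are assumptions, while the constructor arguments follow the header above:

// Hedged sketch; names other than VEAEncoder are assumptions.
#include "base/bind.h"
#include "content/renderer/media_recorder/vea_encoder.h"
#include "media/base/bind_to_current_loop.h"

void OnHardwareEncoderError() {
  // E.g. destroy the VEAEncoder and fall back to VpxEncoder / H264Encoder.
}

scoped_refptr<content::VEAEncoder> CreateHardwareEncoder(
    const content::VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_cb,
    int32_t bits_per_second,
    const gfx::Size& frame_size) {
  // BindToCurrentLoop keeps the error notification on the owner thread, in
  // line with the "created and destroyed on its owner thread" comment above.
  return scoped_refptr<content::VEAEncoder>(new content::VEAEncoder(
      on_encoded_video_cb,
      media::BindToCurrentLoop(base::Bind(&OnHardwareEncoderError)),
      bits_per_second, media::VP8PROFILE_ANY, frame_size));
}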
@@ -16,8 +16,18 @@
#include "content/public/renderer/media_stream_video_sink.h"
#include "media/muxers/webm_muxer.h"
#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
#include "third_party/skia/include/core/SkBitmap.h"
namespace base {
class Thread;
} // namespace base
namespace cc {
class PaintCanvas;
} // namespace cc
namespace media {
class SkCanvasVideoRenderer;
class VideoFrame;
} // namespace media
@@ -46,7 +56,6 @@ class CONTENT_EXPORT VideoTrackRecorder
#endif
LAST
};
class Encoder;
using OnEncodedVideoCB =
base::Callback<void(const media::WebmMuxer::VideoParameters& params,
@@ -56,6 +65,89 @@ class CONTENT_EXPORT VideoTrackRecorder
bool is_key_frame)>;
using OnErrorCB = base::Closure;
// Base class to describe a generic Encoder, encapsulating all actual encoder
// (re)configurations, encoding and delivery of received frames. This class is
// ref-counted to allow the MediaStreamVideoTrack to hold a reference to it
// (via the callback that MediaStreamVideoSink passes along) and to jump back
// and forth to an internal encoder thread. Moreover, this class:
// - is created on its parent's thread (usually the main Render thread), that
// is, |main_task_runner_|.
// - receives VideoFrames on |origin_task_runner_| and runs OnEncodedVideoCB
// on that thread as well. This task runner is cached on first frame arrival,
// and is supposed to be the render IO thread (but this is not enforced);
// - uses an internal |encoding_task_runner_| for actual encoder interactions,
// namely configuration, encoding (which might take some time) and
// destruction. This task runner can be passed in at creation; if none is
// passed, a new encoding thread is created and used.
class Encoder : public base::RefCountedThreadSafe<Encoder> {
public:
Encoder(const OnEncodedVideoCB& on_encoded_video_callback,
int32_t bits_per_second,
scoped_refptr<base::SingleThreadTaskRunner> encoding_task_runner =
nullptr);
// Start encoding |frame|, returning via |on_encoded_video_callback_|. This
// call will also trigger an encode configuration upon first frame arrival
// or parameter change, and an EncodeOnEncodingTaskRunner() to actually
// encode the frame. If the |frame|'s data is not directly available (e.g.
// it's a texture) then RetrieveFrameOnMainThread() is called, and if even
// that fails, black frames are sent instead.
void StartFrameEncode(const scoped_refptr<media::VideoFrame>& frame,
base::TimeTicks capture_timestamp);
void RetrieveFrameOnMainThread(
const scoped_refptr<media::VideoFrame>& video_frame,
base::TimeTicks capture_timestamp);
static void OnFrameEncodeCompleted(
const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_cb,
const media::WebmMuxer::VideoParameters& params,
std::unique_ptr<std::string> data,
std::unique_ptr<std::string> alpha_data,
base::TimeTicks capture_timestamp,
bool keyframe);
void SetPaused(bool paused);
virtual bool CanEncodeAlphaChannel();
protected:
friend class base::RefCountedThreadSafe<Encoder>;
virtual ~Encoder();
virtual void EncodeOnEncodingTaskRunner(
scoped_refptr<media::VideoFrame> frame,
base::TimeTicks capture_timestamp) = 0;
// Used to shutdown properly on the same thread we were created.
const scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
// Task runner where frames to encode and reply callbacks must happen.
scoped_refptr<base::SingleThreadTaskRunner> origin_task_runner_;
// Task runner where encoding interactions happen.
scoped_refptr<base::SingleThreadTaskRunner> encoding_task_runner_;
// Optional thread for encoding. Owned and active for the encoder's lifetime
// when no external |encoding_task_runner| is passed at construction.
std::unique_ptr<base::Thread> encoding_thread_;
// While |paused_|, frames are not encoded. Used only from
// |encoding_thread_|.
bool paused_;
// This callback should be exercised on IO thread.
const OnEncodedVideoCB on_encoded_video_callback_;
// Target bitrate for video encoding. If 0, a standard bitrate is used.
const int32_t bits_per_second_;
// Used to retrieve incoming opaque VideoFrames (i.e. VideoFrames backed by
// textures). Created on-demand on |main_task_runner_|.
std::unique_ptr<media::SkCanvasVideoRenderer> video_renderer_;
SkBitmap bitmap_;
std::unique_ptr<cc::PaintCanvas> canvas_;
DISALLOW_COPY_AND_ASSIGN(Encoder);
};
static CodecId GetPreferredCodecId();
VideoTrackRecorder(CodecId codec,
@@ -80,9 +172,6 @@ class CONTENT_EXPORT VideoTrackRecorder
base::TimeTicks capture_time);
void OnError();
// TODO(emircan): Remove after refactor, see http://crbug.com/700433.
bool CanEncodeAlphaChannelForTesting();
// Used to check that we are destroyed on the same thread we were created.
base::ThreadChecker main_render_thread_checker_;
......
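The comment above spells out the contract a concrete encoder must follow: encode on |encoding_task_runner_| and post results back through OnFrameEncodeCompleted on |origin_task_runner_|. A conceptual sketch of the smallest possible subclass (NullEncoder and its empty output are inventions for illustration; VpxEncoder, H264Encoder and VEAEncoder in this CL follow the same shape):

// Conceptual sketch only; NullEncoder is not part of this CL.
#include "base/bind.h"
#include "content/renderer/media_recorder/video_track_recorder.h"

namespace content {

class NullEncoder final : public VideoTrackRecorder::Encoder {
 public:
  NullEncoder(const VideoTrackRecorder::OnEncodedVideoCB& callback,
              int32_t bits_per_second)
      : Encoder(callback, bits_per_second) {}

 private:
  ~NullEncoder() override {}

  // Runs on |encoding_task_runner_| and posts an (empty) "encoded" buffer
  // back to |origin_task_runner_|, mirroring the flow described above.
  void EncodeOnEncodingTaskRunner(scoped_refptr<media::VideoFrame> frame,
                                  base::TimeTicks capture_timestamp) override {
    std::unique_ptr<std::string> data(new std::string);
    origin_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(OnFrameEncodeCompleted, on_encoded_video_callback_,
                   media::WebmMuxer::VideoParameters(frame),
                   base::Passed(&data), nullptr, capture_timestamp,
                   true /* keyframe */));
  }

  DISALLOW_COPY_AND_ASSIGN(NullEncoder);
};

}  // namespace content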
@@ -121,7 +121,7 @@ class VideoTrackRecorderTest
void OnError() { video_track_recorder_->OnError(); }
bool CanEncodeAlphaChannel() {
- return video_track_recorder_->CanEncodeAlphaChannelForTesting();
+ return video_track_recorder_->encoder_->CanEncodeAlphaChannel();
}
bool HasEncoderInstance() {
......
This diff is collapsed.
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_RENDERER_MEDIA_RECORDER_VPX_ENCODER_H_
#define CONTENT_RENDERER_MEDIA_RECORDER_VPX_ENCODER_H_
#include <vector>
#include "content/renderer/media_recorder/video_track_recorder.h"
extern "C" {
// VPX_CODEC_DISABLE_COMPAT excludes parts of the libvpx API that provide
// backwards compatibility for legacy applications using the library.
#define VPX_CODEC_DISABLE_COMPAT 1
#include "third_party/libvpx/source/libvpx/vpx/vp8cx.h"
#include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h"
}
namespace content {
// Class encapsulating all libvpx interactions for VP8/VP9 encoding.
class VpxEncoder final : public VideoTrackRecorder::Encoder {
public:
// Originally from remoting/codec/scoped_vpx_codec.h.
// TODO(mcasas): Refactor into a common location.
struct VpxCodecDeleter {
void operator()(vpx_codec_ctx_t* codec);
};
typedef std::unique_ptr<vpx_codec_ctx_t, VpxCodecDeleter>
ScopedVpxCodecCtxPtr;
static void ShutdownEncoder(std::unique_ptr<base::Thread> encoding_thread,
ScopedVpxCodecCtxPtr encoder);
VpxEncoder(
bool use_vp9,
const VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_callback,
int32_t bits_per_second);
private:
// VideoTrackRecorder::Encoder implementation.
~VpxEncoder() override;
void EncodeOnEncodingTaskRunner(scoped_refptr<media::VideoFrame> frame,
base::TimeTicks capture_timestamp) override;
bool CanEncodeAlphaChannel() override;
void ConfigureEncoderOnEncodingTaskRunner(const gfx::Size& size,
vpx_codec_enc_cfg_t* codec_config,
ScopedVpxCodecCtxPtr* encoder);
void DoEncode(vpx_codec_ctx_t* const encoder,
const gfx::Size& frame_size,
uint8_t* const data,
uint8_t* const y_plane,
int y_stride,
uint8_t* const u_plane,
int u_stride,
uint8_t* const v_plane,
int v_stride,
const base::TimeDelta& duration,
bool force_keyframe,
std::string* const output_data,
bool* const keyframe);
// Returns true if |codec_config| has been filled in at least once.
bool IsInitialized(const vpx_codec_enc_cfg_t& codec_config) const;
// Estimate the frame duration from |frame| and |last_frame_timestamp_|.
base::TimeDelta EstimateFrameDuration(
const scoped_refptr<media::VideoFrame>& frame);
// Force usage of VP9 for encoding, instead of VP8 which is the default.
const bool use_vp9_;
// VPx internal objects: configuration and encoder. |encoder_| is a special
// scoped pointer to guarantee proper destruction, particularly when
// reconfiguring due to a parameter change. Only used on
// VideoTrackRecorder::Encoder::encoding_thread_.
vpx_codec_enc_cfg_t codec_config_;
ScopedVpxCodecCtxPtr encoder_;
vpx_codec_enc_cfg_t alpha_codec_config_;
ScopedVpxCodecCtxPtr alpha_encoder_;
std::vector<uint8_t> alpha_dummy_planes_;
size_t v_plane_offset_;
size_t u_plane_stride_;
size_t v_plane_stride_;
bool last_frame_had_alpha_ = false;
// The |media::VideoFrame::timestamp()| of the last encoded frame. This is
// used to predict the duration of the next frame. Only used on
// VideoTrackRecorder::Encoder::encoding_thread_.
base::TimeDelta last_frame_timestamp_;
DISALLOW_COPY_AND_ASSIGN(VpxEncoder);
};
} // namespace content
#endif // CONTENT_RENDERER_MEDIA_RECORDER_VPX_ENCODER_H_
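VpxEncoder is the only encoder that overrides CanEncodeAlphaChannel(), which is why the unittest above now queries the encoder instance directly. An illustrative sketch, assuming a renderer environment with a message loop (RecorderSupportsAlpha is a hypothetical name, not from this CL):

// Illustrative sketch: alpha support is now a property of the concrete
// encoder instance rather than of VideoTrackRecorder.
#include "content/renderer/media_recorder/vpx_encoder.h"

bool RecorderSupportsAlpha(
    const content::VideoTrackRecorder::OnEncodedVideoCB& on_encoded_video_cb,
    int32_t bits_per_second) {
  scoped_refptr<content::VideoTrackRecorder::Encoder> encoder(
      new content::VpxEncoder(true /* use_vp9 */, on_encoded_video_cb,
                              bits_per_second));
  // Returns true here: VpxEncoder overrides this; H264Encoder and VEAEncoder
  // fall back to the base-class default.
  return encoder->CanEncodeAlphaChannel();
}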