Commit 39e7de24 authored by Dan Sanders, committed by Commit Bot

[media] Log to MediaLog from VTVDA.

This moves the logging of bitstream errors from the GPU process log to
the media log.
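
In practice, DLOG/LOG statements in the decode path are replaced by calls to a
new WriteToMediaLog() helper, so the messages surface through the injected
MediaLog (for example in chrome://media-internals) instead of only in the GPU
process log. A representative before/after, taken from the DecodeTask()
changes below:

    // Before: only visible in the GPU process log.
    DLOG(ERROR) << "Unsupported H.264 stream";

    // After: routed through the injected MediaLog.
    WriteToMediaLog(MediaLog::MEDIALOG_ERROR, "Unsupported H.264 stream");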

To support async destruction correctly, VdaVideoDecoder now implements
the MediaLog interface and handles thread hopping.
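
A simplified sketch of that thread hop, assuming the base threading primitives
and MediaLog interface of this era (the hypothetical ThreadHoppingMediaLog
class below mirrors the VdaVideoDecoder::AddEvent / AddEventOnParentThread
pair in the diff; it is an illustration, not the exact Chromium code):

    #include <memory>
    #include <utility>

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/logging.h"
    #include "base/memory/scoped_refptr.h"
    #include "base/memory/weak_ptr.h"
    #include "base/single_thread_task_runner.h"
    #include "media/base/media_log.h"

    // Forwards log events to |target_| on |task_runner_|. The object must be
    // created and destroyed on |task_runner_| so the WeakPtrs bind there.
    class ThreadHoppingMediaLog : public media::MediaLog {
     public:
      ThreadHoppingMediaLog(
          scoped_refptr<base::SingleThreadTaskRunner> task_runner,
          media::MediaLog* target)
          : task_runner_(std::move(task_runner)),
            target_(target),
            weak_factory_(this) {}

      // May be called from any thread.
      void AddEvent(std::unique_ptr<media::MediaLogEvent> event) override {
        if (task_runner_->BelongsToCurrentThread()) {
          AddEventOnTargetThread(std::move(event));
          return;
        }
        // The WeakPtr is invalidated when |this| is destroyed on
        // |task_runner_|, so late events are dropped instead of touching a
        // dead |target_|.
        task_runner_->PostTask(
            FROM_HERE,
            base::BindOnce(&ThreadHoppingMediaLog::AddEventOnTargetThread,
                           weak_factory_.GetWeakPtr(), std::move(event)));
      }

     private:
      void AddEventOnTargetThread(std::unique_ptr<media::MediaLogEvent> event) {
        DCHECK(task_runner_->BelongsToCurrentThread());
        if (target_)
          target_->AddEvent(std::move(event));
      }

      const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
      media::MediaLog* const target_;  // Not owned.
      base::WeakPtrFactory<ThreadHoppingMediaLog> weak_factory_;
    };

VdaVideoDecoder implements the same logic on itself rather than through a
separate proxy object, using the weak pointer it already binds on the parent
thread.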

Also includes some additional cleanups in VTVDA: log messages are improved,
the number of requested picture buffers is increased to match recent
changes in DXVAVDA, and no supported profiles are returned if VideoToolbox
fails to initialize.
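
For reference, the picture buffer change works out as follows (a sketch; it
assumes media::limits::kMaxVideoFrames == 4, which is consistent with the
5 and 8 buffer counts in the updated comment in
vt_video_decode_accelerator_mac.cc):

    // Before: enough to satisfy preroll plus one extra buffer.
    //   const int kNumPictureBuffers = limits::kMaxVideoFrames + 1;  // 4 + 1 = 5
    // After: a larger allocation, matching the DXVA decoder's recent change.
    const int kNumPictureBuffers = limits::kMaxVideoFrames * 2;       // 4 * 2 = 8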

Bug: 522298
Cq-Include-Trybots: luci.chromium.try:android_optional_gpu_tests_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel
Change-Id: I7f13deb9e92fdeeeac60479e17081e72c5f22066
Reviewed-on: https://chromium-review.googlesource.com/1038609
Commit-Queue: Dan Sanders <sandersd@chromium.org>
Reviewed-by: Xiaohan Wang <xhwang@chromium.org>
Cr-Commit-Position: refs/heads/master@{#556164}
parent 264eba9c
......@@ -123,7 +123,8 @@ GpuVideoDecodeAcceleratorFactory::CreateVDA(
VideoDecodeAccelerator::Client* client,
const VideoDecodeAccelerator::Config& config,
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences) {
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log) {
DCHECK(thread_checker_.CalledOnValidThread());
if (gpu_preferences.disable_accelerated_video_decode)
......@@ -135,7 +136,8 @@ GpuVideoDecodeAcceleratorFactory::CreateVDA(
// in GetDecoderCapabilities() above.
using CreateVDAFp = std::unique_ptr<VideoDecodeAccelerator> (
GpuVideoDecodeAcceleratorFactory::*)(const gpu::GpuDriverBugWorkarounds&,
const gpu::GpuPreferences&) const;
const gpu::GpuPreferences&,
MediaLog* media_log) const;
const CreateVDAFp create_vda_fps[] = {
#if defined(OS_WIN)
&GpuVideoDecodeAcceleratorFactory::CreateDXVAVDA,
......@@ -158,7 +160,7 @@ GpuVideoDecodeAcceleratorFactory::CreateVDA(
std::unique_ptr<VideoDecodeAccelerator> vda;
for (const auto& create_vda_function : create_vda_fps) {
vda = (this->*create_vda_function)(workarounds, gpu_preferences);
vda = (this->*create_vda_function)(workarounds, gpu_preferences, media_log);
if (vda && vda->Initialize(config, client))
return vda;
}
......@@ -170,7 +172,8 @@ GpuVideoDecodeAcceleratorFactory::CreateVDA(
std::unique_ptr<VideoDecodeAccelerator>
GpuVideoDecodeAcceleratorFactory::CreateDXVAVDA(
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences) const {
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log) const {
std::unique_ptr<VideoDecodeAccelerator> decoder;
DVLOG(0) << "Initializing DXVA HW decoder for windows.";
decoder.reset(new DXVAVideoDecodeAccelerator(
......@@ -184,7 +187,8 @@ GpuVideoDecodeAcceleratorFactory::CreateDXVAVDA(
std::unique_ptr<VideoDecodeAccelerator>
GpuVideoDecodeAcceleratorFactory::CreateV4L2VDA(
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences) const {
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log) const {
std::unique_ptr<VideoDecodeAccelerator> decoder;
scoped_refptr<V4L2Device> device = V4L2Device::Create();
if (device.get()) {
......@@ -198,7 +202,8 @@ GpuVideoDecodeAcceleratorFactory::CreateV4L2VDA(
std::unique_ptr<VideoDecodeAccelerator>
GpuVideoDecodeAcceleratorFactory::CreateV4L2SVDA(
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences) const {
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log) const {
std::unique_ptr<VideoDecodeAccelerator> decoder;
scoped_refptr<V4L2Device> device = V4L2Device::Create();
if (device.get()) {
......@@ -214,7 +219,8 @@ GpuVideoDecodeAcceleratorFactory::CreateV4L2SVDA(
std::unique_ptr<VideoDecodeAccelerator>
GpuVideoDecodeAcceleratorFactory::CreateVaapiVDA(
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences) const {
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log) const {
std::unique_ptr<VideoDecodeAccelerator> decoder;
decoder.reset(new VaapiVideoDecodeAccelerator(make_context_current_cb_,
bind_image_cb_));
......@@ -226,9 +232,10 @@ GpuVideoDecodeAcceleratorFactory::CreateVaapiVDA(
std::unique_ptr<VideoDecodeAccelerator>
GpuVideoDecodeAcceleratorFactory::CreateVTVDA(
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences) const {
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log) const {
std::unique_ptr<VideoDecodeAccelerator> decoder;
decoder.reset(new VTVideoDecodeAccelerator(bind_image_cb_));
decoder.reset(new VTVideoDecodeAccelerator(bind_image_cb_, media_log));
return decoder;
}
#endif
......@@ -237,7 +244,8 @@ GpuVideoDecodeAcceleratorFactory::CreateVTVDA(
std::unique_ptr<VideoDecodeAccelerator>
GpuVideoDecodeAcceleratorFactory::CreateAndroidVDA(
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences) const {
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log) const {
std::unique_ptr<VideoDecodeAccelerator> decoder;
decoder.reset(new AndroidVideoDecodeAccelerator(
AVDACodecAllocator::GetInstance(base::ThreadTaskRunnerHandle::Get()),
......
......@@ -32,6 +32,8 @@ class ContextGroup;
namespace media {
class MediaLog;
class MEDIA_GPU_EXPORT GpuVideoDecodeAcceleratorFactory {
public:
~GpuVideoDecodeAcceleratorFactory();
......@@ -80,7 +82,8 @@ class MEDIA_GPU_EXPORT GpuVideoDecodeAcceleratorFactory {
VideoDecodeAccelerator::Client* client,
const VideoDecodeAccelerator::Config& config,
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences);
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log = nullptr);
private:
GpuVideoDecodeAcceleratorFactory(
......@@ -93,33 +96,40 @@ class MEDIA_GPU_EXPORT GpuVideoDecodeAcceleratorFactory {
#if defined(OS_WIN)
std::unique_ptr<VideoDecodeAccelerator> CreateD3D11VDA(
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences) const;
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log) const;
std::unique_ptr<VideoDecodeAccelerator> CreateDXVAVDA(
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences) const;
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log) const;
#endif
#if BUILDFLAG(USE_V4L2_CODEC)
std::unique_ptr<VideoDecodeAccelerator> CreateV4L2VDA(
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences) const;
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log) const;
std::unique_ptr<VideoDecodeAccelerator> CreateV4L2SVDA(
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences) const;
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log) const;
#endif
#if BUILDFLAG(USE_VAAPI)
std::unique_ptr<VideoDecodeAccelerator> CreateVaapiVDA(
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences) const;
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log) const;
#endif
#if defined(OS_MACOSX)
std::unique_ptr<VideoDecodeAccelerator> CreateVTVDA(
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences) const;
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log) const;
#endif
#if defined(OS_ANDROID)
std::unique_ptr<VideoDecodeAccelerator> CreateAndroidVDA(
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences) const;
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log) const;
#endif
const GetGLContextCallback get_gl_context_cb_;
......
......@@ -64,6 +64,7 @@ std::unique_ptr<VideoDecodeAccelerator> CreateAndInitializeVda(
const gpu::GpuDriverBugWorkarounds& gpu_workarounds,
scoped_refptr<CommandBufferHelper> command_buffer_helper,
VideoDecodeAccelerator::Client* client,
MediaLog* media_log,
const VideoDecodeAccelerator::Config& config) {
std::unique_ptr<GpuVideoDecodeAcceleratorFactory> factory =
GpuVideoDecodeAcceleratorFactory::Create(
......@@ -75,7 +76,8 @@ std::unique_ptr<VideoDecodeAccelerator> CreateAndInitializeVda(
// Note: GpuVideoDecodeAcceleratorFactory may create and initialize more than
// one VDA. It is therefore important that VDAs do not call client methods
// from Initialize().
return factory->CreateVDA(client, config, gpu_workarounds, gpu_preferences);
return factory->CreateVDA(client, config, gpu_workarounds, gpu_preferences,
media_log);
}
bool IsProfileSupported(
......@@ -304,7 +306,7 @@ void VdaVideoDecoder::InitializeOnGpuThread() {
// Create and initialize the VDA.
vda_ = std::move(create_and_initialize_vda_cb_)
.Run(command_buffer_helper, this, vda_config);
.Run(command_buffer_helper, this, this, vda_config);
if (!vda_) {
parent_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VdaVideoDecoder::InitializeDone,
......@@ -680,6 +682,29 @@ void VdaVideoDecoder::ReusePictureBuffer(int32_t picture_buffer_id) {
vda_->ReusePictureBuffer(picture_buffer_id);
}
void VdaVideoDecoder::AddEvent(std::unique_ptr<MediaLogEvent> event) {
DVLOG(1) << __func__;
if (parent_task_runner_->BelongsToCurrentThread()) {
AddEventOnParentThread(std::move(event));
return;
}
// Hop to the parent thread to be sure we don't call into |media_log_| after
// Destroy() returns.
parent_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VdaVideoDecoder::AddEventOnParentThread,
parent_weak_this_, std::move(event)));
}
void VdaVideoDecoder::AddEventOnParentThread(
std::unique_ptr<MediaLogEvent> event) {
DVLOG(1) << __func__;
DCHECK(parent_task_runner_->BelongsToCurrentThread());
media_log_->AddEvent(std::move(event));
}
void VdaVideoDecoder::EnterErrorState() {
DVLOG(1) << __func__;
DCHECK(parent_task_runner_->BelongsToCurrentThread());
......
......@@ -18,6 +18,7 @@
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
#include "media/base/media_log.h"
#include "media/base/video_decoder.h"
#include "media/gpu/command_buffer_helper.h"
#include "media/gpu/ipc/service/picture_buffer_manager.h"
......@@ -33,12 +34,11 @@ struct GpuPreferences;
namespace media {
class MediaLog;
// Implements the VideoDecoder interface backed by a VideoDecodeAccelerator.
// This class expects to run in the GPU process via MojoVideoDecoder.
class VdaVideoDecoder : public VideoDecoder,
public VideoDecodeAccelerator::Client {
public VideoDecodeAccelerator::Client,
public MediaLog {
public:
using GetStubCB = base::RepeatingCallback<gpu::CommandBufferStub*()>;
using CreatePictureBufferManagerCB =
......@@ -50,6 +50,7 @@ class VdaVideoDecoder : public VideoDecoder,
base::OnceCallback<std::unique_ptr<VideoDecodeAccelerator>(
scoped_refptr<CommandBufferHelper>,
VideoDecodeAccelerator::Client*,
MediaLog*,
const VideoDecodeAccelerator::Config&)>;
using GetVdaCapabilitiesCB =
base::OnceCallback<VideoDecodeAccelerator::Capabilities(
......@@ -109,6 +110,9 @@ class VdaVideoDecoder : public VideoDecoder,
bool CanReadWithoutStalling() const override;
int GetMaxDecodeRequests() const override;
// media::MediaLog implementation.
void AddEvent(std::unique_ptr<MediaLogEvent> event) override;
private:
void Destroy() override;
......@@ -150,6 +154,7 @@ class VdaVideoDecoder : public VideoDecoder,
gfx::Size texture_size,
GLenum texture_target);
void ReusePictureBuffer(int32_t picture_buffer_id);
void AddEventOnParentThread(std::unique_ptr<MediaLogEvent> event);
// Error handling.
void EnterErrorState();
......
......@@ -215,6 +215,7 @@ class VdaVideoDecoderTest : public testing::Test {
std::unique_ptr<VideoDecodeAccelerator> CreateAndInitializeVda(
scoped_refptr<CommandBufferHelper> command_buffer_helper,
VideoDecodeAccelerator::Client* client,
MediaLog* media_log,
const VideoDecodeAccelerator::Config& config) {
DCHECK(owned_vda_);
if (!owned_vda_->Initialize(config, client))
......
......@@ -10,6 +10,7 @@
#include <stddef.h>
#include <algorithm>
#include <iterator>
#include <memory>
#include "base/atomic_sequence_num.h"
......@@ -67,11 +68,11 @@ const VideoCodecProfile kSupportedProfiles[] = {
// Size to use for NALU length headers in AVC format (can be 1, 2, or 4).
const int kNALUHeaderLength = 4;
// We request 5 picture buffers from the client, each of which has a texture ID
// that we can bind decoded frames to. We need enough to satisfy preroll, and
// enough to avoid unnecessary stalling, but no more than that. The resource
// requirements are low, as we don't need the textures to be backed by storage.
const int kNumPictureBuffers = limits::kMaxVideoFrames + 1;
// We request 8 picture buffers from the client, each of which has a texture ID
// that we can bind decoded frames to. We need enough to satisfy preroll and
// to avoid unnecessary stalling. The resource requirements are low, as we don't
// need the textures to be backed by storage.
const int kNumPictureBuffers = limits::kMaxVideoFrames * 2;
// Maximum number of frames to queue for reordering. (Also controls the maximum
// number of in-flight frames, since NotifyEndOfBitstreamBuffer() is called when
......@@ -196,7 +197,7 @@ bool InitializeVideoToolboxInternal() {
const uint8_t pps_normal[] = {0x68, 0xe9, 0x7b, 0xcb};
if (!CreateVideoToolboxSession(sps_normal, arraysize(sps_normal), pps_normal,
arraysize(pps_normal), true)) {
DLOG(WARNING) << "Failed to create hardware VideoToolbox session";
DLOG(WARNING) << "Hardware decoding with VideoToolbox is not supported";
return false;
}
......@@ -208,7 +209,7 @@ bool InitializeVideoToolboxInternal() {
const uint8_t pps_small[] = {0x68, 0xe9, 0x79, 0x72, 0xc0};
if (!CreateVideoToolboxSession(sps_small, arraysize(sps_small), pps_small,
arraysize(pps_small), false)) {
DLOG(WARNING) << "Failed to create software VideoToolbox session";
DLOG(WARNING) << "Software decoding with VideoToolbox is not supported";
return false;
}
......@@ -420,11 +421,15 @@ bool VTVideoDecodeAccelerator::FrameOrder::operator()(
}
VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(
const BindGLImageCallback& bind_image_cb)
const BindGLImageCallback& bind_image_cb,
MediaLog* media_log)
: bind_image_cb_(bind_image_cb),
media_log_(media_log),
gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()),
decoder_thread_("VTDecoderThread"),
weak_this_factory_(this) {
DCHECK(!bind_image_cb_.is_null());
callback_.decompressionOutputCallback = OutputThunk;
callback_.decompressionOutputRefCon = this;
weak_this_ = weak_this_factory_.GetWeakPtr();
......@@ -463,39 +468,36 @@ bool VTVideoDecodeAccelerator::Initialize(const Config& config,
DVLOG(1) << __func__;
DCHECK(gpu_task_runner_->BelongsToCurrentThread());
if (bind_image_cb_.is_null()) {
NOTREACHED() << "GL callbacks are required for this VDA";
// All of these checks should be handled by the caller inspecting
// SupportedProfiles(). PPAPI does not do that, however.
if (config.output_mode != Config::OutputMode::ALLOCATE) {
DVLOG(2) << "Output mode must be ALLOCATE";
return false;
}
if (config.is_encrypted()) {
NOTREACHED() << "Encrypted streams are not supported for this VDA";
DVLOG(2) << "Encrypted streams are not supported";
return false;
}
if (config.output_mode != Config::OutputMode::ALLOCATE) {
NOTREACHED() << "Only ALLOCATE OutputMode is supported by this VDA";
if (std::find(std::begin(kSupportedProfiles), std::end(kSupportedProfiles),
config.profile) == std::end(kSupportedProfiles)) {
DVLOG(2) << "Unsupported profile";
return false;
}
client_ = client;
if (!InitializeVideoToolbox())
if (!InitializeVideoToolbox()) {
DVLOG(2) << "VideoToolbox is unavailable";
return false;
bool profile_supported = false;
for (const auto& supported_profile : kSupportedProfiles) {
if (config.profile == supported_profile) {
profile_supported = true;
break;
}
}
if (!profile_supported)
return false;
client_ = client;
// Spawn a thread to handle parsing and calling VideoToolbox.
if (!decoder_thread_.Start())
if (!decoder_thread_.Start()) {
DLOG(ERROR) << "Failed to start decoder thread";
return false;
}
// Count the session as successfully initialized.
UMA_HISTOGRAM_ENUMERATION("Media.VTVDA.SessionFailureReason",
......@@ -643,12 +645,12 @@ void VTVideoDecodeAccelerator::DecodeTask(scoped_refptr<DecoderBuffer> buffer,
if (result == H264Parser::kEOStream)
break;
if (result == H264Parser::kUnsupportedStream) {
DLOG(ERROR) << "Unsupported H.264 stream";
WriteToMediaLog(MediaLog::MEDIALOG_ERROR, "Unsupported H.264 stream");
NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM);
return;
}
if (result != H264Parser::kOk) {
DLOG(ERROR) << "Failed to parse H.264 stream";
WriteToMediaLog(MediaLog::MEDIALOG_ERROR, "Failed to parse H.264 stream");
NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
return;
}
......@@ -656,12 +658,12 @@ void VTVideoDecodeAccelerator::DecodeTask(scoped_refptr<DecoderBuffer> buffer,
case H264NALU::kSPS:
result = parser_.ParseSPS(&last_sps_id_);
if (result == H264Parser::kUnsupportedStream) {
DLOG(ERROR) << "Unsupported SPS";
WriteToMediaLog(MediaLog::MEDIALOG_ERROR, "Unsupported SPS");
NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM);
return;
}
if (result != H264Parser::kOk) {
DLOG(ERROR) << "Could not parse SPS";
WriteToMediaLog(MediaLog::MEDIALOG_ERROR, "Could not parse SPS");
NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
return;
}
......@@ -676,12 +678,12 @@ void VTVideoDecodeAccelerator::DecodeTask(scoped_refptr<DecoderBuffer> buffer,
case H264NALU::kPPS:
result = parser_.ParsePPS(&last_pps_id_);
if (result == H264Parser::kUnsupportedStream) {
DLOG(ERROR) << "Unsupported PPS";
WriteToMediaLog(MediaLog::MEDIALOG_ERROR, "Unsupported PPS");
NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM);
return;
}
if (result != H264Parser::kOk) {
DLOG(ERROR) << "Could not parse PPS";
WriteToMediaLog(MediaLog::MEDIALOG_ERROR, "Could not parse PPS");
NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
return;
}
......@@ -700,12 +702,14 @@ void VTVideoDecodeAccelerator::DecodeTask(scoped_refptr<DecoderBuffer> buffer,
H264SliceHeader slice_hdr;
result = parser_.ParseSliceHeader(nalu, &slice_hdr);
if (result == H264Parser::kUnsupportedStream) {
DLOG(ERROR) << "Unsupported slice header";
WriteToMediaLog(MediaLog::MEDIALOG_ERROR,
"Unsupported slice header");
NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM);
return;
}
if (result != H264Parser::kOk) {
DLOG(ERROR) << "Could not parse slice header";
WriteToMediaLog(MediaLog::MEDIALOG_ERROR,
"Could not parse slice header");
NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
return;
}
......@@ -714,7 +718,8 @@ void VTVideoDecodeAccelerator::DecodeTask(scoped_refptr<DecoderBuffer> buffer,
DCHECK_EQ(slice_hdr.pic_parameter_set_id, last_pps_id_);
const H264PPS* pps = parser_.GetPPS(slice_hdr.pic_parameter_set_id);
if (!pps) {
DLOG(ERROR) << "Mising PPS referenced by slice";
WriteToMediaLog(MediaLog::MEDIALOG_ERROR,
"Missing PPS referenced by slice");
NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
return;
}
......@@ -722,7 +727,8 @@ void VTVideoDecodeAccelerator::DecodeTask(scoped_refptr<DecoderBuffer> buffer,
DCHECK_EQ(pps->seq_parameter_set_id, last_sps_id_);
const H264SPS* sps = parser_.GetSPS(pps->seq_parameter_set_id);
if (!sps) {
DLOG(ERROR) << "Mising SPS referenced by PPS";
WriteToMediaLog(MediaLog::MEDIALOG_ERROR,
"Missing SPS referenced by PPS");
NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
return;
}
......@@ -739,7 +745,7 @@ void VTVideoDecodeAccelerator::DecodeTask(scoped_refptr<DecoderBuffer> buffer,
base::Optional<int32_t> pic_order_cnt =
poc_.ComputePicOrderCnt(sps, slice_hdr);
if (!pic_order_cnt.has_value()) {
DLOG(ERROR) << "Unable to compute POC";
WriteToMediaLog(MediaLog::MEDIALOG_ERROR, "Unable to compute POC");
NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
return;
}
......@@ -767,8 +773,9 @@ void VTVideoDecodeAccelerator::DecodeTask(scoped_refptr<DecoderBuffer> buffer,
// error messages for those.
if (frame->has_slice && waiting_for_idr_) {
if (!missing_idr_logged_) {
LOG(ERROR) << "Illegal attempt to decode without IDR. "
<< "Discarding decode requests until next IDR.";
WriteToMediaLog(MediaLog::MEDIALOG_ERROR,
("Illegal attempt to decode without IDR. "
"Discarding decode requests until the next IDR."));
missing_idr_logged_ = true;
}
frame->has_slice = false;
......@@ -789,12 +796,14 @@ void VTVideoDecodeAccelerator::DecodeTask(scoped_refptr<DecoderBuffer> buffer,
(configured_sps_ != active_sps_ || configured_spsext_ != active_spsext_ ||
configured_pps_ != active_pps_)) {
if (active_sps_.empty()) {
DLOG(ERROR) << "Invalid configuration; no SPS";
WriteToMediaLog(MediaLog::MEDIALOG_ERROR,
"Invalid configuration (no SPS)");
NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
return;
}
if (active_pps_.empty()) {
DLOG(ERROR) << "Invalid configuration; no PPS";
WriteToMediaLog(MediaLog::MEDIALOG_ERROR,
"Invalid configuration (no PPS)");
NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
return;
}
......@@ -806,7 +815,8 @@ void VTVideoDecodeAccelerator::DecodeTask(scoped_refptr<DecoderBuffer> buffer,
// If the session is not configured by this point, fail.
if (!session_) {
DLOG(ERROR) << "Cannot decode without configuration";
WriteToMediaLog(MediaLog::MEDIALOG_ERROR,
"Cannot decode without configuration");
NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
return;
}
......@@ -1280,6 +1290,21 @@ void VTVideoDecodeAccelerator::NotifyError(
}
}
void VTVideoDecodeAccelerator::WriteToMediaLog(MediaLog::MediaLogLevel level,
const std::string& message) {
if (!gpu_task_runner_->BelongsToCurrentThread()) {
gpu_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VTVideoDecodeAccelerator::WriteToMediaLog,
weak_this_, level, message));
return;
}
DVLOG(1) << __func__ << "(" << level << ") " << message;
if (media_log_)
media_log_->AddLogEvent(level, message);
}
void VTVideoDecodeAccelerator::QueueFlush(TaskType type) {
DCHECK(gpu_task_runner_->BelongsToCurrentThread());
pending_flush_tasks_.push(type);
......@@ -1321,6 +1346,9 @@ void VTVideoDecodeAccelerator::Destroy() {
assigned_bitstream_ids_.clear();
state_ = STATE_DESTROYING;
QueueFlush(TASK_DESTROY);
// Prevent calling into a deleted MediaLog.
media_log_ = nullptr;
}
bool VTVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
......@@ -1333,6 +1361,9 @@ bool VTVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
VideoDecodeAccelerator::SupportedProfiles
VTVideoDecodeAccelerator::GetSupportedProfiles() {
SupportedProfiles profiles;
if (!InitializeVideoToolbox())
return profiles;
for (const auto& supported_profile : kSupportedProfiles) {
SupportedProfile profile;
profile.profile = supported_profile;
......
......@@ -20,6 +20,7 @@
#include "base/threading/thread.h"
#include "base/threading/thread_checker.h"
#include "base/trace_event/memory_dump_provider.h"
#include "media/base/media_log.h"
#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
#include "media/gpu/media_gpu_export.h"
#include "media/video/h264_parser.h"
......@@ -38,7 +39,8 @@ MEDIA_GPU_EXPORT bool InitializeVideoToolbox();
class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
public base::trace_event::MemoryDumpProvider {
public:
explicit VTVideoDecodeAccelerator(const BindGLImageCallback& bind_image_cb);
VTVideoDecodeAccelerator(const BindGLImageCallback& bind_image_cb,
MediaLog* media_log);
~VTVideoDecodeAccelerator() override;
......@@ -173,6 +175,12 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
void NotifyError(Error vda_error_type,
VTVDASessionFailureType session_failure_type);
// Since |media_log_| is invalidated in Destroy() on the GPU thread, the easy
// thing to do is post to the GPU thread to use it. This helper handles the
// thread hop if necessary.
void WriteToMediaLog(MediaLog::MediaLogLevel level,
const std::string& message);
// |type| is the type of task that the flush will complete, one of TASK_FLUSH,
// TASK_RESET, or TASK_DESTROY.
void QueueFlush(TaskType type);
......@@ -193,6 +201,7 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
// GPU thread state.
//
BindGLImageCallback bind_image_cb_;
MediaLog* media_log_;
VideoDecodeAccelerator::Client* client_ = nullptr;
State state_ = STATE_DECODING;
......